xref: /openbmc/qemu/target/arm/tcg/translate-a64.c (revision d2dfe0b5)
/*
 *  AArch64 translation
 *
 *  Copyright (c) 2013 Alexander Graf <agraf@suse.de>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "translate.h"
#include "translate-a64.h"
#include "qemu/log.h"
#include "disas/disas.h"
#include "arm_ldst.h"
#include "semihosting/semihost.h"
#include "cpregs.h"

static TCGv_i64 cpu_X[32];
static TCGv_i64 cpu_pc;

/* Load/store exclusive handling */
static TCGv_i64 cpu_exclusive_high;

static const char *regnames[] = {
    "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
    "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
    "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23",
    "x24", "x25", "x26", "x27", "x28", "x29", "lr", "sp"
};

enum a64_shift_type {
    A64_SHIFT_TYPE_LSL = 0,
    A64_SHIFT_TYPE_LSR = 1,
    A64_SHIFT_TYPE_ASR = 2,
    A64_SHIFT_TYPE_ROR = 3
};

/*
 * Include the generated decoders.
 */

#include "decode-sme-fa64.c.inc"
#include "decode-a64.c.inc"

/* Table based decoder typedefs - used when the relevant bits for decode
 * are too awkwardly scattered across the instruction (eg SIMD).
 */
typedef void AArch64DecodeFn(DisasContext *s, uint32_t insn);

typedef struct AArch64DecodeTable {
    uint32_t pattern;
    uint32_t mask;
    AArch64DecodeFn *disas_fn;
} AArch64DecodeTable;

/* initialize TCG globals.  */
void a64_translate_init(void)
{
    int i;

    cpu_pc = tcg_global_mem_new_i64(cpu_env,
                                    offsetof(CPUARMState, pc),
                                    "pc");
    for (i = 0; i < 32; i++) {
        cpu_X[i] = tcg_global_mem_new_i64(cpu_env,
                                          offsetof(CPUARMState, xregs[i]),
                                          regnames[i]);
    }

    cpu_exclusive_high = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_high), "exclusive_high");
}

/*
 * Return the core mmu_idx to use for A64 "unprivileged load/store" insns
 */
static int get_a64_user_mem_index(DisasContext *s)
{
    /*
     * If AccType_UNPRIV is not used, the insn uses AccType_NORMAL,
     * which is the usual mmu_idx for this cpu state.
     */
    ARMMMUIdx useridx = s->mmu_idx;

    if (s->unpriv) {
        /*
         * We have pre-computed the condition for AccType_UNPRIV.
         * Therefore we should never get here with a mmu_idx for
         * which we do not know the corresponding user mmu_idx.
         */
        switch (useridx) {
        case ARMMMUIdx_E10_1:
        case ARMMMUIdx_E10_1_PAN:
            useridx = ARMMMUIdx_E10_0;
            break;
        case ARMMMUIdx_E20_2:
        case ARMMMUIdx_E20_2_PAN:
            useridx = ARMMMUIdx_E20_0;
            break;
        default:
            g_assert_not_reached();
        }
    }
    return arm_to_core_mmu_idx(useridx);
}

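/*
 * Helpers for maintaining env->btype.  set_btype_raw stores the field
 * directly.  set_btype also records that the cached translation-time
 * value is no longer known to be zero, so that a later reset_btype
 * will emit the clearing store; reset_btype skips the store when
 * BTYPE is already known to be zero.
 */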
static void set_btype_raw(int val)
{
    tcg_gen_st_i32(tcg_constant_i32(val), cpu_env,
                   offsetof(CPUARMState, btype));
}

static void set_btype(DisasContext *s, int val)
{
    /* BTYPE is a 2-bit field, and 0 should be done with reset_btype.  */
    tcg_debug_assert(val >= 1 && val <= 3);
    set_btype_raw(val);
    s->btype = -1;
}

static void reset_btype(DisasContext *s)
{
    if (s->btype != 0) {
        set_btype_raw(0);
        s->btype = 0;
    }
}

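/*
 * Compute dest = pc_curr + diff.  With CF_PCREL the PC is not a
 * translation-time constant, so generate an addition relative to the
 * run-time value in cpu_pc rather than a move of a constant.
 */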
static void gen_pc_plus_diff(DisasContext *s, TCGv_i64 dest, target_long diff)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        tcg_gen_addi_i64(dest, cpu_pc, (s->pc_curr - s->pc_save) + diff);
    } else {
        tcg_gen_movi_i64(dest, s->pc_curr + diff);
    }
}

void gen_a64_update_pc(DisasContext *s, target_long diff)
{
    gen_pc_plus_diff(s, cpu_pc, diff);
    s->pc_save = s->pc_curr + diff;
}

/*
 * Handle Top Byte Ignore (TBI) bits.
 *
 * If address tagging is enabled via the TCR TBI bits:
 *  + for EL2 and EL3 there is only one TBI bit, and if it is set
 *    then the address is zero-extended, clearing bits [63:56]
 *  + for EL0 and EL1, TBI0 controls addresses with bit 55 == 0
 *    and TBI1 controls addresses with bit 55 == 1.
 *    If the appropriate TBI bit is set for the address then
 *    the address is sign-extended from bit 55 into bits [63:56]
 *
 * Here we have concatenated TBI{1,0} into tbi.
 */
static void gen_top_byte_ignore(DisasContext *s, TCGv_i64 dst,
                                TCGv_i64 src, int tbi)
{
    if (tbi == 0) {
        /* Load unmodified address */
        tcg_gen_mov_i64(dst, src);
    } else if (!regime_has_2_ranges(s->mmu_idx)) {
        /* Force tag byte to all zero */
        tcg_gen_extract_i64(dst, src, 0, 56);
    } else {
        /* Sign-extend from bit 55.  */
        tcg_gen_sextract_i64(dst, src, 0, 56);

        switch (tbi) {
        case 1:
            /* tbi0 but !tbi1: only use the extension if positive */
            tcg_gen_and_i64(dst, dst, src);
            break;
        case 2:
            /* !tbi0 but tbi1: only use the extension if negative */
            tcg_gen_or_i64(dst, dst, src);
            break;
        case 3:
            /* tbi0 and tbi1: always use the extension */
            break;
        default:
            g_assert_not_reached();
        }
    }
}

static void gen_a64_set_pc(DisasContext *s, TCGv_i64 src)
{
    /*
     * If address tagging is enabled for instructions via the TCR TBI bits,
     * then loading an address into the PC will clear out any tag.
     */
    gen_top_byte_ignore(s, cpu_pc, src, s->tbii);
    s->pc_save = -1;
}

/*
 * Handle MTE and/or TBI.
 *
 * For TBI, ideally, we would do nothing.  Proper behaviour on fault is
 * for the tag to be present in the FAR_ELx register.  But for user-only
 * mode we do not have a TLB with which to implement this, so we must
 * remove the top byte now.
 *
 * Always return a fresh temporary that we can increment independently
 * of the write-back address.
 */

TCGv_i64 clean_data_tbi(DisasContext *s, TCGv_i64 addr)
{
    TCGv_i64 clean = tcg_temp_new_i64();
#ifdef CONFIG_USER_ONLY
    gen_top_byte_ignore(s, clean, addr, s->tbid);
#else
    tcg_gen_mov_i64(clean, addr);
#endif
    return clean;
}

/* Insert a zero tag into src, with the result at dst. */
static void gen_address_with_allocation_tag0(TCGv_i64 dst, TCGv_i64 src)
{
    tcg_gen_andi_i64(dst, src, ~MAKE_64BIT_MASK(56, 4));
}

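/*
 * Probe a memory access of the given size at run time, so that a
 * fault is raised (and a watchpoint recognized) before we change any
 * other CPU state.
 */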
static void gen_probe_access(DisasContext *s, TCGv_i64 ptr,
                             MMUAccessType acc, int log2_size)
{
    gen_helper_probe_access(cpu_env, ptr,
                            tcg_constant_i32(acc),
                            tcg_constant_i32(get_mem_index(s)),
                            tcg_constant_i32(1 << log2_size));
}

/*
 * For MTE, check a single logical or atomic access.  This probes a single
 * address, the exact one specified.  The size and alignment of the access
 * is not relevant to MTE, per se, but watchpoints do require the size,
 * and we want to recognize those before making any other changes to state.
 */
static TCGv_i64 gen_mte_check1_mmuidx(DisasContext *s, TCGv_i64 addr,
                                      bool is_write, bool tag_checked,
                                      int log2_size, bool is_unpriv,
                                      int core_idx)
{
    if (tag_checked && s->mte_active[is_unpriv]) {
        TCGv_i64 ret;
        int desc = 0;

        desc = FIELD_DP32(desc, MTEDESC, MIDX, core_idx);
        desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
        desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
        desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
        desc = FIELD_DP32(desc, MTEDESC, SIZEM1, (1 << log2_size) - 1);

        ret = tcg_temp_new_i64();
        gen_helper_mte_check(ret, cpu_env, tcg_constant_i32(desc), addr);

        return ret;
    }
    return clean_data_tbi(s, addr);
}

TCGv_i64 gen_mte_check1(DisasContext *s, TCGv_i64 addr, bool is_write,
                        bool tag_checked, int log2_size)
{
    return gen_mte_check1_mmuidx(s, addr, is_write, tag_checked, log2_size,
                                 false, get_mem_index(s));
}

/*
 * For MTE, check multiple logical sequential accesses.
 */
TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write,
                        bool tag_checked, int size)
{
    if (tag_checked && s->mte_active[0]) {
        TCGv_i64 ret;
        int desc = 0;

        desc = FIELD_DP32(desc, MTEDESC, MIDX, get_mem_index(s));
        desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
        desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
        desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
        desc = FIELD_DP32(desc, MTEDESC, SIZEM1, size - 1);

        ret = tcg_temp_new_i64();
        gen_helper_mte_check(ret, cpu_env, tcg_constant_i32(desc), addr);

        return ret;
    }
    return clean_data_tbi(s, addr);
}

typedef struct DisasCompare64 {
    TCGCond cond;
    TCGv_i64 value;
} DisasCompare64;

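/* Produce a 64-bit comparison (condition plus value) for condition code cc. */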
static void a64_test_cc(DisasCompare64 *c64, int cc)
{
    DisasCompare c32;

    arm_test_cc(&c32, cc);

    /*
     * Sign-extend the 32-bit value so that the GE/LT comparisons work
     * properly.  The NE/EQ comparisons are also fine with this choice.
     */
    c64->cond = c32.cond;
    c64->value = tcg_temp_new_i64();
    tcg_gen_ext_i32_i64(c64->value, c32.value);
}

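/* Regenerate the cached hflags for the current EL after they may have changed. */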
static void gen_rebuild_hflags(DisasContext *s)
{
    gen_helper_rebuild_hflags_a64(cpu_env, tcg_constant_i32(s->current_el));
}

static void gen_exception_internal(int excp)
{
    assert(excp_is_internal(excp));
    gen_helper_exception_internal(cpu_env, tcg_constant_i32(excp));
}

static void gen_exception_internal_insn(DisasContext *s, int excp)
{
    gen_a64_update_pc(s, 0);
    gen_exception_internal(excp);
    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_exception_bkpt_insn(DisasContext *s, uint32_t syndrome)
{
    gen_a64_update_pc(s, 0);
    gen_helper_exception_bkpt_insn(cpu_env, tcg_constant_i32(syndrome));
    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_step_complete_exception(DisasContext *s)
{
    /* We just completed a step of an insn. Move from Active-not-pending
     * to Active-pending, and then also take the swstep exception.
     * This corresponds to making the (IMPDEF) choice to prioritize
     * swstep exceptions over asynchronous exceptions taken to an exception
     * level where debug is disabled. This choice has the advantage that
     * we do not need to maintain internal state corresponding to the
     * ISV/EX syndrome bits between completion of the step and generation
     * of the exception, and our syndrome information is always correct.
     */
    gen_ss_advance(s);
    gen_swstep_exception(s, 1, s->is_ldex);
    s->base.is_jmp = DISAS_NORETURN;
}

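/*
 * Return true if we may link directly to the destination TB, i.e. we
 * are not single-stepping and the generic translator rules allow it.
 */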
static inline bool use_goto_tb(DisasContext *s, uint64_t dest)
{
    if (s->ss_active) {
        return false;
    }
    return translator_use_goto_tb(&s->base, dest);
}

static void gen_goto_tb(DisasContext *s, int n, int64_t diff)
{
    if (use_goto_tb(s, s->pc_curr + diff)) {
        /*
         * For pcrel, the pc must always be up-to-date on entry to
         * the linked TB, so that it can use simple additions for all
         * further adjustments.  For !pcrel, the linked TB is compiled
         * to know its full virtual address, so we can delay the
         * update to pc to the unlinked path.  A long chain of links
         * can thus avoid many updates to the PC.
         */
        if (tb_cflags(s->base.tb) & CF_PCREL) {
            gen_a64_update_pc(s, diff);
            tcg_gen_goto_tb(n);
        } else {
            tcg_gen_goto_tb(n);
            gen_a64_update_pc(s, diff);
        }
        tcg_gen_exit_tb(s->base.tb, n);
        s->base.is_jmp = DISAS_NORETURN;
    } else {
        gen_a64_update_pc(s, diff);
        if (s->ss_active) {
            gen_step_complete_exception(s);
        } else {
            tcg_gen_lookup_and_goto_ptr();
            s->base.is_jmp = DISAS_NORETURN;
        }
    }
}

/*
 * Register access functions
 *
 * These functions are used for directly accessing a register where
 * changes to the final register value are likely to be made. If you
 * need to use a register for temporary calculation (e.g. index type
 * operations) use the read_* form.
 *
 * B1.2.1 Register mappings
 *
 * In instruction register encoding 31 can refer to ZR (zero register) or
 * the SP (stack pointer) depending on context. In QEMU's case we map SP
 * to cpu_X[31] and ZR accesses to a temporary which can be discarded.
 * This is the point of the _sp forms.
 */
TCGv_i64 cpu_reg(DisasContext *s, int reg)
{
    if (reg == 31) {
        TCGv_i64 t = tcg_temp_new_i64();
        tcg_gen_movi_i64(t, 0);
        return t;
    } else {
        return cpu_X[reg];
    }
}

/* register access for when 31 == SP */
TCGv_i64 cpu_reg_sp(DisasContext *s, int reg)
{
    return cpu_X[reg];
}

/* Read a cpu register in 32bit/64bit mode. Returns a TCGv_i64
 * representing the register contents. This TCGv is an auto-freed
 * temporary so it need not be explicitly freed, and may be modified.
 */
TCGv_i64 read_cpu_reg(DisasContext *s, int reg, int sf)
{
    TCGv_i64 v = tcg_temp_new_i64();
    if (reg != 31) {
        if (sf) {
            tcg_gen_mov_i64(v, cpu_X[reg]);
        } else {
            tcg_gen_ext32u_i64(v, cpu_X[reg]);
        }
    } else {
        tcg_gen_movi_i64(v, 0);
    }
    return v;
}

TCGv_i64 read_cpu_reg_sp(DisasContext *s, int reg, int sf)
{
    TCGv_i64 v = tcg_temp_new_i64();
    if (sf) {
        tcg_gen_mov_i64(v, cpu_X[reg]);
    } else {
        tcg_gen_ext32u_i64(v, cpu_X[reg]);
    }
    return v;
}

/* Return the offset into CPUARMState of a slice (from
 * the least significant end) of FP register Qn (ie
 * Dn, Sn, Hn or Bn).
 * (Note that this is not the same mapping as for A32; see cpu.h)
 */
static inline int fp_reg_offset(DisasContext *s, int regno, MemOp size)
{
    return vec_reg_offset(s, regno, 0, size);
}

/* Offset of the high half of the 128 bit vector Qn */
static inline int fp_reg_hi_offset(DisasContext *s, int regno)
{
    return vec_reg_offset(s, regno, 1, MO_64);
}

/* Convenience accessors for reading and writing single and double
 * FP registers. Writing clears the upper parts of the associated
 * 128 bit vector register, as required by the architecture.
 * Note that the values returned by the read functions are fresh
 * temporaries which, as with the GP register accessors, need not
 * be explicitly freed.
 */
static TCGv_i64 read_fp_dreg(DisasContext *s, int reg)
{
    TCGv_i64 v = tcg_temp_new_i64();

    tcg_gen_ld_i64(v, cpu_env, fp_reg_offset(s, reg, MO_64));
    return v;
}

static TCGv_i32 read_fp_sreg(DisasContext *s, int reg)
{
    TCGv_i32 v = tcg_temp_new_i32();

    tcg_gen_ld_i32(v, cpu_env, fp_reg_offset(s, reg, MO_32));
    return v;
}

static TCGv_i32 read_fp_hreg(DisasContext *s, int reg)
{
    TCGv_i32 v = tcg_temp_new_i32();

    tcg_gen_ld16u_i32(v, cpu_env, fp_reg_offset(s, reg, MO_16));
    return v;
}

/* Clear the bits above an N-bit vector, for N = (is_q ? 128 : 64).
 * If SVE is not enabled, then there are only 128 bits in the vector.
 */
static void clear_vec_high(DisasContext *s, bool is_q, int rd)
{
    unsigned ofs = fp_reg_offset(s, rd, MO_64);
    unsigned vsz = vec_full_reg_size(s);

    /* Nop move, with side effect of clearing the tail. */
    tcg_gen_gvec_mov(MO_64, ofs, ofs, is_q ? 16 : 8, vsz);
}

void write_fp_dreg(DisasContext *s, int reg, TCGv_i64 v)
{
    unsigned ofs = fp_reg_offset(s, reg, MO_64);

    tcg_gen_st_i64(v, cpu_env, ofs);
    clear_vec_high(s, false, reg);
}

static void write_fp_sreg(DisasContext *s, int reg, TCGv_i32 v)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp, v);
    write_fp_dreg(s, reg, tmp);
}

/* Expand a 2-operand AdvSIMD vector operation using an expander function.  */
static void gen_gvec_fn2(DisasContext *s, bool is_q, int rd, int rn,
                         GVecGen2Fn *gvec_fn, int vece)
{
    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
            is_q ? 16 : 8, vec_full_reg_size(s));
}

/* Expand a 2-operand + immediate AdvSIMD vector operation using
 * an expander function.
 */
static void gen_gvec_fn2i(DisasContext *s, bool is_q, int rd, int rn,
                          int64_t imm, GVecGen2iFn *gvec_fn, int vece)
{
    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
            imm, is_q ? 16 : 8, vec_full_reg_size(s));
}

/* Expand a 3-operand AdvSIMD vector operation using an expander function.  */
static void gen_gvec_fn3(DisasContext *s, bool is_q, int rd, int rn, int rm,
                         GVecGen3Fn *gvec_fn, int vece)
{
    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
            vec_full_reg_offset(s, rm), is_q ? 16 : 8, vec_full_reg_size(s));
}

/* Expand a 4-operand AdvSIMD vector operation using an expander function.  */
static void gen_gvec_fn4(DisasContext *s, bool is_q, int rd, int rn, int rm,
                         int rx, GVecGen4Fn *gvec_fn, int vece)
{
    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
            vec_full_reg_offset(s, rm), vec_full_reg_offset(s, rx),
            is_q ? 16 : 8, vec_full_reg_size(s));
}

/* Expand a 2-operand operation using an out-of-line helper.  */
static void gen_gvec_op2_ool(DisasContext *s, bool is_q, int rd,
                             int rn, int data, gen_helper_gvec_2 *fn)
{
    tcg_gen_gvec_2_ool(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
}

/* Expand a 3-operand operation using an out-of-line helper.  */
static void gen_gvec_op3_ool(DisasContext *s, bool is_q, int rd,
                             int rn, int rm, int data, gen_helper_gvec_3 *fn)
{
    tcg_gen_gvec_3_ool(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm),
                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
}

/* Expand a 3-operand + fpstatus pointer + simd data value operation using
 * an out-of-line helper.
 */
static void gen_gvec_op3_fpst(DisasContext *s, bool is_q, int rd, int rn,
                              int rm, bool is_fp16, int data,
                              gen_helper_gvec_3_ptr *fn)
{
    TCGv_ptr fpst = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR);
    tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm), fpst,
                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
}

/* Expand a 3-operand + qc + operation using an out-of-line helper.  */
static void gen_gvec_op3_qc(DisasContext *s, bool is_q, int rd, int rn,
                            int rm, gen_helper_gvec_3_ptr *fn)
{
    TCGv_ptr qc_ptr = tcg_temp_new_ptr();

    tcg_gen_addi_ptr(qc_ptr, cpu_env, offsetof(CPUARMState, vfp.qc));
    tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm), qc_ptr,
                       is_q ? 16 : 8, vec_full_reg_size(s), 0, fn);
}

/* Expand a 4-operand operation using an out-of-line helper.  */
static void gen_gvec_op4_ool(DisasContext *s, bool is_q, int rd, int rn,
                             int rm, int ra, int data, gen_helper_gvec_4 *fn)
{
    tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm),
                       vec_full_reg_offset(s, ra),
                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
}

/*
 * Expand a 4-operand + fpstatus pointer + simd data value operation using
 * an out-of-line helper.
 */
static void gen_gvec_op4_fpst(DisasContext *s, bool is_q, int rd, int rn,
                              int rm, int ra, bool is_fp16, int data,
                              gen_helper_gvec_4_ptr *fn)
{
    TCGv_ptr fpst = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR);
    tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm),
                       vec_full_reg_offset(s, ra), fpst,
                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
}

/* Set ZF and NF based on a 64 bit result. This is, alas, fiddlier
 * than the 32 bit equivalent.
 */
static inline void gen_set_NZ64(TCGv_i64 result)
{
    tcg_gen_extr_i64_i32(cpu_ZF, cpu_NF, result);
    tcg_gen_or_i32(cpu_ZF, cpu_ZF, cpu_NF);
}

/* Set NZCV as for a logical operation: NZ as per result, CV cleared. */
static inline void gen_logic_CC(int sf, TCGv_i64 result)
{
    if (sf) {
        gen_set_NZ64(result);
    } else {
        tcg_gen_extrl_i64_i32(cpu_ZF, result);
        tcg_gen_mov_i32(cpu_NF, cpu_ZF);
    }
    tcg_gen_movi_i32(cpu_CF, 0);
    tcg_gen_movi_i32(cpu_VF, 0);
}

/* dest = T0 + T1; compute C, N, V and Z flags */
static void gen_add64_CC(TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    TCGv_i64 result, flag, tmp;
    result = tcg_temp_new_i64();
    flag = tcg_temp_new_i64();
    tmp = tcg_temp_new_i64();

    tcg_gen_movi_i64(tmp, 0);
    tcg_gen_add2_i64(result, flag, t0, tmp, t1, tmp);

    tcg_gen_extrl_i64_i32(cpu_CF, flag);

    gen_set_NZ64(result);

    tcg_gen_xor_i64(flag, result, t0);
    tcg_gen_xor_i64(tmp, t0, t1);
    tcg_gen_andc_i64(flag, flag, tmp);
    tcg_gen_extrh_i64_i32(cpu_VF, flag);

    tcg_gen_mov_i64(dest, result);
}

static void gen_add32_CC(TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    TCGv_i32 t0_32 = tcg_temp_new_i32();
    TCGv_i32 t1_32 = tcg_temp_new_i32();
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_movi_i32(tmp, 0);
    tcg_gen_extrl_i64_i32(t0_32, t0);
    tcg_gen_extrl_i64_i32(t1_32, t1);
    tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, t1_32, tmp);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
    tcg_gen_xor_i32(tmp, t0_32, t1_32);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_gen_extu_i32_i64(dest, cpu_NF);
}

static void gen_add_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        gen_add64_CC(dest, t0, t1);
    } else {
        gen_add32_CC(dest, t0, t1);
    }
}

/* dest = T0 - T1; compute C, N, V and Z flags */
static void gen_sub64_CC(TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    /* 64 bit arithmetic */
    TCGv_i64 result, flag, tmp;

    result = tcg_temp_new_i64();
    flag = tcg_temp_new_i64();
    tcg_gen_sub_i64(result, t0, t1);

    gen_set_NZ64(result);

    tcg_gen_setcond_i64(TCG_COND_GEU, flag, t0, t1);
    tcg_gen_extrl_i64_i32(cpu_CF, flag);

    tcg_gen_xor_i64(flag, result, t0);
    tmp = tcg_temp_new_i64();
    tcg_gen_xor_i64(tmp, t0, t1);
    tcg_gen_and_i64(flag, flag, tmp);
    tcg_gen_extrh_i64_i32(cpu_VF, flag);
    tcg_gen_mov_i64(dest, result);
}

static void gen_sub32_CC(TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    /* 32 bit arithmetic */
    TCGv_i32 t0_32 = tcg_temp_new_i32();
    TCGv_i32 t1_32 = tcg_temp_new_i32();
    TCGv_i32 tmp;

    tcg_gen_extrl_i64_i32(t0_32, t0);
    tcg_gen_extrl_i64_i32(t1_32, t1);
    tcg_gen_sub_i32(cpu_NF, t0_32, t1_32);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0_32, t1_32);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
    tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0_32, t1_32);
    tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
    tcg_gen_extu_i32_i64(dest, cpu_NF);
}

static void gen_sub_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        gen_sub64_CC(dest, t0, t1);
    } else {
        gen_sub32_CC(dest, t0, t1);
    }
}

/* dest = T0 + T1 + CF; do not compute flags. */
static void gen_adc(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    TCGv_i64 flag = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(flag, cpu_CF);
    tcg_gen_add_i64(dest, t0, t1);
    tcg_gen_add_i64(dest, dest, flag);

    if (!sf) {
        tcg_gen_ext32u_i64(dest, dest);
    }
}

/* dest = T0 + T1 + CF; compute C, N, V and Z flags. */
static void gen_adc_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        TCGv_i64 result = tcg_temp_new_i64();
        TCGv_i64 cf_64 = tcg_temp_new_i64();
        TCGv_i64 vf_64 = tcg_temp_new_i64();
        TCGv_i64 tmp = tcg_temp_new_i64();
        TCGv_i64 zero = tcg_constant_i64(0);

        tcg_gen_extu_i32_i64(cf_64, cpu_CF);
        tcg_gen_add2_i64(result, cf_64, t0, zero, cf_64, zero);
        tcg_gen_add2_i64(result, cf_64, result, cf_64, t1, zero);
        tcg_gen_extrl_i64_i32(cpu_CF, cf_64);
        gen_set_NZ64(result);

        tcg_gen_xor_i64(vf_64, result, t0);
        tcg_gen_xor_i64(tmp, t0, t1);
        tcg_gen_andc_i64(vf_64, vf_64, tmp);
        tcg_gen_extrh_i64_i32(cpu_VF, vf_64);

        tcg_gen_mov_i64(dest, result);
    } else {
        TCGv_i32 t0_32 = tcg_temp_new_i32();
        TCGv_i32 t1_32 = tcg_temp_new_i32();
        TCGv_i32 tmp = tcg_temp_new_i32();
        TCGv_i32 zero = tcg_constant_i32(0);

        tcg_gen_extrl_i64_i32(t0_32, t0);
        tcg_gen_extrl_i64_i32(t1_32, t1);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, zero, cpu_CF, zero);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1_32, zero);

        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
        tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
        tcg_gen_xor_i32(tmp, t0_32, t1_32);
        tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
        tcg_gen_extu_i32_i64(dest, cpu_NF);
    }
}

/*
 * Load/Store generators
 */

/*
 * Store from GPR register to memory.
 */
static void do_gpr_st_memidx(DisasContext *s, TCGv_i64 source,
                             TCGv_i64 tcg_addr, MemOp memop, int memidx,
                             bool iss_valid,
                             unsigned int iss_srt,
                             bool iss_sf, bool iss_ar)
{
    memop = finalize_memop(s, memop);
    tcg_gen_qemu_st_i64(source, tcg_addr, memidx, memop);

    if (iss_valid) {
        uint32_t syn;

        syn = syn_data_abort_with_iss(0,
                                      (memop & MO_SIZE),
                                      false,
                                      iss_srt,
                                      iss_sf,
                                      iss_ar,
                                      0, 0, 0, 0, 0, false);
        disas_set_insn_syndrome(s, syn);
    }
}

static void do_gpr_st(DisasContext *s, TCGv_i64 source,
                      TCGv_i64 tcg_addr, MemOp memop,
                      bool iss_valid,
                      unsigned int iss_srt,
                      bool iss_sf, bool iss_ar)
{
    do_gpr_st_memidx(s, source, tcg_addr, memop, get_mem_index(s),
                     iss_valid, iss_srt, iss_sf, iss_ar);
}

/*
 * Load from memory to GPR register
 */
static void do_gpr_ld_memidx(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr,
                             MemOp memop, bool extend, int memidx,
                             bool iss_valid, unsigned int iss_srt,
                             bool iss_sf, bool iss_ar)
{
    memop = finalize_memop(s, memop);
    tcg_gen_qemu_ld_i64(dest, tcg_addr, memidx, memop);

    if (extend && (memop & MO_SIGN)) {
        g_assert((memop & MO_SIZE) <= MO_32);
        tcg_gen_ext32u_i64(dest, dest);
    }

    if (iss_valid) {
        uint32_t syn;

        syn = syn_data_abort_with_iss(0,
                                      (memop & MO_SIZE),
                                      (memop & MO_SIGN) != 0,
                                      iss_srt,
                                      iss_sf,
                                      iss_ar,
                                      0, 0, 0, 0, 0, false);
        disas_set_insn_syndrome(s, syn);
    }
}

static void do_gpr_ld(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr,
                      MemOp memop, bool extend,
                      bool iss_valid, unsigned int iss_srt,
                      bool iss_sf, bool iss_ar)
{
    do_gpr_ld_memidx(s, dest, tcg_addr, memop, extend, get_mem_index(s),
                     iss_valid, iss_srt, iss_sf, iss_ar);
}

/*
 * Store from FP register to memory
 */
static void do_fp_st(DisasContext *s, int srcidx, TCGv_i64 tcg_addr, int size)
{
    /* This writes the bottom N bits of a 128 bit wide vector to memory */
    TCGv_i64 tmplo = tcg_temp_new_i64();
    MemOp mop;

    tcg_gen_ld_i64(tmplo, cpu_env, fp_reg_offset(s, srcidx, MO_64));

    if (size < 4) {
        mop = finalize_memop(s, size);
        tcg_gen_qemu_st_i64(tmplo, tcg_addr, get_mem_index(s), mop);
    } else {
        bool be = s->be_data == MO_BE;
        TCGv_i64 tcg_hiaddr = tcg_temp_new_i64();
        TCGv_i64 tmphi = tcg_temp_new_i64();

        tcg_gen_ld_i64(tmphi, cpu_env, fp_reg_hi_offset(s, srcidx));

        mop = s->be_data | MO_UQ;
        tcg_gen_qemu_st_i64(be ? tmphi : tmplo, tcg_addr, get_mem_index(s),
                            mop | (s->align_mem ? MO_ALIGN_16 : 0));
        tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
        tcg_gen_qemu_st_i64(be ? tmplo : tmphi, tcg_hiaddr,
                            get_mem_index(s), mop);
    }
}

/*
 * Load from memory to FP register
 */
static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, int size)
{
    /* This always zero-extends and writes to a full 128 bit wide vector */
    TCGv_i64 tmplo = tcg_temp_new_i64();
    TCGv_i64 tmphi = NULL;
    MemOp mop;

    if (size < 4) {
        mop = finalize_memop(s, size);
        tcg_gen_qemu_ld_i64(tmplo, tcg_addr, get_mem_index(s), mop);
    } else {
        bool be = s->be_data == MO_BE;
        TCGv_i64 tcg_hiaddr;

        tmphi = tcg_temp_new_i64();
        tcg_hiaddr = tcg_temp_new_i64();

        mop = s->be_data | MO_UQ;
        tcg_gen_qemu_ld_i64(be ? tmphi : tmplo, tcg_addr, get_mem_index(s),
                            mop | (s->align_mem ? MO_ALIGN_16 : 0));
        tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
        tcg_gen_qemu_ld_i64(be ? tmplo : tmphi, tcg_hiaddr,
                            get_mem_index(s), mop);
    }

    tcg_gen_st_i64(tmplo, cpu_env, fp_reg_offset(s, destidx, MO_64));

    if (tmphi) {
        tcg_gen_st_i64(tmphi, cpu_env, fp_reg_hi_offset(s, destidx));
    }
    clear_vec_high(s, tmphi != NULL, destidx);
}

/*
 * Vector load/store helpers.
 *
 * The principal difference between this and a FP load is that we don't
 * zero extend as we are filling a partial chunk of the vector register.
 * These functions don't support 128 bit loads/stores, which would be
 * normal load/store operations.
 *
 * The _i32 versions are useful when operating on 32 bit quantities
 * (eg for floating point single or using Neon helper functions).
 */

/* Get value of an element within a vector register */
static void read_vec_element(DisasContext *s, TCGv_i64 tcg_dest, int srcidx,
                             int element, MemOp memop)
{
    int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
    switch ((unsigned)memop) {
    case MO_8:
        tcg_gen_ld8u_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_ld16u_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_32:
        tcg_gen_ld32u_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_8|MO_SIGN:
        tcg_gen_ld8s_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16|MO_SIGN:
        tcg_gen_ld16s_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_32|MO_SIGN:
        tcg_gen_ld32s_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_64:
    case MO_64|MO_SIGN:
        tcg_gen_ld_i64(tcg_dest, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}

static void read_vec_element_i32(DisasContext *s, TCGv_i32 tcg_dest, int srcidx,
                                 int element, MemOp memop)
{
    int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_ld8u_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_ld16u_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_8|MO_SIGN:
        tcg_gen_ld8s_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16|MO_SIGN:
        tcg_gen_ld16s_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_32:
    case MO_32|MO_SIGN:
        tcg_gen_ld_i32(tcg_dest, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}

/* Set value of an element within a vector register */
static void write_vec_element(DisasContext *s, TCGv_i64 tcg_src, int destidx,
                              int element, MemOp memop)
{
    int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_st8_i64(tcg_src, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_st16_i64(tcg_src, cpu_env, vect_off);
        break;
    case MO_32:
        tcg_gen_st32_i64(tcg_src, cpu_env, vect_off);
        break;
    case MO_64:
        tcg_gen_st_i64(tcg_src, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}

static void write_vec_element_i32(DisasContext *s, TCGv_i32 tcg_src,
                                  int destidx, int element, MemOp memop)
{
    int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_st8_i32(tcg_src, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_st16_i32(tcg_src, cpu_env, vect_off);
        break;
    case MO_32:
        tcg_gen_st_i32(tcg_src, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}

/* Store from vector register to memory */
static void do_vec_st(DisasContext *s, int srcidx, int element,
                      TCGv_i64 tcg_addr, MemOp mop)
{
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();

    read_vec_element(s, tcg_tmp, srcidx, element, mop & MO_SIZE);
    tcg_gen_qemu_st_i64(tcg_tmp, tcg_addr, get_mem_index(s), mop);
}

/* Load from memory to vector register */
static void do_vec_ld(DisasContext *s, int destidx, int element,
                      TCGv_i64 tcg_addr, MemOp mop)
{
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();

    tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr, get_mem_index(s), mop);
    write_vec_element(s, tcg_tmp, destidx, element, mop & MO_SIZE);
}

/* Check that FP/Neon access is enabled. If it is, return
 * true. If not, emit code to generate an appropriate exception,
 * and return false; the caller should not emit any code for
 * the instruction. Note that this check must happen after all
 * unallocated-encoding checks (otherwise the syndrome information
 * for the resulting exception will be incorrect).
 */
static bool fp_access_check_only(DisasContext *s)
{
    if (s->fp_excp_el) {
        assert(!s->fp_access_checked);
        s->fp_access_checked = true;

        gen_exception_insn_el(s, 0, EXCP_UDEF,
                              syn_fp_access_trap(1, 0xe, false, 0),
                              s->fp_excp_el);
        return false;
    }
    s->fp_access_checked = true;
    return true;
}

static bool fp_access_check(DisasContext *s)
{
    if (!fp_access_check_only(s)) {
        return false;
    }
    if (s->sme_trap_nonstreaming && s->is_nonstreaming) {
        gen_exception_insn(s, 0, EXCP_UDEF,
                           syn_smetrap(SME_ET_Streaming, false));
        return false;
    }
    return true;
}

/*
 * Check that SVE access is enabled.  If it is, return true.
 * If not, emit code to generate an appropriate exception and return false.
 * This function corresponds to CheckSVEEnabled().
 */
bool sve_access_check(DisasContext *s)
{
    if (s->pstate_sm || !dc_isar_feature(aa64_sve, s)) {
        assert(dc_isar_feature(aa64_sme, s));
        if (!sme_sm_enabled_check(s)) {
            goto fail_exit;
        }
    } else if (s->sve_excp_el) {
        gen_exception_insn_el(s, 0, EXCP_UDEF,
                              syn_sve_access_trap(), s->sve_excp_el);
        goto fail_exit;
    }
    s->sve_access_checked = true;
    return fp_access_check(s);

 fail_exit:
    /* Assert that we only raise one exception per instruction. */
    assert(!s->sve_access_checked);
    s->sve_access_checked = true;
    return false;
}

/*
 * Check that SME access is enabled, raise an exception if not.
 * Note that this function corresponds to CheckSMEAccess and is
 * only used directly for cpregs.
 */
static bool sme_access_check(DisasContext *s)
{
    if (s->sme_excp_el) {
        gen_exception_insn_el(s, 0, EXCP_UDEF,
                              syn_smetrap(SME_ET_AccessTrap, false),
                              s->sme_excp_el);
        return false;
    }
    return true;
}

/* This function corresponds to CheckSMEEnabled. */
bool sme_enabled_check(DisasContext *s)
{
    /*
     * Note that unlike sve_excp_el, we have not constrained sme_excp_el
     * to be zero when fp_excp_el has priority.  This is because we need
     * sme_excp_el by itself for cpregs access checks.
     */
    if (!s->fp_excp_el || s->sme_excp_el < s->fp_excp_el) {
        s->fp_access_checked = true;
        return sme_access_check(s);
    }
    return fp_access_check_only(s);
}

/* Common subroutine for CheckSMEAnd*Enabled. */
bool sme_enabled_check_with_svcr(DisasContext *s, unsigned req)
{
    if (!sme_enabled_check(s)) {
        return false;
    }
    if (FIELD_EX64(req, SVCR, SM) && !s->pstate_sm) {
        gen_exception_insn(s, 0, EXCP_UDEF,
                           syn_smetrap(SME_ET_NotStreaming, false));
        return false;
    }
    if (FIELD_EX64(req, SVCR, ZA) && !s->pstate_za) {
        gen_exception_insn(s, 0, EXCP_UDEF,
                           syn_smetrap(SME_ET_InactiveZA, false));
        return false;
    }
    return true;
}

/*
 * This utility function is for doing register extension with an
 * optional shift. You will likely want to pass a temporary for the
 * destination register. See DecodeRegExtend() in the ARM ARM.
 */
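/*
 * For example (values illustrative only): option = 0b010 (UXTW) with
 * shift = 2 zero-extends the low 32 bits of tcg_in and shifts the
 * result left by two, as for "ADD Xd, Xn, Wm, UXTW #2".
 */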
static void ext_and_shift_reg(TCGv_i64 tcg_out, TCGv_i64 tcg_in,
                              int option, unsigned int shift)
{
    int extsize = extract32(option, 0, 2);
    bool is_signed = extract32(option, 2, 1);

    if (is_signed) {
        switch (extsize) {
        case 0:
            tcg_gen_ext8s_i64(tcg_out, tcg_in);
            break;
        case 1:
            tcg_gen_ext16s_i64(tcg_out, tcg_in);
            break;
        case 2:
            tcg_gen_ext32s_i64(tcg_out, tcg_in);
            break;
        case 3:
            tcg_gen_mov_i64(tcg_out, tcg_in);
            break;
        }
    } else {
        switch (extsize) {
        case 0:
            tcg_gen_ext8u_i64(tcg_out, tcg_in);
            break;
        case 1:
            tcg_gen_ext16u_i64(tcg_out, tcg_in);
            break;
        case 2:
            tcg_gen_ext32u_i64(tcg_out, tcg_in);
            break;
        case 3:
            tcg_gen_mov_i64(tcg_out, tcg_in);
            break;
        }
    }

    if (shift) {
        tcg_gen_shli_i64(tcg_out, tcg_out, shift);
    }
}

static inline void gen_check_sp_alignment(DisasContext *s)
{
    /* The AArch64 architecture mandates that (if enabled via PSTATE
     * or SCTLR bits) there is a check that SP is 16-aligned on every
     * SP-relative load or store (with an exception generated if it is not).
     * In line with general QEMU practice regarding misaligned accesses,
     * we omit these checks for the sake of guest program performance.
     * This function is provided as a hook so we can more easily add these
     * checks in future (possibly as a "favour catching guest program bugs
     * over speed" user selectable option).
     */
}

/*
 * This provides a simple table-based lookup decoder. It is intended
 * to be used when the relevant bits for decode are too awkwardly
 * placed and switch/if based logic would be confusing and deeply
 * nested. Since it's a linear search through the table, tables should
 * be kept small.
 *
 * It returns the first handler where insn & mask == pattern, or
 * NULL if there is no match.
 * The table is terminated by an empty mask (i.e. 0).
 */
static inline AArch64DecodeFn *lookup_disas_fn(const AArch64DecodeTable *table,
                                               uint32_t insn)
{
    const AArch64DecodeTable *tptr = table;

    while (tptr->mask) {
        if ((insn & tptr->mask) == tptr->pattern) {
            return tptr->disas_fn;
        }
        tptr++;
    }
    return NULL;
}

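/*
 * A minimal sketch of how such a table is used; the pattern/mask
 * values and the disas_example_group handler are hypothetical:
 *
 *     static const AArch64DecodeTable example_table[] = {
 *         { 0x0e000400, 0x9f200400, disas_example_group },
 *         { 0x00000000, 0x00000000, NULL }
 *     };
 *
 *     AArch64DecodeFn *fn = lookup_disas_fn(example_table, insn);
 *     if (fn) {
 *         fn(s, insn);
 *     }
 */
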
/*
 * The instruction disassembly implemented here matches
 * the instruction encoding classifications in chapter C4
 * of the ARM Architecture Reference Manual (DDI0487B_a);
 * classification names and decode diagrams here should generally
 * match up with those in the manual.
 */

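/* B: unconditional PC-relative branch. */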
static bool trans_B(DisasContext *s, arg_i *a)
{
    reset_btype(s);
    gen_goto_tb(s, 0, a->imm);
    return true;
}

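/* BL: branch with link; X30 receives the address of the next insn. */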
static bool trans_BL(DisasContext *s, arg_i *a)
{
    gen_pc_plus_diff(s, cpu_reg(s, 30), curr_insn_len(s));
    reset_btype(s);
    gen_goto_tb(s, 0, a->imm);
    return true;
}

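/* CBZ, CBNZ: compare and branch on (non-)zero. */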
static bool trans_CBZ(DisasContext *s, arg_cbz *a)
{
    DisasLabel match;
    TCGv_i64 tcg_cmp;

    tcg_cmp = read_cpu_reg(s, a->rt, a->sf);
    reset_btype(s);

    match = gen_disas_label(s);
    tcg_gen_brcondi_i64(a->nz ? TCG_COND_NE : TCG_COND_EQ,
                        tcg_cmp, 0, match.label);
    gen_goto_tb(s, 0, 4);
    set_disas_label(s, match);
    gen_goto_tb(s, 1, a->imm);
    return true;
}

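/* TBZ, TBNZ: test a single bit and branch. */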
static bool trans_TBZ(DisasContext *s, arg_tbz *a)
{
    DisasLabel match;
    TCGv_i64 tcg_cmp;

    tcg_cmp = tcg_temp_new_i64();
    tcg_gen_andi_i64(tcg_cmp, cpu_reg(s, a->rt), 1ULL << a->bitpos);

    reset_btype(s);

    match = gen_disas_label(s);
    tcg_gen_brcondi_i64(a->nz ? TCG_COND_NE : TCG_COND_EQ,
                        tcg_cmp, 0, match.label);
    gen_goto_tb(s, 0, 4);
    set_disas_label(s, match);
    gen_goto_tb(s, 1, a->imm);
    return true;
}

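/* B.cond: conditional branch. */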
static bool trans_B_cond(DisasContext *s, arg_B_cond *a)
{
    reset_btype(s);
    if (a->cond < 0x0e) {
        /* genuinely conditional branches */
        DisasLabel match = gen_disas_label(s);
        arm_gen_test_cc(a->cond, match.label);
        gen_goto_tb(s, 0, 4);
        set_disas_label(s, match);
        gen_goto_tb(s, 1, a->imm);
    } else {
        /* 0xe and 0xf are both "always" conditions */
        gen_goto_tb(s, 0, a->imm);
    }
    return true;
}

static void set_btype_for_br(DisasContext *s, int rn)
{
    if (dc_isar_feature(aa64_bti, s)) {
        /* BR to {x16,x17} or !guard -> 1, else 3.  */
        set_btype(s, rn == 16 || rn == 17 || !s->guarded_page ? 1 : 3);
    }
}

static void set_btype_for_blr(DisasContext *s)
{
    if (dc_isar_feature(aa64_bti, s)) {
        /* BLR sets BTYPE to 2, regardless of source guarded page.  */
        set_btype(s, 2);
    }
}

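/* BR: branch to register. */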
static bool trans_BR(DisasContext *s, arg_r *a)
{
    gen_a64_set_pc(s, cpu_reg(s, a->rn));
    set_btype_for_br(s, a->rn);
    s->base.is_jmp = DISAS_JUMP;
    return true;
}

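/* BLR: branch with link to register. */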
static bool trans_BLR(DisasContext *s, arg_r *a)
{
    TCGv_i64 dst = cpu_reg(s, a->rn);
    TCGv_i64 lr = cpu_reg(s, 30);
    if (dst == lr) {
        TCGv_i64 tmp = tcg_temp_new_i64();
        tcg_gen_mov_i64(tmp, dst);
        dst = tmp;
    }
    gen_pc_plus_diff(s, lr, curr_insn_len(s));
    gen_a64_set_pc(s, dst);
    set_btype_for_blr(s);
    s->base.is_jmp = DISAS_JUMP;
    return true;
}

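/* RET: return from subroutine, branching to the given register. */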
1421 static bool trans_RET(DisasContext *s, arg_r *a)
1422 {
1423     gen_a64_set_pc(s, cpu_reg(s, a->rn));
1424     s->base.is_jmp = DISAS_JUMP;
1425     return true;
1426 }
1427 
1428 static TCGv_i64 auth_branch_target(DisasContext *s, TCGv_i64 dst,
1429                                    TCGv_i64 modifier, bool use_key_a)
1430 {
1431     TCGv_i64 truedst;
1432     /*
1433      * Return the branch target for a BRAA/RETA/etc, which is either
1434      * just the destination dst, or that value with the pauth check
1435      * done and the code removed from the high bits.
1436      */
1437     if (!s->pauth_active) {
1438         return dst;
1439     }
1440 
1441     truedst = tcg_temp_new_i64();
1442     if (use_key_a) {
1443         gen_helper_autia(truedst, cpu_env, dst, modifier);
1444     } else {
1445         gen_helper_autib(truedst, cpu_env, dst, modifier);
1446     }
1447     return truedst;
1448 }
1449 
1450 static bool trans_BRAZ(DisasContext *s, arg_braz *a)
1451 {
1452     TCGv_i64 dst;
1453 
1454     if (!dc_isar_feature(aa64_pauth, s)) {
1455         return false;
1456     }
1457 
1458     dst = auth_branch_target(s, cpu_reg(s, a->rn), tcg_constant_i64(0), !a->m);
1459     gen_a64_set_pc(s, dst);
1460     set_btype_for_br(s, a->rn);
1461     s->base.is_jmp = DISAS_JUMP;
1462     return true;
1463 }
1464 
1465 static bool trans_BLRAZ(DisasContext *s, arg_braz *a)
1466 {
1467     TCGv_i64 dst, lr;
1468 
1469     if (!dc_isar_feature(aa64_pauth, s)) {
1470         return false;
1471     }
1472 
1473     dst = auth_branch_target(s, cpu_reg(s, a->rn), tcg_constant_i64(0), !a->m);
1474     lr = cpu_reg(s, 30);
1475     if (dst == lr) {
1476         TCGv_i64 tmp = tcg_temp_new_i64();
1477         tcg_gen_mov_i64(tmp, dst);
1478         dst = tmp;
1479     }
1480     gen_pc_plus_diff(s, lr, curr_insn_len(s));
1481     gen_a64_set_pc(s, dst);
1482     set_btype_for_blr(s);
1483     s->base.is_jmp = DISAS_JUMP;
1484     return true;
1485 }
1486 
1487 static bool trans_RETA(DisasContext *s, arg_reta *a)
1488 {
1489     TCGv_i64 dst;
1490 
1491     dst = auth_branch_target(s, cpu_reg(s, 30), cpu_X[31], !a->m);
1492     gen_a64_set_pc(s, dst);
1493     s->base.is_jmp = DISAS_JUMP;
1494     return true;
1495 }
1496 
1497 static bool trans_BRA(DisasContext *s, arg_bra *a)
1498 {
1499     TCGv_i64 dst;
1500 
1501     if (!dc_isar_feature(aa64_pauth, s)) {
1502         return false;
1503     }
1504     dst = auth_branch_target(s, cpu_reg(s,a->rn), cpu_reg_sp(s, a->rm), !a->m);
1505     gen_a64_set_pc(s, dst);
1506     set_btype_for_br(s, a->rn);
1507     s->base.is_jmp = DISAS_JUMP;
1508     return true;
1509 }
1510 
1511 static bool trans_BLRA(DisasContext *s, arg_bra *a)
1512 {
1513     TCGv_i64 dst, lr;
1514 
1515     if (!dc_isar_feature(aa64_pauth, s)) {
1516         return false;
1517     }
1518     dst = auth_branch_target(s, cpu_reg(s, a->rn), cpu_reg_sp(s, a->rm), !a->m);
1519     lr = cpu_reg(s, 30);
1520     if (dst == lr) {
1521         TCGv_i64 tmp = tcg_temp_new_i64();
1522         tcg_gen_mov_i64(tmp, dst);
1523         dst = tmp;
1524     }
1525     gen_pc_plus_diff(s, lr, curr_insn_len(s));
1526     gen_a64_set_pc(s, dst);
1527     set_btype_for_blr(s);
1528     s->base.is_jmp = DISAS_JUMP;
1529     return true;
1530 }
1531 
1532 static bool trans_ERET(DisasContext *s, arg_ERET *a)
1533 {
1534     TCGv_i64 dst;
1535 
1536     if (s->current_el == 0) {
1537         return false;
1538     }
1539     if (s->fgt_eret) {
1540         gen_exception_insn_el(s, 0, EXCP_UDEF, 0, 2);
1541         return true;
1542     }
1543     dst = tcg_temp_new_i64();
1544     tcg_gen_ld_i64(dst, cpu_env,
1545                    offsetof(CPUARMState, elr_el[s->current_el]));
1546 
1547     translator_io_start(&s->base);
1548 
1549     gen_helper_exception_return(cpu_env, dst);
1550     /* Must exit loop to check un-masked IRQs */
1551     s->base.is_jmp = DISAS_EXIT;
1552     return true;
1553 }
1554 
1555 static bool trans_ERETA(DisasContext *s, arg_reta *a)
1556 {
1557     TCGv_i64 dst;
1558 
1559     if (!dc_isar_feature(aa64_pauth, s)) {
1560         return false;
1561     }
1562     if (s->current_el == 0) {
1563         return false;
1564     }
1565     /* The FGT trap takes precedence over an auth trap. */
1566     if (s->fgt_eret) {
1567         gen_exception_insn_el(s, 0, EXCP_UDEF, a->m ? 3 : 2, 2);
1568         return true;
1569     }
1570     dst = tcg_temp_new_i64();
1571     tcg_gen_ld_i64(dst, cpu_env,
1572                    offsetof(CPUARMState, elr_el[s->current_el]));
1573 
1574     dst = auth_branch_target(s, dst, cpu_X[31], !a->m);
1575 
1576     translator_io_start(&s->base);
1577 
1578     gen_helper_exception_return(cpu_env, dst);
1579     /* Must exit loop to check un-masked IRQs */
1580     s->base.is_jmp = DISAS_EXIT;
1581     return true;
1582 }
1583 
1584 /* HINT instruction group, including various allocated HINTs */
1585 static void handle_hint(DisasContext *s, uint32_t insn,
1586                         unsigned int op1, unsigned int op2, unsigned int crm)
1587 {
1588     unsigned int selector = crm << 3 | op2;
1589 
1590     if (op1 != 3) {
1591         unallocated_encoding(s);
1592         return;
1593     }
1594 
1595     switch (selector) {
1596     case 0b00000: /* NOP */
1597         break;
1598     case 0b00011: /* WFI */
1599         s->base.is_jmp = DISAS_WFI;
1600         break;
1601     case 0b00001: /* YIELD */
1602         /* When running in MTTCG we don't generate jumps to the yield and
1603          * WFE helpers as it won't affect the scheduling of other vCPUs.
1604          * If we wanted to more completely model WFE/SEV so we don't busy
1605          * spin unnecessarily we would need to do something more involved.
1606          */
1607         if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
1608             s->base.is_jmp = DISAS_YIELD;
1609         }
1610         break;
1611     case 0b00010: /* WFE */
1612         if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
1613             s->base.is_jmp = DISAS_WFE;
1614         }
1615         break;
1616     case 0b00100: /* SEV */
1617     case 0b00101: /* SEVL */
1618     case 0b00110: /* DGH */
1619         /* we treat all as NOP at least for now */
1620         break;
1621     case 0b00111: /* XPACLRI */
1622         if (s->pauth_active) {
1623             gen_helper_xpaci(cpu_X[30], cpu_env, cpu_X[30]);
1624         }
1625         break;
1626     case 0b01000: /* PACIA1716 */
1627         if (s->pauth_active) {
1628             gen_helper_pacia(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
1629         }
1630         break;
1631     case 0b01010: /* PACIB1716 */
1632         if (s->pauth_active) {
1633             gen_helper_pacib(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
1634         }
1635         break;
1636     case 0b01100: /* AUTIA1716 */
1637         if (s->pauth_active) {
1638             gen_helper_autia(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
1639         }
1640         break;
1641     case 0b01110: /* AUTIB1716 */
1642         if (s->pauth_active) {
1643             gen_helper_autib(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
1644         }
1645         break;
1646     case 0b10000: /* ESB */
1647         /* Without RAS, we must implement this as a NOP. */
1648         if (dc_isar_feature(aa64_ras, s)) {
1649             /*
1650              * QEMU does not have a source of physical SErrors,
1651              * so we are only concerned with virtual SErrors.
1652              * The pseudocode in the ARM for this case is
1653              *   if PSTATE.EL IN {EL0, EL1} && EL2Enabled() then
1654              *      AArch64.vESBOperation();
1655              * Most of the condition can be evaluated at translation time.
1656              * Test for EL2 present, and defer test for SEL2 to runtime.
1657              */
1658             if (s->current_el <= 1 && arm_dc_feature(s, ARM_FEATURE_EL2)) {
1659                 gen_helper_vesb(cpu_env);
1660             }
1661         }
1662         break;
1663     case 0b11000: /* PACIAZ */
1664         if (s->pauth_active) {
1665             gen_helper_pacia(cpu_X[30], cpu_env, cpu_X[30],
1666                              tcg_constant_i64(0));
1667         }
1668         break;
1669     case 0b11001: /* PACIASP */
1670         if (s->pauth_active) {
1671             gen_helper_pacia(cpu_X[30], cpu_env, cpu_X[30], cpu_X[31]);
1672         }
1673         break;
1674     case 0b11010: /* PACIBZ */
1675         if (s->pauth_active) {
1676             gen_helper_pacib(cpu_X[30], cpu_env, cpu_X[30],
1677                              tcg_constant_i64(0));
1678         }
1679         break;
1680     case 0b11011: /* PACIBSP */
1681         if (s->pauth_active) {
1682             gen_helper_pacib(cpu_X[30], cpu_env, cpu_X[30], cpu_X[31]);
1683         }
1684         break;
1685     case 0b11100: /* AUTIAZ */
1686         if (s->pauth_active) {
1687             gen_helper_autia(cpu_X[30], cpu_env, cpu_X[30],
1688                              tcg_constant_i64(0));
1689         }
1690         break;
1691     case 0b11101: /* AUTIASP */
1692         if (s->pauth_active) {
1693             gen_helper_autia(cpu_X[30], cpu_env, cpu_X[30], cpu_X[31]);
1694         }
1695         break;
1696     case 0b11110: /* AUTIBZ */
1697         if (s->pauth_active) {
1698             gen_helper_autib(cpu_X[30], cpu_env, cpu_X[30],
1699                              tcg_constant_i64(0));
1700         }
1701         break;
1702     case 0b11111: /* AUTIBSP */
1703         if (s->pauth_active) {
1704             gen_helper_autib(cpu_X[30], cpu_env, cpu_X[30], cpu_X[31]);
1705         }
1706         break;
1707     default:
1708         /* default specified as NOP equivalent */
1709         break;
1710     }
1711 }
1712 
1713 static void gen_clrex(DisasContext *s, uint32_t insn)
1714 {
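     /*
      * An exclusive_addr of -1 marks the monitor as open: it cannot
      * match the aligned address recorded by a load-exclusive, so a
      * subsequent store-exclusive will take the fail path.
      */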
1715     tcg_gen_movi_i64(cpu_exclusive_addr, -1);
1716 }
1717 
1718 /* CLREX, DSB, DMB, ISB */
1719 static void handle_sync(DisasContext *s, uint32_t insn,
1720                         unsigned int op1, unsigned int op2, unsigned int crm)
1721 {
1722     TCGBar bar;
1723 
1724     if (op1 != 3) {
1725         unallocated_encoding(s);
1726         return;
1727     }
1728 
1729     switch (op2) {
1730     case 2: /* CLREX */
1731         gen_clrex(s, insn);
1732         return;
1733     case 4: /* DSB */
1734     case 5: /* DMB */
1735         switch (crm & 3) {
1736         case 1: /* MBReqTypes_Reads */
1737             bar = TCG_BAR_SC | TCG_MO_LD_LD | TCG_MO_LD_ST;
1738             break;
1739         case 2: /* MBReqTypes_Writes */
1740             bar = TCG_BAR_SC | TCG_MO_ST_ST;
1741             break;
1742         default: /* MBReqTypes_All */
1743             bar = TCG_BAR_SC | TCG_MO_ALL;
1744             break;
1745         }
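             /*
              * The shareability domain in CRm<3:2> is ignored: QEMU models
              * a single coherent memory, so only the access-type field
              * matters for the barrier.
              */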
1746         tcg_gen_mb(bar);
1747         return;
1748     case 6: /* ISB */
1749         /* We need to break the TB after this insn to execute
1750          * self-modifying code correctly and also to take
1751          * any pending interrupts immediately.
1752          */
1753         reset_btype(s);
1754         gen_goto_tb(s, 0, 4);
1755         return;
1756 
1757     case 7: /* SB */
1758         if (crm != 0 || !dc_isar_feature(aa64_sb, s)) {
1759             goto do_unallocated;
1760         }
1761         /*
1762          * TODO: There is no speculation barrier opcode for TCG;
1763          * MB and end the TB instead.
1764          */
1765         tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
1766         gen_goto_tb(s, 0, 4);
1767         return;
1768 
1769     default:
1770     do_unallocated:
1771         unallocated_encoding(s);
1772         return;
1773     }
1774 }
1775 
1776 static void gen_xaflag(void)
1777 {
1778     TCGv_i32 z = tcg_temp_new_i32();
1779 
1780     tcg_gen_setcondi_i32(TCG_COND_EQ, z, cpu_ZF, 0);
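         /* z = 1 iff the architectural Z flag is set (i.e. cpu_ZF == 0). */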
1781 
1782     /*
1783      * (!C & !Z) << 31
1784      * (!(C | Z)) << 31
1785      * ~((C | Z) << 31)
1786      * ~-(C | Z)
1787      * (C | Z) - 1
1788      */
1789     tcg_gen_or_i32(cpu_NF, cpu_CF, z);
1790     tcg_gen_subi_i32(cpu_NF, cpu_NF, 1);
1791 
1792     /* !(Z & C) */
1793     tcg_gen_and_i32(cpu_ZF, z, cpu_CF);
1794     tcg_gen_xori_i32(cpu_ZF, cpu_ZF, 1);
1795 
1796     /* (!C & Z) << 31 -> -(Z & ~C) */
1797     tcg_gen_andc_i32(cpu_VF, z, cpu_CF);
1798     tcg_gen_neg_i32(cpu_VF, cpu_VF);
1799 
1800     /* C | Z */
1801     tcg_gen_or_i32(cpu_CF, cpu_CF, z);
1802 }
1803 
1804 static void gen_axflag(void)
1805 {
1806     tcg_gen_sari_i32(cpu_VF, cpu_VF, 31);         /* V ? -1 : 0 */
1807     tcg_gen_andc_i32(cpu_CF, cpu_CF, cpu_VF);     /* C & !V */
1808 
1809     /* !(Z | V) -> !(!ZF | V) -> ZF & !V -> ZF & ~VF */
1810     tcg_gen_andc_i32(cpu_ZF, cpu_ZF, cpu_VF);
1811 
1812     tcg_gen_movi_i32(cpu_NF, 0);
1813     tcg_gen_movi_i32(cpu_VF, 0);
1814 }
1815 
1816 /* MSR (immediate) - move immediate to processor state field */
1817 static void handle_msr_i(DisasContext *s, uint32_t insn,
1818                          unsigned int op1, unsigned int op2, unsigned int crm)
1819 {
1820     int op = op1 << 3 | op2;
1821 
1822     /* End the TB by default, chaining is ok.  */
1823     s->base.is_jmp = DISAS_TOO_MANY;
1824 
1825     switch (op) {
1826     case 0x00: /* CFINV */
1827         if (crm != 0 || !dc_isar_feature(aa64_condm_4, s)) {
1828             goto do_unallocated;
1829         }
1830         tcg_gen_xori_i32(cpu_CF, cpu_CF, 1);
1831         s->base.is_jmp = DISAS_NEXT;
1832         break;
1833 
1834     case 0x01: /* XAFlag */
1835         if (crm != 0 || !dc_isar_feature(aa64_condm_5, s)) {
1836             goto do_unallocated;
1837         }
1838         gen_xaflag();
1839         s->base.is_jmp = DISAS_NEXT;
1840         break;
1841 
1842     case 0x02: /* AXFlag */
1843         if (crm != 0 || !dc_isar_feature(aa64_condm_5, s)) {
1844             goto do_unallocated;
1845         }
1846         gen_axflag();
1847         s->base.is_jmp = DISAS_NEXT;
1848         break;
1849 
1850     case 0x03: /* UAO */
1851         if (!dc_isar_feature(aa64_uao, s) || s->current_el == 0) {
1852             goto do_unallocated;
1853         }
1854         if (crm & 1) {
1855             set_pstate_bits(PSTATE_UAO);
1856         } else {
1857             clear_pstate_bits(PSTATE_UAO);
1858         }
1859         gen_rebuild_hflags(s);
1860         break;
1861 
1862     case 0x04: /* PAN */
1863         if (!dc_isar_feature(aa64_pan, s) || s->current_el == 0) {
1864             goto do_unallocated;
1865         }
1866         if (crm & 1) {
1867             set_pstate_bits(PSTATE_PAN);
1868         } else {
1869             clear_pstate_bits(PSTATE_PAN);
1870         }
1871         gen_rebuild_hflags(s);
1872         break;
1873 
1874     case 0x05: /* SPSel */
1875         if (s->current_el == 0) {
1876             goto do_unallocated;
1877         }
1878         gen_helper_msr_i_spsel(cpu_env, tcg_constant_i32(crm & PSTATE_SP));
1879         break;
1880 
1881     case 0x19: /* SSBS */
1882         if (!dc_isar_feature(aa64_ssbs, s)) {
1883             goto do_unallocated;
1884         }
1885         if (crm & 1) {
1886             set_pstate_bits(PSTATE_SSBS);
1887         } else {
1888             clear_pstate_bits(PSTATE_SSBS);
1889         }
1890         /* Don't need to rebuild hflags since SSBS is a nop */
1891         break;
1892 
1893     case 0x1a: /* DIT */
1894         if (!dc_isar_feature(aa64_dit, s)) {
1895             goto do_unallocated;
1896         }
1897         if (crm & 1) {
1898             set_pstate_bits(PSTATE_DIT);
1899         } else {
1900             clear_pstate_bits(PSTATE_DIT);
1901         }
1902         /* There's no need to rebuild hflags because DIT is a nop */
1903         break;
1904 
1905     case 0x1e: /* DAIFSet */
1906         gen_helper_msr_i_daifset(cpu_env, tcg_constant_i32(crm));
1907         break;
1908 
1909     case 0x1f: /* DAIFClear */
1910         gen_helper_msr_i_daifclear(cpu_env, tcg_constant_i32(crm));
1911         /* For DAIFClear, exit the cpu loop to re-evaluate pending IRQs.  */
1912         s->base.is_jmp = DISAS_UPDATE_EXIT;
1913         break;
1914 
1915     case 0x1c: /* TCO */
1916         if (dc_isar_feature(aa64_mte, s)) {
1917             /* Full MTE is enabled -- set the TCO bit as directed. */
1918             if (crm & 1) {
1919                 set_pstate_bits(PSTATE_TCO);
1920             } else {
1921                 clear_pstate_bits(PSTATE_TCO);
1922             }
1923             gen_rebuild_hflags(s);
1924             /* Many factors, including TCO, go into MTE_ACTIVE. */
1925             s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
1926         } else if (dc_isar_feature(aa64_mte_insn_reg, s)) {
1927             /* Only "instructions accessible at EL0" -- PSTATE.TCO is WI.  */
1928             s->base.is_jmp = DISAS_NEXT;
1929         } else {
1930             goto do_unallocated;
1931         }
1932         break;
1933 
1934     case 0x1b: /* SVCR* */
1935         if (!dc_isar_feature(aa64_sme, s) || crm < 2 || crm > 7) {
1936             goto do_unallocated;
1937         }
1938         if (sme_access_check(s)) {
1939             int old = s->pstate_sm | (s->pstate_za << 1);
1940             int new = (crm & 1) * 3;
1941             int msk = (crm >> 1) & 3;
1942 
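                 /*
                  * Bit 0 of old/new/msk is PSTATE.SM, bit 1 is PSTATE.ZA:
                  * CRm<0> selects set vs clear and CRm<2:1> selects which
                  * of SM, ZA or both is affected.
                  */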
1943             if ((old ^ new) & msk) {
1944                 /* At least one bit changes. */
1945                 gen_helper_set_svcr(cpu_env, tcg_constant_i32(new),
1946                                     tcg_constant_i32(msk));
1947             } else {
1948                 s->base.is_jmp = DISAS_NEXT;
1949             }
1950         }
1951         break;
1952 
1953     default:
1954     do_unallocated:
1955         unallocated_encoding(s);
1956         return;
1957     }
1958 }
1959 
1960 static void gen_get_nzcv(TCGv_i64 tcg_rt)
1961 {
1962     TCGv_i32 tmp = tcg_temp_new_i32();
1963     TCGv_i32 nzcv = tcg_temp_new_i32();
1964 
1965     /* build bit 31, N */
1966     tcg_gen_andi_i32(nzcv, cpu_NF, (1U << 31));
1967     /* build bit 30, Z */
1968     tcg_gen_setcondi_i32(TCG_COND_EQ, tmp, cpu_ZF, 0);
1969     tcg_gen_deposit_i32(nzcv, nzcv, tmp, 30, 1);
1970     /* build bit 29, C */
1971     tcg_gen_deposit_i32(nzcv, nzcv, cpu_CF, 29, 1);
1972     /* build bit 28, V */
1973     tcg_gen_shri_i32(tmp, cpu_VF, 31);
1974     tcg_gen_deposit_i32(nzcv, nzcv, tmp, 28, 1);
1975     /* generate result */
1976     tcg_gen_extu_i32_i64(tcg_rt, nzcv);
1977 }
1978 
1979 static void gen_set_nzcv(TCGv_i64 tcg_rt)
1980 {
1981     TCGv_i32 nzcv = tcg_temp_new_i32();
1982 
1983     /* take NZCV from R[t] */
1984     tcg_gen_extrl_i64_i32(nzcv, tcg_rt);
1985 
1986     /* bit 31, N */
1987     tcg_gen_andi_i32(cpu_NF, nzcv, (1U << 31));
1988     /* bit 30, Z */
1989     tcg_gen_andi_i32(cpu_ZF, nzcv, (1 << 30));
1990     tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_ZF, cpu_ZF, 0);
1991     /* bit 29, C */
1992     tcg_gen_andi_i32(cpu_CF, nzcv, (1 << 29));
1993     tcg_gen_shri_i32(cpu_CF, cpu_CF, 29);
1994     /* bit 28, V */
1995     tcg_gen_andi_i32(cpu_VF, nzcv, (1 << 28));
1996     tcg_gen_shli_i32(cpu_VF, cpu_VF, 3);
1997 }
1998 
1999 static void gen_sysreg_undef(DisasContext *s, bool isread,
2000                              uint8_t op0, uint8_t op1, uint8_t op2,
2001                              uint8_t crn, uint8_t crm, uint8_t rt)
2002 {
2003     /*
2004      * Generate code to emit an UNDEF with correct syndrome
2005      * information for a failed system register access.
2006      * This is EC_UNCATEGORIZED (ie a standard UNDEF) in most cases,
2007      * but if FEAT_IDST is implemented then read accesses to registers
2008      * in the feature ID space are reported with the EC_SYSTEMREGISTERTRAP
2009      * syndrome.
2010      */
2011     uint32_t syndrome;
2012 
2013     if (isread && dc_isar_feature(aa64_ids, s) &&
2014         arm_cpreg_encoding_in_idspace(op0, op1, op2, crn, crm)) {
2015         syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread);
2016     } else {
2017         syndrome = syn_uncategorized();
2018     }
2019     gen_exception_insn(s, 0, EXCP_UDEF, syndrome);
2020 }
2021 
2022 /* MRS - move from system register
2023  * MSR (register) - move to system register
2024  * SYS
2025  * SYSL
2026  * These are all essentially the same insn in 'read' and 'write'
2027  * versions, with varying op0 fields.
2028  */
2029 static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
2030                        unsigned int op0, unsigned int op1, unsigned int op2,
2031                        unsigned int crn, unsigned int crm, unsigned int rt)
2032 {
2033     uint32_t key = ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP,
2034                                       crn, crm, op0, op1, op2);
2035     const ARMCPRegInfo *ri = get_arm_cp_reginfo(s->cp_regs, key);
2036     bool need_exit_tb = false;
2037     TCGv_ptr tcg_ri = NULL;
2038     TCGv_i64 tcg_rt;
2039 
2040     if (!ri) {
2041         /* Unknown register; this might be a guest error or a QEMU
2042          * unimplemented feature.
2043          */
2044         qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch64 "
2045                       "system register op0:%d op1:%d crn:%d crm:%d op2:%d\n",
2046                       isread ? "read" : "write", op0, op1, crn, crm, op2);
2047         gen_sysreg_undef(s, isread, op0, op1, op2, crn, crm, rt);
2048         return;
2049     }
2050 
2051     /* Check access permissions */
2052     if (!cp_access_ok(s->current_el, ri, isread)) {
2053         gen_sysreg_undef(s, isread, op0, op1, op2, crn, crm, rt);
2054         return;
2055     }
2056 
2057     if (ri->accessfn || (ri->fgt && s->fgt_active)) {
2058         /* Emit code to perform further access permissions checks at
2059          * runtime; this may result in an exception.
2060          */
2061         uint32_t syndrome;
2062 
2063         syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread);
2064         gen_a64_update_pc(s, 0);
2065         tcg_ri = tcg_temp_new_ptr();
2066         gen_helper_access_check_cp_reg(tcg_ri, cpu_env,
2067                                        tcg_constant_i32(key),
2068                                        tcg_constant_i32(syndrome),
2069                                        tcg_constant_i32(isread));
2070     } else if (ri->type & ARM_CP_RAISES_EXC) {
2071         /*
2072          * The readfn or writefn might raise an exception;
2073          * synchronize the CPU state in case it does.
2074          */
2075         gen_a64_update_pc(s, 0);
2076     }
2077 
2078     /* Handle special cases first */
2079     switch (ri->type & ARM_CP_SPECIAL_MASK) {
2080     case 0:
2081         break;
2082     case ARM_CP_NOP:
2083         return;
2084     case ARM_CP_NZCV:
2085         tcg_rt = cpu_reg(s, rt);
2086         if (isread) {
2087             gen_get_nzcv(tcg_rt);
2088         } else {
2089             gen_set_nzcv(tcg_rt);
2090         }
2091         return;
2092     case ARM_CP_CURRENTEL:
2093         /* Reads the current EL value from pstate, which is
2094          * guaranteed to be constant by the tb flags.
2095          */
2096         tcg_rt = cpu_reg(s, rt);
2097         tcg_gen_movi_i64(tcg_rt, s->current_el << 2);
2098         return;
2099     case ARM_CP_DC_ZVA:
2100         /* Writes clear the aligned block of memory which rt points into. */
2101         if (s->mte_active[0]) {
2102             int desc = 0;
2103 
2104             desc = FIELD_DP32(desc, MTEDESC, MIDX, get_mem_index(s));
2105             desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
2106             desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
2107 
2108             tcg_rt = tcg_temp_new_i64();
2109             gen_helper_mte_check_zva(tcg_rt, cpu_env,
2110                                      tcg_constant_i32(desc), cpu_reg(s, rt));
2111         } else {
2112             tcg_rt = clean_data_tbi(s, cpu_reg(s, rt));
2113         }
2114         gen_helper_dc_zva(cpu_env, tcg_rt);
2115         return;
2116     case ARM_CP_DC_GVA:
2117         {
2118             TCGv_i64 clean_addr, tag;
2119 
2120             /*
2121              * DC_GVA, like DC_ZVA, requires that we supply the original
2122              * pointer for an invalid page.  Probe that address first.
2123              */
2124             tcg_rt = cpu_reg(s, rt);
2125             clean_addr = clean_data_tbi(s, tcg_rt);
2126             gen_probe_access(s, clean_addr, MMU_DATA_STORE, MO_8);
2127 
2128             if (s->ata) {
2129                 /* Extract the tag from the register to match STZGM.  */
2130                 tag = tcg_temp_new_i64();
2131                 tcg_gen_shri_i64(tag, tcg_rt, 56);
2132                 gen_helper_stzgm_tags(cpu_env, clean_addr, tag);
2133             }
2134         }
2135         return;
2136     case ARM_CP_DC_GZVA:
2137         {
2138             TCGv_i64 clean_addr, tag;
2139 
2140             /* For DC_GZVA, we can rely on DC_ZVA for the proper fault. */
2141             tcg_rt = cpu_reg(s, rt);
2142             clean_addr = clean_data_tbi(s, tcg_rt);
2143             gen_helper_dc_zva(cpu_env, clean_addr);
2144 
2145             if (s->ata) {
2146                 /* Extract the tag from the register to match STZGM.  */
2147                 tag = tcg_temp_new_i64();
2148                 tcg_gen_shri_i64(tag, tcg_rt, 56);
2149                 gen_helper_stzgm_tags(cpu_env, clean_addr, tag);
2150             }
2151         }
2152         return;
2153     default:
2154         g_assert_not_reached();
2155     }
2156     if ((ri->type & ARM_CP_FPU) && !fp_access_check_only(s)) {
2157         return;
2158     } else if ((ri->type & ARM_CP_SVE) && !sve_access_check(s)) {
2159         return;
2160     } else if ((ri->type & ARM_CP_SME) && !sme_access_check(s)) {
2161         return;
2162     }
2163 
2164     if (ri->type & ARM_CP_IO) {
2165         /* I/O operations must end the TB here (whether read or write) */
2166         need_exit_tb = translator_io_start(&s->base);
2167     }
2168 
2169     tcg_rt = cpu_reg(s, rt);
2170 
2171     if (isread) {
2172         if (ri->type & ARM_CP_CONST) {
2173             tcg_gen_movi_i64(tcg_rt, ri->resetvalue);
2174         } else if (ri->readfn) {
2175             if (!tcg_ri) {
2176                 tcg_ri = gen_lookup_cp_reg(key);
2177             }
2178             gen_helper_get_cp_reg64(tcg_rt, cpu_env, tcg_ri);
2179         } else {
2180             tcg_gen_ld_i64(tcg_rt, cpu_env, ri->fieldoffset);
2181         }
2182     } else {
2183         if (ri->type & ARM_CP_CONST) {
2184             /* If not forbidden by access permissions, treat as WI */
2185             return;
2186         } else if (ri->writefn) {
2187             if (!tcg_ri) {
2188                 tcg_ri = gen_lookup_cp_reg(key);
2189             }
2190             gen_helper_set_cp_reg64(cpu_env, tcg_ri, tcg_rt);
2191         } else {
2192             tcg_gen_st_i64(tcg_rt, cpu_env, ri->fieldoffset);
2193         }
2194     }
2195 
2196     if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
2197         /*
2198      * A write to any coprocessor register that ends a TB
2199          * must rebuild the hflags for the next TB.
2200          */
2201         gen_rebuild_hflags(s);
2202         /*
2203          * We default to ending the TB on a coprocessor register write,
2204          * but allow this to be suppressed by the register definition
2205          * (usually only necessary to work around guest bugs).
2206          */
2207         need_exit_tb = true;
2208     }
2209     if (need_exit_tb) {
2210         s->base.is_jmp = DISAS_UPDATE_EXIT;
2211     }
2212 }
2213 
2214 /* System
2215  *  31                 22 21  20 19 18 16 15   12 11    8 7   5 4    0
2216  * +---------------------+---+-----+-----+-------+-------+-----+------+
2217  * | 1 1 0 1 0 1 0 1 0 0 | L | op0 | op1 |  CRn  |  CRm  | op2 |  Rt  |
2218  * +---------------------+---+-----+-----+-------+-------+-----+------+
2219  */
2220 static void disas_system(DisasContext *s, uint32_t insn)
2221 {
2222     unsigned int l, op0, op1, crn, crm, op2, rt;
2223     l = extract32(insn, 21, 1);
2224     op0 = extract32(insn, 19, 2);
2225     op1 = extract32(insn, 16, 3);
2226     crn = extract32(insn, 12, 4);
2227     crm = extract32(insn, 8, 4);
2228     op2 = extract32(insn, 5, 3);
2229     rt = extract32(insn, 0, 5);
2230 
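     /*
      * op0 == 0 covers the hint, barrier and MSR-immediate space;
      * e.g. NOP is op0=0, op1=3, CRn=2, CRm=0, op2=0, Rt=31.
      */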
2231     if (op0 == 0) {
2232         if (l || rt != 31) {
2233             unallocated_encoding(s);
2234             return;
2235         }
2236         switch (crn) {
2237         case 2: /* HINT (including allocated hints like NOP, YIELD, etc) */
2238             handle_hint(s, insn, op1, op2, crm);
2239             break;
2240         case 3: /* CLREX, DSB, DMB, ISB */
2241             handle_sync(s, insn, op1, op2, crm);
2242             break;
2243         case 4: /* MSR (immediate) */
2244             handle_msr_i(s, insn, op1, op2, crm);
2245             break;
2246         default:
2247             unallocated_encoding(s);
2248             break;
2249         }
2250         return;
2251     }
2252     handle_sys(s, insn, l, op0, op1, op2, crn, crm, rt);
2253 }
2254 
2255 /* Exception generation
2256  *
2257  *  31             24 23 21 20                     5 4   2 1  0
2258  * +-----------------+-----+------------------------+-----+----+
2259  * | 1 1 0 1 0 1 0 0 | opc |          imm16         | op2 | LL |
2260  * +-----------------+-----+------------------------+-----+----+
2261  */
2262 static void disas_exc(DisasContext *s, uint32_t insn)
2263 {
2264     int opc = extract32(insn, 21, 3);
2265     int op2_ll = extract32(insn, 0, 5);
2266     int imm16 = extract32(insn, 5, 16);
2267     uint32_t syndrome;
2268 
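     /*
      * For example, SVC #0 (0xd4000001) decodes as opc=0, imm16=0,
      * op2_ll=1.
      */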
2269     switch (opc) {
2270     case 0:
2271         /* For SVC, HVC and SMC we advance the single-step state
2272          * machine before taking the exception. This is architecturally
2273          * mandated, to ensure that single-stepping a system call
2274          * instruction works properly.
2275          */
2276         switch (op2_ll) {
2277         case 1:                                                     /* SVC */
2278             syndrome = syn_aa64_svc(imm16);
2279             if (s->fgt_svc) {
2280                 gen_exception_insn_el(s, 0, EXCP_UDEF, syndrome, 2);
2281                 break;
2282             }
2283             gen_ss_advance(s);
2284             gen_exception_insn(s, 4, EXCP_SWI, syndrome);
2285             break;
2286         case 2:                                                     /* HVC */
2287             if (s->current_el == 0) {
2288                 unallocated_encoding(s);
2289                 break;
2290             }
2291             /* The pre HVC helper handles cases when HVC gets trapped
2292              * as an undefined insn by runtime configuration.
2293              */
2294             gen_a64_update_pc(s, 0);
2295             gen_helper_pre_hvc(cpu_env);
2296             gen_ss_advance(s);
2297             gen_exception_insn_el(s, 4, EXCP_HVC, syn_aa64_hvc(imm16), 2);
2298             break;
2299         case 3:                                                     /* SMC */
2300             if (s->current_el == 0) {
2301                 unallocated_encoding(s);
2302                 break;
2303             }
2304             gen_a64_update_pc(s, 0);
2305             gen_helper_pre_smc(cpu_env, tcg_constant_i32(syn_aa64_smc(imm16)));
2306             gen_ss_advance(s);
2307             gen_exception_insn_el(s, 4, EXCP_SMC, syn_aa64_smc(imm16), 3);
2308             break;
2309         default:
2310             unallocated_encoding(s);
2311             break;
2312         }
2313         break;
2314     case 1:
2315         if (op2_ll != 0) {
2316             unallocated_encoding(s);
2317             break;
2318         }
2319         /* BRK */
2320         gen_exception_bkpt_insn(s, syn_aa64_bkpt(imm16));
2321         break;
2322     case 2:
2323         if (op2_ll != 0) {
2324             unallocated_encoding(s);
2325             break;
2326         }
2327         /* HLT. This has two purposes.
2328          * Architecturally, it is an external halting debug instruction.
2329          * Since QEMU doesn't implement external debug, we treat this as
2330          * the architecture requires with halting debug disabled: it will UNDEF.
2331          * Secondly, "HLT 0xf000" is the A64 semihosting syscall instruction.
2332          */
2333         if (semihosting_enabled(s->current_el == 0) && imm16 == 0xf000) {
2334             gen_exception_internal_insn(s, EXCP_SEMIHOST);
2335         } else {
2336             unallocated_encoding(s);
2337         }
2338         break;
2339     case 5:
2340         if (op2_ll < 1 || op2_ll > 3) {
2341             unallocated_encoding(s);
2342             break;
2343         }
2344         /* DCPS1, DCPS2, DCPS3 */
2345         unallocated_encoding(s);
2346         break;
2347     default:
2348         unallocated_encoding(s);
2349         break;
2350     }
2351 }
2352 
2353 /* Branches, exception generating and system instructions */
2354 static void disas_b_exc_sys(DisasContext *s, uint32_t insn)
2355 {
2356     switch (extract32(insn, 25, 7)) {
2357     case 0x6a: /* Exception generation / System */
2358         if (insn & (1 << 24)) {
2359             if (extract32(insn, 22, 2) == 0) {
2360                 disas_system(s, insn);
2361             } else {
2362                 unallocated_encoding(s);
2363             }
2364         } else {
2365             disas_exc(s, insn);
2366         }
2367         break;
2368     default:
2369         unallocated_encoding(s);
2370         break;
2371     }
2372 }
2373 
2374 /*
2375  * Load/Store exclusive instructions are implemented by remembering
2376  * the value/address loaded, and seeing if these are the same
2377  * when the store is performed. This is not actually the architecturally
2378  * mandated semantics, but it works for typical guest code sequences
2379  * and avoids having to monitor regular stores.
2380  *
2381  * The store exclusive uses the atomic cmpxchg primitives to avoid
2382  * races in multi-threaded linux-user and when MTTCG softmmu is
2383  * enabled.
2384  */
2385 static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
2386                                TCGv_i64 addr, int size, bool is_pair)
2387 {
2388     int idx = get_mem_index(s);
2389     MemOp memop = s->be_data;
2390 
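     /*
      * Record the address and the loaded value(s) in the exclusive
      * monitor globals; gen_store_exclusive() compares against them.
      */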
2391     g_assert(size <= 3);
2392     if (is_pair) {
2393         g_assert(size >= 2);
2394         if (size == 2) {
2395             /* The pair must be single-copy atomic for the doubleword.  */
2396             memop |= MO_64 | MO_ALIGN;
2397             tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx, memop);
2398             if (s->be_data == MO_LE) {
2399                 tcg_gen_extract_i64(cpu_reg(s, rt), cpu_exclusive_val, 0, 32);
2400                 tcg_gen_extract_i64(cpu_reg(s, rt2), cpu_exclusive_val, 32, 32);
2401             } else {
2402                 tcg_gen_extract_i64(cpu_reg(s, rt), cpu_exclusive_val, 32, 32);
2403                 tcg_gen_extract_i64(cpu_reg(s, rt2), cpu_exclusive_val, 0, 32);
2404             }
2405         } else {
2406             /* The pair must be single-copy atomic for *each* doubleword, not
2407                the entire quadword; however, it must be quadword aligned.  */
2408             memop |= MO_64;
2409             tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx,
2410                                 memop | MO_ALIGN_16);
2411 
2412             TCGv_i64 addr2 = tcg_temp_new_i64();
2413             tcg_gen_addi_i64(addr2, addr, 8);
2414             tcg_gen_qemu_ld_i64(cpu_exclusive_high, addr2, idx, memop);
2415 
2416             tcg_gen_mov_i64(cpu_reg(s, rt), cpu_exclusive_val);
2417             tcg_gen_mov_i64(cpu_reg(s, rt2), cpu_exclusive_high);
2418         }
2419     } else {
2420         memop |= size | MO_ALIGN;
2421         tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx, memop);
2422         tcg_gen_mov_i64(cpu_reg(s, rt), cpu_exclusive_val);
2423     }
2424     tcg_gen_mov_i64(cpu_exclusive_addr, addr);
2425 }
2426 
2427 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
2428                                 TCGv_i64 addr, int size, int is_pair)
2429 {
2430     /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]
2431      *     && (!is_pair || env->exclusive_high == [addr + datasize])) {
2432      *     [addr] = {Rt};
2433      *     if (is_pair) {
2434      *         [addr + datasize] = {Rt2};
2435      *     }
2436      *     {Rd} = 0;
2437      * } else {
2438      *     {Rd} = 1;
2439      * }
2440      * env->exclusive_addr = -1;
2441      */
2442     TCGLabel *fail_label = gen_new_label();
2443     TCGLabel *done_label = gen_new_label();
2444     TCGv_i64 tmp;
2445 
2446     tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
2447 
2448     tmp = tcg_temp_new_i64();
2449     if (is_pair) {
2450         if (size == 2) {
2451             if (s->be_data == MO_LE) {
2452                 tcg_gen_concat32_i64(tmp, cpu_reg(s, rt), cpu_reg(s, rt2));
2453             } else {
2454                 tcg_gen_concat32_i64(tmp, cpu_reg(s, rt2), cpu_reg(s, rt));
2455             }
2456             tcg_gen_atomic_cmpxchg_i64(tmp, cpu_exclusive_addr,
2457                                        cpu_exclusive_val, tmp,
2458                                        get_mem_index(s),
2459                                        MO_64 | MO_ALIGN | s->be_data);
2460             tcg_gen_setcond_i64(TCG_COND_NE, tmp, tmp, cpu_exclusive_val);
2461         } else {
2462             TCGv_i128 t16 = tcg_temp_new_i128();
2463             TCGv_i128 c16 = tcg_temp_new_i128();
2464             TCGv_i64 a, b;
2465 
2466             if (s->be_data == MO_LE) {
2467                 tcg_gen_concat_i64_i128(t16, cpu_reg(s, rt), cpu_reg(s, rt2));
2468                 tcg_gen_concat_i64_i128(c16, cpu_exclusive_val,
2469                                         cpu_exclusive_high);
2470             } else {
2471                 tcg_gen_concat_i64_i128(t16, cpu_reg(s, rt2), cpu_reg(s, rt));
2472                 tcg_gen_concat_i64_i128(c16, cpu_exclusive_high,
2473                                         cpu_exclusive_val);
2474             }
2475 
2476             tcg_gen_atomic_cmpxchg_i128(t16, cpu_exclusive_addr, c16, t16,
2477                                         get_mem_index(s),
2478                                         MO_128 | MO_ALIGN | s->be_data);
2479 
2480             a = tcg_temp_new_i64();
2481             b = tcg_temp_new_i64();
2482             if (s->be_data == MO_LE) {
2483                 tcg_gen_extr_i128_i64(a, b, t16);
2484             } else {
2485                 tcg_gen_extr_i128_i64(b, a, t16);
2486             }
2487 
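                 /*
                  * The cmpxchg yields the old memory value; the store
                  * succeeded iff that equals {exclusive_val, exclusive_high},
                  * so tmp = ((a ^ val) | (b ^ high)) != 0 is the fail flag.
                  */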
2488             tcg_gen_xor_i64(a, a, cpu_exclusive_val);
2489             tcg_gen_xor_i64(b, b, cpu_exclusive_high);
2490             tcg_gen_or_i64(tmp, a, b);
2491 
2492             tcg_gen_setcondi_i64(TCG_COND_NE, tmp, tmp, 0);
2493         }
2494     } else {
2495         tcg_gen_atomic_cmpxchg_i64(tmp, cpu_exclusive_addr, cpu_exclusive_val,
2496                                    cpu_reg(s, rt), get_mem_index(s),
2497                                    size | MO_ALIGN | s->be_data);
2498         tcg_gen_setcond_i64(TCG_COND_NE, tmp, tmp, cpu_exclusive_val);
2499     }
2500     tcg_gen_mov_i64(cpu_reg(s, rd), tmp);
2501     tcg_gen_br(done_label);
2502 
2503     gen_set_label(fail_label);
2504     tcg_gen_movi_i64(cpu_reg(s, rd), 1);
2505     gen_set_label(done_label);
2506     tcg_gen_movi_i64(cpu_exclusive_addr, -1);
2507 }
2508 
2509 static void gen_compare_and_swap(DisasContext *s, int rs, int rt,
2510                                  int rn, int size)
2511 {
2512     TCGv_i64 tcg_rs = cpu_reg(s, rs);
2513     TCGv_i64 tcg_rt = cpu_reg(s, rt);
2514     int memidx = get_mem_index(s);
2515     TCGv_i64 clean_addr;
2516 
2517     if (rn == 31) {
2518         gen_check_sp_alignment(s);
2519     }
2520     clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), true, rn != 31, size);
2521     tcg_gen_atomic_cmpxchg_i64(tcg_rs, clean_addr, tcg_rs, tcg_rt, memidx,
2522                                size | MO_ALIGN | s->be_data);
2523 }
2524 
2525 static void gen_compare_and_swap_pair(DisasContext *s, int rs, int rt,
2526                                       int rn, int size)
2527 {
2528     TCGv_i64 s1 = cpu_reg(s, rs);
2529     TCGv_i64 s2 = cpu_reg(s, rs + 1);
2530     TCGv_i64 t1 = cpu_reg(s, rt);
2531     TCGv_i64 t2 = cpu_reg(s, rt + 1);
2532     TCGv_i64 clean_addr;
2533     int memidx = get_mem_index(s);
2534 
2535     if (rn == 31) {
2536         gen_check_sp_alignment(s);
2537     }
2538 
2539     /* This is a single atomic access, despite the "pair". */
2540     clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), true, rn != 31, size + 1);
2541 
2542     if (size == 2) {
2543         TCGv_i64 cmp = tcg_temp_new_i64();
2544         TCGv_i64 val = tcg_temp_new_i64();
2545 
2546         if (s->be_data == MO_LE) {
2547             tcg_gen_concat32_i64(val, t1, t2);
2548             tcg_gen_concat32_i64(cmp, s1, s2);
2549         } else {
2550             tcg_gen_concat32_i64(val, t2, t1);
2551             tcg_gen_concat32_i64(cmp, s2, s1);
2552         }
2553 
2554         tcg_gen_atomic_cmpxchg_i64(cmp, clean_addr, cmp, val, memidx,
2555                                    MO_64 | MO_ALIGN | s->be_data);
2556 
2557         if (s->be_data == MO_LE) {
2558             tcg_gen_extr32_i64(s1, s2, cmp);
2559         } else {
2560             tcg_gen_extr32_i64(s2, s1, cmp);
2561         }
2562     } else {
2563         TCGv_i128 cmp = tcg_temp_new_i128();
2564         TCGv_i128 val = tcg_temp_new_i128();
2565 
2566         if (s->be_data == MO_LE) {
2567             tcg_gen_concat_i64_i128(val, t1, t2);
2568             tcg_gen_concat_i64_i128(cmp, s1, s2);
2569         } else {
2570             tcg_gen_concat_i64_i128(val, t2, t1);
2571             tcg_gen_concat_i64_i128(cmp, s2, s1);
2572         }
2573 
2574         tcg_gen_atomic_cmpxchg_i128(cmp, clean_addr, cmp, val, memidx,
2575                                     MO_128 | MO_ALIGN | s->be_data);
2576 
2577         if (s->be_data == MO_LE) {
2578             tcg_gen_extr_i128_i64(s1, s2, cmp);
2579         } else {
2580             tcg_gen_extr_i128_i64(s2, s1, cmp);
2581         }
2582     }
2583 }
2584 
2585 /* Update the Sixty-Four bit (SF) register size. This logic is derived
2586  * from the ARMv8 specs for LDR (Shared decode for all encodings).
2587  */
2588 static bool disas_ldst_compute_iss_sf(int size, bool is_signed, int opc)
2589 {
2590     int opc0 = extract32(opc, 0, 1);
2591     int regsize;
2592 
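     /*
      * For example, LDRSW (signed, opc<0> = 0) always targets a
      * 64-bit register, while unsigned loads are 64-bit only when
      * size == 3.
      */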
2593     if (is_signed) {
2594         regsize = opc0 ? 32 : 64;
2595     } else {
2596         regsize = size == 3 ? 64 : 32;
2597     }
2598     return regsize == 64;
2599 }
2600 
2601 /* Load/store exclusive
2602  *
2603  *  31 30 29         24  23  22   21  20  16  15  14   10 9    5 4    0
2604  * +-----+-------------+----+---+----+------+----+-------+------+------+
2605  * | sz  | 0 0 1 0 0 0 | o2 | L | o1 |  Rs  | o0 |  Rt2  |  Rn  | Rt   |
2606  * +-----+-------------+----+---+----+------+----+-------+------+------+
2607  *
2608  *  sz: 00 -> 8 bit, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64 bit
2609  *   L: 0 -> store, 1 -> load
2610  *  o2: 0 -> exclusive, 1 -> not
2611  *  o1: 0 -> single register, 1 -> register pair
2612  *  o0: 1 -> load-acquire/store-release, 0 -> not
2613  */
2614 static void disas_ldst_excl(DisasContext *s, uint32_t insn)
2615 {
2616     int rt = extract32(insn, 0, 5);
2617     int rn = extract32(insn, 5, 5);
2618     int rt2 = extract32(insn, 10, 5);
2619     int rs = extract32(insn, 16, 5);
2620     int is_lasr = extract32(insn, 15, 1);
2621     int o2_L_o1_o0 = extract32(insn, 21, 3) * 2 | is_lasr;
2622     int size = extract32(insn, 30, 2);
2623     TCGv_i64 clean_addr;
2624 
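     /*
      * Dispatch on o2:L:o1:o0; e.g. LDAXR has o2=0, L=1, o1=0, o0=1
      * and so is case 0x5 below.
      */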
2625     switch (o2_L_o1_o0) {
2626     case 0x0: /* STXR */
2627     case 0x1: /* STLXR */
2628         if (rn == 31) {
2629             gen_check_sp_alignment(s);
2630         }
2631         if (is_lasr) {
2632             tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
2633         }
2634         clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
2635                                     true, rn != 31, size);
2636         gen_store_exclusive(s, rs, rt, rt2, clean_addr, size, false);
2637         return;
2638 
2639     case 0x4: /* LDXR */
2640     case 0x5: /* LDAXR */
2641         if (rn == 31) {
2642             gen_check_sp_alignment(s);
2643         }
2644         clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
2645                                     false, rn != 31, size);
2646         s->is_ldex = true;
2647         gen_load_exclusive(s, rt, rt2, clean_addr, size, false);
2648         if (is_lasr) {
2649             tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
2650         }
2651         return;
2652 
2653     case 0x8: /* STLLR */
2654         if (!dc_isar_feature(aa64_lor, s)) {
2655             break;
2656         }
2657         /* StoreLORelease is the same as Store-Release for QEMU.  */
2658         /* fall through */
2659     case 0x9: /* STLR */
2660         /* Generate ISS for non-exclusive accesses including LASR.  */
2661         if (rn == 31) {
2662             gen_check_sp_alignment(s);
2663         }
2664         tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
2665         clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
2666                                     true, rn != 31, size);
2667         /* TODO: ARMv8.4-LSE SCTLR.nAA */
2668         do_gpr_st(s, cpu_reg(s, rt), clean_addr, size | MO_ALIGN, true, rt,
2669                   disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
2670         return;
2671 
2672     case 0xc: /* LDLAR */
2673         if (!dc_isar_feature(aa64_lor, s)) {
2674             break;
2675         }
2676         /* LoadLOAcquire is the same as Load-Acquire for QEMU.  */
2677         /* fall through */
2678     case 0xd: /* LDAR */
2679         /* Generate ISS for non-exclusive accesses including LASR.  */
2680         if (rn == 31) {
2681             gen_check_sp_alignment(s);
2682         }
2683         clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
2684                                     false, rn != 31, size);
2685         /* TODO: ARMv8.4-LSE SCTLR.nAA */
2686         do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size | MO_ALIGN, false, true,
2687                   rt, disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
2688         tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
2689         return;
2690 
2691     case 0x2: case 0x3: /* CASP / STXP */
2692         if (size & 2) { /* STXP / STLXP */
2693             if (rn == 31) {
2694                 gen_check_sp_alignment(s);
2695             }
2696             if (is_lasr) {
2697                 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
2698             }
2699             clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
2700                                         true, rn != 31, size);
2701             gen_store_exclusive(s, rs, rt, rt2, clean_addr, size, true);
2702             return;
2703         }
2704         if (rt2 == 31
2705             && ((rt | rs) & 1) == 0
2706             && dc_isar_feature(aa64_atomics, s)) {
2707             /* CASP / CASPL */
2708             gen_compare_and_swap_pair(s, rs, rt, rn, size | 2);
2709             return;
2710         }
2711         break;
2712 
2713     case 0x6: case 0x7: /* CASPA / LDXP */
2714         if (size & 2) { /* LDXP / LDAXP */
2715             if (rn == 31) {
2716                 gen_check_sp_alignment(s);
2717             }
2718             clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
2719                                         false, rn != 31, size);
2720             s->is_ldex = true;
2721             gen_load_exclusive(s, rt, rt2, clean_addr, size, true);
2722             if (is_lasr) {
2723                 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
2724             }
2725             return;
2726         }
2727         if (rt2 == 31
2728             && ((rt | rs) & 1) == 0
2729             && dc_isar_feature(aa64_atomics, s)) {
2730             /* CASPA / CASPAL */
2731             gen_compare_and_swap_pair(s, rs, rt, rn, size | 2);
2732             return;
2733         }
2734         break;
2735 
2736     case 0xa: /* CAS */
2737     case 0xb: /* CASL */
2738     case 0xe: /* CASA */
2739     case 0xf: /* CASAL */
2740         if (rt2 == 31 && dc_isar_feature(aa64_atomics, s)) {
2741             gen_compare_and_swap(s, rs, rt, rn, size);
2742             return;
2743         }
2744         break;
2745     }
2746     unallocated_encoding(s);
2747 }
2748 
2749 /*
2750  * Load register (literal)
2751  *
2752  *  31 30 29   27  26 25 24 23                5 4     0
2753  * +-----+-------+---+-----+-------------------+-------+
2754  * | opc | 0 1 1 | V | 0 0 |     imm19         |  Rt   |
2755  * +-----+-------+---+-----+-------------------+-------+
2756  *
2757  * V: 1 -> vector (simd/fp)
2758  * opc (non-vector): 00 -> 32 bit, 01 -> 64 bit,
2759  *                   10-> 32 bit signed, 11 -> prefetch
2760  * opc (vector): 00 -> 32 bit, 01 -> 64 bit, 10 -> 128 bit (11 unallocated)
2761  */
2762 static void disas_ld_lit(DisasContext *s, uint32_t insn)
2763 {
2764     int rt = extract32(insn, 0, 5);
2765     int64_t imm = sextract32(insn, 5, 19) << 2;
2766     bool is_vector = extract32(insn, 26, 1);
2767     int opc = extract32(insn, 30, 2);
2768     bool is_signed = false;
2769     int size = 2;
2770     TCGv_i64 tcg_rt, clean_addr;
2771 
2772     if (is_vector) {
2773         if (opc == 3) {
2774             unallocated_encoding(s);
2775             return;
2776         }
2777         size = 2 + opc;
2778         if (!fp_access_check(s)) {
2779             return;
2780         }
2781     } else {
2782         if (opc == 3) {
2783             /* PRFM (literal) : prefetch */
2784             return;
2785         }
2786         size = 2 + extract32(opc, 0, 1);
2787         is_signed = extract32(opc, 1, 1);
2788     }
2789 
2790     tcg_rt = cpu_reg(s, rt);
2791 
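     /*
      * Literal loads are PC-relative: the address is this insn's PC
      * plus imm19, already scaled by 4 above.
      */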
2792     clean_addr = tcg_temp_new_i64();
2793     gen_pc_plus_diff(s, clean_addr, imm);
2794     if (is_vector) {
2795         do_fp_ld(s, rt, clean_addr, size);
2796     } else {
2797         /* Only unsigned 32bit loads target 32bit registers.  */
2798         bool iss_sf = opc != 0;
2799 
2800         do_gpr_ld(s, tcg_rt, clean_addr, size + is_signed * MO_SIGN,
2801                   false, true, rt, iss_sf, false);
2802     }
2803 }
2804 
2805 /*
2806  * LDNP (Load Pair - non-temporal hint)
2807  * LDP (Load Pair - non vector)
2808  * LDPSW (Load Pair Signed Word - non vector)
2809  * STNP (Store Pair - non-temporal hint)
2810  * STP (Store Pair - non vector)
2811  * LDNP (Load Pair of SIMD&FP - non-temporal hint)
2812  * LDP (Load Pair of SIMD&FP)
2813  * STNP (Store Pair of SIMD&FP - non-temporal hint)
2814  * STP (Store Pair of SIMD&FP)
2815  *
2816  *  31 30 29   27  26  25 24   23  22 21   15 14   10 9    5 4    0
2817  * +-----+-------+---+---+-------+---+-----------------------------+
2818  * +-----+-------+---+---+-------+---+-------+-------+------+------+
2819  * +-----+-------+---+---+-------+---+-------+-------+------+------+
2820  *
2821  * opc: LDP/STP/LDNP/STNP        00 -> 32 bit, 10 -> 64 bit
2822  *      LDPSW/STGP               01
2823  *      LDP/STP/LDNP/STNP (SIMD) 00 -> 32 bit, 01 -> 64 bit, 10 -> 128 bit
2824  *   V: 0 -> GPR, 1 -> Vector
2825  * idx: 00 -> signed offset with non-temporal hint, 01 -> post-index,
2826  *      10 -> signed offset, 11 -> pre-index
2827  *   L: 0 -> Store 1 -> Load
2828  *
2829  * Rt, Rt2 = GPR or SIMD registers to be stored
2830  * Rn = general purpose register containing address
2831  * imm7 = signed offset (multiple of 4 or 8 depending on size)
2832  */
2833 static void disas_ldst_pair(DisasContext *s, uint32_t insn)
2834 {
2835     int rt = extract32(insn, 0, 5);
2836     int rn = extract32(insn, 5, 5);
2837     int rt2 = extract32(insn, 10, 5);
2838     uint64_t offset = sextract64(insn, 15, 7);
2839     int index = extract32(insn, 23, 2);
2840     bool is_vector = extract32(insn, 26, 1);
2841     bool is_load = extract32(insn, 22, 1);
2842     int opc = extract32(insn, 30, 2);
2843 
2844     bool is_signed = false;
2845     bool postindex = false;
2846     bool wback = false;
2847     bool set_tag = false;
2848 
2849     TCGv_i64 clean_addr, dirty_addr;
2850 
2851     int size;
2852 
2853     if (opc == 3) {
2854         unallocated_encoding(s);
2855         return;
2856     }
2857 
2858     if (is_vector) {
2859         size = 2 + opc;
2860     } else if (opc == 1 && !is_load) {
2861         /* STGP */
2862         if (!dc_isar_feature(aa64_mte_insn_reg, s) || index == 0) {
2863             unallocated_encoding(s);
2864             return;
2865         }
2866         size = 3;
2867         set_tag = true;
2868     } else {
2869         size = 2 + extract32(opc, 1, 1);
2870         is_signed = extract32(opc, 0, 1);
2871         if (!is_load && is_signed) {
2872             unallocated_encoding(s);
2873             return;
2874         }
2875     }
2876 
2877     switch (index) {
2878     case 1: /* post-index */
2879         postindex = true;
2880         wback = true;
2881         break;
2882     case 0:
2883         /* signed offset with "non-temporal" hint. Since we don't emulate
2884          * caches, we don't care about hints about data access
2885          * patterns, and handle this identically to a plain
2886          * signed offset.
2887          */
2888         if (is_signed) {
2889             /* There is no non-temporal-hint version of LDPSW */
2890             unallocated_encoding(s);
2891             return;
2892         }
2893         postindex = false;
2894         break;
2895     case 2: /* signed offset, rn not updated */
2896         postindex = false;
2897         break;
2898     case 3: /* pre-index */
2899         postindex = false;
2900         wback = true;
2901         break;
2902     }
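     /*
      * For example, LDP x0, x1, [sp], #16 is post-index (access at sp,
      * then writeback sp += 16) and LDP x0, x1, [sp, #16]! is pre-index.
      */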
2903 
2904     if (is_vector && !fp_access_check(s)) {
2905         return;
2906     }
2907 
2908     offset <<= (set_tag ? LOG2_TAG_GRANULE : size);
2909 
2910     if (rn == 31) {
2911         gen_check_sp_alignment(s);
2912     }
2913 
2914     dirty_addr = read_cpu_reg_sp(s, rn, 1);
2915     if (!postindex) {
2916         tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
2917     }
2918 
2919     if (set_tag) {
2920         if (!s->ata) {
2921             /*
2922              * TODO: We could rely on the stores below, at least for
2923              * system mode, if we arrange to add MO_ALIGN_16.
2924              */
2925             gen_helper_stg_stub(cpu_env, dirty_addr);
2926         } else if (tb_cflags(s->base.tb) & CF_PARALLEL) {
2927             gen_helper_stg_parallel(cpu_env, dirty_addr, dirty_addr);
2928         } else {
2929             gen_helper_stg(cpu_env, dirty_addr, dirty_addr);
2930         }
2931     }
2932 
2933     clean_addr = gen_mte_checkN(s, dirty_addr, !is_load,
2934                                 (wback || rn != 31) && !set_tag, 2 << size);
2935 
2936     if (is_vector) {
2937         if (is_load) {
2938             do_fp_ld(s, rt, clean_addr, size);
2939         } else {
2940             do_fp_st(s, rt, clean_addr, size);
2941         }
2942         tcg_gen_addi_i64(clean_addr, clean_addr, 1 << size);
2943         if (is_load) {
2944             do_fp_ld(s, rt2, clean_addr, size);
2945         } else {
2946             do_fp_st(s, rt2, clean_addr, size);
2947         }
2948     } else {
2949         TCGv_i64 tcg_rt = cpu_reg(s, rt);
2950         TCGv_i64 tcg_rt2 = cpu_reg(s, rt2);
2951 
2952         if (is_load) {
2953             TCGv_i64 tmp = tcg_temp_new_i64();
2954 
2955             /* Do not modify tcg_rt before recognizing any exception
2956              * from the second load.
2957              */
2958             do_gpr_ld(s, tmp, clean_addr, size + is_signed * MO_SIGN,
2959                       false, false, 0, false, false);
2960             tcg_gen_addi_i64(clean_addr, clean_addr, 1 << size);
2961             do_gpr_ld(s, tcg_rt2, clean_addr, size + is_signed * MO_SIGN,
2962                       false, false, 0, false, false);
2963 
2964             tcg_gen_mov_i64(tcg_rt, tmp);
2965         } else {
2966             do_gpr_st(s, tcg_rt, clean_addr, size,
2967                       false, 0, false, false);
2968             tcg_gen_addi_i64(clean_addr, clean_addr, 1 << size);
2969             do_gpr_st(s, tcg_rt2, clean_addr, size,
2970                       false, 0, false, false);
2971         }
2972     }
2973 
2974     if (wback) {
2975         if (postindex) {
2976             tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
2977         }
2978         tcg_gen_mov_i64(cpu_reg_sp(s, rn), dirty_addr);
2979     }
2980 }
2981 
2982 /*
2983  * Load/store (immediate post-indexed)
2984  * Load/store (immediate pre-indexed)
2985  * Load/store (unscaled immediate)
2986  *
2987  * 31 30 29   27  26 25 24 23 22 21  20    12 11 10 9    5 4    0
2988  * +----+-------+---+-----+-----+---+--------+-----+------+------+
2989  * |size| 1 1 1 | V | 0 0 | opc | 0 |  imm9  | idx |  Rn  |  Rt  |
2990  * +----+-------+---+-----+-----+---+--------+-----+------+------+
2991  *
2992  * idx = 01 -> post-indexed, 11 pre-indexed, 00 unscaled imm. (no writeback)
2993  *       10 -> unprivileged
2994  * V = 0 -> non-vector
2995  * size: 00 -> 8 bit, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64bit
2996  * opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
2997  */
2998 static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn,
2999                                 int opc,
3000                                 int size,
3001                                 int rt,
3002                                 bool is_vector)
3003 {
3004     int rn = extract32(insn, 5, 5);
3005     int imm9 = sextract32(insn, 12, 9);
3006     int idx = extract32(insn, 10, 2);
3007     bool is_signed = false;
3008     bool is_store = false;
3009     bool is_extended = false;
3010     bool is_unpriv = (idx == 2);
3011     bool iss_valid;
3012     bool post_index;
3013     bool writeback;
3014     int memidx;
3015 
3016     TCGv_i64 clean_addr, dirty_addr;
3017 
3018     if (is_vector) {
3019         size |= (opc & 2) << 1;
3020         if (size > 4 || is_unpriv) {
3021             unallocated_encoding(s);
3022             return;
3023         }
3024         is_store = ((opc & 1) == 0);
3025         if (!fp_access_check(s)) {
3026             return;
3027         }
3028     } else {
3029         if (size == 3 && opc == 2) {
3030             /* PRFM - prefetch */
3031             if (idx != 0) {
3032                 unallocated_encoding(s);
3033                 return;
3034             }
3035             return;
3036         }
3037         if (opc == 3 && size > 1) {
3038             unallocated_encoding(s);
3039             return;
3040         }
3041         is_store = (opc == 0);
3042         is_signed = extract32(opc, 1, 1);
3043         is_extended = (size < 3) && extract32(opc, 0, 1);
3044     }
3045 
3046     switch (idx) {
3047     case 0:
3048     case 2:
3049         post_index = false;
3050         writeback = false;
3051         break;
3052     case 1:
3053         post_index = true;
3054         writeback = true;
3055         break;
3056     case 3:
3057         post_index = false;
3058         writeback = true;
3059         break;
3060     default:
3061         g_assert_not_reached();
3062     }
3063 
3064     iss_valid = !is_vector && !writeback;
3065 
3066     if (rn == 31) {
3067         gen_check_sp_alignment(s);
3068     }
3069 
3070     dirty_addr = read_cpu_reg_sp(s, rn, 1);
3071     if (!post_index) {
3072         tcg_gen_addi_i64(dirty_addr, dirty_addr, imm9);
3073     }
3074 
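     /*
      * The unprivileged forms (LDTR/STTR, idx == 2) may use the EL0
      * mmu index even when executed at a higher EL, hence the
      * separate memidx for the access below.
      */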
3075     memidx = is_unpriv ? get_a64_user_mem_index(s) : get_mem_index(s);
3076     clean_addr = gen_mte_check1_mmuidx(s, dirty_addr, is_store,
3077                                        writeback || rn != 31,
3078                                        size, is_unpriv, memidx);
3079 
3080     if (is_vector) {
3081         if (is_store) {
3082             do_fp_st(s, rt, clean_addr, size);
3083         } else {
3084             do_fp_ld(s, rt, clean_addr, size);
3085         }
3086     } else {
3087         TCGv_i64 tcg_rt = cpu_reg(s, rt);
3088         bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
3089 
3090         if (is_store) {
3091             do_gpr_st_memidx(s, tcg_rt, clean_addr, size, memidx,
3092                              iss_valid, rt, iss_sf, false);
3093         } else {
3094             do_gpr_ld_memidx(s, tcg_rt, clean_addr, size + is_signed * MO_SIGN,
3095                              is_extended, memidx,
3096                              iss_valid, rt, iss_sf, false);
3097         }
3098     }
3099 
3100     if (writeback) {
3101         TCGv_i64 tcg_rn = cpu_reg_sp(s, rn);
3102         if (post_index) {
3103             tcg_gen_addi_i64(dirty_addr, dirty_addr, imm9);
3104         }
3105         tcg_gen_mov_i64(tcg_rn, dirty_addr);
3106     }
3107 }
3108 
3109 /*
3110  * Load/store (register offset)
3111  *
3112  * 31 30 29   27  26 25 24 23 22 21  20  16 15 13 12 11 10 9  5 4  0
3113  * +----+-------+---+-----+-----+---+------+-----+--+-----+----+----+
3114  * |size| 1 1 1 | V | 0 0 | opc | 1 |  Rm  | opt | S| 1 0 | Rn | Rt |
3115  * +----+-------+---+-----+-----+---+------+-----+--+-----+----+----+
3116  *
3117  * For non-vector:
3118  *   size: 00-> byte, 01 -> 16 bit, 10 -> 32bit, 11 -> 64bit
3119  *   opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
3120  * For vector:
3121  *   size is opc<1>:size<1:0> so 100 -> 128 bit; 110 and 111 unallocated
3122  *   opc<0>: 0 -> store, 1 -> load
3123  * V: 1 -> vector/simd
3124  * opt: extend encoding (see DecodeRegExtend)
3125  * S: if S=1 then scale (essentially index by sizeof(size))
3126  * Rt: register to transfer into/out of
3127  * Rn: address register or SP for base
3128  * Rm: offset register or ZR for offset
3129  */
3130 static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn,
3131                                    int opc,
3132                                    int size,
3133                                    int rt,
3134                                    bool is_vector)
3135 {
3136     int rn = extract32(insn, 5, 5);
3137     int shift = extract32(insn, 12, 1);
3138     int rm = extract32(insn, 16, 5);
3139     int opt = extract32(insn, 13, 3);
3140     bool is_signed = false;
3141     bool is_store = false;
3142     bool is_extended = false;
3143 
3144     TCGv_i64 tcg_rm, clean_addr, dirty_addr;
3145 
3146     if (extract32(opt, 1, 1) == 0) {
3147         unallocated_encoding(s);
3148         return;
3149     }
3150 
3151     if (is_vector) {
3152         size |= (opc & 2) << 1;
3153         if (size > 4) {
3154             unallocated_encoding(s);
3155             return;
3156         }
3157         is_store = !extract32(opc, 0, 1);
3158         if (!fp_access_check(s)) {
3159             return;
3160         }
3161     } else {
3162         if (size == 3 && opc == 2) {
3163             /* PRFM - prefetch */
3164             return;
3165         }
3166         if (opc == 3 && size > 1) {
3167             unallocated_encoding(s);
3168             return;
3169         }
3170         is_store = (opc == 0);
3171         is_signed = extract32(opc, 1, 1);
3172         is_extended = (size < 3) && extract32(opc, 0, 1);
3173     }
3174 
3175     if (rn == 31) {
3176         gen_check_sp_alignment(s);
3177     }
3178     dirty_addr = read_cpu_reg_sp(s, rn, 1);
3179 
3180     tcg_rm = read_cpu_reg(s, rm, 1);
3181     ext_and_shift_reg(tcg_rm, tcg_rm, opt, shift ? size : 0);
3182 
3183     tcg_gen_add_i64(dirty_addr, dirty_addr, tcg_rm);
3184     clean_addr = gen_mte_check1(s, dirty_addr, is_store, true, size);
3185 
3186     if (is_vector) {
3187         if (is_store) {
3188             do_fp_st(s, rt, clean_addr, size);
3189         } else {
3190             do_fp_ld(s, rt, clean_addr, size);
3191         }
3192     } else {
3193         TCGv_i64 tcg_rt = cpu_reg(s, rt);
3194         bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
3195         if (is_store) {
3196             do_gpr_st(s, tcg_rt, clean_addr, size,
3197                       true, rt, iss_sf, false);
3198         } else {
3199             do_gpr_ld(s, tcg_rt, clean_addr, size + is_signed * MO_SIGN,
3200                       is_extended, true, rt, iss_sf, false);
3201         }
3202     }
3203 }
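
/*
 * Worked example: "LDR W0, [X1, W2, UXTW #2]" has size = 2, opt = 0b010
 * (UXTW) and S = 1, so ext_and_shift_reg() zero-extends W2 and shifts it
 * left by size, giving an effective address of X1 + (UXTW(W2) << 2).
 * With S = 0 the register offset is used unscaled.
 */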
3204 
3205 /*
3206  * Load/store (unsigned immediate)
3207  *
3208  * 31 30 29   27  26 25 24 23 22 21        10 9     5 4    0
3209  * +----+-------+---+-----+-----+------------+-------+------+
3210  * |size| 1 1 1 | V | 0 1 | opc |   imm12    |  Rn   |  Rt  |
3211  * +----+-------+---+-----+-----+------------+-------+------+
3212  *
3213  * For non-vector:
3214  *   size: 00-> byte, 01 -> 16 bit, 10 -> 32bit, 11 -> 64bit
3215  *   opc: 00 -> store, 01 -> load unsigned, 10 -> load signed 64-bit, 11 -> load signed 32-bit
3216  * For vector:
3217  *   size is opc<1>:size<1:0> so 100 -> 128 bit; 110 and 111 unallocated
3218  *   opc<0>: 0 -> store, 1 -> load
3219  * Rn: base address register (inc SP)
3220  * Rt: target register
3221  */
3222 static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn,
3223                                         int opc,
3224                                         int size,
3225                                         int rt,
3226                                         bool is_vector)
3227 {
3228     int rn = extract32(insn, 5, 5);
3229     unsigned int imm12 = extract32(insn, 10, 12);
3230     unsigned int offset;
3231 
3232     TCGv_i64 clean_addr, dirty_addr;
3233 
3234     bool is_store;
3235     bool is_signed = false;
3236     bool is_extended = false;
3237 
3238     if (is_vector) {
3239         size |= (opc & 2) << 1;
3240         if (size > 4) {
3241             unallocated_encoding(s);
3242             return;
3243         }
3244         is_store = !extract32(opc, 0, 1);
3245         if (!fp_access_check(s)) {
3246             return;
3247         }
3248     } else {
3249         if (size == 3 && opc == 2) {
3250             /* PRFM - prefetch */
3251             return;
3252         }
3253         if (opc == 3 && size > 1) {
3254             unallocated_encoding(s);
3255             return;
3256         }
3257         is_store = (opc == 0);
3258         is_signed = extract32(opc, 1, 1);
3259         is_extended = (size < 3) && extract32(opc, 0, 1);
3260     }
3261 
3262     if (rn == 31) {
3263         gen_check_sp_alignment(s);
3264     }
3265     dirty_addr = read_cpu_reg_sp(s, rn, 1);
3266     offset = imm12 << size;
3267     tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
3268     clean_addr = gen_mte_check1(s, dirty_addr, is_store, rn != 31, size);
3269 
3270     if (is_vector) {
3271         if (is_store) {
3272             do_fp_st(s, rt, clean_addr, size);
3273         } else {
3274             do_fp_ld(s, rt, clean_addr, size);
3275         }
3276     } else {
3277         TCGv_i64 tcg_rt = cpu_reg(s, rt);
3278         bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
3279         if (is_store) {
3280             do_gpr_st(s, tcg_rt, clean_addr, size,
3281                       true, rt, iss_sf, false);
3282         } else {
3283             do_gpr_ld(s, tcg_rt, clean_addr, size + is_signed * MO_SIGN,
3284                       is_extended, true, rt, iss_sf, false);
3285         }
3286     }
3287 }
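
/*
 * Worked example: "LDR X0, [X1, #16]" has size = 3 and imm12 = 2, so
 * offset = imm12 << size = 16.  The immediate is unsigned and scaled,
 * covering byte offsets 0 .. 4095 << size.
 */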
3288 
3289 /* Atomic memory operations
3290  *
3291  *  31  30      27  26    24    22  21   16   15    12    10    5     0
3292  * +------+-------+---+-----+-----+---+----+----+-----+-----+----+-----+
3293  * | size | 1 1 1 | V | 0 0 | A R | 1 | Rs | o3 | opc | 0 0 | Rn |  Rt |
3294  * +------+-------+---+-----+-----+---+----+----+-----+-----+----+-----+
3295  *
3296  * Rt: the result register
3297  * Rn: base address or SP
3298  * Rs: the source register for the operation
3299  * V: vector flag (always 0 as of v8.3)
3300  * A: acquire flag
3301  * R: release flag
3302  */
3303 static void disas_ldst_atomic(DisasContext *s, uint32_t insn,
3304                               int size, int rt, bool is_vector)
3305 {
3306     int rs = extract32(insn, 16, 5);
3307     int rn = extract32(insn, 5, 5);
3308     int o3_opc = extract32(insn, 12, 4);
3309     bool r = extract32(insn, 22, 1);
3310     bool a = extract32(insn, 23, 1);
3311     TCGv_i64 tcg_rs, tcg_rt, clean_addr;
3312     AtomicThreeOpFn *fn = NULL;
3313     MemOp mop = s->be_data | size | MO_ALIGN;
3314 
3315     if (is_vector || !dc_isar_feature(aa64_atomics, s)) {
3316         unallocated_encoding(s);
3317         return;
3318     }
3319     switch (o3_opc) {
3320     case 000: /* LDADD */
3321         fn = tcg_gen_atomic_fetch_add_i64;
3322         break;
3323     case 001: /* LDCLR */
3324         fn = tcg_gen_atomic_fetch_and_i64;
3325         break;
3326     case 002: /* LDEOR */
3327         fn = tcg_gen_atomic_fetch_xor_i64;
3328         break;
3329     case 003: /* LDSET */
3330         fn = tcg_gen_atomic_fetch_or_i64;
3331         break;
3332     case 004: /* LDSMAX */
3333         fn = tcg_gen_atomic_fetch_smax_i64;
3334         mop |= MO_SIGN;
3335         break;
3336     case 005: /* LDSMIN */
3337         fn = tcg_gen_atomic_fetch_smin_i64;
3338         mop |= MO_SIGN;
3339         break;
3340     case 006: /* LDUMAX */
3341         fn = tcg_gen_atomic_fetch_umax_i64;
3342         break;
3343     case 007: /* LDUMIN */
3344         fn = tcg_gen_atomic_fetch_umin_i64;
3345         break;
3346     case 010: /* SWP */
3347         fn = tcg_gen_atomic_xchg_i64;
3348         break;
3349     case 014: /* LDAPR, LDAPRH, LDAPRB */
3350         if (!dc_isar_feature(aa64_rcpc_8_3, s) ||
3351             rs != 31 || a != 1 || r != 0) {
3352             unallocated_encoding(s);
3353             return;
3354         }
3355         break;
3356     default:
3357         unallocated_encoding(s);
3358         return;
3359     }
3360 
3361     if (rn == 31) {
3362         gen_check_sp_alignment(s);
3363     }
3364     clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), false, rn != 31, size);
3365 
3366     if (o3_opc == 014) {
3367         /*
3368          * LDAPR* are a special case because they are a simple load, not a
3369          * fetch-and-do-something op.
3370          * The architectural consistency requirements here are weaker than
3371          * full load-acquire (we only need "load-acquire processor consistent"),
3372          * but we choose to implement them as full LDAQ.
3373          */
3374         do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size, false,
3375                   true, rt, disas_ldst_compute_iss_sf(size, false, 0), true);
3376         tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
3377         return;
3378     }
3379 
3380     tcg_rs = read_cpu_reg(s, rs, true);
3381     tcg_rt = cpu_reg(s, rt);
3382 
3383     if (o3_opc == 001) { /* LDCLR */
3384         tcg_gen_not_i64(tcg_rs, tcg_rs);
3385     }
3386 
3387     /* The tcg atomic primitives are all full barriers.  Therefore we
3388      * can ignore the Acquire and Release bits of this instruction.
3389      */
3390     fn(tcg_rt, clean_addr, tcg_rs, get_mem_index(s), mop);
3391 
3392     if ((mop & MO_SIGN) && size != MO_64) {
3393         tcg_gen_ext32u_i64(tcg_rt, tcg_rt);
3394     }
3395 }
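
/*
 * Worked example: "LDADD W1, W0, [X2]" atomically loads the old word at
 * [X2] into W0 and stores old + W1 back.  LDCLR is the one outlier: the
 * architectural operation is AND NOT(Rs), which is why tcg_rs is
 * inverted above before the tcg_gen_atomic_fetch_and_i64 call.
 */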
3396 
3397 /*
3398  * PAC memory operations
3399  *
3400  *  31  30      27  26    24    22  21       12  11  10    5     0
3401  * +------+-------+---+-----+-----+---+--------+---+---+----+-----+
3402  * | size | 1 1 1 | V | 0 0 | M S | 1 |  imm9  | W | 1 | Rn |  Rt |
3403  * +------+-------+---+-----+-----+---+--------+---+---+----+-----+
3404  *
3405  * Rt: the result register
3406  * Rn: base address or SP
3407  * V: vector flag (always 0 as of v8.3)
3408  * M: clear for key DA, set for key DB
3409  * W: pre-indexing flag
3410  * S: sign for imm9.
3411  */
3412 static void disas_ldst_pac(DisasContext *s, uint32_t insn,
3413                            int size, int rt, bool is_vector)
3414 {
3415     int rn = extract32(insn, 5, 5);
3416     bool is_wback = extract32(insn, 11, 1);
3417     bool use_key_a = !extract32(insn, 23, 1);
3418     int offset;
3419     TCGv_i64 clean_addr, dirty_addr, tcg_rt;
3420 
3421     if (size != 3 || is_vector || !dc_isar_feature(aa64_pauth, s)) {
3422         unallocated_encoding(s);
3423         return;
3424     }
3425 
3426     if (rn == 31) {
3427         gen_check_sp_alignment(s);
3428     }
3429     dirty_addr = read_cpu_reg_sp(s, rn, 1);
3430 
3431     if (s->pauth_active) {
3432         if (use_key_a) {
3433             gen_helper_autda(dirty_addr, cpu_env, dirty_addr,
3434                              tcg_constant_i64(0));
3435         } else {
3436             gen_helper_autdb(dirty_addr, cpu_env, dirty_addr,
3437                              tcg_constant_i64(0));
3438         }
3439     }
3440 
3441     /* Form the 10-bit signed, scaled offset.  */
3442     offset = (extract32(insn, 22, 1) << 9) | extract32(insn, 12, 9);
3443     offset = sextract32(offset << size, 0, 10 + size);
3444     tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
3445 
3446     /* Note that "clean" and "dirty" here refer to TBI not PAC.  */
3447     clean_addr = gen_mte_check1(s, dirty_addr, false,
3448                                 is_wback || rn != 31, size);
3449 
3450     tcg_rt = cpu_reg(s, rt);
3451     do_gpr_ld(s, tcg_rt, clean_addr, size,
3452               /* extend */ false, /* iss_valid */ !is_wback,
3453               /* iss_srt */ rt, /* iss_sf */ true, /* iss_ar */ false);
3454 
3455     if (is_wback) {
3456         tcg_gen_mov_i64(cpu_reg_sp(s, rn), dirty_addr);
3457     }
3458 }
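
/*
 * Worked example of the offset: with size == 3 the 10-bit S:imm9 field
 * is scaled and sign-extended to 13 bits, so the LDRAA/LDRAB immediates
 * are multiples of 8 in the range -4096 .. +4088.
 */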
3459 
3460 /*
3461  * LDAPR/STLR (unscaled immediate)
3462  *
3463  *  31  30            24    22  21       12    10    5     0
3464  * +------+-------------+-----+---+--------+-----+----+-----+
3465  * | size | 0 1 1 0 0 1 | opc | 0 |  imm9  | 0 0 | Rn |  Rt |
3466  * +------+-------------+-----+---+--------+-----+----+-----+
3467  *
3468  * Rt: source or destination register
3469  * Rn: base register
3470  * imm9: unscaled immediate offset
3471  * opc: 00: STLUR*, 01/10/11: various LDAPUR*
3472  * size: size of load/store
3473  */
3474 static void disas_ldst_ldapr_stlr(DisasContext *s, uint32_t insn)
3475 {
3476     int rt = extract32(insn, 0, 5);
3477     int rn = extract32(insn, 5, 5);
3478     int offset = sextract32(insn, 12, 9);
3479     int opc = extract32(insn, 22, 2);
3480     int size = extract32(insn, 30, 2);
3481     TCGv_i64 clean_addr, dirty_addr;
3482     bool is_store = false;
3483     bool extend = false;
3484     bool iss_sf;
3485     MemOp mop;
3486 
3487     if (!dc_isar_feature(aa64_rcpc_8_4, s)) {
3488         unallocated_encoding(s);
3489         return;
3490     }
3491 
3492     /* TODO: ARMv8.4-LSE SCTLR.nAA */
3493     mop = size | MO_ALIGN;
3494 
3495     switch (opc) {
3496     case 0: /* STLURB */
3497         is_store = true;
3498         break;
3499     case 1: /* LDAPUR* */
3500         break;
3501     case 2: /* LDAPURS* 64-bit variant */
3502         if (size == 3) {
3503             unallocated_encoding(s);
3504             return;
3505         }
3506         mop |= MO_SIGN;
3507         break;
3508     case 3: /* LDAPURS* 32-bit variant */
3509         if (size > 1) {
3510             unallocated_encoding(s);
3511             return;
3512         }
3513         mop |= MO_SIGN;
3514         extend = true; /* zero-extend 32->64 after signed load */
3515         break;
3516     default:
3517         g_assert_not_reached();
3518     }
3519 
3520     iss_sf = disas_ldst_compute_iss_sf(size, (mop & MO_SIGN) != 0, opc);
3521 
3522     if (rn == 31) {
3523         gen_check_sp_alignment(s);
3524     }
3525 
3526     dirty_addr = read_cpu_reg_sp(s, rn, 1);
3527     tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
3528     clean_addr = clean_data_tbi(s, dirty_addr);
3529 
3530     if (is_store) {
3531         /* Store-Release semantics */
3532         tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
3533         do_gpr_st(s, cpu_reg(s, rt), clean_addr, mop, true, rt, iss_sf, true);
3534     } else {
3535         /*
3536          * Load-AcquirePC semantics; we implement as the slightly more
3537          * restrictive Load-Acquire.
3538          */
3539         do_gpr_ld(s, cpu_reg(s, rt), clean_addr, mop,
3540                   extend, true, rt, iss_sf, true);
3541         tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
3542     }
3543 }
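
/*
 * Encoding examples: opc = 0 gives the STLUR* stores; opc = 1 the
 * zero-extending LDAPUR* loads; opc = 2 the sign-extending loads with a
 * 64-bit destination (e.g. LDAPURSW X0, [X1]); opc = 3 the sign-extending
 * loads with a 32-bit destination, where @extend re-zero-extends the
 * 32-bit result into the 64-bit register.
 */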
3544 
3545 /* Load/store register (all forms) */
3546 static void disas_ldst_reg(DisasContext *s, uint32_t insn)
3547 {
3548     int rt = extract32(insn, 0, 5);
3549     int opc = extract32(insn, 22, 2);
3550     bool is_vector = extract32(insn, 26, 1);
3551     int size = extract32(insn, 30, 2);
3552 
3553     switch (extract32(insn, 24, 2)) {
3554     case 0:
3555         if (extract32(insn, 21, 1) == 0) {
3556             /* Load/store register (unscaled immediate)
3557              * Load/store immediate pre/post-indexed
3558              * Load/store register unprivileged
3559              */
3560             disas_ldst_reg_imm9(s, insn, opc, size, rt, is_vector);
3561             return;
3562         }
3563         switch (extract32(insn, 10, 2)) {
3564         case 0:
3565             disas_ldst_atomic(s, insn, size, rt, is_vector);
3566             return;
3567         case 2:
3568             disas_ldst_reg_roffset(s, insn, opc, size, rt, is_vector);
3569             return;
3570         default:
3571             disas_ldst_pac(s, insn, size, rt, is_vector);
3572             return;
3573         }
3574         break;
3575     case 1:
3576         disas_ldst_reg_unsigned_imm(s, insn, opc, size, rt, is_vector);
3577         return;
3578     }
3579     unallocated_encoding(s);
3580 }
3581 
3582 /* AdvSIMD load/store multiple structures
3583  *
3584  *  31  30  29           23 22  21         16 15    12 11  10 9    5 4    0
3585  * +---+---+---------------+---+-------------+--------+------+------+------+
3586  * | 0 | Q | 0 0 1 1 0 0 0 | L | 0 0 0 0 0 0 | opcode | size |  Rn  |  Rt  |
3587  * +---+---+---------------+---+-------------+--------+------+------+------+
3588  *
3589  * AdvSIMD load/store multiple structures (post-indexed)
3590  *
3591  *  31  30  29           23 22  21  20     16 15    12 11  10 9    5 4    0
3592  * +---+---+---------------+---+---+---------+--------+------+------+------+
3593  * | 0 | Q | 0 0 1 1 0 0 1 | L | 0 |   Rm    | opcode | size |  Rn  |  Rt  |
3594  * +---+---+---------------+---+---+---------+--------+------+------+------+
3595  *
3596  * Rt: first (or only) SIMD&FP register to be transferred
3597  * Rn: base address or SP
3598  * Rm (post-index only): post-index register (when !31) or size dependent #imm
3599  */
3600 static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
3601 {
3602     int rt = extract32(insn, 0, 5);
3603     int rn = extract32(insn, 5, 5);
3604     int rm = extract32(insn, 16, 5);
3605     int size = extract32(insn, 10, 2);
3606     int opcode = extract32(insn, 12, 4);
3607     bool is_store = !extract32(insn, 22, 1);
3608     bool is_postidx = extract32(insn, 23, 1);
3609     bool is_q = extract32(insn, 30, 1);
3610     TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;
3611     MemOp endian, align, mop;
3612 
3613     int total;    /* total bytes */
3614     int elements; /* elements per vector */
3615     int rpt;    /* num iterations */
3616     int selem;  /* structure elements */
3617     int r;
3618 
3619     if (extract32(insn, 31, 1) || extract32(insn, 21, 1)) {
3620         unallocated_encoding(s);
3621         return;
3622     }
3623 
3624     if (!is_postidx && rm != 0) {
3625         unallocated_encoding(s);
3626         return;
3627     }
3628 
3629     /* From the shared decode logic */
3630     switch (opcode) {
3631     case 0x0:
3632         rpt = 1;
3633         selem = 4;
3634         break;
3635     case 0x2:
3636         rpt = 4;
3637         selem = 1;
3638         break;
3639     case 0x4:
3640         rpt = 1;
3641         selem = 3;
3642         break;
3643     case 0x6:
3644         rpt = 3;
3645         selem = 1;
3646         break;
3647     case 0x7:
3648         rpt = 1;
3649         selem = 1;
3650         break;
3651     case 0x8:
3652         rpt = 1;
3653         selem = 2;
3654         break;
3655     case 0xa:
3656         rpt = 2;
3657         selem = 1;
3658         break;
3659     default:
3660         unallocated_encoding(s);
3661         return;
3662     }
3663 
3664     if (size == 3 && !is_q && selem != 1) {
3665         /* reserved */
3666         unallocated_encoding(s);
3667         return;
3668     }
3669 
3670     if (!fp_access_check(s)) {
3671         return;
3672     }
3673 
3674     if (rn == 31) {
3675         gen_check_sp_alignment(s);
3676     }
3677 
3678     /* For our purposes, bytes are always little-endian.  */
3679     endian = s->be_data;
3680     if (size == 0) {
3681         endian = MO_LE;
3682     }
3683 
3684     total = rpt * selem * (is_q ? 16 : 8);
3685     tcg_rn = cpu_reg_sp(s, rn);
3686 
3687     /*
3688      * Issue the MTE check vs the logical repeat count, before we
3689      * promote consecutive little-endian elements below.
3690      */
3691     clean_addr = gen_mte_checkN(s, tcg_rn, is_store, is_postidx || rn != 31,
3692                                 total);
3693 
3694     /*
3695      * Consecutive little-endian elements from a single register
3696      * can be promoted to a larger little-endian operation.
3697      */
3698     align = MO_ALIGN;
3699     if (selem == 1 && endian == MO_LE) {
3700         align = pow2_align(size);
3701         size = 3;
3702     }
3703     if (!s->align_mem) {
3704         align = 0;
3705     }
3706     mop = endian | size | align;
3707 
3708     elements = (is_q ? 16 : 8) >> size;
3709     tcg_ebytes = tcg_constant_i64(1 << size);
3710     for (r = 0; r < rpt; r++) {
3711         int e;
3712         for (e = 0; e < elements; e++) {
3713             int xs;
3714             for (xs = 0; xs < selem; xs++) {
3715                 int tt = (rt + r + xs) % 32;
3716                 if (is_store) {
3717                     do_vec_st(s, tt, e, clean_addr, mop);
3718                 } else {
3719                     do_vec_ld(s, tt, e, clean_addr, mop);
3720                 }
3721                 tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes);
3722             }
3723         }
3724     }
3725 
3726     if (!is_store) {
3727         /* For non-quad operations, setting a slice of the low
3728          * 64 bits of the register clears the high 64 bits (in
3729          * the ARM ARM pseudocode this is implicit in the fact
3730          * that 'rval' is a 64 bit wide variable).
3731          * For quad operations, we might still need to zero the
3732          * high bits of SVE.
3733          */
3734         for (r = 0; r < rpt * selem; r++) {
3735             int tt = (rt + r) % 32;
3736             clear_vec_high(s, is_q, tt);
3737         }
3738     }
3739 
3740     if (is_postidx) {
3741         if (rm == 31) {
3742             tcg_gen_addi_i64(tcg_rn, tcg_rn, total);
3743         } else {
3744             tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm));
3745         }
3746     }
3747 }
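
/*
 * Worked example: "LD4 {V0.16B-V3.16B}, [X0]" has opcode 0x0 (rpt = 1,
 * selem = 4), size = 0 and Q = 1, so total = 1 * 4 * 16 = 64 bytes and
 * the loop de-interleaves 16 sets of 4 byte elements into V0..V3.
 * Post-indexed with Rm == 31, the base X0 advances by #64.
 */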
3748 
3749 /* AdvSIMD load/store single structure
3750  *
3751  *  31  30  29           23 22 21 20       16 15 13 12  11  10 9    5 4    0
3752  * +---+---+---------------+-----+-----------+-----+---+------+------+------+
3753  * | 0 | Q | 0 0 1 1 0 1 0 | L R | 0 0 0 0 0 | opc | S | size |  Rn  |  Rt  |
3754  * +---+---+---------------+-----+-----------+-----+---+------+------+------+
3755  *
3756  * AdvSIMD load/store single structure (post-indexed)
3757  *
3758  *  31  30  29           23 22 21 20       16 15 13 12  11  10 9    5 4    0
3759  * +---+---+---------------+-----+-----------+-----+---+------+------+------+
3760  * | 0 | Q | 0 0 1 1 0 1 1 | L R |     Rm    | opc | S | size |  Rn  |  Rt  |
3761  * +---+---+---------------+-----+-----------+-----+---+------+------+------+
3762  *
3763  * Rt: first (or only) SIMD&FP register to be transferred
3764  * Rn: base address or SP
3765  * Rm (post-index only): post-index register (when !31) or size dependent #imm
3766  * index = encoded in Q:S:size dependent on size
3767  *
3768  * lane_size = encoded in R, opc
3769  * transfer width = encoded in opc, S, size
3770  */
3771 static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
3772 {
3773     int rt = extract32(insn, 0, 5);
3774     int rn = extract32(insn, 5, 5);
3775     int rm = extract32(insn, 16, 5);
3776     int size = extract32(insn, 10, 2);
3777     int S = extract32(insn, 12, 1);
3778     int opc = extract32(insn, 13, 3);
3779     int R = extract32(insn, 21, 1);
3780     int is_load = extract32(insn, 22, 1);
3781     int is_postidx = extract32(insn, 23, 1);
3782     int is_q = extract32(insn, 30, 1);
3783 
3784     int scale = extract32(opc, 1, 2);
3785     int selem = (extract32(opc, 0, 1) << 1 | R) + 1;
3786     bool replicate = false;
3787     int index = is_q << 3 | S << 2 | size;
3788     int xs, total;
3789     TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;
3790     MemOp mop;
3791 
3792     if (extract32(insn, 31, 1)) {
3793         unallocated_encoding(s);
3794         return;
3795     }
3796     if (!is_postidx && rm != 0) {
3797         unallocated_encoding(s);
3798         return;
3799     }
3800 
3801     switch (scale) {
3802     case 3:
3803         if (!is_load || S) {
3804             unallocated_encoding(s);
3805             return;
3806         }
3807         scale = size;
3808         replicate = true;
3809         break;
3810     case 0:
3811         break;
3812     case 1:
3813         if (extract32(size, 0, 1)) {
3814             unallocated_encoding(s);
3815             return;
3816         }
3817         index >>= 1;
3818         break;
3819     case 2:
3820         if (extract32(size, 1, 1)) {
3821             unallocated_encoding(s);
3822             return;
3823         }
3824         if (!extract32(size, 0, 1)) {
3825             index >>= 2;
3826         } else {
3827             if (S) {
3828                 unallocated_encoding(s);
3829                 return;
3830             }
3831             index >>= 3;
3832             scale = 3;
3833         }
3834         break;
3835     default:
3836         g_assert_not_reached();
3837     }
3838 
3839     if (!fp_access_check(s)) {
3840         return;
3841     }
3842 
3843     if (rn == 31) {
3844         gen_check_sp_alignment(s);
3845     }
3846 
3847     total = selem << scale;
3848     tcg_rn = cpu_reg_sp(s, rn);
3849 
3850     clean_addr = gen_mte_checkN(s, tcg_rn, !is_load, is_postidx || rn != 31,
3851                                 total);
3852     mop = finalize_memop(s, scale);
3853 
3854     tcg_ebytes = tcg_constant_i64(1 << scale);
3855     for (xs = 0; xs < selem; xs++) {
3856         if (replicate) {
3857             /* Load and replicate to all elements */
3858             TCGv_i64 tcg_tmp = tcg_temp_new_i64();
3859 
3860             tcg_gen_qemu_ld_i64(tcg_tmp, clean_addr, get_mem_index(s), mop);
3861             tcg_gen_gvec_dup_i64(scale, vec_full_reg_offset(s, rt),
3862                                  (is_q + 1) * 8, vec_full_reg_size(s),
3863                                  tcg_tmp);
3864         } else {
3865             /* Load/store one element per register */
3866             if (is_load) {
3867                 do_vec_ld(s, rt, index, clean_addr, mop);
3868             } else {
3869                 do_vec_st(s, rt, index, clean_addr, mop);
3870             }
3871         }
3872         tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes);
3873         rt = (rt + 1) % 32;
3874     }
3875 
3876     if (is_postidx) {
3877         if (rm == 31) {
3878             tcg_gen_addi_i64(tcg_rn, tcg_rn, total);
3879         } else {
3880             tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm));
3881         }
3882     }
3883 }
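
/*
 * Worked example: "LD1R {V0.4S}, [X0]" has opc<2:1> = 3, so it takes the
 * replicate path: one 32-bit element is loaded and tcg_gen_gvec_dup_i64
 * broadcasts it to all four lanes.  A plain "LD1 {V0.S}[2], [X0]" instead
 * loads the single lane selected by the Q:S:size index.
 */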
3884 
3885 /*
3886  * Load/Store memory tags
3887  *
3888  *  31 30 29         24     22  21     12    10      5      0
3889  * +-----+-------------+-----+---+------+-----+------+------+
3890  * | 1 1 | 0 1 1 0 0 1 | op1 | 1 | imm9 | op2 |  Rn  |  Rt  |
3891  * +-----+-------------+-----+---+------+-----+------+------+
3892  */
3893 static void disas_ldst_tag(DisasContext *s, uint32_t insn)
3894 {
3895     int rt = extract32(insn, 0, 5);
3896     int rn = extract32(insn, 5, 5);
3897     uint64_t offset = sextract64(insn, 12, 9) << LOG2_TAG_GRANULE;
3898     int op2 = extract32(insn, 10, 2);
3899     int op1 = extract32(insn, 22, 2);
3900     bool is_load = false, is_pair = false, is_zero = false, is_mult = false;
3901     int index = 0;
3902     TCGv_i64 addr, clean_addr, tcg_rt;
3903 
3904     /* We checked insn bits [29:24,21] in the caller.  */
3905     if (extract32(insn, 30, 2) != 3) {
3906         goto do_unallocated;
3907     }
3908 
3909     /*
3910      * @index is a tri-state variable:
3911      * < 0 : post-index, writeback
3912      * = 0 : signed offset
3913      * > 0 : pre-index, writeback
3914      */
3915     switch (op1) {
3916     case 0:
3917         if (op2 != 0) {
3918             /* STG */
3919             index = op2 - 2;
3920         } else {
3921             /* STZGM */
3922             if (s->current_el == 0 || offset != 0) {
3923                 goto do_unallocated;
3924             }
3925             is_mult = is_zero = true;
3926         }
3927         break;
3928     case 1:
3929         if (op2 != 0) {
3930             /* STZG */
3931             is_zero = true;
3932             index = op2 - 2;
3933         } else {
3934             /* LDG */
3935             is_load = true;
3936         }
3937         break;
3938     case 2:
3939         if (op2 != 0) {
3940             /* ST2G */
3941             is_pair = true;
3942             index = op2 - 2;
3943         } else {
3944             /* STGM */
3945             if (s->current_el == 0 || offset != 0) {
3946                 goto do_unallocated;
3947             }
3948             is_mult = true;
3949         }
3950         break;
3951     case 3:
3952         if (op2 != 0) {
3953             /* STZ2G */
3954             is_pair = is_zero = true;
3955             index = op2 - 2;
3956         } else {
3957             /* LDGM */
3958             if (s->current_el == 0 || offset != 0) {
3959                 goto do_unallocated;
3960             }
3961             is_mult = is_load = true;
3962         }
3963         break;
3964 
3965     default:
3966     do_unallocated:
3967         unallocated_encoding(s);
3968         return;
3969     }
3970 
3971     if (is_mult
3972         ? !dc_isar_feature(aa64_mte, s)
3973         : !dc_isar_feature(aa64_mte_insn_reg, s)) {
3974         goto do_unallocated;
3975     }
3976 
3977     if (rn == 31) {
3978         gen_check_sp_alignment(s);
3979     }
3980 
3981     addr = read_cpu_reg_sp(s, rn, true);
3982     if (index >= 0) {
3983         /* pre-index or signed offset */
3984         tcg_gen_addi_i64(addr, addr, offset);
3985     }
3986 
3987     if (is_mult) {
3988         tcg_rt = cpu_reg(s, rt);
3989 
3990         if (is_zero) {
3991             int size = 4 << s->dcz_blocksize;
3992 
3993             if (s->ata) {
3994                 gen_helper_stzgm_tags(cpu_env, addr, tcg_rt);
3995             }
3996             /*
3997              * The non-tags portion of STZGM is mostly like DC_ZVA,
3998              * except the alignment happens before the access.
3999              */
4000             clean_addr = clean_data_tbi(s, addr);
4001             tcg_gen_andi_i64(clean_addr, clean_addr, -size);
4002             gen_helper_dc_zva(cpu_env, clean_addr);
4003         } else if (s->ata) {
4004             if (is_load) {
4005                 gen_helper_ldgm(tcg_rt, cpu_env, addr);
4006             } else {
4007                 gen_helper_stgm(cpu_env, addr, tcg_rt);
4008             }
4009         } else {
4010             MMUAccessType acc = is_load ? MMU_DATA_LOAD : MMU_DATA_STORE;
4011             int size = 4 << GMID_EL1_BS;
4012 
4013             clean_addr = clean_data_tbi(s, addr);
4014             tcg_gen_andi_i64(clean_addr, clean_addr, -size);
4015             gen_probe_access(s, clean_addr, acc, size);
4016 
4017             if (is_load) {
4018                 /* The result tags are zeros.  */
4019                 tcg_gen_movi_i64(tcg_rt, 0);
4020             }
4021         }
4022         return;
4023     }
4024 
4025     if (is_load) {
4026         tcg_gen_andi_i64(addr, addr, -TAG_GRANULE);
4027         tcg_rt = cpu_reg(s, rt);
4028         if (s->ata) {
4029             gen_helper_ldg(tcg_rt, cpu_env, addr, tcg_rt);
4030         } else {
4031             clean_addr = clean_data_tbi(s, addr);
4032             gen_probe_access(s, clean_addr, MMU_DATA_LOAD, MO_8);
4033             gen_address_with_allocation_tag0(tcg_rt, addr);
4034         }
4035     } else {
4036         tcg_rt = cpu_reg_sp(s, rt);
4037         if (!s->ata) {
4038             /*
4039              * For STG and ST2G, we need to check alignment and probe memory.
4040              * TODO: For STZG and STZ2G, we could rely on the stores below,
4041              * at least for system mode; user-only won't enforce alignment.
4042              */
4043             if (is_pair) {
4044                 gen_helper_st2g_stub(cpu_env, addr);
4045             } else {
4046                 gen_helper_stg_stub(cpu_env, addr);
4047             }
4048         } else if (tb_cflags(s->base.tb) & CF_PARALLEL) {
4049             if (is_pair) {
4050                 gen_helper_st2g_parallel(cpu_env, addr, tcg_rt);
4051             } else {
4052                 gen_helper_stg_parallel(cpu_env, addr, tcg_rt);
4053             }
4054         } else {
4055             if (is_pair) {
4056                 gen_helper_st2g(cpu_env, addr, tcg_rt);
4057             } else {
4058                 gen_helper_stg(cpu_env, addr, tcg_rt);
4059             }
4060         }
4061     }
4062 
4063     if (is_zero) {
4064         TCGv_i64 clean_addr = clean_data_tbi(s, addr);
4065         TCGv_i64 tcg_zero = tcg_constant_i64(0);
4066         int mem_index = get_mem_index(s);
4067         int i, n = (1 + is_pair) << LOG2_TAG_GRANULE;
4068 
4069         tcg_gen_qemu_st_i64(tcg_zero, clean_addr, mem_index,
4070                             MO_UQ | MO_ALIGN_16);
4071         for (i = 8; i < n; i += 8) {
4072             tcg_gen_addi_i64(clean_addr, clean_addr, 8);
4073             tcg_gen_qemu_st_i64(tcg_zero, clean_addr, mem_index, MO_UQ);
4074         }
4075     }
4076 
4077     if (index != 0) {
4078         /* pre-index or post-index */
4079         if (index < 0) {
4080             /* post-index */
4081             tcg_gen_addi_i64(addr, addr, offset);
4082         }
4083         tcg_gen_mov_i64(cpu_reg_sp(s, rn), addr);
4084     }
4085 }
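
/*
 * Writeback examples: op2 encodes the addressing form via index = op2 - 2,
 * so "STG X0, [X1], #16" (post-index, op2 = 1) adds the offset after the
 * tag store, "STG X0, [X1, #16]" (op2 = 2) is a pure signed offset, and
 * "STG X0, [X1, #16]!" (op2 = 3) updates X1 before use.  imm9 is scaled
 * by the 16-byte TAG_GRANULE.
 */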
4086 
4087 /* Loads and stores */
4088 static void disas_ldst(DisasContext *s, uint32_t insn)
4089 {
4090     switch (extract32(insn, 24, 6)) {
4091     case 0x08: /* Load/store exclusive */
4092         disas_ldst_excl(s, insn);
4093         break;
4094     case 0x18: case 0x1c: /* Load register (literal) */
4095         disas_ld_lit(s, insn);
4096         break;
4097     case 0x28: case 0x29:
4098     case 0x2c: case 0x2d: /* Load/store pair (all forms) */
4099         disas_ldst_pair(s, insn);
4100         break;
4101     case 0x38: case 0x39:
4102     case 0x3c: case 0x3d: /* Load/store register (all forms) */
4103         disas_ldst_reg(s, insn);
4104         break;
4105     case 0x0c: /* AdvSIMD load/store multiple structures */
4106         disas_ldst_multiple_struct(s, insn);
4107         break;
4108     case 0x0d: /* AdvSIMD load/store single structure */
4109         disas_ldst_single_struct(s, insn);
4110         break;
4111     case 0x19:
4112         if (extract32(insn, 21, 1) != 0) {
4113             disas_ldst_tag(s, insn);
4114         } else if (extract32(insn, 10, 2) == 0) {
4115             disas_ldst_ldapr_stlr(s, insn);
4116         } else {
4117             unallocated_encoding(s);
4118         }
4119         break;
4120     default:
4121         unallocated_encoding(s);
4122         break;
4123     }
4124 }
4125 
4126 typedef void ArithTwoOp(TCGv_i64, TCGv_i64, TCGv_i64);
4127 
4128 static bool gen_rri(DisasContext *s, arg_rri_sf *a,
4129                     bool rd_sp, bool rn_sp, ArithTwoOp *fn)
4130 {
4131     TCGv_i64 tcg_rn = rn_sp ? cpu_reg_sp(s, a->rn) : cpu_reg(s, a->rn);
4132     TCGv_i64 tcg_rd = rd_sp ? cpu_reg_sp(s, a->rd) : cpu_reg(s, a->rd);
4133     TCGv_i64 tcg_imm = tcg_constant_i64(a->imm);
4134 
4135     fn(tcg_rd, tcg_rn, tcg_imm);
4136     if (!a->sf) {
4137         tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
4138     }
4139     return true;
4140 }
4141 
4142 /*
4143  * PC-rel. addressing
4144  */
4145 
4146 static bool trans_ADR(DisasContext *s, arg_ri *a)
4147 {
4148     gen_pc_plus_diff(s, cpu_reg(s, a->rd), a->imm);
4149     return true;
4150 }
4151 
4152 static bool trans_ADRP(DisasContext *s, arg_ri *a)
4153 {
4154     int64_t offset = (int64_t)a->imm << 12;
4155 
4156     /* The page offset is ok for CF_PCREL. */
4157     offset -= s->pc_curr & 0xfff;
4158     gen_pc_plus_diff(s, cpu_reg(s, a->rd), offset);
4159     return true;
4160 }
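
/*
 * Worked example: for "ADRP X0, target" the computed value is
 * (PC & ~0xfff) + (imm << 12): subtracting (pc_curr & 0xfff) from the
 * page-scaled immediate means gen_pc_plus_diff() lands exactly on the
 * 4KB page base, and keeps the diff constant under CF_PCREL.
 */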
4161 
4162 /*
4163  * Add/subtract (immediate)
4164  */
4165 TRANS(ADD_i, gen_rri, a, 1, 1, tcg_gen_add_i64)
4166 TRANS(SUB_i, gen_rri, a, 1, 1, tcg_gen_sub_i64)
4167 TRANS(ADDS_i, gen_rri, a, 0, 1, a->sf ? gen_add64_CC : gen_add32_CC)
4168 TRANS(SUBS_i, gen_rri, a, 0, 1, a->sf ? gen_sub64_CC : gen_sub32_CC)
4169 
4170 /*
4171  * Add/subtract (immediate, with tags)
4172  */
4173 
4174 static bool gen_add_sub_imm_with_tags(DisasContext *s, arg_rri_tag *a,
4175                                       bool sub_op)
4176 {
4177     TCGv_i64 tcg_rn, tcg_rd;
4178     int imm;
4179 
4180     imm = a->uimm6 << LOG2_TAG_GRANULE;
4181     if (sub_op) {
4182         imm = -imm;
4183     }
4184 
4185     tcg_rn = cpu_reg_sp(s, a->rn);
4186     tcg_rd = cpu_reg_sp(s, a->rd);
4187 
4188     if (s->ata) {
4189         gen_helper_addsubg(tcg_rd, cpu_env, tcg_rn,
4190                            tcg_constant_i32(imm),
4191                            tcg_constant_i32(a->uimm4));
4192     } else {
4193         tcg_gen_addi_i64(tcg_rd, tcg_rn, imm);
4194         gen_address_with_allocation_tag0(tcg_rd, tcg_rd);
4195     }
4196     return true;
4197 }
4198 
4199 TRANS_FEAT(ADDG_i, aa64_mte_insn_reg, gen_add_sub_imm_with_tags, a, false)
4200 TRANS_FEAT(SUBG_i, aa64_mte_insn_reg, gen_add_sub_imm_with_tags, a, true)
4201 
4202 /* The input should be a value in the bottom e bits (with higher
4203  * bits zero); returns that value replicated into every element
4204  * of size e in a 64 bit integer.
4205  */
4206 static uint64_t bitfield_replicate(uint64_t mask, unsigned int e)
4207 {
4208     assert(e != 0);
4209     while (e < 64) {
4210         mask |= mask << e;
4211         e *= 2;
4212     }
4213     return mask;
4214 }
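
/*
 * Worked example: bitfield_replicate(0x1, 2) yields 0x5555555555555555,
 * and bitfield_replicate(0x03, 8) yields 0x0303030303030303.
 */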
4215 
4216 /*
4217  * Logical (immediate)
4218  */
4219 
4220 /*
4221  * Simplified variant of pseudocode DecodeBitMasks() for the case where we
4222  * only require the wmask. Returns false if the imms/immr/immn are a reserved
4223  * value (ie should cause a guest UNDEF exception), and true if they are
4224  * valid, in which case the decoded bit pattern is written to result.
4225  */
4226 bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn,
4227                             unsigned int imms, unsigned int immr)
4228 {
4229     uint64_t mask;
4230     unsigned e, levels, s, r;
4231     int len;
4232 
4233     assert(immn < 2 && imms < 64 && immr < 64);
4234 
4235     /* The bit patterns we create here are 64 bit patterns which
4236      * are vectors of identical elements of size e = 2, 4, 8, 16, 32 or
4237      * 64 bits each. Each element contains the same value: a run
4238      * of between 1 and e-1 non-zero bits, rotated within the
4239      * element by between 0 and e-1 bits.
4240      *
4241      * The element size and run length are encoded into immn (1 bit)
4242      * and imms (6 bits) as follows:
4243      * 64 bit elements: immn = 1, imms = <length of run - 1>
4244      * 32 bit elements: immn = 0, imms = 0 : <length of run - 1>
4245      * 16 bit elements: immn = 0, imms = 10 : <length of run - 1>
4246      *  8 bit elements: immn = 0, imms = 110 : <length of run - 1>
4247      *  4 bit elements: immn = 0, imms = 1110 : <length of run - 1>
4248      *  2 bit elements: immn = 0, imms = 11110 : <length of run - 1>
4249      * Notice that immn = 0, imms = 11111x is the only combination
4250      * not covered by one of the above options; this is reserved.
4251      * Further, <length of run - 1> all-ones is a reserved pattern.
4252      *
4253      * In all cases the rotation is by immr % e (and immr is 6 bits).
4254      */
4255 
4256     /* First determine the element size */
4257     len = 31 - clz32((immn << 6) | (~imms & 0x3f));
4258     if (len < 1) {
4259         /* This is the immn == 0, imms == 0b11111x case */
4260         return false;
4261     }
4262     e = 1 << len;
4263 
4264     levels = e - 1;
4265     s = imms & levels;
4266     r = immr & levels;
4267 
4268     if (s == levels) {
4269         /* <length of run - 1> mustn't be all-ones. */
4270         return false;
4271     }
4272 
4273     /* Create the value of one element: s+1 set bits rotated
4274      * by r within the element (which is e bits wide)...
4275      */
4276     mask = MAKE_64BIT_MASK(0, s + 1);
4277     if (r) {
4278         mask = (mask >> r) | (mask << (e - r));
4279         mask &= MAKE_64BIT_MASK(0, e);
4280     }
4281     /* ...then replicate the element over the whole 64 bit value */
4282     mask = bitfield_replicate(mask, e);
4283     *result = mask;
4284     return true;
4285 }
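
/*
 * Worked example: immn = 1, imms = 0b000111, immr = 0 decodes to e = 64
 * with a run of s + 1 = 8 set bits, i.e. 0x00000000000000ff.  Keeping
 * imms but setting immr = 4 rotates the run right within the element,
 * giving 0xf00000000000000f.
 */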
4286 
4287 static bool gen_rri_log(DisasContext *s, arg_rri_log *a, bool set_cc,
4288                         void (*fn)(TCGv_i64, TCGv_i64, int64_t))
4289 {
4290     TCGv_i64 tcg_rd, tcg_rn;
4291     uint64_t imm;
4292 
4293     /* Some immediate field values are reserved. */
4294     if (!logic_imm_decode_wmask(&imm, extract32(a->dbm, 12, 1),
4295                                 extract32(a->dbm, 0, 6),
4296                                 extract32(a->dbm, 6, 6))) {
4297         return false;
4298     }
4299     if (!a->sf) {
4300         imm &= 0xffffffffull;
4301     }
4302 
4303     tcg_rd = set_cc ? cpu_reg(s, a->rd) : cpu_reg_sp(s, a->rd);
4304     tcg_rn = cpu_reg(s, a->rn);
4305 
4306     fn(tcg_rd, tcg_rn, imm);
4307     if (set_cc) {
4308         gen_logic_CC(a->sf, tcg_rd);
4309     }
4310     if (!a->sf) {
4311         tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
4312     }
4313     return true;
4314 }
4315 
4316 TRANS(AND_i, gen_rri_log, a, false, tcg_gen_andi_i64)
4317 TRANS(ORR_i, gen_rri_log, a, false, tcg_gen_ori_i64)
4318 TRANS(EOR_i, gen_rri_log, a, false, tcg_gen_xori_i64)
4319 TRANS(ANDS_i, gen_rri_log, a, true, tcg_gen_andi_i64)
4320 
4321 /*
4322  * Move wide (immediate)
4323  */
4324 
4325 static bool trans_MOVZ(DisasContext *s, arg_movw *a)
4326 {
4327     int pos = a->hw << 4;
4328     tcg_gen_movi_i64(cpu_reg(s, a->rd), (uint64_t)a->imm << pos);
4329     return true;
4330 }
4331 
4332 static bool trans_MOVN(DisasContext *s, arg_movw *a)
4333 {
4334     int pos = a->hw << 4;
4335     uint64_t imm = a->imm;
4336 
4337     imm = ~(imm << pos);
4338     if (!a->sf) {
4339         imm = (uint32_t)imm;
4340     }
4341     tcg_gen_movi_i64(cpu_reg(s, a->rd), imm);
4342     return true;
4343 }
4344 
4345 static bool trans_MOVK(DisasContext *s, arg_movw *a)
4346 {
4347     int pos = a->hw << 4;
4348     TCGv_i64 tcg_rd, tcg_im;
4349 
4350     tcg_rd = cpu_reg(s, a->rd);
4351     tcg_im = tcg_constant_i64(a->imm);
4352     tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_im, pos, 16);
4353     if (!a->sf) {
4354         tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
4355     }
4356     return true;
4357 }
4358 
4359 /*
4360  * Bitfield
4361  */
4362 
4363 static bool trans_SBFM(DisasContext *s, arg_SBFM *a)
4364 {
4365     TCGv_i64 tcg_rd = cpu_reg(s, a->rd);
4366     TCGv_i64 tcg_tmp = read_cpu_reg(s, a->rn, 1);
4367     unsigned int bitsize = a->sf ? 64 : 32;
4368     unsigned int ri = a->immr;
4369     unsigned int si = a->imms;
4370     unsigned int pos, len;
4371 
4372     if (si >= ri) {
4373         /* Wd<s-r:0> = Wn<s:r> */
4374         len = (si - ri) + 1;
4375         tcg_gen_sextract_i64(tcg_rd, tcg_tmp, ri, len);
4376         if (!a->sf) {
4377             tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
4378         }
4379     } else {
4380         /* Wd<32+s-r,32-r> = Wn<s:0> */
4381         len = si + 1;
4382         pos = (bitsize - ri) & (bitsize - 1);
4383 
4384         if (len < ri) {
4385             /*
4386              * Sign extend the destination field from len to fill the
4387              * balance of the word.  Let the deposit below insert all
4388              * of those sign bits.
4389              */
4390             tcg_gen_sextract_i64(tcg_tmp, tcg_tmp, 0, len);
4391             len = ri;
4392         }
4393 
4394         /*
4395          * We start with zero, and we haven't modified any bits outside
4396          * bitsize, therefore no final zero-extension is needed for !sf.
4397          */
4398         tcg_gen_deposit_z_i64(tcg_rd, tcg_tmp, pos, len);
4399     }
4400     return true;
4401 }
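
/*
 * Worked example: "ASR X0, X1, #3" is SBFM with immr = 3, imms = 63, so
 * si >= ri and the result is a 61-bit sextract starting at bit 3.  The
 * si < ri arm implements SBFIZ, e.g. "SBFIZ X0, X1, #8, #4" deposits a
 * sign-extended 4-bit field at bit 8.
 */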
4402 
4403 static bool trans_UBFM(DisasContext *s, arg_UBFM *a)
4404 {
4405     TCGv_i64 tcg_rd = cpu_reg(s, a->rd);
4406     TCGv_i64 tcg_tmp = read_cpu_reg(s, a->rn, 1);
4407     unsigned int bitsize = a->sf ? 64 : 32;
4408     unsigned int ri = a->immr;
4409     unsigned int si = a->imms;
4410     unsigned int pos, len;
4411 
4415     if (si >= ri) {
4416         /* Wd<s-r:0> = Wn<s:r> */
4417         len = (si - ri) + 1;
4418         tcg_gen_extract_i64(tcg_rd, tcg_tmp, ri, len);
4419     } else {
4420         /* Wd<32+s-r,32-r> = Wn<s:0> */
4421         len = si + 1;
4422         pos = (bitsize - ri) & (bitsize - 1);
4423         tcg_gen_deposit_z_i64(tcg_rd, tcg_tmp, pos, len);
4424     }
4425     return true;
4426 }
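
/*
 * Worked example: "LSL X0, X1, #4" is UBFM with immr = 60, imms = 59, so
 * si < ri gives pos = 4, len = 60 and the deposit_z is a left shift with
 * zero fill; "LSR X0, X1, #4" is UBFM with immr = 4, imms = 63, taking
 * the extract path instead.
 */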
4427 
4428 static bool trans_BFM(DisasContext *s, arg_BFM *a)
4429 {
4430     TCGv_i64 tcg_rd = cpu_reg(s, a->rd);
4431     TCGv_i64 tcg_tmp = read_cpu_reg(s, a->rn, 1);
4432     unsigned int bitsize = a->sf ? 64 : 32;
4433     unsigned int ri = a->immr;
4434     unsigned int si = a->imms;
4435     unsigned int pos, len;
4436 
4440     if (si >= ri) {
4441         /* Wd<s-r:0> = Wn<s:r> */
4442         tcg_gen_shri_i64(tcg_tmp, tcg_tmp, ri);
4443         len = (si - ri) + 1;
4444         pos = 0;
4445     } else {
4446         /* Wd<32+s-r,32-r> = Wn<s:0> */
4447         len = si + 1;
4448         pos = (bitsize - ri) & (bitsize - 1);
4449     }
4450 
4451     tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, pos, len);
4452     if (!a->sf) {
4453         tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
4454     }
4455     return true;
4456 }
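
/*
 * Worked example: "BFI X0, X1, #8, #16" is BFM with immr = 56, imms = 15,
 * so pos = 8 and len = 16: bits 8..23 of X0 are replaced with X1<15:0>
 * while all other bits of X0 are preserved by the read-modify-write
 * deposit.
 */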
4457 
4458 static bool trans_EXTR(DisasContext *s, arg_extract *a)
4459 {
4460     TCGv_i64 tcg_rd, tcg_rm, tcg_rn;
4461 
4462     tcg_rd = cpu_reg(s, a->rd);
4463 
4464     if (unlikely(a->imm == 0)) {
4465         /*
4466          * tcg shl_i32/shl_i64 is undefined for 32/64 bit shifts,
4467          * so an extract from bit 0 is a special case.
4468          */
4469         if (a->sf) {
4470             tcg_gen_mov_i64(tcg_rd, cpu_reg(s, a->rm));
4471         } else {
4472             tcg_gen_ext32u_i64(tcg_rd, cpu_reg(s, a->rm));
4473         }
4474     } else {
4475         tcg_rm = cpu_reg(s, a->rm);
4476         tcg_rn = cpu_reg(s, a->rn);
4477 
4478         if (a->sf) {
4479             /* Specialization to ROR happens in EXTRACT2.  */
4480             tcg_gen_extract2_i64(tcg_rd, tcg_rm, tcg_rn, a->imm);
4481         } else {
4482             TCGv_i32 t0 = tcg_temp_new_i32();
4483 
4484             tcg_gen_extrl_i64_i32(t0, tcg_rm);
4485             if (a->rm == a->rn) {
4486                 tcg_gen_rotri_i32(t0, t0, a->imm);
4487             } else {
4488                 TCGv_i32 t1 = tcg_temp_new_i32();
4489                 tcg_gen_extrl_i64_i32(t1, tcg_rn);
4490                 tcg_gen_extract2_i32(t0, t0, t1, a->imm);
4491             }
4492             tcg_gen_extu_i32_i64(tcg_rd, t0);
4493         }
4494     }
4495     return true;
4496 }
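
/*
 * Worked example: "EXTR X0, X1, X2, #8" computes the 64-bit window
 * (X1:X2) >> 8, i.e. (X2 >> 8) | (X1 << 56).  When Rn == Rm this is the
 * ROR alias, which the 32-bit path above turns directly into rotri.
 */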
4497 
4498 /* Shift a TCGv src by TCGv shift_amount, put result in dst.
4499  * Note that it is the caller's responsibility to ensure that the
4500  * shift amount is in range (ie 0..31 or 0..63) and provide the ARM
4501  * mandated semantics for out of range shifts.
4502  */
4503 static void shift_reg(TCGv_i64 dst, TCGv_i64 src, int sf,
4504                       enum a64_shift_type shift_type, TCGv_i64 shift_amount)
4505 {
4506     switch (shift_type) {
4507     case A64_SHIFT_TYPE_LSL:
4508         tcg_gen_shl_i64(dst, src, shift_amount);
4509         break;
4510     case A64_SHIFT_TYPE_LSR:
4511         tcg_gen_shr_i64(dst, src, shift_amount);
4512         break;
4513     case A64_SHIFT_TYPE_ASR:
4514         if (!sf) {
4515             tcg_gen_ext32s_i64(dst, src);
4516         }
4517         tcg_gen_sar_i64(dst, sf ? src : dst, shift_amount);
4518         break;
4519     case A64_SHIFT_TYPE_ROR:
4520         if (sf) {
4521             tcg_gen_rotr_i64(dst, src, shift_amount);
4522         } else {
4523             TCGv_i32 t0, t1;
4524             t0 = tcg_temp_new_i32();
4525             t1 = tcg_temp_new_i32();
4526             tcg_gen_extrl_i64_i32(t0, src);
4527             tcg_gen_extrl_i64_i32(t1, shift_amount);
4528             tcg_gen_rotr_i32(t0, t0, t1);
4529             tcg_gen_extu_i32_i64(dst, t0);
4530         }
4531         break;
4532     default:
4533         g_assert_not_reached(); /* all shift types handled above */
4534         break;
4535     }
4536 
4537     if (!sf) { /* zero extend final result */
4538         tcg_gen_ext32u_i64(dst, dst);
4539     }
4540 }
4541 
4542 /* Shift a TCGv src by immediate, put result in dst.
4543  * The shift amount must be in range (this should always be true as the
4544  * relevant instructions will UNDEF on bad shift immediates).
4545  */
4546 static void shift_reg_imm(TCGv_i64 dst, TCGv_i64 src, int sf,
4547                           enum a64_shift_type shift_type, unsigned int shift_i)
4548 {
4549     assert(shift_i < (sf ? 64 : 32));
4550 
4551     if (shift_i == 0) {
4552         tcg_gen_mov_i64(dst, src);
4553     } else {
4554         shift_reg(dst, src, sf, shift_type, tcg_constant_i64(shift_i));
4555     }
4556 }
4557 
4558 /* Logical (shifted register)
4559  *   31  30 29 28       24 23   22 21  20  16 15    10 9    5 4    0
4560  * +----+-----+-----------+-------+---+------+--------+------+------+
4561  * | sf | opc | 0 1 0 1 0 | shift | N |  Rm  |  imm6  |  Rn  |  Rd  |
4562  * +----+-----+-----------+-------+---+------+--------+------+------+
4563  */
4564 static void disas_logic_reg(DisasContext *s, uint32_t insn)
4565 {
4566     TCGv_i64 tcg_rd, tcg_rn, tcg_rm;
4567     unsigned int sf, opc, shift_type, invert, rm, shift_amount, rn, rd;
4568 
4569     sf = extract32(insn, 31, 1);
4570     opc = extract32(insn, 29, 2);
4571     shift_type = extract32(insn, 22, 2);
4572     invert = extract32(insn, 21, 1);
4573     rm = extract32(insn, 16, 5);
4574     shift_amount = extract32(insn, 10, 6);
4575     rn = extract32(insn, 5, 5);
4576     rd = extract32(insn, 0, 5);
4577 
4578     if (!sf && (shift_amount & (1 << 5))) {
4579         unallocated_encoding(s);
4580         return;
4581     }
4582 
4583     tcg_rd = cpu_reg(s, rd);
4584 
4585     if (opc == 1 && shift_amount == 0 && shift_type == 0 && rn == 31) {
4586         /* Unshifted ORR and ORN with WZR/XZR is the standard encoding for
4587          * register-register MOV and MVN, so it is worth special casing.
4588          */
4589         tcg_rm = cpu_reg(s, rm);
4590         if (invert) {
4591             tcg_gen_not_i64(tcg_rd, tcg_rm);
4592             if (!sf) {
4593                 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
4594             }
4595         } else {
4596             if (sf) {
4597                 tcg_gen_mov_i64(tcg_rd, tcg_rm);
4598             } else {
4599                 tcg_gen_ext32u_i64(tcg_rd, tcg_rm);
4600             }
4601         }
4602         return;
4603     }
4604 
4605     tcg_rm = read_cpu_reg(s, rm, sf);
4606 
4607     if (shift_amount) {
4608         shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, shift_amount);
4609     }
4610 
4611     tcg_rn = cpu_reg(s, rn);
4612 
4613     switch (opc | (invert << 2)) {
4614     case 0: /* AND */
4615     case 3: /* ANDS */
4616         tcg_gen_and_i64(tcg_rd, tcg_rn, tcg_rm);
4617         break;
4618     case 1: /* ORR */
4619         tcg_gen_or_i64(tcg_rd, tcg_rn, tcg_rm);
4620         break;
4621     case 2: /* EOR */
4622         tcg_gen_xor_i64(tcg_rd, tcg_rn, tcg_rm);
4623         break;
4624     case 4: /* BIC */
4625     case 7: /* BICS */
4626         tcg_gen_andc_i64(tcg_rd, tcg_rn, tcg_rm);
4627         break;
4628     case 5: /* ORN */
4629         tcg_gen_orc_i64(tcg_rd, tcg_rn, tcg_rm);
4630         break;
4631     case 6: /* EON */
4632         tcg_gen_eqv_i64(tcg_rd, tcg_rn, tcg_rm);
4633         break;
4634     default:
4635         g_assert_not_reached();
4636         break;
4637     }
4638 
4639     if (!sf) {
4640         tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
4641     }
4642 
4643     if (opc == 3) {
4644         gen_logic_CC(sf, tcg_rd);
4645     }
4646 }
4647 
4648 /*
4649  * Add/subtract (extended register)
4650  *
4651  *  31|30|29|28       24|23 22|21|20   16|15  13|12  10|9  5|4  0|
4652  * +--+--+--+-----------+-----+--+-------+------+------+----+----+
4653  * |sf|op| S| 0 1 0 1 1 | opt | 1|  Rm   |option| imm3 | Rn | Rd |
4654  * +--+--+--+-----------+-----+--+-------+------+------+----+----+
4655  *
4656  *  sf: 0 -> 32bit, 1 -> 64bit
4657  *  op: 0 -> add  , 1 -> sub
4658  *   S: 1 -> set flags
4659  * opt: 00
4660  * option: extension type (see DecodeRegExtend)
4661  * imm3: optional shift to Rm
4662  *
4663  * Rd = Rn + LSL(extend(Rm), amount)
4664  */
4665 static void disas_add_sub_ext_reg(DisasContext *s, uint32_t insn)
4666 {
4667     int rd = extract32(insn, 0, 5);
4668     int rn = extract32(insn, 5, 5);
4669     int imm3 = extract32(insn, 10, 3);
4670     int option = extract32(insn, 13, 3);
4671     int rm = extract32(insn, 16, 5);
4672     int opt = extract32(insn, 22, 2);
4673     bool setflags = extract32(insn, 29, 1);
4674     bool sub_op = extract32(insn, 30, 1);
4675     bool sf = extract32(insn, 31, 1);
4676 
4677     TCGv_i64 tcg_rm, tcg_rn; /* temps */
4678     TCGv_i64 tcg_rd;
4679     TCGv_i64 tcg_result;
4680 
4681     if (imm3 > 4 || opt != 0) {
4682         unallocated_encoding(s);
4683         return;
4684     }
4685 
4686     /* non-flag setting ops may use SP */
4687     if (!setflags) {
4688         tcg_rd = cpu_reg_sp(s, rd);
4689     } else {
4690         tcg_rd = cpu_reg(s, rd);
4691     }
4692     tcg_rn = read_cpu_reg_sp(s, rn, sf);
4693 
4694     tcg_rm = read_cpu_reg(s, rm, sf);
4695     ext_and_shift_reg(tcg_rm, tcg_rm, option, imm3);
4696 
4697     tcg_result = tcg_temp_new_i64();
4698 
4699     if (!setflags) {
4700         if (sub_op) {
4701             tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm);
4702         } else {
4703             tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm);
4704         }
4705     } else {
4706         if (sub_op) {
4707             gen_sub_CC(sf, tcg_result, tcg_rn, tcg_rm);
4708         } else {
4709             gen_add_CC(sf, tcg_result, tcg_rn, tcg_rm);
4710         }
4711     }
4712 
4713     if (sf) {
4714         tcg_gen_mov_i64(tcg_rd, tcg_result);
4715     } else {
4716         tcg_gen_ext32u_i64(tcg_rd, tcg_result);
4717     }
4718 }
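
/*
 * Worked example: "ADD X0, SP, W1, UXTW #2" reads the base via
 * read_cpu_reg_sp() (non-flag-setting forms may use SP), zero-extends W1
 * and shifts it left by imm3 = 2 before the add; imm3 values above 4 are
 * rejected as unallocated.
 */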
4719 
4720 /*
4721  * Add/subtract (shifted register)
4722  *
4723  *  31 30 29 28       24 23 22 21 20   16 15     10 9    5 4    0
4724  * +--+--+--+-----------+-----+--+-------+---------+------+------+
4725  * |sf|op| S| 0 1 0 1 1 |shift| 0|  Rm   |  imm6   |  Rn  |  Rd  |
4726  * +--+--+--+-----------+-----+--+-------+---------+------+------+
4727  *
4728  *    sf: 0 -> 32bit, 1 -> 64bit
4729  *    op: 0 -> add  , 1 -> sub
4730  *     S: 1 -> set flags
4731  * shift: 00 -> LSL, 01 -> LSR, 10 -> ASR, 11 -> RESERVED
4732  *  imm6: Shift amount to apply to Rm before the add/sub
4733  */
4734 static void disas_add_sub_reg(DisasContext *s, uint32_t insn)
4735 {
4736     int rd = extract32(insn, 0, 5);
4737     int rn = extract32(insn, 5, 5);
4738     int imm6 = extract32(insn, 10, 6);
4739     int rm = extract32(insn, 16, 5);
4740     int shift_type = extract32(insn, 22, 2);
4741     bool setflags = extract32(insn, 29, 1);
4742     bool sub_op = extract32(insn, 30, 1);
4743     bool sf = extract32(insn, 31, 1);
4744 
4745     TCGv_i64 tcg_rd = cpu_reg(s, rd);
4746     TCGv_i64 tcg_rn, tcg_rm;
4747     TCGv_i64 tcg_result;
4748 
4749     if ((shift_type == 3) || (!sf && (imm6 > 31))) {
4750         unallocated_encoding(s);
4751         return;
4752     }
4753 
4754     tcg_rn = read_cpu_reg(s, rn, sf);
4755     tcg_rm = read_cpu_reg(s, rm, sf);
4756 
4757     shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, imm6);
4758 
4759     tcg_result = tcg_temp_new_i64();
4760 
4761     if (!setflags) {
4762         if (sub_op) {
4763             tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm);
4764         } else {
4765             tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm);
4766         }
4767     } else {
4768         if (sub_op) {
4769             gen_sub_CC(sf, tcg_result, tcg_rn, tcg_rm);
4770         } else {
4771             gen_add_CC(sf, tcg_result, tcg_rn, tcg_rm);
4772         }
4773     }
4774 
4775     if (sf) {
4776         tcg_gen_mov_i64(tcg_rd, tcg_result);
4777     } else {
4778         tcg_gen_ext32u_i64(tcg_rd, tcg_result);
4779     }
4780 }
4781 
4782 /* Data-processing (3 source)
4783  *
4784  *    31 30  29 28       24 23 21  20  16  15  14  10 9    5 4    0
4785  *  +--+------+-----------+------+------+----+------+------+------+
4786  *  |sf| op54 | 1 1 0 1 1 | op31 |  Rm  | o0 |  Ra  |  Rn  |  Rd  |
4787  *  +--+------+-----------+------+------+----+------+------+------+
4788  */
4789 static void disas_data_proc_3src(DisasContext *s, uint32_t insn)
4790 {
4791     int rd = extract32(insn, 0, 5);
4792     int rn = extract32(insn, 5, 5);
4793     int ra = extract32(insn, 10, 5);
4794     int rm = extract32(insn, 16, 5);
4795     int op_id = (extract32(insn, 29, 3) << 4) |
4796         (extract32(insn, 21, 3) << 1) |
4797         extract32(insn, 15, 1);
4798     bool sf = extract32(insn, 31, 1);
4799     bool is_sub = extract32(op_id, 0, 1);
4800     bool is_high = extract32(op_id, 2, 1);
4801     bool is_signed = false;
4802     TCGv_i64 tcg_op1;
4803     TCGv_i64 tcg_op2;
4804     TCGv_i64 tcg_tmp;
4805 
4806     /* Note that op_id is sf:op54:op31:o0 so it includes the 32/64 size flag */
4807     switch (op_id) {
4808     case 0x42: /* SMADDL */
4809     case 0x43: /* SMSUBL */
4810     case 0x44: /* SMULH */
4811         is_signed = true;
4812         break;
4813     case 0x0: /* MADD (32bit) */
4814     case 0x1: /* MSUB (32bit) */
4815     case 0x40: /* MADD (64bit) */
4816     case 0x41: /* MSUB (64bit) */
4817     case 0x4a: /* UMADDL */
4818     case 0x4b: /* UMSUBL */
4819     case 0x4c: /* UMULH */
4820         break;
4821     default:
4822         unallocated_encoding(s);
4823         return;
4824     }
4825 
4826     if (is_high) {
4827         TCGv_i64 low_bits = tcg_temp_new_i64(); /* low bits discarded */
4828         TCGv_i64 tcg_rd = cpu_reg(s, rd);
4829         TCGv_i64 tcg_rn = cpu_reg(s, rn);
4830         TCGv_i64 tcg_rm = cpu_reg(s, rm);
4831 
4832         if (is_signed) {
4833             tcg_gen_muls2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm);
4834         } else {
4835             tcg_gen_mulu2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm);
4836         }
4837         return;
4838     }
4839 
4840     tcg_op1 = tcg_temp_new_i64();
4841     tcg_op2 = tcg_temp_new_i64();
4842     tcg_tmp = tcg_temp_new_i64();
4843 
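    /* op_ids below 0x42 (MADD/MSUB) take the operands as-is; the
     * remaining widening multiplies extend the low 32 bits of each source.
     */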
4844     if (op_id < 0x42) {
4845         tcg_gen_mov_i64(tcg_op1, cpu_reg(s, rn));
4846         tcg_gen_mov_i64(tcg_op2, cpu_reg(s, rm));
4847     } else {
4848         if (is_signed) {
4849             tcg_gen_ext32s_i64(tcg_op1, cpu_reg(s, rn));
4850             tcg_gen_ext32s_i64(tcg_op2, cpu_reg(s, rm));
4851         } else {
4852             tcg_gen_ext32u_i64(tcg_op1, cpu_reg(s, rn));
4853             tcg_gen_ext32u_i64(tcg_op2, cpu_reg(s, rm));
4854         }
4855     }
4856 
4857     if (ra == 31 && !is_sub) {
4858         /* Special-case MADD with rA == XZR; it is the standard MUL alias */
4859         tcg_gen_mul_i64(cpu_reg(s, rd), tcg_op1, tcg_op2);
4860     } else {
4861         tcg_gen_mul_i64(tcg_tmp, tcg_op1, tcg_op2);
4862         if (is_sub) {
4863             tcg_gen_sub_i64(cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp);
4864         } else {
4865             tcg_gen_add_i64(cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp);
4866         }
4867     }
4868 
4869     if (!sf) {
4870         tcg_gen_ext32u_i64(cpu_reg(s, rd), cpu_reg(s, rd));
4871     }
4872 }
4873 
4874 /* Add/subtract (with carry)
4875  *  31 30 29 28 27 26 25 24 23 22 21  20  16  15       10  9    5 4   0
4876  * +--+--+--+------------------------+------+-------------+------+-----+
4877  * |sf|op| S| 1  1  0  1  0  0  0  0 |  rm  | 0 0 0 0 0 0 |  Rn  |  Rd |
4878  * +--+--+--+------------------------+------+-------------+------+-----+
4879  */
4880 
4881 static void disas_adc_sbc(DisasContext *s, uint32_t insn)
4882 {
4883     unsigned int sf, op, setflags, rm, rn, rd;
4884     TCGv_i64 tcg_y, tcg_rn, tcg_rd;
4885 
4886     sf = extract32(insn, 31, 1);
4887     op = extract32(insn, 30, 1);
4888     setflags = extract32(insn, 29, 1);
4889     rm = extract32(insn, 16, 5);
4890     rn = extract32(insn, 5, 5);
4891     rd = extract32(insn, 0, 5);
4892 
4893     tcg_rd = cpu_reg(s, rd);
4894     tcg_rn = cpu_reg(s, rn);
4895 
4896     if (op) {
4897         tcg_y = tcg_temp_new_i64();
4898         tcg_gen_not_i64(tcg_y, cpu_reg(s, rm));
4899     } else {
4900         tcg_y = cpu_reg(s, rm);
4901     }
4902 
4903     if (setflags) {
4904         gen_adc_CC(sf, tcg_rd, tcg_rn, tcg_y);
4905     } else {
4906         gen_adc(sf, tcg_rd, tcg_rn, tcg_y);
4907     }
4908 }
4909 
4910 /*
4911  * Rotate right into flags
4912  *  31 30 29                21       15          10      5  4      0
4913  * +--+--+--+-----------------+--------+-----------+------+--+------+
4914  * |sf|op| S| 1 1 0 1 0 0 0 0 |  imm6  | 0 0 0 0 1 |  Rn  |o2| mask |
4915  * +--+--+--+-----------------+--------+-----------+------+--+------+
4916  */
4917 static void disas_rotate_right_into_flags(DisasContext *s, uint32_t insn)
4918 {
4919     int mask = extract32(insn, 0, 4);
4920     int o2 = extract32(insn, 4, 1);
4921     int rn = extract32(insn, 5, 5);
4922     int imm6 = extract32(insn, 15, 6);
4923     int sf_op_s = extract32(insn, 29, 3);
4924     TCGv_i64 tcg_rn;
4925     TCGv_i32 nzcv;
4926 
4927     if (sf_op_s != 5 || o2 != 0 || !dc_isar_feature(aa64_condm_4, s)) {
4928         unallocated_encoding(s);
4929         return;
4930     }
4931 
4932     tcg_rn = read_cpu_reg(s, rn, 1);
4933     tcg_gen_rotri_i64(tcg_rn, tcg_rn, imm6);
4934 
4935     nzcv = tcg_temp_new_i32();
4936     tcg_gen_extrl_i64_i32(nzcv, tcg_rn);
4937 
4938     if (mask & 8) { /* N */
4939         tcg_gen_shli_i32(cpu_NF, nzcv, 31 - 3);
4940     }
4941     if (mask & 4) { /* Z */
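        /* QEMU stores Z inverted: ZF == 0 means the Z flag is set */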
4942         tcg_gen_not_i32(cpu_ZF, nzcv);
4943         tcg_gen_andi_i32(cpu_ZF, cpu_ZF, 4);
4944     }
4945     if (mask & 2) { /* C */
4946         tcg_gen_extract_i32(cpu_CF, nzcv, 1, 1);
4947     }
4948     if (mask & 1) { /* V */
4949         tcg_gen_shli_i32(cpu_VF, nzcv, 31 - 0);
4950     }
4951 }
4952 
4953 /*
4954  * Evaluate into flags
4955  *  31 30 29                21        15   14        10      5  4      0
4956  * +--+--+--+-----------------+---------+----+---------+------+--+------+
4957  * |sf|op| S| 1 1 0 1 0 0 0 0 | opcode2 | sz | 0 0 1 0 |  Rn  |o3| mask |
4958  * +--+--+--+-----------------+---------+----+---------+------+--+------+
4959  */
4960 static void disas_evaluate_into_flags(DisasContext *s, uint32_t insn)
4961 {
4962     int o3_mask = extract32(insn, 0, 5);
4963     int rn = extract32(insn, 5, 5);
4964     int o2 = extract32(insn, 15, 6);
4965     int sz = extract32(insn, 14, 1);
4966     int sf_op_s = extract32(insn, 29, 3);
4967     TCGv_i32 tmp;
4968     int shift;
4969 
4970     if (sf_op_s != 1 || o2 != 0 || o3_mask != 0xd ||
4971         !dc_isar_feature(aa64_condm_4, s)) {
4972         unallocated_encoding(s);
4973         return;
4974     }
4975     shift = sz ? 16 : 24;  /* SETF16 or SETF8 */
4976 
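    /* For SETF8: NF<31> = operand<7>, ZF == 0 iff operand<7:0> == 0,
     * VF<31> = operand<8> ^ operand<7>; similarly for SETF16.
     */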
4977     tmp = tcg_temp_new_i32();
4978     tcg_gen_extrl_i64_i32(tmp, cpu_reg(s, rn));
4979     tcg_gen_shli_i32(cpu_NF, tmp, shift);
4980     tcg_gen_shli_i32(cpu_VF, tmp, shift - 1);
4981     tcg_gen_mov_i32(cpu_ZF, cpu_NF);
4982     tcg_gen_xor_i32(cpu_VF, cpu_VF, cpu_NF);
4983 }
4984 
4985 /* Conditional compare (immediate / register)
4986  *  31 30 29 28 27 26 25 24 23 22 21  20    16 15  12  11  10  9   5  4 3   0
4987  * +--+--+--+------------------------+--------+------+----+--+------+--+-----+
4988  * |sf|op| S| 1  1  0  1  0  0  1  0 |imm5/rm | cond |i/r |o2|  Rn  |o3|nzcv |
4989  * +--+--+--+------------------------+--------+------+----+--+------+--+-----+
4990  *        [1]                             y                [0]       [0]
4991  */
4992 static void disas_cc(DisasContext *s, uint32_t insn)
4993 {
4994     unsigned int sf, op, y, cond, rn, nzcv, is_imm;
4995     TCGv_i32 tcg_t0, tcg_t1, tcg_t2;
4996     TCGv_i64 tcg_tmp, tcg_y, tcg_rn;
4997     DisasCompare c;
4998 
4999     if (!extract32(insn, 29, 1)) {
5000         unallocated_encoding(s);
5001         return;
5002     }
5003     if (insn & (1 << 10 | 1 << 4)) {
5004         unallocated_encoding(s);
5005         return;
5006     }
5007     sf = extract32(insn, 31, 1);
5008     op = extract32(insn, 30, 1);
5009     is_imm = extract32(insn, 11, 1);
5010     y = extract32(insn, 16, 5); /* y = rm (reg) or imm5 (imm) */
5011     cond = extract32(insn, 12, 4);
5012     rn = extract32(insn, 5, 5);
5013     nzcv = extract32(insn, 0, 4);
5014 
5015     /* Set T0 = !COND.  */
5016     tcg_t0 = tcg_temp_new_i32();
5017     arm_test_cc(&c, cond);
5018     tcg_gen_setcondi_i32(tcg_invert_cond(c.cond), tcg_t0, c.value, 0);
5019 
5020     /* Load the arguments for the new comparison.  */
5021     if (is_imm) {
5022         tcg_y = tcg_temp_new_i64();
5023         tcg_gen_movi_i64(tcg_y, y);
5024     } else {
5025         tcg_y = cpu_reg(s, y);
5026     }
5027     tcg_rn = cpu_reg(s, rn);
5028 
5029     /* Set the flags for the new comparison.  */
5030     tcg_tmp = tcg_temp_new_i64();
5031     if (op) {
5032         gen_sub_CC(sf, tcg_tmp, tcg_rn, tcg_y);
5033     } else {
5034         gen_add_CC(sf, tcg_tmp, tcg_rn, tcg_y);
5035     }
5036 
5037     /* If COND was false, force the flags to #nzcv.  Compute two masks
5038      * to help with this: T1 = (COND ? 0 : -1), T2 = (COND ? -1 : 0).
5039      * For tcg hosts that support ANDC, we can make do with just T1.
5040      * In either case, allow the tcg optimizer to delete any unused mask.
5041      */
5042     tcg_t1 = tcg_temp_new_i32();
5043     tcg_t2 = tcg_temp_new_i32();
5044     tcg_gen_neg_i32(tcg_t1, tcg_t0);
5045     tcg_gen_subi_i32(tcg_t2, tcg_t0, 1);
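    /* e.g. when COND is false, t0 == 1, so T1 == -1 and T2 == 0 */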
5046 
5047     if (nzcv & 8) { /* N */
5048         tcg_gen_or_i32(cpu_NF, cpu_NF, tcg_t1);
5049     } else {
5050         if (TCG_TARGET_HAS_andc_i32) {
5051             tcg_gen_andc_i32(cpu_NF, cpu_NF, tcg_t1);
5052         } else {
5053             tcg_gen_and_i32(cpu_NF, cpu_NF, tcg_t2);
5054         }
5055     }
5056     if (nzcv & 4) { /* Z */
5057         if (TCG_TARGET_HAS_andc_i32) {
5058             tcg_gen_andc_i32(cpu_ZF, cpu_ZF, tcg_t1);
5059         } else {
5060             tcg_gen_and_i32(cpu_ZF, cpu_ZF, tcg_t2);
5061         }
5062     } else {
5063         tcg_gen_or_i32(cpu_ZF, cpu_ZF, tcg_t0);
5064     }
5065     if (nzcv & 2) { /* C */
5066         tcg_gen_or_i32(cpu_CF, cpu_CF, tcg_t0);
5067     } else {
5068         if (TCG_TARGET_HAS_andc_i32) {
5069             tcg_gen_andc_i32(cpu_CF, cpu_CF, tcg_t1);
5070         } else {
5071             tcg_gen_and_i32(cpu_CF, cpu_CF, tcg_t2);
5072         }
5073     }
5074     if (nzcv & 1) { /* V */
5075         tcg_gen_or_i32(cpu_VF, cpu_VF, tcg_t1);
5076     } else {
5077         if (TCG_TARGET_HAS_andc_i32) {
5078             tcg_gen_andc_i32(cpu_VF, cpu_VF, tcg_t1);
5079         } else {
5080             tcg_gen_and_i32(cpu_VF, cpu_VF, tcg_t2);
5081         }
5082     }
5083 }
5084 
5085 /* Conditional select
5086  *   31   30  29  28             21 20  16 15  12 11 10 9    5 4    0
5087  * +----+----+---+-----------------+------+------+-----+------+------+
5088  * | sf | op | S | 1 1 0 1 0 1 0 0 |  Rm  | cond | op2 |  Rn  |  Rd  |
5089  * +----+----+---+-----------------+------+------+-----+------+------+
5090  */
5091 static void disas_cond_select(DisasContext *s, uint32_t insn)
5092 {
5093     unsigned int sf, else_inv, rm, cond, else_inc, rn, rd;
5094     TCGv_i64 tcg_rd, zero;
5095     DisasCompare64 c;
5096 
5097     if (extract32(insn, 29, 1) || extract32(insn, 11, 1)) {
5098         /* S == 1 or op2<1> == 1 */
5099         unallocated_encoding(s);
5100         return;
5101     }
5102     sf = extract32(insn, 31, 1);
5103     else_inv = extract32(insn, 30, 1);
5104     rm = extract32(insn, 16, 5);
5105     cond = extract32(insn, 12, 4);
5106     else_inc = extract32(insn, 10, 1);
5107     rn = extract32(insn, 5, 5);
5108     rd = extract32(insn, 0, 5);
5109 
5110     tcg_rd = cpu_reg(s, rd);
5111 
5112     a64_test_cc(&c, cond);
5113     zero = tcg_constant_i64(0);
5114 
5115     if (rn == 31 && rm == 31 && (else_inc ^ else_inv)) {
5116         /* CSET & CSETM.  */
5117         tcg_gen_setcond_i64(tcg_invert_cond(c.cond), tcg_rd, c.value, zero);
5118         if (else_inv) {
5119             tcg_gen_neg_i64(tcg_rd, tcg_rd);
5120         }
5121     } else {
5122         TCGv_i64 t_true = cpu_reg(s, rn);
5123         TCGv_i64 t_false = read_cpu_reg(s, rm, 1);
5124         if (else_inv && else_inc) {
5125             tcg_gen_neg_i64(t_false, t_false);
5126         } else if (else_inv) {
5127             tcg_gen_not_i64(t_false, t_false);
5128         } else if (else_inc) {
5129             tcg_gen_addi_i64(t_false, t_false, 1);
5130         }
5131         tcg_gen_movcond_i64(c.cond, tcg_rd, c.value, zero, t_true, t_false);
5132     }
5133 
5134     if (!sf) {
5135         tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
5136     }
5137 }
5138 
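/* CLZ: count the leading zero bits of the 32- or 64-bit source */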
5139 static void handle_clz(DisasContext *s, unsigned int sf,
5140                        unsigned int rn, unsigned int rd)
5141 {
5142     TCGv_i64 tcg_rd, tcg_rn;
5143     tcg_rd = cpu_reg(s, rd);
5144     tcg_rn = cpu_reg(s, rn);
5145 
5146     if (sf) {
5147         tcg_gen_clzi_i64(tcg_rd, tcg_rn, 64);
5148     } else {
5149         TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
5150         tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
5151         tcg_gen_clzi_i32(tcg_tmp32, tcg_tmp32, 32);
5152         tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
5153     }
5154 }
5155 
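/* CLS: count the leading redundant copies of the sign bit */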
5156 static void handle_cls(DisasContext *s, unsigned int sf,
5157                        unsigned int rn, unsigned int rd)
5158 {
5159     TCGv_i64 tcg_rd, tcg_rn;
5160     tcg_rd = cpu_reg(s, rd);
5161     tcg_rn = cpu_reg(s, rn);
5162 
5163     if (sf) {
5164         tcg_gen_clrsb_i64(tcg_rd, tcg_rn);
5165     } else {
5166         TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
5167         tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
5168         tcg_gen_clrsb_i32(tcg_tmp32, tcg_tmp32);
5169         tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
5170     }
5171 }
5172 
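/* RBIT: reverse the bit order of the 32- or 64-bit source */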
5173 static void handle_rbit(DisasContext *s, unsigned int sf,
5174                         unsigned int rn, unsigned int rd)
5175 {
5176     TCGv_i64 tcg_rd, tcg_rn;
5177     tcg_rd = cpu_reg(s, rd);
5178     tcg_rn = cpu_reg(s, rn);
5179 
5180     if (sf) {
5181         gen_helper_rbit64(tcg_rd, tcg_rn);
5182     } else {
5183         TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
5184         tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
5185         gen_helper_rbit(tcg_tmp32, tcg_tmp32);
5186         tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
5187     }
5188 }
5189 
5190 /* REV with sf==1, opcode==3 ("REV64") */
5191 static void handle_rev64(DisasContext *s, unsigned int sf,
5192                          unsigned int rn, unsigned int rd)
5193 {
5194     if (!sf) {
5195         unallocated_encoding(s);
5196         return;
5197     }
5198     tcg_gen_bswap64_i64(cpu_reg(s, rd), cpu_reg(s, rn));
5199 }
5200 
5201 /* REV (sf==0, opcode==2) and REV32 (sf==1, opcode==2) */
5204 static void handle_rev32(DisasContext *s, unsigned int sf,
5205                          unsigned int rn, unsigned int rd)
5206 {
5207     TCGv_i64 tcg_rd = cpu_reg(s, rd);
5208     TCGv_i64 tcg_rn = cpu_reg(s, rn);
5209 
5210     if (sf) {
5211         tcg_gen_bswap64_i64(tcg_rd, tcg_rn);
5212         tcg_gen_rotri_i64(tcg_rd, tcg_rd, 32);
5213     } else {
5214         tcg_gen_bswap32_i64(tcg_rd, tcg_rn, TCG_BSWAP_OZ);
5215     }
5216 }
5217 
5218 /* REV16 (opcode==1) */
5219 static void handle_rev16(DisasContext *s, unsigned int sf,
5220                          unsigned int rn, unsigned int rd)
5221 {
5222     TCGv_i64 tcg_rd = cpu_reg(s, rd);
5223     TCGv_i64 tcg_tmp = tcg_temp_new_i64();
5224     TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);
5225     TCGv_i64 mask = tcg_constant_i64(sf ? 0x00ff00ff00ff00ffull : 0x00ff00ff);
5226 
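    /* rd = ((rn & mask) << 8) | ((rn >> 8) & mask), i.e. swap the two
     * bytes within each 16-bit lane.
     */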
5227     tcg_gen_shri_i64(tcg_tmp, tcg_rn, 8);
5228     tcg_gen_and_i64(tcg_rd, tcg_rn, mask);
5229     tcg_gen_and_i64(tcg_tmp, tcg_tmp, mask);
5230     tcg_gen_shli_i64(tcg_rd, tcg_rd, 8);
5231     tcg_gen_or_i64(tcg_rd, tcg_rd, tcg_tmp);
5232 }
5233 
5234 /* Data-processing (1 source)
5235  *   31  30  29  28             21 20     16 15    10 9    5 4    0
5236  * +----+---+---+-----------------+---------+--------+------+------+
5237  * | sf | 1 | S | 1 1 0 1 0 1 1 0 | opcode2 | opcode |  Rn  |  Rd  |
5238  * +----+---+---+-----------------+---------+--------+------+------+
5239  */
5240 static void disas_data_proc_1src(DisasContext *s, uint32_t insn)
5241 {
5242     unsigned int sf, opcode, opcode2, rn, rd;
5243     TCGv_i64 tcg_rd;
5244 
5245     if (extract32(insn, 29, 1)) {
5246         unallocated_encoding(s);
5247         return;
5248     }
5249 
5250     sf = extract32(insn, 31, 1);
5251     opcode = extract32(insn, 10, 6);
5252     opcode2 = extract32(insn, 16, 5);
5253     rn = extract32(insn, 5, 5);
5254     rd = extract32(insn, 0, 5);
5255 
5256 #define MAP(SF, O2, O1) ((SF) | (O1 << 1) | (O2 << 7))
5257 
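    /* e.g. MAP(1, 0x01, 0x00) == 0x81, the PACIA case below */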
5258     switch (MAP(sf, opcode2, opcode)) {
5259     case MAP(0, 0x00, 0x00): /* RBIT */
5260     case MAP(1, 0x00, 0x00):
5261         handle_rbit(s, sf, rn, rd);
5262         break;
5263     case MAP(0, 0x00, 0x01): /* REV16 */
5264     case MAP(1, 0x00, 0x01):
5265         handle_rev16(s, sf, rn, rd);
5266         break;
5267     case MAP(0, 0x00, 0x02): /* REV/REV32 */
5268     case MAP(1, 0x00, 0x02):
5269         handle_rev32(s, sf, rn, rd);
5270         break;
5271     case MAP(1, 0x00, 0x03): /* REV64 */
5272         handle_rev64(s, sf, rn, rd);
5273         break;
5274     case MAP(0, 0x00, 0x04): /* CLZ */
5275     case MAP(1, 0x00, 0x04):
5276         handle_clz(s, sf, rn, rd);
5277         break;
5278     case MAP(0, 0x00, 0x05): /* CLS */
5279     case MAP(1, 0x00, 0x05):
5280         handle_cls(s, sf, rn, rd);
5281         break;
5282     case MAP(1, 0x01, 0x00): /* PACIA */
5283         if (s->pauth_active) {
5284             tcg_rd = cpu_reg(s, rd);
5285             gen_helper_pacia(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
5286         } else if (!dc_isar_feature(aa64_pauth, s)) {
5287             goto do_unallocated;
5288         }
5289         break;
5290     case MAP(1, 0x01, 0x01): /* PACIB */
5291         if (s->pauth_active) {
5292             tcg_rd = cpu_reg(s, rd);
5293             gen_helper_pacib(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
5294         } else if (!dc_isar_feature(aa64_pauth, s)) {
5295             goto do_unallocated;
5296         }
5297         break;
5298     case MAP(1, 0x01, 0x02): /* PACDA */
5299         if (s->pauth_active) {
5300             tcg_rd = cpu_reg(s, rd);
5301             gen_helper_pacda(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
5302         } else if (!dc_isar_feature(aa64_pauth, s)) {
5303             goto do_unallocated;
5304         }
5305         break;
5306     case MAP(1, 0x01, 0x03): /* PACDB */
5307         if (s->pauth_active) {
5308             tcg_rd = cpu_reg(s, rd);
5309             gen_helper_pacdb(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
5310         } else if (!dc_isar_feature(aa64_pauth, s)) {
5311             goto do_unallocated;
5312         }
5313         break;
5314     case MAP(1, 0x01, 0x04): /* AUTIA */
5315         if (s->pauth_active) {
5316             tcg_rd = cpu_reg(s, rd);
5317             gen_helper_autia(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
5318         } else if (!dc_isar_feature(aa64_pauth, s)) {
5319             goto do_unallocated;
5320         }
5321         break;
5322     case MAP(1, 0x01, 0x05): /* AUTIB */
5323         if (s->pauth_active) {
5324             tcg_rd = cpu_reg(s, rd);
5325             gen_helper_autib(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
5326         } else if (!dc_isar_feature(aa64_pauth, s)) {
5327             goto do_unallocated;
5328         }
5329         break;
5330     case MAP(1, 0x01, 0x06): /* AUTDA */
5331         if (s->pauth_active) {
5332             tcg_rd = cpu_reg(s, rd);
5333             gen_helper_autda(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
5334         } else if (!dc_isar_feature(aa64_pauth, s)) {
5335             goto do_unallocated;
5336         }
5337         break;
5338     case MAP(1, 0x01, 0x07): /* AUTDB */
5339         if (s->pauth_active) {
5340             tcg_rd = cpu_reg(s, rd);
5341             gen_helper_autdb(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
5342         } else if (!dc_isar_feature(aa64_pauth, s)) {
5343             goto do_unallocated;
5344         }
5345         break;
5346     case MAP(1, 0x01, 0x08): /* PACIZA */
5347         if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
5348             goto do_unallocated;
5349         } else if (s->pauth_active) {
5350             tcg_rd = cpu_reg(s, rd);
5351             gen_helper_pacia(tcg_rd, cpu_env, tcg_rd, tcg_constant_i64(0));
5352         }
5353         break;
5354     case MAP(1, 0x01, 0x09): /* PACIZB */
5355         if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
5356             goto do_unallocated;
5357         } else if (s->pauth_active) {
5358             tcg_rd = cpu_reg(s, rd);
5359             gen_helper_pacib(tcg_rd, cpu_env, tcg_rd, tcg_constant_i64(0));
5360         }
5361         break;
5362     case MAP(1, 0x01, 0x0a): /* PACDZA */
5363         if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
5364             goto do_unallocated;
5365         } else if (s->pauth_active) {
5366             tcg_rd = cpu_reg(s, rd);
5367             gen_helper_pacda(tcg_rd, cpu_env, tcg_rd, tcg_constant_i64(0));
5368         }
5369         break;
5370     case MAP(1, 0x01, 0x0b): /* PACDZB */
5371         if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
5372             goto do_unallocated;
5373         } else if (s->pauth_active) {
5374             tcg_rd = cpu_reg(s, rd);
5375             gen_helper_pacdb(tcg_rd, cpu_env, tcg_rd, tcg_constant_i64(0));
5376         }
5377         break;
5378     case MAP(1, 0x01, 0x0c): /* AUTIZA */
5379         if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
5380             goto do_unallocated;
5381         } else if (s->pauth_active) {
5382             tcg_rd = cpu_reg(s, rd);
5383             gen_helper_autia(tcg_rd, cpu_env, tcg_rd, tcg_constant_i64(0));
5384         }
5385         break;
5386     case MAP(1, 0x01, 0x0d): /* AUTIZB */
5387         if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
5388             goto do_unallocated;
5389         } else if (s->pauth_active) {
5390             tcg_rd = cpu_reg(s, rd);
5391             gen_helper_autib(tcg_rd, cpu_env, tcg_rd, tcg_constant_i64(0));
5392         }
5393         break;
5394     case MAP(1, 0x01, 0x0e): /* AUTDZA */
5395         if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
5396             goto do_unallocated;
5397         } else if (s->pauth_active) {
5398             tcg_rd = cpu_reg(s, rd);
5399             gen_helper_autda(tcg_rd, cpu_env, tcg_rd, tcg_constant_i64(0));
5400         }
5401         break;
5402     case MAP(1, 0x01, 0x0f): /* AUTDZB */
5403         if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
5404             goto do_unallocated;
5405         } else if (s->pauth_active) {
5406             tcg_rd = cpu_reg(s, rd);
5407             gen_helper_autdb(tcg_rd, cpu_env, tcg_rd, tcg_constant_i64(0));
5408         }
5409         break;
5410     case MAP(1, 0x01, 0x10): /* XPACI */
5411         if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
5412             goto do_unallocated;
5413         } else if (s->pauth_active) {
5414             tcg_rd = cpu_reg(s, rd);
5415             gen_helper_xpaci(tcg_rd, cpu_env, tcg_rd);
5416         }
5417         break;
5418     case MAP(1, 0x01, 0x11): /* XPACD */
5419         if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
5420             goto do_unallocated;
5421         } else if (s->pauth_active) {
5422             tcg_rd = cpu_reg(s, rd);
5423             gen_helper_xpacd(tcg_rd, cpu_env, tcg_rd);
5424         }
5425         break;
5426     default:
5427     do_unallocated:
5428         unallocated_encoding(s);
5429         break;
5430     }
5431 
5432 #undef MAP
5433 }
5434 
5435 static void handle_div(DisasContext *s, bool is_signed, unsigned int sf,
5436                        unsigned int rm, unsigned int rn, unsigned int rd)
5437 {
5438     TCGv_i64 tcg_n, tcg_m, tcg_rd;
5439     tcg_rd = cpu_reg(s, rd);
5440 
5441     if (!sf && is_signed) {
5442         tcg_n = tcg_temp_new_i64();
5443         tcg_m = tcg_temp_new_i64();
5444         tcg_gen_ext32s_i64(tcg_n, cpu_reg(s, rn));
5445         tcg_gen_ext32s_i64(tcg_m, cpu_reg(s, rm));
5446     } else {
5447         tcg_n = read_cpu_reg(s, rn, sf);
5448         tcg_m = read_cpu_reg(s, rm, sf);
5449     }
5450 
5451     if (is_signed) {
5452         gen_helper_sdiv64(tcg_rd, tcg_n, tcg_m);
5453     } else {
5454         gen_helper_udiv64(tcg_rd, tcg_n, tcg_m);
5455     }
5456 
5457     if (!sf) { /* zero extend final result */
5458         tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
5459     }
5460 }
5461 
5462 /* LSLV, LSRV, ASRV, RORV */
5463 static void handle_shift_reg(DisasContext *s,
5464                              enum a64_shift_type shift_type, unsigned int sf,
5465                              unsigned int rm, unsigned int rn, unsigned int rd)
5466 {
5467     TCGv_i64 tcg_shift = tcg_temp_new_i64();
5468     TCGv_i64 tcg_rd = cpu_reg(s, rd);
5469     TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);
5470 
5471     tcg_gen_andi_i64(tcg_shift, cpu_reg(s, rm), sf ? 63 : 31);
5472     shift_reg(tcg_rd, tcg_rn, sf, shift_type, tcg_shift);
5473 }
5474 
5475 /* CRC32[BHWX], CRC32C[BHWX] */
5476 static void handle_crc32(DisasContext *s,
5477                          unsigned int sf, unsigned int sz, bool crc32c,
5478                          unsigned int rm, unsigned int rn, unsigned int rd)
5479 {
5480     TCGv_i64 tcg_acc, tcg_val;
5481     TCGv_i32 tcg_bytes;
5482 
5483     if (!dc_isar_feature(aa64_crc32, s)
5484         || (sf == 1 && sz != 3)
5485         || (sf == 0 && sz == 3)) {
5486         unallocated_encoding(s);
5487         return;
5488     }
5489 
5490     if (sz == 3) {
5491         tcg_val = cpu_reg(s, rm);
5492     } else {
5493         uint64_t mask;
5494         switch (sz) {
5495         case 0:
5496             mask = 0xFF;
5497             break;
5498         case 1:
5499             mask = 0xFFFF;
5500             break;
5501         case 2:
5502             mask = 0xFFFFFFFF;
5503             break;
5504         default:
5505             g_assert_not_reached();
5506         }
5507         tcg_val = tcg_temp_new_i64();
5508         tcg_gen_andi_i64(tcg_val, cpu_reg(s, rm), mask);
5509     }
5510 
5511     tcg_acc = cpu_reg(s, rn);
5512     tcg_bytes = tcg_constant_i32(1 << sz);
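    /* sz is log2 of the operand size, so the helper consumes 1/2/4/8 bytes */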
5513 
5514     if (crc32c) {
5515         gen_helper_crc32c_64(cpu_reg(s, rd), tcg_acc, tcg_val, tcg_bytes);
5516     } else {
5517         gen_helper_crc32_64(cpu_reg(s, rd), tcg_acc, tcg_val, tcg_bytes);
5518     }
5519 }
5520 
5521 /* Data-processing (2 source)
5522  *   31   30  29 28             21 20  16 15    10 9    5 4    0
5523  * +----+---+---+-----------------+------+--------+------+------+
5524  * | sf | 0 | S | 1 1 0 1 0 1 1 0 |  Rm  | opcode |  Rn  |  Rd  |
5525  * +----+---+---+-----------------+------+--------+------+------+
5526  */
5527 static void disas_data_proc_2src(DisasContext *s, uint32_t insn)
5528 {
5529     unsigned int sf, rm, opcode, rn, rd, setflag;
5530     sf = extract32(insn, 31, 1);
5531     setflag = extract32(insn, 29, 1);
5532     rm = extract32(insn, 16, 5);
5533     opcode = extract32(insn, 10, 6);
5534     rn = extract32(insn, 5, 5);
5535     rd = extract32(insn, 0, 5);
5536 
5537     if (setflag && opcode != 0) {
5538         unallocated_encoding(s);
5539         return;
5540     }
5541 
5542     switch (opcode) {
5543     case 0: /* SUBP(S) */
5544         if (sf == 0 || !dc_isar_feature(aa64_mte_insn_reg, s)) {
5545             goto do_unallocated;
5546         } else {
5547             TCGv_i64 tcg_n, tcg_m, tcg_d;
5548 
5549             tcg_n = read_cpu_reg_sp(s, rn, true);
5550             tcg_m = read_cpu_reg_sp(s, rm, true);
5551             tcg_gen_sextract_i64(tcg_n, tcg_n, 0, 56);
5552             tcg_gen_sextract_i64(tcg_m, tcg_m, 0, 56);
5553             tcg_d = cpu_reg(s, rd);
5554 
5555             if (setflag) {
5556                 gen_sub_CC(true, tcg_d, tcg_n, tcg_m);
5557             } else {
5558                 tcg_gen_sub_i64(tcg_d, tcg_n, tcg_m);
5559             }
5560         }
5561         break;
5562     case 2: /* UDIV */
5563         handle_div(s, false, sf, rm, rn, rd);
5564         break;
5565     case 3: /* SDIV */
5566         handle_div(s, true, sf, rm, rn, rd);
5567         break;
5568     case 4: /* IRG */
5569         if (sf == 0 || !dc_isar_feature(aa64_mte_insn_reg, s)) {
5570             goto do_unallocated;
5571         }
5572         if (s->ata) {
5573             gen_helper_irg(cpu_reg_sp(s, rd), cpu_env,
5574                            cpu_reg_sp(s, rn), cpu_reg(s, rm));
5575         } else {
5576             gen_address_with_allocation_tag0(cpu_reg_sp(s, rd),
5577                                              cpu_reg_sp(s, rn));
5578         }
5579         break;
5580     case 5: /* GMI */
5581         if (sf == 0 || !dc_isar_feature(aa64_mte_insn_reg, s)) {
5582             goto do_unallocated;
5583         } else {
5584             TCGv_i64 t = tcg_temp_new_i64();
5585 
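            /* GMI: set the bit selected by Xn's allocation tag (bits 59:56)
             * in the exclusion mask taken from Xm.
             */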
5586             tcg_gen_extract_i64(t, cpu_reg_sp(s, rn), 56, 4);
5587             tcg_gen_shl_i64(t, tcg_constant_i64(1), t);
5588             tcg_gen_or_i64(cpu_reg(s, rd), cpu_reg(s, rm), t);
5589         }
5590         break;
5591     case 8: /* LSLV */
5592         handle_shift_reg(s, A64_SHIFT_TYPE_LSL, sf, rm, rn, rd);
5593         break;
5594     case 9: /* LSRV */
5595         handle_shift_reg(s, A64_SHIFT_TYPE_LSR, sf, rm, rn, rd);
5596         break;
5597     case 10: /* ASRV */
5598         handle_shift_reg(s, A64_SHIFT_TYPE_ASR, sf, rm, rn, rd);
5599         break;
5600     case 11: /* RORV */
5601         handle_shift_reg(s, A64_SHIFT_TYPE_ROR, sf, rm, rn, rd);
5602         break;
5603     case 12: /* PACGA */
5604         if (sf == 0 || !dc_isar_feature(aa64_pauth, s)) {
5605             goto do_unallocated;
5606         }
5607         gen_helper_pacga(cpu_reg(s, rd), cpu_env,
5608                          cpu_reg(s, rn), cpu_reg_sp(s, rm));
5609         break;
5610     case 16:
5611     case 17:
5612     case 18:
5613     case 19:
5614     case 20:
5615     case 21:
5616     case 22:
5617     case 23: /* CRC32 */
5618     {
5619         int sz = extract32(opcode, 0, 2);
5620         bool crc32c = extract32(opcode, 2, 1);
5621         handle_crc32(s, sf, sz, crc32c, rm, rn, rd);
5622         break;
5623     }
5624     default:
5625     do_unallocated:
5626         unallocated_encoding(s);
5627         break;
5628     }
5629 }
5630 
5631 /*
5632  * Data processing - register
5633  *  31  30 29  28      25    21  20  16      10         0
5634  * +--+---+--+---+-------+-----+-------+-------+---------+
5635  * |  |op0|  |op1| 1 0 1 | op2 |       |  op3  |         |
5636  * +--+---+--+---+-------+-----+-------+-------+---------+
5637  */
5638 static void disas_data_proc_reg(DisasContext *s, uint32_t insn)
5639 {
5640     int op0 = extract32(insn, 30, 1);
5641     int op1 = extract32(insn, 28, 1);
5642     int op2 = extract32(insn, 21, 4);
5643     int op3 = extract32(insn, 10, 6);
5644 
5645     if (!op1) {
5646         if (op2 & 8) {
5647             if (op2 & 1) {
5648                 /* Add/sub (extended register) */
5649                 disas_add_sub_ext_reg(s, insn);
5650             } else {
5651                 /* Add/sub (shifted register) */
5652                 disas_add_sub_reg(s, insn);
5653             }
5654         } else {
5655             /* Logical (shifted register) */
5656             disas_logic_reg(s, insn);
5657         }
5658         return;
5659     }
5660 
5661     switch (op2) {
5662     case 0x0:
5663         switch (op3) {
5664         case 0x00: /* Add/subtract (with carry) */
5665             disas_adc_sbc(s, insn);
5666             break;
5667 
5668         case 0x01: /* Rotate right into flags */
5669         case 0x21:
5670             disas_rotate_right_into_flags(s, insn);
5671             break;
5672 
5673         case 0x02: /* Evaluate into flags */
5674         case 0x12:
5675         case 0x22:
5676         case 0x32:
5677             disas_evaluate_into_flags(s, insn);
5678             break;
5679 
5680         default:
5681             goto do_unallocated;
5682         }
5683         break;
5684 
5685     case 0x2: /* Conditional compare */
5686         disas_cc(s, insn); /* both imm and reg forms */
5687         break;
5688 
5689     case 0x4: /* Conditional select */
5690         disas_cond_select(s, insn);
5691         break;
5692 
5693     case 0x6: /* Data-processing */
5694         if (op0) {    /* (1 source) */
5695             disas_data_proc_1src(s, insn);
5696         } else {      /* (2 source) */
5697             disas_data_proc_2src(s, insn);
5698         }
5699         break;
5700     case 0x8 ... 0xf: /* (3 source) */
5701         disas_data_proc_3src(s, insn);
5702         break;
5703 
5704     default:
5705     do_unallocated:
5706         unallocated_encoding(s);
5707         break;
5708     }
5709 }
5710 
5711 static void handle_fp_compare(DisasContext *s, int size,
5712                               unsigned int rn, unsigned int rm,
5713                               bool cmp_with_zero, bool signal_all_nans)
5714 {
5715     TCGv_i64 tcg_flags = tcg_temp_new_i64();
5716     TCGv_ptr fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
5717 
5718     if (size == MO_64) {
5719         TCGv_i64 tcg_vn, tcg_vm;
5720 
5721         tcg_vn = read_fp_dreg(s, rn);
5722         if (cmp_with_zero) {
5723             tcg_vm = tcg_constant_i64(0);
5724         } else {
5725             tcg_vm = read_fp_dreg(s, rm);
5726         }
5727         if (signal_all_nans) {
5728             gen_helper_vfp_cmped_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
5729         } else {
5730             gen_helper_vfp_cmpd_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
5731         }
5732     } else {
5733         TCGv_i32 tcg_vn = tcg_temp_new_i32();
5734         TCGv_i32 tcg_vm = tcg_temp_new_i32();
5735 
5736         read_vec_element_i32(s, tcg_vn, rn, 0, size);
5737         if (cmp_with_zero) {
5738             tcg_gen_movi_i32(tcg_vm, 0);
5739         } else {
5740             read_vec_element_i32(s, tcg_vm, rm, 0, size);
5741         }
5742 
5743         switch (size) {
5744         case MO_32:
5745             if (signal_all_nans) {
5746                 gen_helper_vfp_cmpes_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
5747             } else {
5748                 gen_helper_vfp_cmps_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
5749             }
5750             break;
5751         case MO_16:
5752             if (signal_all_nans) {
5753                 gen_helper_vfp_cmpeh_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
5754             } else {
5755                 gen_helper_vfp_cmph_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
5756             }
5757             break;
5758         default:
5759             g_assert_not_reached();
5760         }
5761     }
5762 
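    /* The A64 compare helpers return NZCV in bits 31:28 of the result */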
5763     gen_set_nzcv(tcg_flags);
5764 }
5765 
5766 /* Floating point compare
5767  *   31  30  29 28       24 23  22  21 20  16 15 14 13  10    9    5 4     0
5768  * +---+---+---+-----------+------+---+------+-----+---------+------+-------+
5769  * | M | 0 | S | 1 1 1 1 0 | type | 1 |  Rm  | op  | 1 0 0 0 |  Rn  |  op2  |
5770  * +---+---+---+-----------+------+---+------+-----+---------+------+-------+
5771  */
5772 static void disas_fp_compare(DisasContext *s, uint32_t insn)
5773 {
5774     unsigned int mos, type, rm, op, rn, opc, op2r;
5775     int size;
5776 
5777     mos = extract32(insn, 29, 3);
5778     type = extract32(insn, 22, 2);
5779     rm = extract32(insn, 16, 5);
5780     op = extract32(insn, 14, 2);
5781     rn = extract32(insn, 5, 5);
5782     opc = extract32(insn, 3, 2);
5783     op2r = extract32(insn, 0, 3);
5784 
5785     if (mos || op || op2r) {
5786         unallocated_encoding(s);
5787         return;
5788     }
5789 
5790     switch (type) {
5791     case 0:
5792         size = MO_32;
5793         break;
5794     case 1:
5795         size = MO_64;
5796         break;
5797     case 3:
5798         size = MO_16;
5799         if (dc_isar_feature(aa64_fp16, s)) {
5800             break;
5801         }
5802         /* fallthru */
5803     default:
5804         unallocated_encoding(s);
5805         return;
5806     }
5807 
5808     if (!fp_access_check(s)) {
5809         return;
5810     }
5811 
5812     handle_fp_compare(s, size, rn, rm, opc & 1, opc & 2);
5813 }
5814 
5815 /* Floating point conditional compare
5816  *   31  30  29 28       24 23  22  21 20  16 15  12 11 10 9    5  4   3    0
5817  * +---+---+---+-----------+------+---+------+------+-----+------+----+------+
5818  * | M | 0 | S | 1 1 1 1 0 | type | 1 |  Rm  | cond | 0 1 |  Rn  | op | nzcv |
5819  * +---+---+---+-----------+------+---+------+------+-----+------+----+------+
5820  */
5821 static void disas_fp_ccomp(DisasContext *s, uint32_t insn)
5822 {
5823     unsigned int mos, type, rm, cond, rn, op, nzcv;
5824     TCGLabel *label_continue = NULL;
5825     int size;
5826 
5827     mos = extract32(insn, 29, 3);
5828     type = extract32(insn, 22, 2);
5829     rm = extract32(insn, 16, 5);
5830     cond = extract32(insn, 12, 4);
5831     rn = extract32(insn, 5, 5);
5832     op = extract32(insn, 4, 1);
5833     nzcv = extract32(insn, 0, 4);
5834 
5835     if (mos) {
5836         unallocated_encoding(s);
5837         return;
5838     }
5839 
5840     switch (type) {
5841     case 0:
5842         size = MO_32;
5843         break;
5844     case 1:
5845         size = MO_64;
5846         break;
5847     case 3:
5848         size = MO_16;
5849         if (dc_isar_feature(aa64_fp16, s)) {
5850             break;
5851         }
5852         /* fallthru */
5853     default:
5854         unallocated_encoding(s);
5855         return;
5856     }
5857 
5858     if (!fp_access_check(s)) {
5859         return;
5860     }
5861 
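    /* If the condition fails, take the flags from the nzcv immediate
     * and branch over the compare.
     */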
5862     if (cond < 0x0e) { /* not always */
5863         TCGLabel *label_match = gen_new_label();
5864         label_continue = gen_new_label();
5865         arm_gen_test_cc(cond, label_match);
5866         /* nomatch: */
5867         gen_set_nzcv(tcg_constant_i64(nzcv << 28));
5868         tcg_gen_br(label_continue);
5869         gen_set_label(label_match);
5870     }
5871 
5872     handle_fp_compare(s, size, rn, rm, false, op);
5873 
5874     if (cond < 0x0e) {
5875         gen_set_label(label_continue);
5876     }
5877 }
5878 
5879 /* Floating point conditional select
5880  *   31  30  29 28       24 23  22  21 20  16 15  12 11 10 9    5 4    0
5881  * +---+---+---+-----------+------+---+------+------+-----+------+------+
5882  * | M | 0 | S | 1 1 1 1 0 | type | 1 |  Rm  | cond | 1 1 |  Rn  |  Rd  |
5883  * +---+---+---+-----------+------+---+------+------+-----+------+------+
5884  */
5885 static void disas_fp_csel(DisasContext *s, uint32_t insn)
5886 {
5887     unsigned int mos, type, rm, cond, rn, rd;
5888     TCGv_i64 t_true, t_false;
5889     DisasCompare64 c;
5890     MemOp sz;
5891 
5892     mos = extract32(insn, 29, 3);
5893     type = extract32(insn, 22, 2);
5894     rm = extract32(insn, 16, 5);
5895     cond = extract32(insn, 12, 4);
5896     rn = extract32(insn, 5, 5);
5897     rd = extract32(insn, 0, 5);
5898 
5899     if (mos) {
5900         unallocated_encoding(s);
5901         return;
5902     }
5903 
5904     switch (type) {
5905     case 0:
5906         sz = MO_32;
5907         break;
5908     case 1:
5909         sz = MO_64;
5910         break;
5911     case 3:
5912         sz = MO_16;
5913         if (dc_isar_feature(aa64_fp16, s)) {
5914             break;
5915         }
5916         /* fallthru */
5917     default:
5918         unallocated_encoding(s);
5919         return;
5920     }
5921 
5922     if (!fp_access_check(s)) {
5923         return;
5924     }
5925 
5926     /* Zero extend sreg & hreg inputs to 64 bits now.  */
5927     t_true = tcg_temp_new_i64();
5928     t_false = tcg_temp_new_i64();
5929     read_vec_element(s, t_true, rn, 0, sz);
5930     read_vec_element(s, t_false, rm, 0, sz);
5931 
5932     a64_test_cc(&c, cond);
5933     tcg_gen_movcond_i64(c.cond, t_true, c.value, tcg_constant_i64(0),
5934                         t_true, t_false);
5935 
5936     /* Note that sregs & hregs write back zeros to the high bits,
5937        and we've already done the zero-extension.  */
5938     write_fp_dreg(s, rd, t_true);
5939 }
5940 
5941 /* Floating-point data-processing (1 source) - half precision */
5942 static void handle_fp_1src_half(DisasContext *s, int opcode, int rd, int rn)
5943 {
5944     TCGv_ptr fpst = NULL;
5945     TCGv_i32 tcg_op = read_fp_hreg(s, rn);
5946     TCGv_i32 tcg_res = tcg_temp_new_i32();
5947 
5948     switch (opcode) {
5949     case 0x0: /* FMOV */
5950         tcg_gen_mov_i32(tcg_res, tcg_op);
5951         break;
5952     case 0x1: /* FABS */
5953         tcg_gen_andi_i32(tcg_res, tcg_op, 0x7fff);
5954         break;
5955     case 0x2: /* FNEG */
5956         tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000);
5957         break;
5958     case 0x3: /* FSQRT */
5959         fpst = fpstatus_ptr(FPST_FPCR_F16);
5960         gen_helper_sqrt_f16(tcg_res, tcg_op, fpst);
5961         break;
5962     case 0x8: /* FRINTN */
5963     case 0x9: /* FRINTP */
5964     case 0xa: /* FRINTM */
5965     case 0xb: /* FRINTZ */
5966     case 0xc: /* FRINTA */
5967     {
5968         TCGv_i32 tcg_rmode;
5969 
5970         fpst = fpstatus_ptr(FPST_FPCR_F16);
5971         tcg_rmode = gen_set_rmode(opcode & 7, fpst);
5972         gen_helper_advsimd_rinth(tcg_res, tcg_op, fpst);
5973         gen_restore_rmode(tcg_rmode, fpst);
5974         break;
5975     }
5976     case 0xe: /* FRINTX */
5977         fpst = fpstatus_ptr(FPST_FPCR_F16);
5978         gen_helper_advsimd_rinth_exact(tcg_res, tcg_op, fpst);
5979         break;
5980     case 0xf: /* FRINTI */
5981         fpst = fpstatus_ptr(FPST_FPCR_F16);
5982         gen_helper_advsimd_rinth(tcg_res, tcg_op, fpst);
5983         break;
5984     default:
5985         g_assert_not_reached();
5986     }
5987 
5988     write_fp_sreg(s, rd, tcg_res);
5989 }
5990 
5991 /* Floating-point data-processing (1 source) - single precision */
5992 static void handle_fp_1src_single(DisasContext *s, int opcode, int rd, int rn)
5993 {
5994     void (*gen_fpst)(TCGv_i32, TCGv_i32, TCGv_ptr);
5995     TCGv_i32 tcg_op, tcg_res;
5996     TCGv_ptr fpst;
5997     int rmode = -1;
5998 
5999     tcg_op = read_fp_sreg(s, rn);
6000     tcg_res = tcg_temp_new_i32();
6001 
6002     switch (opcode) {
6003     case 0x0: /* FMOV */
6004         tcg_gen_mov_i32(tcg_res, tcg_op);
6005         goto done;
6006     case 0x1: /* FABS */
6007         gen_helper_vfp_abss(tcg_res, tcg_op);
6008         goto done;
6009     case 0x2: /* FNEG */
6010         gen_helper_vfp_negs(tcg_res, tcg_op);
6011         goto done;
6012     case 0x3: /* FSQRT */
6013         gen_helper_vfp_sqrts(tcg_res, tcg_op, cpu_env);
6014         goto done;
6015     case 0x6: /* BFCVT */
6016         gen_fpst = gen_helper_bfcvt;
6017         break;
6018     case 0x8: /* FRINTN */
6019     case 0x9: /* FRINTP */
6020     case 0xa: /* FRINTM */
6021     case 0xb: /* FRINTZ */
6022     case 0xc: /* FRINTA */
6023         rmode = opcode & 7;
6024         gen_fpst = gen_helper_rints;
6025         break;
6026     case 0xe: /* FRINTX */
6027         gen_fpst = gen_helper_rints_exact;
6028         break;
6029     case 0xf: /* FRINTI */
6030         gen_fpst = gen_helper_rints;
6031         break;
6032     case 0x10: /* FRINT32Z */
6033         rmode = FPROUNDING_ZERO;
6034         gen_fpst = gen_helper_frint32_s;
6035         break;
6036     case 0x11: /* FRINT32X */
6037         gen_fpst = gen_helper_frint32_s;
6038         break;
6039     case 0x12: /* FRINT64Z */
6040         rmode = FPROUNDING_ZERO;
6041         gen_fpst = gen_helper_frint64_s;
6042         break;
6043     case 0x13: /* FRINT64X */
6044         gen_fpst = gen_helper_frint64_s;
6045         break;
6046     default:
6047         g_assert_not_reached();
6048     }
6049 
6050     fpst = fpstatus_ptr(FPST_FPCR);
6051     if (rmode >= 0) {
6052         TCGv_i32 tcg_rmode = gen_set_rmode(rmode, fpst);
6053         gen_fpst(tcg_res, tcg_op, fpst);
6054         gen_restore_rmode(tcg_rmode, fpst);
6055     } else {
6056         gen_fpst(tcg_res, tcg_op, fpst);
6057     }
6058 
6059  done:
6060     write_fp_sreg(s, rd, tcg_res);
6061 }
6062 
6063 /* Floating-point data-processing (1 source) - double precision */
6064 static void handle_fp_1src_double(DisasContext *s, int opcode, int rd, int rn)
6065 {
6066     void (*gen_fpst)(TCGv_i64, TCGv_i64, TCGv_ptr);
6067     TCGv_i64 tcg_op, tcg_res;
6068     TCGv_ptr fpst;
6069     int rmode = -1;
6070 
6071     switch (opcode) {
6072     case 0x0: /* FMOV */
6073         gen_gvec_fn2(s, false, rd, rn, tcg_gen_gvec_mov, 0);
6074         return;
6075     }
6076 
6077     tcg_op = read_fp_dreg(s, rn);
6078     tcg_res = tcg_temp_new_i64();
6079 
6080     switch (opcode) {
6081     case 0x1: /* FABS */
6082         gen_helper_vfp_absd(tcg_res, tcg_op);
6083         goto done;
6084     case 0x2: /* FNEG */
6085         gen_helper_vfp_negd(tcg_res, tcg_op);
6086         goto done;
6087     case 0x3: /* FSQRT */
6088         gen_helper_vfp_sqrtd(tcg_res, tcg_op, cpu_env);
6089         goto done;
6090     case 0x8: /* FRINTN */
6091     case 0x9: /* FRINTP */
6092     case 0xa: /* FRINTM */
6093     case 0xb: /* FRINTZ */
6094     case 0xc: /* FRINTA */
6095         rmode = opcode & 7;
6096         gen_fpst = gen_helper_rintd;
6097         break;
6098     case 0xe: /* FRINTX */
6099         gen_fpst = gen_helper_rintd_exact;
6100         break;
6101     case 0xf: /* FRINTI */
6102         gen_fpst = gen_helper_rintd;
6103         break;
6104     case 0x10: /* FRINT32Z */
6105         rmode = FPROUNDING_ZERO;
6106         gen_fpst = gen_helper_frint32_d;
6107         break;
6108     case 0x11: /* FRINT32X */
6109         gen_fpst = gen_helper_frint32_d;
6110         break;
6111     case 0x12: /* FRINT64Z */
6112         rmode = FPROUNDING_ZERO;
6113         gen_fpst = gen_helper_frint64_d;
6114         break;
6115     case 0x13: /* FRINT64X */
6116         gen_fpst = gen_helper_frint64_d;
6117         break;
6118     default:
6119         g_assert_not_reached();
6120     }
6121 
6122     fpst = fpstatus_ptr(FPST_FPCR);
6123     if (rmode >= 0) {
6124         TCGv_i32 tcg_rmode = gen_set_rmode(rmode, fpst);
6125         gen_fpst(tcg_res, tcg_op, fpst);
6126         gen_restore_rmode(tcg_rmode, fpst);
6127     } else {
6128         gen_fpst(tcg_res, tcg_op, fpst);
6129     }
6130 
6131  done:
6132     write_fp_dreg(s, rd, tcg_res);
6133 }
6134 
6135 static void handle_fp_fcvt(DisasContext *s, int opcode,
6136                            int rd, int rn, int dtype, int ntype)
6137 {
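    /* ntype/dtype use the FP "type" encoding: 0 = single, 1 = double, 3 = half */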
6138     switch (ntype) {
6139     case 0x0:
6140     {
6141         TCGv_i32 tcg_rn = read_fp_sreg(s, rn);
6142         if (dtype == 1) {
6143             /* Single to double */
6144             TCGv_i64 tcg_rd = tcg_temp_new_i64();
6145             gen_helper_vfp_fcvtds(tcg_rd, tcg_rn, cpu_env);
6146             write_fp_dreg(s, rd, tcg_rd);
6147         } else {
6148             /* Single to half */
6149             TCGv_i32 tcg_rd = tcg_temp_new_i32();
6150             TCGv_i32 ahp = get_ahp_flag();
6151             TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
6152 
6153             gen_helper_vfp_fcvt_f32_to_f16(tcg_rd, tcg_rn, fpst, ahp);
6154             /* write_fp_sreg is OK here because top half of tcg_rd is zero */
6155             write_fp_sreg(s, rd, tcg_rd);
6156         }
6157         break;
6158     }
6159     case 0x1:
6160     {
6161         TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
6162         TCGv_i32 tcg_rd = tcg_temp_new_i32();
6163         if (dtype == 0) {
6164             /* Double to single */
6165             gen_helper_vfp_fcvtsd(tcg_rd, tcg_rn, cpu_env);
6166         } else {
6167             TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
6168             TCGv_i32 ahp = get_ahp_flag();
6169             /* Double to half */
6170             gen_helper_vfp_fcvt_f64_to_f16(tcg_rd, tcg_rn, fpst, ahp);
6171             /* write_fp_sreg is OK here because top half of tcg_rd is zero */
6172         }
6173         write_fp_sreg(s, rd, tcg_rd);
6174         break;
6175     }
6176     case 0x3:
6177     {
6178         TCGv_i32 tcg_rn = read_fp_sreg(s, rn);
6179         TCGv_ptr tcg_fpst = fpstatus_ptr(FPST_FPCR);
6180         TCGv_i32 tcg_ahp = get_ahp_flag();
6181         tcg_gen_ext16u_i32(tcg_rn, tcg_rn);
6182         if (dtype == 0) {
6183             /* Half to single */
6184             TCGv_i32 tcg_rd = tcg_temp_new_i32();
6185             gen_helper_vfp_fcvt_f16_to_f32(tcg_rd, tcg_rn, tcg_fpst, tcg_ahp);
6186             write_fp_sreg(s, rd, tcg_rd);
6187         } else {
6188             /* Half to double */
6189             TCGv_i64 tcg_rd = tcg_temp_new_i64();
6190             gen_helper_vfp_fcvt_f16_to_f64(tcg_rd, tcg_rn, tcg_fpst, tcg_ahp);
6191             write_fp_dreg(s, rd, tcg_rd);
6192         }
6193         break;
6194     }
6195     default:
6196         g_assert_not_reached();
6197     }
6198 }
6199 
6200 /* Floating point data-processing (1 source)
6201  *   31  30  29 28       24 23  22  21 20    15 14       10 9    5 4    0
6202  * +---+---+---+-----------+------+---+--------+-----------+------+------+
6203  * | M | 0 | S | 1 1 1 1 0 | type | 1 | opcode | 1 0 0 0 0 |  Rn  |  Rd  |
6204  * +---+---+---+-----------+------+---+--------+-----------+------+------+
6205  */
6206 static void disas_fp_1src(DisasContext *s, uint32_t insn)
6207 {
6208     int mos = extract32(insn, 29, 3);
6209     int type = extract32(insn, 22, 2);
6210     int opcode = extract32(insn, 15, 6);
6211     int rn = extract32(insn, 5, 5);
6212     int rd = extract32(insn, 0, 5);
6213 
6214     if (mos) {
6215         goto do_unallocated;
6216     }
6217 
6218     switch (opcode) {
6219     case 0x4: case 0x5: case 0x7:
6220     {
6221         /* FCVT between half, single and double precision */
6222         int dtype = extract32(opcode, 0, 2);
6223         if (type == 2 || dtype == type) {
6224             goto do_unallocated;
6225         }
6226         if (!fp_access_check(s)) {
6227             return;
6228         }
6229 
6230         handle_fp_fcvt(s, opcode, rd, rn, dtype, type);
6231         break;
6232     }
6233 
6234     case 0x10 ... 0x13: /* FRINT{32,64}{X,Z} */
6235         if (type > 1 || !dc_isar_feature(aa64_frint, s)) {
6236             goto do_unallocated;
6237         }
6238         /* fall through */
6239     case 0x0 ... 0x3:
6240     case 0x8 ... 0xc:
6241     case 0xe ... 0xf:
6242         /* 32-to-32 and 64-to-64 ops */
6243         switch (type) {
6244         case 0:
6245             if (!fp_access_check(s)) {
6246                 return;
6247             }
6248             handle_fp_1src_single(s, opcode, rd, rn);
6249             break;
6250         case 1:
6251             if (!fp_access_check(s)) {
6252                 return;
6253             }
6254             handle_fp_1src_double(s, opcode, rd, rn);
6255             break;
6256         case 3:
6257             if (!dc_isar_feature(aa64_fp16, s)) {
6258                 goto do_unallocated;
6259             }
6260 
6261             if (!fp_access_check(s)) {
6262                 return;
6263             }
6264             handle_fp_1src_half(s, opcode, rd, rn);
6265             break;
6266         default:
6267             goto do_unallocated;
6268         }
6269         break;
6270 
6271     case 0x6:
6272         switch (type) {
6273         case 1: /* BFCVT */
6274             if (!dc_isar_feature(aa64_bf16, s)) {
6275                 goto do_unallocated;
6276             }
6277             if (!fp_access_check(s)) {
6278                 return;
6279             }
6280             handle_fp_1src_single(s, opcode, rd, rn);
6281             break;
6282         default:
6283             goto do_unallocated;
6284         }
6285         break;
6286 
6287     default:
6288     do_unallocated:
6289         unallocated_encoding(s);
6290         break;
6291     }
6292 }
6293 
6294 /* Floating-point data-processing (2 source) - single precision */
6295 static void handle_fp_2src_single(DisasContext *s, int opcode,
6296                                   int rd, int rn, int rm)
6297 {
6298     TCGv_i32 tcg_op1;
6299     TCGv_i32 tcg_op2;
6300     TCGv_i32 tcg_res;
6301     TCGv_ptr fpst;
6302 
6303     tcg_res = tcg_temp_new_i32();
6304     fpst = fpstatus_ptr(FPST_FPCR);
6305     tcg_op1 = read_fp_sreg(s, rn);
6306     tcg_op2 = read_fp_sreg(s, rm);
6307 
6308     switch (opcode) {
6309     case 0x0: /* FMUL */
6310         gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
6311         break;
6312     case 0x1: /* FDIV */
6313         gen_helper_vfp_divs(tcg_res, tcg_op1, tcg_op2, fpst);
6314         break;
6315     case 0x2: /* FADD */
6316         gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
6317         break;
6318     case 0x3: /* FSUB */
6319         gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
6320         break;
6321     case 0x4: /* FMAX */
6322         gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
6323         break;
6324     case 0x5: /* FMIN */
6325         gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
6326         break;
6327     case 0x6: /* FMAXNM */
6328         gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
6329         break;
6330     case 0x7: /* FMINNM */
6331         gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
6332         break;
6333     case 0x8: /* FNMUL */
6334         gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
6335         gen_helper_vfp_negs(tcg_res, tcg_res);
6336         break;
6337     }
6338 
6339     write_fp_sreg(s, rd, tcg_res);
6340 }
6341 
6342 /* Floating-point data-processing (2 source) - double precision */
6343 static void handle_fp_2src_double(DisasContext *s, int opcode,
6344                                   int rd, int rn, int rm)
6345 {
6346     TCGv_i64 tcg_op1;
6347     TCGv_i64 tcg_op2;
6348     TCGv_i64 tcg_res;
6349     TCGv_ptr fpst;
6350 
6351     tcg_res = tcg_temp_new_i64();
6352     fpst = fpstatus_ptr(FPST_FPCR);
6353     tcg_op1 = read_fp_dreg(s, rn);
6354     tcg_op2 = read_fp_dreg(s, rm);
6355 
6356     switch (opcode) {
6357     case 0x0: /* FMUL */
6358         gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
6359         break;
6360     case 0x1: /* FDIV */
6361         gen_helper_vfp_divd(tcg_res, tcg_op1, tcg_op2, fpst);
6362         break;
6363     case 0x2: /* FADD */
6364         gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
6365         break;
6366     case 0x3: /* FSUB */
6367         gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
6368         break;
6369     case 0x4: /* FMAX */
6370         gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
6371         break;
6372     case 0x5: /* FMIN */
6373         gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
6374         break;
6375     case 0x6: /* FMAXNM */
6376         gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
6377         break;
6378     case 0x7: /* FMINNM */
6379         gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
6380         break;
6381     case 0x8: /* FNMUL */
6382         gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
6383         gen_helper_vfp_negd(tcg_res, tcg_res);
6384         break;
6385     }
6386 
6387     write_fp_dreg(s, rd, tcg_res);
6388 }
6389 
6390 /* Floating-point data-processing (2 source) - half precision */
6391 static void handle_fp_2src_half(DisasContext *s, int opcode,
6392                                 int rd, int rn, int rm)
6393 {
6394     TCGv_i32 tcg_op1;
6395     TCGv_i32 tcg_op2;
6396     TCGv_i32 tcg_res;
6397     TCGv_ptr fpst;
6398 
6399     tcg_res = tcg_temp_new_i32();
6400     fpst = fpstatus_ptr(FPST_FPCR_F16);
6401     tcg_op1 = read_fp_hreg(s, rn);
6402     tcg_op2 = read_fp_hreg(s, rm);
6403 
6404     switch (opcode) {
6405     case 0x0: /* FMUL */
6406         gen_helper_advsimd_mulh(tcg_res, tcg_op1, tcg_op2, fpst);
6407         break;
6408     case 0x1: /* FDIV */
6409         gen_helper_advsimd_divh(tcg_res, tcg_op1, tcg_op2, fpst);
6410         break;
6411     case 0x2: /* FADD */
6412         gen_helper_advsimd_addh(tcg_res, tcg_op1, tcg_op2, fpst);
6413         break;
6414     case 0x3: /* FSUB */
6415         gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
6416         break;
6417     case 0x4: /* FMAX */
6418         gen_helper_advsimd_maxh(tcg_res, tcg_op1, tcg_op2, fpst);
6419         break;
6420     case 0x5: /* FMIN */
6421         gen_helper_advsimd_minh(tcg_res, tcg_op1, tcg_op2, fpst);
6422         break;
6423     case 0x6: /* FMAXNM */
6424         gen_helper_advsimd_maxnumh(tcg_res, tcg_op1, tcg_op2, fpst);
6425         break;
6426     case 0x7: /* FMINNM */
6427         gen_helper_advsimd_minnumh(tcg_res, tcg_op1, tcg_op2, fpst);
6428         break;
6429     case 0x8: /* FNMUL */
6430         gen_helper_advsimd_mulh(tcg_res, tcg_op1, tcg_op2, fpst);
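        /* negate the f16 product by flipping its sign bit */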
6431         tcg_gen_xori_i32(tcg_res, tcg_res, 0x8000);
6432         break;
6433     default:
6434         g_assert_not_reached();
6435     }
6436 
6437     write_fp_sreg(s, rd, tcg_res);
6438 }
6439 
6440 /* Floating point data-processing (2 source)
6441  *   31  30  29 28       24 23  22  21 20  16 15    12 11 10 9    5 4    0
6442  * +---+---+---+-----------+------+---+------+--------+-----+------+------+
6443  * | M | 0 | S | 1 1 1 1 0 | type | 1 |  Rm  | opcode | 1 0 |  Rn  |  Rd  |
6444  * +---+---+---+-----------+------+---+------+--------+-----+------+------+
6445  */
6446 static void disas_fp_2src(DisasContext *s, uint32_t insn)
6447 {
6448     int mos = extract32(insn, 29, 3);
6449     int type = extract32(insn, 22, 2);
6450     int rd = extract32(insn, 0, 5);
6451     int rn = extract32(insn, 5, 5);
6452     int rm = extract32(insn, 16, 5);
6453     int opcode = extract32(insn, 12, 4);
6454 
6455     if (opcode > 8 || mos) {
6456         unallocated_encoding(s);
6457         return;
6458     }
6459 
6460     switch (type) {
6461     case 0:
6462         if (!fp_access_check(s)) {
6463             return;
6464         }
6465         handle_fp_2src_single(s, opcode, rd, rn, rm);
6466         break;
6467     case 1:
6468         if (!fp_access_check(s)) {
6469             return;
6470         }
6471         handle_fp_2src_double(s, opcode, rd, rn, rm);
6472         break;
6473     case 3:
6474         if (!dc_isar_feature(aa64_fp16, s)) {
6475             unallocated_encoding(s);
6476             return;
6477         }
6478         if (!fp_access_check(s)) {
6479             return;
6480         }
6481         handle_fp_2src_half(s, opcode, rd, rn, rm);
6482         break;
6483     default:
6484         unallocated_encoding(s);
6485     }
6486 }
6487 
6488 /* Floating-point data-processing (3 source) - single precision */
6489 static void handle_fp_3src_single(DisasContext *s, bool o0, bool o1,
6490                                   int rd, int rn, int rm, int ra)
6491 {
6492     TCGv_i32 tcg_op1, tcg_op2, tcg_op3;
6493     TCGv_i32 tcg_res = tcg_temp_new_i32();
6494     TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
6495 
6496     tcg_op1 = read_fp_sreg(s, rn);
6497     tcg_op2 = read_fp_sreg(s, rm);
6498     tcg_op3 = read_fp_sreg(s, ra);
6499 
6500     /* These are fused multiply-add, and must be done as one
6501      * floating point operation with no rounding between the
6502      * multiplication and addition steps.
6503      * NB that doing the negations here as separate steps is
6504      * correct: an input NaN should come out with its sign bit
6505      * flipped if it is a negated input.
6506      */
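         /* The o1:o0 pair selects the four 3-source operations:
          *   00 FMADD:  ra + rn*rm      01 FMSUB:  ra - rn*rm
          *   10 FNMADD: -ra - rn*rm     11 FNMSUB: -ra + rn*rm
          * which is why ra is negated when o1 == 1 and the product
          * (via rn) whenever o0 != o1.
          */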
6507     if (o1 == true) {
6508         gen_helper_vfp_negs(tcg_op3, tcg_op3);
6509     }
6510 
6511     if (o0 != o1) {
6512         gen_helper_vfp_negs(tcg_op1, tcg_op1);
6513     }
6514 
6515     gen_helper_vfp_muladds(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);
6516 
6517     write_fp_sreg(s, rd, tcg_res);
6518 }
6519 
6520 /* Floating-point data-processing (3 source) - double precision */
6521 static void handle_fp_3src_double(DisasContext *s, bool o0, bool o1,
6522                                   int rd, int rn, int rm, int ra)
6523 {
6524     TCGv_i64 tcg_op1, tcg_op2, tcg_op3;
6525     TCGv_i64 tcg_res = tcg_temp_new_i64();
6526     TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
6527 
6528     tcg_op1 = read_fp_dreg(s, rn);
6529     tcg_op2 = read_fp_dreg(s, rm);
6530     tcg_op3 = read_fp_dreg(s, ra);
6531 
6532     /* These are fused multiply-add, and must be done as one
6533      * floating point operation with no rounding between the
6534      * multiplication and addition steps.
6535      * NB that doing the negations here as separate steps is
6536      * correct: an input NaN should come out with its sign bit
6537      * flipped if it is a negated input.
6538      */
6539     if (o1 == true) {
6540         gen_helper_vfp_negd(tcg_op3, tcg_op3);
6541     }
6542 
6543     if (o0 != o1) {
6544         gen_helper_vfp_negd(tcg_op1, tcg_op1);
6545     }
6546 
6547     gen_helper_vfp_muladdd(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);
6548 
6549     write_fp_dreg(s, rd, tcg_res);
6550 }
6551 
6552 /* Floating-point data-processing (3 source) - half precision */
6553 static void handle_fp_3src_half(DisasContext *s, bool o0, bool o1,
6554                                 int rd, int rn, int rm, int ra)
6555 {
6556     TCGv_i32 tcg_op1, tcg_op2, tcg_op3;
6557     TCGv_i32 tcg_res = tcg_temp_new_i32();
6558     TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR_F16);
6559 
6560     tcg_op1 = read_fp_hreg(s, rn);
6561     tcg_op2 = read_fp_hreg(s, rm);
6562     tcg_op3 = read_fp_hreg(s, ra);
6563 
6564     /* These are fused multiply-add, and must be done as one
6565      * floating point operation with no rounding between the
6566      * multiplication and addition steps.
6567      * NB that doing the negations here as separate steps is
6568      * correct: an input NaN should come out with its sign bit
6569      * flipped if it is a negated input.
6570      */
6571     if (o1 == true) {
6572         tcg_gen_xori_i32(tcg_op3, tcg_op3, 0x8000);
6573     }
6574 
6575     if (o0 != o1) {
6576         tcg_gen_xori_i32(tcg_op1, tcg_op1, 0x8000);
6577     }
6578 
6579     gen_helper_advsimd_muladdh(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);
6580 
6581     write_fp_sreg(s, rd, tcg_res);
6582 }
6583 
6584 /* Floating point data-processing (3 source)
6585  *   31  30  29 28       24 23  22  21  20  16  15  14  10 9    5 4    0
6586  * +---+---+---+-----------+------+----+------+----+------+------+------+
6587  * | M | 0 | S | 1 1 1 1 1 | type | o1 |  Rm  | o0 |  Ra  |  Rn  |  Rd  |
6588  * +---+---+---+-----------+------+----+------+----+------+------+------+
6589  */
6590 static void disas_fp_3src(DisasContext *s, uint32_t insn)
6591 {
6592     int mos = extract32(insn, 29, 3);
6593     int type = extract32(insn, 22, 2);
6594     int rd = extract32(insn, 0, 5);
6595     int rn = extract32(insn, 5, 5);
6596     int ra = extract32(insn, 10, 5);
6597     int rm = extract32(insn, 16, 5);
6598     bool o0 = extract32(insn, 15, 1);
6599     bool o1 = extract32(insn, 21, 1);
6600 
6601     if (mos) {
6602         unallocated_encoding(s);
6603         return;
6604     }
6605 
6606     switch (type) {
6607     case 0:
6608         if (!fp_access_check(s)) {
6609             return;
6610         }
6611         handle_fp_3src_single(s, o0, o1, rd, rn, rm, ra);
6612         break;
6613     case 1:
6614         if (!fp_access_check(s)) {
6615             return;
6616         }
6617         handle_fp_3src_double(s, o0, o1, rd, rn, rm, ra);
6618         break;
6619     case 3:
6620         if (!dc_isar_feature(aa64_fp16, s)) {
6621             unallocated_encoding(s);
6622             return;
6623         }
6624         if (!fp_access_check(s)) {
6625             return;
6626         }
6627         handle_fp_3src_half(s, o0, o1, rd, rn, rm, ra);
6628         break;
6629     default:
6630         unallocated_encoding(s);
6631     }
6632 }
6633 
6634 /* Floating point immediate
6635  *   31  30  29 28       24 23  22  21 20        13 12   10 9    5 4    0
6636  * +---+---+---+-----------+------+---+------------+-------+------+------+
6637  * | M | 0 | S | 1 1 1 1 0 | type | 1 |    imm8    | 1 0 0 | imm5 |  Rd  |
6638  * +---+---+---+-----------+------+---+------------+-------+------+------+
6639  */
6640 static void disas_fp_imm(DisasContext *s, uint32_t insn)
6641 {
6642     int rd = extract32(insn, 0, 5);
6643     int imm5 = extract32(insn, 5, 5);
6644     int imm8 = extract32(insn, 13, 8);
6645     int type = extract32(insn, 22, 2);
6646     int mos = extract32(insn, 29, 3);
6647     uint64_t imm;
6648     MemOp sz;
6649 
6650     if (mos || imm5) {
6651         unallocated_encoding(s);
6652         return;
6653     }
6654 
6655     switch (type) {
6656     case 0:
6657         sz = MO_32;
6658         break;
6659     case 1:
6660         sz = MO_64;
6661         break;
6662     case 3:
6663         sz = MO_16;
6664         if (dc_isar_feature(aa64_fp16, s)) {
6665             break;
6666         }
6667         /* fallthru */
6668     default:
6669         unallocated_encoding(s);
6670         return;
6671     }
6672 
6673     if (!fp_access_check(s)) {
6674         return;
6675     }
6676 
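         /* vfp_expand_imm() implements the VFPExpandImm() pseudocode:
          * the sign is imm8<7>, the exponent is built from imm8<6:4>
          * (with imm8<6> inverted and replicated), and the fraction is
          * imm8<3:0> padded with zeroes to the destination width.
          */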
6677     imm = vfp_expand_imm(sz, imm8);
6678     write_fp_dreg(s, rd, tcg_constant_i64(imm));
6679 }
6680 
6681 /* Handle floating point <=> fixed point conversions. Note that we can
6682  * also deal with fp <=> integer conversions as a special case (scale == 64).
6683  * OPTME: consider handling that special case specially or at least skipping
6684  * the call to scalbn in the helpers for zero shifts.
6685  */
6686 static void handle_fpfpcvt(DisasContext *s, int rd, int rn, int opcode,
6687                            bool itof, int rmode, int scale, int sf, int type)
6688 {
6689     bool is_signed = !(opcode & 1);
6690     TCGv_ptr tcg_fpstatus;
6691     TCGv_i32 tcg_shift, tcg_single;
6692     TCGv_i64 tcg_double;
6693 
6694     tcg_fpstatus = fpstatus_ptr(type == 3 ? FPST_FPCR_F16 : FPST_FPCR);
6695 
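         /* tcg_shift holds the number of fractional bits, 64 - scale;
          * it is zero for the pure integer <-> FP conversions, which
          * pass scale == 64.
          */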
6696     tcg_shift = tcg_constant_i32(64 - scale);
6697 
6698     if (itof) {
6699         TCGv_i64 tcg_int = cpu_reg(s, rn);
6700         if (!sf) {
6701             TCGv_i64 tcg_extend = tcg_temp_new_i64();
6702 
6703             if (is_signed) {
6704                 tcg_gen_ext32s_i64(tcg_extend, tcg_int);
6705             } else {
6706                 tcg_gen_ext32u_i64(tcg_extend, tcg_int);
6707             }
6708 
6709             tcg_int = tcg_extend;
6710         }
6711 
6712         switch (type) {
6713         case 1: /* float64 */
6714             tcg_double = tcg_temp_new_i64();
6715             if (is_signed) {
6716                 gen_helper_vfp_sqtod(tcg_double, tcg_int,
6717                                      tcg_shift, tcg_fpstatus);
6718             } else {
6719                 gen_helper_vfp_uqtod(tcg_double, tcg_int,
6720                                      tcg_shift, tcg_fpstatus);
6721             }
6722             write_fp_dreg(s, rd, tcg_double);
6723             break;
6724 
6725         case 0: /* float32 */
6726             tcg_single = tcg_temp_new_i32();
6727             if (is_signed) {
6728                 gen_helper_vfp_sqtos(tcg_single, tcg_int,
6729                                      tcg_shift, tcg_fpstatus);
6730             } else {
6731                 gen_helper_vfp_uqtos(tcg_single, tcg_int,
6732                                      tcg_shift, tcg_fpstatus);
6733             }
6734             write_fp_sreg(s, rd, tcg_single);
6735             break;
6736 
6737         case 3: /* float16 */
6738             tcg_single = tcg_temp_new_i32();
6739             if (is_signed) {
6740                 gen_helper_vfp_sqtoh(tcg_single, tcg_int,
6741                                      tcg_shift, tcg_fpstatus);
6742             } else {
6743                 gen_helper_vfp_uqtoh(tcg_single, tcg_int,
6744                                      tcg_shift, tcg_fpstatus);
6745             }
6746             write_fp_sreg(s, rd, tcg_single);
6747             break;
6748 
6749         default:
6750             g_assert_not_reached();
6751         }
6752     } else {
6753         TCGv_i64 tcg_int = cpu_reg(s, rd);
6754         TCGv_i32 tcg_rmode;
6755 
6756         if (extract32(opcode, 2, 1)) {
6757             /* There are too many rounding modes to all fit into rmode,
6758              * so FCVTA[US] is a special case.
6759              */
6760             rmode = FPROUNDING_TIEAWAY;
6761         }
6762 
6763         tcg_rmode = gen_set_rmode(rmode, tcg_fpstatus);
6764 
6765         switch (type) {
6766         case 1: /* float64 */
6767             tcg_double = read_fp_dreg(s, rn);
6768             if (is_signed) {
6769                 if (!sf) {
6770                     gen_helper_vfp_tosld(tcg_int, tcg_double,
6771                                          tcg_shift, tcg_fpstatus);
6772                 } else {
6773                     gen_helper_vfp_tosqd(tcg_int, tcg_double,
6774                                          tcg_shift, tcg_fpstatus);
6775                 }
6776             } else {
6777                 if (!sf) {
6778                     gen_helper_vfp_tould(tcg_int, tcg_double,
6779                                          tcg_shift, tcg_fpstatus);
6780                 } else {
6781                     gen_helper_vfp_touqd(tcg_int, tcg_double,
6782                                          tcg_shift, tcg_fpstatus);
6783                 }
6784             }
6785             if (!sf) {
6786                 tcg_gen_ext32u_i64(tcg_int, tcg_int);
6787             }
6788             break;
6789 
6790         case 0: /* float32 */
6791             tcg_single = read_fp_sreg(s, rn);
6792             if (sf) {
6793                 if (is_signed) {
6794                     gen_helper_vfp_tosqs(tcg_int, tcg_single,
6795                                          tcg_shift, tcg_fpstatus);
6796                 } else {
6797                     gen_helper_vfp_touqs(tcg_int, tcg_single,
6798                                          tcg_shift, tcg_fpstatus);
6799                 }
6800             } else {
6801                 TCGv_i32 tcg_dest = tcg_temp_new_i32();
6802                 if (is_signed) {
6803                     gen_helper_vfp_tosls(tcg_dest, tcg_single,
6804                                          tcg_shift, tcg_fpstatus);
6805                 } else {
6806                     gen_helper_vfp_touls(tcg_dest, tcg_single,
6807                                          tcg_shift, tcg_fpstatus);
6808                 }
6809                 tcg_gen_extu_i32_i64(tcg_int, tcg_dest);
6810             }
6811             break;
6812 
6813         case 3: /* float16 */
6814             tcg_single = read_fp_sreg(s, rn);
6815             if (sf) {
6816                 if (is_signed) {
6817                     gen_helper_vfp_tosqh(tcg_int, tcg_single,
6818                                          tcg_shift, tcg_fpstatus);
6819                 } else {
6820                     gen_helper_vfp_touqh(tcg_int, tcg_single,
6821                                          tcg_shift, tcg_fpstatus);
6822                 }
6823             } else {
6824                 TCGv_i32 tcg_dest = tcg_temp_new_i32();
6825                 if (is_signed) {
6826                     gen_helper_vfp_toslh(tcg_dest, tcg_single,
6827                                          tcg_shift, tcg_fpstatus);
6828                 } else {
6829                     gen_helper_vfp_toulh(tcg_dest, tcg_single,
6830                                          tcg_shift, tcg_fpstatus);
6831                 }
6832                 tcg_gen_extu_i32_i64(tcg_int, tcg_dest);
6833             }
6834             break;
6835 
6836         default:
6837             g_assert_not_reached();
6838         }
6839 
6840         gen_restore_rmode(tcg_rmode, tcg_fpstatus);
6841     }
6842 }
6843 
6844 /* Floating point <-> fixed point conversions
6845  *   31   30  29 28       24 23  22  21 20   19 18    16 15   10 9    5 4    0
6846  * +----+---+---+-----------+------+---+-------+--------+-------+------+------+
6847  * | sf | 0 | S | 1 1 1 1 0 | type | 0 | rmode | opcode | scale |  Rn  |  Rd  |
6848  * +----+---+---+-----------+------+---+-------+--------+-------+------+------+
6849  */
6850 static void disas_fp_fixed_conv(DisasContext *s, uint32_t insn)
6851 {
6852     int rd = extract32(insn, 0, 5);
6853     int rn = extract32(insn, 5, 5);
6854     int scale = extract32(insn, 10, 6);
6855     int opcode = extract32(insn, 16, 3);
6856     int rmode = extract32(insn, 19, 2);
6857     int type = extract32(insn, 22, 2);
6858     bool sbit = extract32(insn, 29, 1);
6859     bool sf = extract32(insn, 31, 1);
6860     bool itof;
6861 
6862     if (sbit || (!sf && scale < 32)) {
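         /* fbits is 64 - scale; a 32-bit fixed-point value has at most
          * 32 fractional bits, so scale < 32 with sf == 0 is unallocated.
          */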
6863         unallocated_encoding(s);
6864         return;
6865     }
6866 
6867     switch (type) {
6868     case 0: /* float32 */
6869     case 1: /* float64 */
6870         break;
6871     case 3: /* float16 */
6872         if (dc_isar_feature(aa64_fp16, s)) {
6873             break;
6874         }
6875         /* fallthru */
6876     default:
6877         unallocated_encoding(s);
6878         return;
6879     }
6880 
6881     switch ((rmode << 3) | opcode) {
6882     case 0x2: /* SCVTF */
6883     case 0x3: /* UCVTF */
6884         itof = true;
6885         break;
6886     case 0x18: /* FCVTZS */
6887     case 0x19: /* FCVTZU */
6888         itof = false;
6889         break;
6890     default:
6891         unallocated_encoding(s);
6892         return;
6893     }
6894 
6895     if (!fp_access_check(s)) {
6896         return;
6897     }
6898 
6899     handle_fpfpcvt(s, rd, rn, opcode, itof, FPROUNDING_ZERO, scale, sf, type);
6900 }
6901 
6902 static void handle_fmov(DisasContext *s, int rd, int rn, int type, bool itof)
6903 {
6904     /* FMOV: gpr to or from float, double, or top half of quad fp reg,
6905      * without conversion.
6906      */
6907 
6908     if (itof) {
6909         TCGv_i64 tcg_rn = cpu_reg(s, rn);
6910         TCGv_i64 tmp;
6911 
6912         switch (type) {
6913         case 0:
6914             /* 32 bit */
6915             tmp = tcg_temp_new_i64();
6916             tcg_gen_ext32u_i64(tmp, tcg_rn);
6917             write_fp_dreg(s, rd, tmp);
6918             break;
6919         case 1:
6920             /* 64 bit */
6921             write_fp_dreg(s, rd, tcg_rn);
6922             break;
6923         case 2:
6924             /* 64 bit to top half. */
6925             tcg_gen_st_i64(tcg_rn, cpu_env, fp_reg_hi_offset(s, rd));
6926             clear_vec_high(s, true, rd);
6927             break;
6928         case 3:
6929             /* 16 bit */
6930             tmp = tcg_temp_new_i64();
6931             tcg_gen_ext16u_i64(tmp, tcg_rn);
6932             write_fp_dreg(s, rd, tmp);
6933             break;
6934         default:
6935             g_assert_not_reached();
6936         }
6937     } else {
6938         TCGv_i64 tcg_rd = cpu_reg(s, rd);
6939 
6940         switch (type) {
6941         case 0:
6942             /* 32 bit */
6943             tcg_gen_ld32u_i64(tcg_rd, cpu_env, fp_reg_offset(s, rn, MO_32));
6944             break;
6945         case 1:
6946             /* 64 bit */
6947             tcg_gen_ld_i64(tcg_rd, cpu_env, fp_reg_offset(s, rn, MO_64));
6948             break;
6949         case 2:
6950             /* 64 bits from top half */
6951             tcg_gen_ld_i64(tcg_rd, cpu_env, fp_reg_hi_offset(s, rn));
6952             break;
6953         case 3:
6954             /* 16 bit */
6955             tcg_gen_ld16u_i64(tcg_rd, cpu_env, fp_reg_offset(s, rn, MO_16));
6956             break;
6957         default:
6958             g_assert_not_reached();
6959         }
6960     }
6961 }
6962 
6963 static void handle_fjcvtzs(DisasContext *s, int rd, int rn)
6964 {
6965     TCGv_i64 t = read_fp_dreg(s, rn);
6966     TCGv_ptr fpstatus = fpstatus_ptr(FPST_FPCR);
6967 
6968     gen_helper_fjcvtzs(t, t, fpstatus);
6969 
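         /* The helper returns the 32-bit result in the low half of t
          * and a nonzero value in the high half when the conversion
          * was inexact.  FJCVTZS sets NZCV to 0Z00 with Z indicating
          * an exact conversion, which maps directly onto QEMU's
          * convention that the Z flag is set when cpu_ZF == 0.
          */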
6970     tcg_gen_ext32u_i64(cpu_reg(s, rd), t);
6971     tcg_gen_extrh_i64_i32(cpu_ZF, t);
6972     tcg_gen_movi_i32(cpu_CF, 0);
6973     tcg_gen_movi_i32(cpu_NF, 0);
6974     tcg_gen_movi_i32(cpu_VF, 0);
6975 }
6976 
6977 /* Floating point <-> integer conversions
6978  *   31   30  29 28       24 23  22  21 20   19 18 16 15         10 9  5 4  0
6979  * +----+---+---+-----------+------+---+-------+-----+-------------+----+----+
6980  * | sf | 0 | S | 1 1 1 1 0 | type | 1 | rmode | opc | 0 0 0 0 0 0 | Rn | Rd |
6981  * +----+---+---+-----------+------+---+-------+-----+-------------+----+----+
6982  */
6983 static void disas_fp_int_conv(DisasContext *s, uint32_t insn)
6984 {
6985     int rd = extract32(insn, 0, 5);
6986     int rn = extract32(insn, 5, 5);
6987     int opcode = extract32(insn, 16, 3);
6988     int rmode = extract32(insn, 19, 2);
6989     int type = extract32(insn, 22, 2);
6990     bool sbit = extract32(insn, 29, 1);
6991     bool sf = extract32(insn, 31, 1);
6992     bool itof = false;
6993 
6994     if (sbit) {
6995         goto do_unallocated;
6996     }
6997 
6998     switch (opcode) {
6999     case 2: /* SCVTF */
7000     case 3: /* UCVTF */
7001         itof = true;
7002         /* fallthru */
7003     case 4: /* FCVTAS */
7004     case 5: /* FCVTAU */
7005         if (rmode != 0) {
7006             goto do_unallocated;
7007         }
7008         /* fallthru */
7009     case 0: /* FCVT[NPMZ]S */
7010     case 1: /* FCVT[NPMZ]U */
7011         switch (type) {
7012         case 0: /* float32 */
7013         case 1: /* float64 */
7014             break;
7015         case 3: /* float16 */
7016             if (!dc_isar_feature(aa64_fp16, s)) {
7017                 goto do_unallocated;
7018             }
7019             break;
7020         default:
7021             goto do_unallocated;
7022         }
7023         if (!fp_access_check(s)) {
7024             return;
7025         }
7026         handle_fpfpcvt(s, rd, rn, opcode, itof, rmode, 64, sf, type);
7027         break;
7028 
7029     default:
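             /* The case values below are sf:type<1:0>:rmode<1:0>:opc<2:0>,
              * matching the packing in the switch expression.
              */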
7030         switch (sf << 7 | type << 5 | rmode << 3 | opcode) {
7031         case 0b01100110: /* FMOV half <-> 32-bit int */
7032         case 0b01100111:
7033         case 0b11100110: /* FMOV half <-> 64-bit int */
7034         case 0b11100111:
7035             if (!dc_isar_feature(aa64_fp16, s)) {
7036                 goto do_unallocated;
7037             }
7038             /* fallthru */
7039         case 0b00000110: /* FMOV 32-bit */
7040         case 0b00000111:
7041         case 0b10100110: /* FMOV 64-bit */
7042         case 0b10100111:
7043         case 0b11001110: /* FMOV top half of 128-bit */
7044         case 0b11001111:
7045             if (!fp_access_check(s)) {
7046                 return;
7047             }
7048             itof = opcode & 1;
7049             handle_fmov(s, rd, rn, type, itof);
7050             break;
7051 
7052         case 0b00111110: /* FJCVTZS */
7053             if (!dc_isar_feature(aa64_jscvt, s)) {
7054                 goto do_unallocated;
7055             } else if (fp_access_check(s)) {
7056                 handle_fjcvtzs(s, rd, rn);
7057             }
7058             break;
7059 
7060         default:
7061         do_unallocated:
7062             unallocated_encoding(s);
7063             return;
7064         }
7065         break;
7066     }
7067 }
7068 
7069 /* FP-specific subcases of table C3-6 (SIMD and FP data processing)
7070  *   31  30  29 28     25 24                          0
7071  * +---+---+---+---------+-----------------------------+
7072  * |   | 0 |   | 1 1 1 1 |                             |
7073  * +---+---+---+---------+-----------------------------+
7074  */
7075 static void disas_data_proc_fp(DisasContext *s, uint32_t insn)
7076 {
7077     if (extract32(insn, 24, 1)) {
7078         /* Floating point data-processing (3 source) */
7079         disas_fp_3src(s, insn);
7080     } else if (extract32(insn, 21, 1) == 0) {
7081         /* Floating point to fixed point conversions */
7082         disas_fp_fixed_conv(s, insn);
7083     } else {
7084         switch (extract32(insn, 10, 2)) {
7085         case 1:
7086             /* Floating point conditional compare */
7087             disas_fp_ccomp(s, insn);
7088             break;
7089         case 2:
7090             /* Floating point data-processing (2 source) */
7091             disas_fp_2src(s, insn);
7092             break;
7093         case 3:
7094             /* Floating point conditional select */
7095             disas_fp_csel(s, insn);
7096             break;
7097         case 0:
7098             switch (ctz32(extract32(insn, 12, 4))) {
7099             case 0: /* [15:12] == xxx1 */
7100                 /* Floating point immediate */
7101                 disas_fp_imm(s, insn);
7102                 break;
7103             case 1: /* [15:12] == xx10 */
7104                 /* Floating point compare */
7105                 disas_fp_compare(s, insn);
7106                 break;
7107             case 2: /* [15:12] == x100 */
7108                 /* Floating point data-processing (1 source) */
7109                 disas_fp_1src(s, insn);
7110                 break;
7111             case 3: /* [15:12] == 1000 */
7112                 unallocated_encoding(s);
7113                 break;
7114             default: /* [15:12] == 0000 */
7115                 /* Floating point <-> integer conversions */
7116                 disas_fp_int_conv(s, insn);
7117                 break;
7118             }
7119             break;
7120         }
7121     }
7122 }
7123 
7124 static void do_ext64(DisasContext *s, TCGv_i64 tcg_left, TCGv_i64 tcg_right,
7125                      int pos)
7126 {
7127     /* Extract 64 bits from the middle of two concatenated 64 bit
7128      * vector register slices left:right. The extracted bits start
7129      * at 'pos' bits into the right (least significant) side.
7130      * We return the result in tcg_right, and guarantee not to
7131      * trash tcg_left.
7132      */
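         /* For example, pos == 24 yields (right >> 24) | (left << 40). */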
7133     TCGv_i64 tcg_tmp = tcg_temp_new_i64();
7134     assert(pos > 0 && pos < 64);
7135 
7136     tcg_gen_shri_i64(tcg_right, tcg_right, pos);
7137     tcg_gen_shli_i64(tcg_tmp, tcg_left, 64 - pos);
7138     tcg_gen_or_i64(tcg_right, tcg_right, tcg_tmp);
7139 }
7140 
7141 /* EXT
7142  *   31  30 29         24 23 22  21 20  16 15  14  11 10  9    5 4    0
7143  * +---+---+-------------+-----+---+------+---+------+---+------+------+
7144  * | 0 | Q | 1 0 1 1 1 0 | op2 | 0 |  Rm  | 0 | imm4 | 0 |  Rn  |  Rd  |
7145  * +---+---+-------------+-----+---+------+---+------+---+------+------+
7146  */
7147 static void disas_simd_ext(DisasContext *s, uint32_t insn)
7148 {
7149     int is_q = extract32(insn, 30, 1);
7150     int op2 = extract32(insn, 22, 2);
7151     int imm4 = extract32(insn, 11, 4);
7152     int rm = extract32(insn, 16, 5);
7153     int rn = extract32(insn, 5, 5);
7154     int rd = extract32(insn, 0, 5);
7155     int pos = imm4 << 3;
7156     TCGv_i64 tcg_resl, tcg_resh;
7157 
7158     if (op2 != 0 || (!is_q && extract32(imm4, 3, 1))) {
7159         unallocated_encoding(s);
7160         return;
7161     }
7162 
7163     if (!fp_access_check(s)) {
7164         return;
7165     }
7166 
7167     tcg_resh = tcg_temp_new_i64();
7168     tcg_resl = tcg_temp_new_i64();
7169 
7170     /* Vd gets bits starting at pos bits into Vm:Vn. This is
7171      * either extracting 128 bits from a 128:128 concatenation, or
7172      * extracting 64 bits from a 64:64 concatenation.
7173      */
7174     if (!is_q) {
7175         read_vec_element(s, tcg_resl, rn, 0, MO_64);
7176         if (pos != 0) {
7177             read_vec_element(s, tcg_resh, rm, 0, MO_64);
7178             do_ext64(s, tcg_resh, tcg_resl, pos);
7179         }
7180     } else {
7181         TCGv_i64 tcg_hh;
7182         typedef struct {
7183             int reg;
7184             int elt;
7185         } EltPosns;
7186         EltPosns eltposns[] = { {rn, 0}, {rn, 1}, {rm, 0}, {rm, 1} };
7187         EltPosns *elt = eltposns;
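             /* eltposns lists the four 64-bit halves of the Vm:Vn
              * concatenation from least to most significant; advancing
              * 'elt' therefore moves up one 64-bit chunk at a time.
              */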
7188 
7189         if (pos >= 64) {
7190             elt++;
7191             pos -= 64;
7192         }
7193 
7194         read_vec_element(s, tcg_resl, elt->reg, elt->elt, MO_64);
7195         elt++;
7196         read_vec_element(s, tcg_resh, elt->reg, elt->elt, MO_64);
7197         elt++;
7198         if (pos != 0) {
7199             do_ext64(s, tcg_resh, tcg_resl, pos);
7200             tcg_hh = tcg_temp_new_i64();
7201             read_vec_element(s, tcg_hh, elt->reg, elt->elt, MO_64);
7202             do_ext64(s, tcg_hh, tcg_resh, pos);
7203         }
7204     }
7205 
7206     write_vec_element(s, tcg_resl, rd, 0, MO_64);
7207     if (is_q) {
7208         write_vec_element(s, tcg_resh, rd, 1, MO_64);
7209     }
7210     clear_vec_high(s, is_q, rd);
7211 }
7212 
7213 /* TBL/TBX
7214  *   31  30 29         24 23 22  21 20  16 15  14 13  12  11 10 9    5 4    0
7215  * +---+---+-------------+-----+---+------+---+-----+----+-----+------+------+
7216  * | 0 | Q | 0 0 1 1 1 0 | op2 | 0 |  Rm  | 0 | len | op | 0 0 |  Rn  |  Rd  |
7217  * +---+---+-------------+-----+---+------+---+-----+----+-----+------+------+
7218  */
7219 static void disas_simd_tb(DisasContext *s, uint32_t insn)
7220 {
7221     int op2 = extract32(insn, 22, 2);
7222     int is_q = extract32(insn, 30, 1);
7223     int rm = extract32(insn, 16, 5);
7224     int rn = extract32(insn, 5, 5);
7225     int rd = extract32(insn, 0, 5);
7226     int is_tbx = extract32(insn, 12, 1);
7227     int len = (extract32(insn, 13, 2) + 1) * 16;
7228 
7229     if (op2 != 0) {
7230         unallocated_encoding(s);
7231         return;
7232     }
7233 
7234     if (!fp_access_check(s)) {
7235         return;
7236     }
7237 
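         /* The table length in bytes, the TBX flag and the table base
          * register are packed into the gvec simd_data immediate for
          * the helper to unpack.
          */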
7238     tcg_gen_gvec_2_ptr(vec_full_reg_offset(s, rd),
7239                        vec_full_reg_offset(s, rm), cpu_env,
7240                        is_q ? 16 : 8, vec_full_reg_size(s),
7241                        (len << 6) | (is_tbx << 5) | rn,
7242                        gen_helper_simd_tblx);
7243 }
7244 
7245 /* ZIP/UZP/TRN
7246  *   31  30 29         24 23  22  21 20   16 15 14 12 11 10 9    5 4    0
7247  * +---+---+-------------+------+---+------+---+------------------+------+
7248  * | 0 | Q | 0 0 1 1 1 0 | size | 0 |  Rm  | 0 | opc | 1 0 |  Rn  |  Rd  |
7249  * +---+---+-------------+------+---+------+---+------------------+------+
7250  */
7251 static void disas_simd_zip_trn(DisasContext *s, uint32_t insn)
7252 {
7253     int rd = extract32(insn, 0, 5);
7254     int rn = extract32(insn, 5, 5);
7255     int rm = extract32(insn, 16, 5);
7256     int size = extract32(insn, 22, 2);
7257     /* opc field bits [1:0] indicate ZIP/UZP/TRN;
7258      * bit 2 indicates 1 vs 2 variant of the insn.
7259      */
7260     int opcode = extract32(insn, 12, 2);
7261     bool part = extract32(insn, 14, 1);
7262     bool is_q = extract32(insn, 30, 1);
7263     int esize = 8 << size;
7264     int i;
7265     int datasize = is_q ? 128 : 64;
7266     int elements = datasize / esize;
7267     TCGv_i64 tcg_res[2], tcg_ele;
7268 
7269     if (opcode == 0 || (size == 3 && !is_q)) {
7270         unallocated_encoding(s);
7271         return;
7272     }
7273 
7274     if (!fp_access_check(s)) {
7275         return;
7276     }
7277 
7278     tcg_res[0] = tcg_temp_new_i64();
7279     tcg_res[1] = is_q ? tcg_temp_new_i64() : NULL;
7280     tcg_ele = tcg_temp_new_i64();
7281 
7282     for (i = 0; i < elements; i++) {
7283         int o, w;
7284 
7285         switch (opcode) {
7286         case 1: /* UZP1/2 */
7287         {
7288             int midpoint = elements / 2;
7289             if (i < midpoint) {
7290                 read_vec_element(s, tcg_ele, rn, 2 * i + part, size);
7291             } else {
7292                 read_vec_element(s, tcg_ele, rm,
7293                                  2 * (i - midpoint) + part, size);
7294             }
7295             break;
7296         }
7297         case 2: /* TRN1/2 */
7298             if (i & 1) {
7299                 read_vec_element(s, tcg_ele, rm, (i & ~1) + part, size);
7300             } else {
7301                 read_vec_element(s, tcg_ele, rn, (i & ~1) + part, size);
7302             }
7303             break;
7304         case 3: /* ZIP1/2 */
7305         {
7306             int base = part * elements / 2;
7307             if (i & 1) {
7308                 read_vec_element(s, tcg_ele, rm, base + (i >> 1), size);
7309             } else {
7310                 read_vec_element(s, tcg_ele, rn, base + (i >> 1), size);
7311             }
7312             break;
7313         }
7314         default:
7315             g_assert_not_reached();
7316         }
7317 
7318         w = (i * esize) / 64;
7319         o = (i * esize) % 64;
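             /* w selects the 64-bit result word, o the bit offset within it */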
7320         if (o == 0) {
7321             tcg_gen_mov_i64(tcg_res[w], tcg_ele);
7322         } else {
7323             tcg_gen_shli_i64(tcg_ele, tcg_ele, o);
7324             tcg_gen_or_i64(tcg_res[w], tcg_res[w], tcg_ele);
7325         }
7326     }
7327 
7328     for (i = 0; i <= is_q; ++i) {
7329         write_vec_element(s, tcg_res[i], rd, i, MO_64);
7330     }
7331     clear_vec_high(s, is_q, rd);
7332 }
7333 
7334 /*
7335  * do_reduction_op helper
7336  *
7337  * This mirrors the Reduce() pseudocode in the ARM ARM. It is
7338  * important for correct NaN propagation that we do these
7339  * operations in exactly the order specified by the pseudocode.
7340  *
7341  * This is a recursive function; TCG temps should be freed by the
7342  * calling function once it is done with the values.
7343  */
7344 static TCGv_i32 do_reduction_op(DisasContext *s, int fpopcode, int rn,
7345                                 int esize, int size, int vmap, TCGv_ptr fpst)
7346 {
7347     if (esize == size) {
7348         int element;
7349         MemOp msize = esize == 16 ? MO_16 : MO_32;
7350         TCGv_i32 tcg_elem;
7351 
7352         /* We should have one register left here */
7353         assert(ctpop8(vmap) == 1);
7354         element = ctz32(vmap);
7355         assert(element < 8);
7356 
7357         tcg_elem = tcg_temp_new_i32();
7358         read_vec_element_i32(s, tcg_elem, rn, element, msize);
7359         return tcg_elem;
7360     } else {
7361         int bits = size / 2;
7362         int shift = ctpop8(vmap) / 2;
7363         int vmap_lo = (vmap >> shift) & vmap;
7364         int vmap_hi = (vmap & ~vmap_lo);
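             /* e.g. when reducing 8 elements, the initial vmap of 0xff
              * splits into vmap_lo = 0x0f and vmap_hi = 0xf0 at the
              * first level.
              */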
7365         TCGv_i32 tcg_hi, tcg_lo, tcg_res;
7366 
7367         tcg_hi = do_reduction_op(s, fpopcode, rn, esize, bits, vmap_hi, fpst);
7368         tcg_lo = do_reduction_op(s, fpopcode, rn, esize, bits, vmap_lo, fpst);
7369         tcg_res = tcg_temp_new_i32();
7370 
7371         switch (fpopcode) {
7372         case 0x0c: /* fmaxnmv half-precision */
7373             gen_helper_advsimd_maxnumh(tcg_res, tcg_lo, tcg_hi, fpst);
7374             break;
7375         case 0x0f: /* fmaxv half-precision */
7376             gen_helper_advsimd_maxh(tcg_res, tcg_lo, tcg_hi, fpst);
7377             break;
7378         case 0x1c: /* fminnmv half-precision */
7379             gen_helper_advsimd_minnumh(tcg_res, tcg_lo, tcg_hi, fpst);
7380             break;
7381         case 0x1f: /* fminv half-precision */
7382             gen_helper_advsimd_minh(tcg_res, tcg_lo, tcg_hi, fpst);
7383             break;
7384         case 0x2c: /* fmaxnmv */
7385             gen_helper_vfp_maxnums(tcg_res, tcg_lo, tcg_hi, fpst);
7386             break;
7387         case 0x2f: /* fmaxv */
7388             gen_helper_vfp_maxs(tcg_res, tcg_lo, tcg_hi, fpst);
7389             break;
7390         case 0x3c: /* fminnmv */
7391             gen_helper_vfp_minnums(tcg_res, tcg_lo, tcg_hi, fpst);
7392             break;
7393         case 0x3f: /* fminv */
7394             gen_helper_vfp_mins(tcg_res, tcg_lo, tcg_hi, fpst);
7395             break;
7396         default:
7397             g_assert_not_reached();
7398         }
7399         return tcg_res;
7400     }
7401 }
7402 
7403 /* AdvSIMD across lanes
7404  *   31  30  29 28       24 23  22 21       17 16    12 11 10 9    5 4    0
7405  * +---+---+---+-----------+------+-----------+--------+-----+------+------+
7406  * | 0 | Q | U | 0 1 1 1 0 | size | 1 1 0 0 0 | opcode | 1 0 |  Rn  |  Rd  |
7407  * +---+---+---+-----------+------+-----------+--------+-----+------+------+
7408  */
7409 static void disas_simd_across_lanes(DisasContext *s, uint32_t insn)
7410 {
7411     int rd = extract32(insn, 0, 5);
7412     int rn = extract32(insn, 5, 5);
7413     int size = extract32(insn, 22, 2);
7414     int opcode = extract32(insn, 12, 5);
7415     bool is_q = extract32(insn, 30, 1);
7416     bool is_u = extract32(insn, 29, 1);
7417     bool is_fp = false;
7418     bool is_min = false;
7419     int esize;
7420     int elements;
7421     int i;
7422     TCGv_i64 tcg_res, tcg_elt;
7423 
7424     switch (opcode) {
7425     case 0x1b: /* ADDV */
7426         if (is_u) {
7427             unallocated_encoding(s);
7428             return;
7429         }
7430         /* fall through */
7431     case 0x3: /* SADDLV, UADDLV */
7432     case 0xa: /* SMAXV, UMAXV */
7433     case 0x1a: /* SMINV, UMINV */
7434         if (size == 3 || (size == 2 && !is_q)) {
7435             unallocated_encoding(s);
7436             return;
7437         }
7438         break;
7439     case 0xc: /* FMAXNMV, FMINNMV */
7440     case 0xf: /* FMAXV, FMINV */
7441         /* Bit 1 of size field encodes min vs max and the actual size
7442          * depends on the encoding of the U bit. If U is not set
7443          * (and FP16 is enabled) then we do half-precision float
7444          * instead of single precision.
7445          */
7446         is_min = extract32(size, 1, 1);
7447         is_fp = true;
7448         if (!is_u && dc_isar_feature(aa64_fp16, s)) {
7449             size = 1;
7450         } else if (!is_u || !is_q || extract32(size, 0, 1)) {
7451             unallocated_encoding(s);
7452             return;
7453         } else {
7454             size = 2;
7455         }
7456         break;
7457     default:
7458         unallocated_encoding(s);
7459         return;
7460     }
7461 
7462     if (!fp_access_check(s)) {
7463         return;
7464     }
7465 
7466     esize = 8 << size;
7467     elements = (is_q ? 128 : 64) / esize;
7468 
7469     tcg_res = tcg_temp_new_i64();
7470     tcg_elt = tcg_temp_new_i64();
7471 
7472     /* These instructions operate across all lanes of a vector
7473      * to produce a single result. We can guarantee that a 64
7474      * bit intermediate is sufficient:
7475      *  + for [US]ADDLV the maximum element size is 32 bits, and
7476      *    the result type is 64 bits
7477      *  + for FMAX*V, FMIN*V, ADDV the intermediate type is the
7478      *    same as the element size, which is 32 bits at most
7479      * For the integer operations we can choose to work at 64
7480      * or 32 bits and truncate at the end; for simplicity
7481      * we use 64 bits always. The floating point
7482      * ops do require 32 bit intermediates, though.
7483      */
7484     if (!is_fp) {
7485         read_vec_element(s, tcg_res, rn, 0, size | (is_u ? 0 : MO_SIGN));
7486 
7487         for (i = 1; i < elements; i++) {
7488             read_vec_element(s, tcg_elt, rn, i, size | (is_u ? 0 : MO_SIGN));
7489 
7490             switch (opcode) {
7491             case 0x03: /* SADDLV / UADDLV */
7492             case 0x1b: /* ADDV */
7493                 tcg_gen_add_i64(tcg_res, tcg_res, tcg_elt);
7494                 break;
7495             case 0x0a: /* SMAXV / UMAXV */
7496                 if (is_u) {
7497                     tcg_gen_umax_i64(tcg_res, tcg_res, tcg_elt);
7498                 } else {
7499                     tcg_gen_smax_i64(tcg_res, tcg_res, tcg_elt);
7500                 }
7501                 break;
7502             case 0x1a: /* SMINV / UMINV */
7503                 if (is_u) {
7504                     tcg_gen_umin_i64(tcg_res, tcg_res, tcg_elt);
7505                 } else {
7506                     tcg_gen_smin_i64(tcg_res, tcg_res, tcg_elt);
7507                 }
7508                 break;
7509             default:
7510                 g_assert_not_reached();
7511             }
7512 
7513         }
7514     } else {
7515         /* Floating point vector reduction ops which work across 32
7516          * bit (single) or 16 bit (half-precision) intermediates.
7517          * Note that correct NaN propagation requires that we do these
7518          * operations in exactly the order specified by the pseudocode.
7519          */
7520         TCGv_ptr fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
7521         int fpopcode = opcode | is_min << 4 | is_u << 5;
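             /* This reconstructs the opcodes matched by do_reduction_op(),
              * e.g. single-precision FMINV is 0xf | 1 << 4 | 1 << 5 = 0x3f.
              */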
7522         int vmap = (1 << elements) - 1;
7523         TCGv_i32 tcg_res32 = do_reduction_op(s, fpopcode, rn, esize,
7524                                              (is_q ? 128 : 64), vmap, fpst);
7525         tcg_gen_extu_i32_i64(tcg_res, tcg_res32);
7526     }
7527 
7528     /* Now truncate the result to the width required for the final output */
7529     if (opcode == 0x03) {
7530         /* SADDLV, UADDLV: result is 2*esize */
7531         size++;
7532     }
7533 
7534     switch (size) {
7535     case 0:
7536         tcg_gen_ext8u_i64(tcg_res, tcg_res);
7537         break;
7538     case 1:
7539         tcg_gen_ext16u_i64(tcg_res, tcg_res);
7540         break;
7541     case 2:
7542         tcg_gen_ext32u_i64(tcg_res, tcg_res);
7543         break;
7544     case 3:
7545         break;
7546     default:
7547         g_assert_not_reached();
7548     }
7549 
7550     write_fp_dreg(s, rd, tcg_res);
7551 }
7552 
7553 /* DUP (Element, Vector)
7554  *
7555  *  31  30   29              21 20    16 15        10  9    5 4    0
7556  * +---+---+-------------------+--------+-------------+------+------+
7557  * | 0 | Q | 0 0 1 1 1 0 0 0 0 |  imm5  | 0 0 0 0 0 1 |  Rn  |  Rd  |
7558  * +---+---+-------------------+--------+-------------+------+------+
7559  *
7560  * size: encoded in imm5 (see ARM ARM LowestSetBit())
7561  */
7562 static void handle_simd_dupe(DisasContext *s, int is_q, int rd, int rn,
7563                              int imm5)
7564 {
7565     int size = ctz32(imm5);
7566     int index;
7567 
7568     if (size > 3 || (size == 3 && !is_q)) {
7569         unallocated_encoding(s);
7570         return;
7571     }
7572 
7573     if (!fp_access_check(s)) {
7574         return;
7575     }
7576 
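         /* The bits of imm5 above its lowest set bit form the index,
          * e.g. imm5 = xxxx1 selects bytes with index imm5<4:1> and
          * imm5 = xx100 selects 32-bit words with index imm5<4:3>.
          */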
7577     index = imm5 >> (size + 1);
7578     tcg_gen_gvec_dup_mem(size, vec_full_reg_offset(s, rd),
7579                          vec_reg_offset(s, rn, index, size),
7580                          is_q ? 16 : 8, vec_full_reg_size(s));
7581 }
7582 
7583 /* DUP (element, scalar)
7584  *  31                   21 20    16 15        10  9    5 4    0
7585  * +-----------------------+--------+-------------+------+------+
7586  * | 0 1 0 1 1 1 1 0 0 0 0 |  imm5  | 0 0 0 0 0 1 |  Rn  |  Rd  |
7587  * +-----------------------+--------+-------------+------+------+
7588  */
7589 static void handle_simd_dupes(DisasContext *s, int rd, int rn,
7590                               int imm5)
7591 {
7592     int size = ctz32(imm5);
7593     int index;
7594     TCGv_i64 tmp;
7595 
7596     if (size > 3) {
7597         unallocated_encoding(s);
7598         return;
7599     }
7600 
7601     if (!fp_access_check(s)) {
7602         return;
7603     }
7604 
7605     index = imm5 >> (size + 1);
7606 
7607     /* This instruction just extracts the specified element and
7608      * zero-extends it into the bottom of the destination register.
7609      */
7610     tmp = tcg_temp_new_i64();
7611     read_vec_element(s, tmp, rn, index, size);
7612     write_fp_dreg(s, rd, tmp);
7613 }
7614 
7615 /* DUP (General)
7616  *
7617  *  31  30   29              21 20    16 15        10  9    5 4    0
7618  * +---+---+-------------------+--------+-------------+------+------+
7619  * | 0 | Q | 0 0 1 1 1 0 0 0 0 |  imm5  | 0 0 0 0 1 1 |  Rn  |  Rd  |
7620  * +---+---+-------------------+--------+-------------+------+------+
7621  *
7622  * size: encoded in imm5 (see ARM ARM LowestSetBit())
7623  */
7624 static void handle_simd_dupg(DisasContext *s, int is_q, int rd, int rn,
7625                              int imm5)
7626 {
7627     int size = ctz32(imm5);
7628     uint32_t dofs, oprsz, maxsz;
7629 
7630     if (size > 3 || ((size == 3) && !is_q)) {
7631         unallocated_encoding(s);
7632         return;
7633     }
7634 
7635     if (!fp_access_check(s)) {
7636         return;
7637     }
7638 
7639     dofs = vec_full_reg_offset(s, rd);
7640     oprsz = is_q ? 16 : 8;
7641     maxsz = vec_full_reg_size(s);
7642 
7643     tcg_gen_gvec_dup_i64(size, dofs, oprsz, maxsz, cpu_reg(s, rn));
7644 }
7645 
7646 /* INS (Element)
7647  *
7648  *  31                   21 20    16 15  14    11  10 9    5 4    0
7649  * +-----------------------+--------+------------+---+------+------+
7650  * | 0 1 1 0 1 1 1 0 0 0 0 |  imm5  | 0 |  imm4  | 1 |  Rn  |  Rd  |
7651  * +-----------------------+--------+------------+---+------+------+
7652  *
7653  * size: encoded in imm5 (see ARM ARM LowestSetBit())
7654  * index: encoded in imm5<4:size+1>
7655  */
7656 static void handle_simd_inse(DisasContext *s, int rd, int rn,
7657                              int imm4, int imm5)
7658 {
7659     int size = ctz32(imm5);
7660     int src_index, dst_index;
7661     TCGv_i64 tmp;
7662 
7663     if (size > 3) {
7664         unallocated_encoding(s);
7665         return;
7666     }
7667 
7668     if (!fp_access_check(s)) {
7669         return;
7670     }
7671 
7672     dst_index = extract32(imm5, 1+size, 5);
7673     src_index = extract32(imm4, size, 4);
7674 
7675     tmp = tcg_temp_new_i64();
7676 
7677     read_vec_element(s, tmp, rn, src_index, size);
7678     write_vec_element(s, tmp, rd, dst_index, size);
7679 
7680     /* INS is considered a 128-bit write for SVE. */
7681     clear_vec_high(s, true, rd);
7682 }
7683 
7684 
7685 /* INS (General)
7686  *
7687  *  31                   21 20    16 15        10  9    5 4    0
7688  * +-----------------------+--------+-------------+------+------+
7689  * | 0 1 0 0 1 1 1 0 0 0 0 |  imm5  | 0 0 0 1 1 1 |  Rn  |  Rd  |
7690  * +-----------------------+--------+-------------+------+------+
7691  *
7692  * size: encoded in imm5 (see ARM ARM LowestSetBit())
7693  * index: encoded in imm5<4:size+1>
7694  */
7695 static void handle_simd_insg(DisasContext *s, int rd, int rn, int imm5)
7696 {
7697     int size = ctz32(imm5);
7698     int idx;
7699 
7700     if (size > 3) {
7701         unallocated_encoding(s);
7702         return;
7703     }
7704 
7705     if (!fp_access_check(s)) {
7706         return;
7707     }
7708 
7709     idx = extract32(imm5, 1 + size, 4 - size);
7710     write_vec_element(s, cpu_reg(s, rn), rd, idx, size);
7711 
7712     /* INS is considered a 128-bit write for SVE. */
7713     clear_vec_high(s, true, rd);
7714 }
7715 
7716 /*
7717  * UMOV (General)
7718  * SMOV (General)
7719  *
7720  *  31  30   29              21 20    16 15    12   10 9    5 4    0
7721  * +---+---+-------------------+--------+-------------+------+------+
7722  * | 0 | Q | 0 0 1 1 1 0 0 0 0 |  imm5  | 0 0 1 U 1 1 |  Rn  |  Rd  |
7723  * +---+---+-------------------+--------+-------------+------+------+
7724  *
7725  * U: unsigned when set
7726  * size: encoded in imm5 (see ARM ARM LowestSetBit())
7727  */
7728 static void handle_simd_umov_smov(DisasContext *s, int is_q, int is_signed,
7729                                   int rn, int rd, int imm5)
7730 {
7731     int size = ctz32(imm5);
7732     int element;
7733     TCGv_i64 tcg_rd;
7734 
7735     /* Check for UnallocatedEncodings */
7736     if (is_signed) {
7737         if (size > 2 || (size == 2 && !is_q)) {
7738             unallocated_encoding(s);
7739             return;
7740         }
7741     } else {
7742         if (size > 3
7743             || (size < 3 && is_q)
7744             || (size == 3 && !is_q)) {
7745             unallocated_encoding(s);
7746             return;
7747         }
7748     }
7749 
7750     if (!fp_access_check(s)) {
7751         return;
7752     }
7753 
7754     element = extract32(imm5, 1+size, 4);
7755 
7756     tcg_rd = cpu_reg(s, rd);
7757     read_vec_element(s, tcg_rd, rn, element, size | (is_signed ? MO_SIGN : 0));
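         /* SMOV Wd reads the element sign-extended to 32 bits; like any
          * W-register write, the upper half of Xd must then be zeroed.
          */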
7758     if (is_signed && !is_q) {
7759         tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
7760     }
7761 }
7762 
7763 /* AdvSIMD copy
7764  *   31  30  29  28             21 20  16 15  14  11 10  9    5 4    0
7765  * +---+---+----+-----------------+------+---+------+---+------+------+
7766  * | 0 | Q | op | 0 1 1 1 0 0 0 0 | imm5 | 0 | imm4 | 1 |  Rn  |  Rd  |
7767  * +---+---+----+-----------------+------+---+------+---+------+------+
7768  */
7769 static void disas_simd_copy(DisasContext *s, uint32_t insn)
7770 {
7771     int rd = extract32(insn, 0, 5);
7772     int rn = extract32(insn, 5, 5);
7773     int imm4 = extract32(insn, 11, 4);
7774     int op = extract32(insn, 29, 1);
7775     int is_q = extract32(insn, 30, 1);
7776     int imm5 = extract32(insn, 16, 5);
7777 
7778     if (op) {
7779         if (is_q) {
7780             /* INS (element) */
7781             handle_simd_inse(s, rd, rn, imm4, imm5);
7782         } else {
7783             unallocated_encoding(s);
7784         }
7785     } else {
7786         switch (imm4) {
7787         case 0:
7788             /* DUP (element - vector) */
7789             handle_simd_dupe(s, is_q, rd, rn, imm5);
7790             break;
7791         case 1:
7792             /* DUP (general) */
7793             handle_simd_dupg(s, is_q, rd, rn, imm5);
7794             break;
7795         case 3:
7796             if (is_q) {
7797                 /* INS (general) */
7798                 handle_simd_insg(s, rd, rn, imm5);
7799             } else {
7800                 unallocated_encoding(s);
7801             }
7802             break;
7803         case 5:
7804         case 7:
7805             /* UMOV/SMOV (is_q indicates 32/64; imm4 indicates signedness) */
7806             handle_simd_umov_smov(s, is_q, (imm4 == 5), rn, rd, imm5);
7807             break;
7808         default:
7809             unallocated_encoding(s);
7810             break;
7811         }
7812     }
7813 }
7814 
7815 /* AdvSIMD modified immediate
7816  *  31  30   29  28                 19 18 16 15   12  11  10  9     5 4    0
7817  * +---+---+----+---------------------+-----+-------+----+---+-------+------+
7818  * | 0 | Q | op | 0 1 1 1 1 0 0 0 0 0 | abc | cmode | o2 | 1 | defgh |  Rd  |
7819  * +---+---+----+---------------------+-----+-------+----+---+-------+------+
7820  *
7821  * There are a number of operations that can be carried out here:
7822  *   MOVI - move (shifted) imm into register
7823  *   MVNI - move inverted (shifted) imm into register
7824  *   ORR  - bitwise OR of (shifted) imm with register
7825  *   BIC  - bitwise clear of (shifted) imm with register
7826  * With ARMv8.2 we also have:
7827  *   FMOV half-precision
7828  */
7829 static void disas_simd_mod_imm(DisasContext *s, uint32_t insn)
7830 {
7831     int rd = extract32(insn, 0, 5);
7832     int cmode = extract32(insn, 12, 4);
7833     int o2 = extract32(insn, 11, 1);
7834     uint64_t abcdefgh = extract32(insn, 5, 5) | (extract32(insn, 16, 3) << 5);
7835     bool is_neg = extract32(insn, 29, 1);
7836     bool is_q = extract32(insn, 30, 1);
7837     uint64_t imm = 0;
7838 
7839     if (o2 != 0 || ((cmode == 0xf) && is_neg && !is_q)) {
7840         /* Check for FMOV (vector, immediate) - half-precision */
7841         if (!(dc_isar_feature(aa64_fp16, s) && o2 && cmode == 0xf)) {
7842             unallocated_encoding(s);
7843             return;
7844         }
7845     }
7846 
7847     if (!fp_access_check(s)) {
7848         return;
7849     }
7850 
7851     if (cmode == 15 && o2 && !is_neg) {
7852         /* FMOV (vector, immediate) - half-precision */
7853         imm = vfp_expand_imm(MO_16, abcdefgh);
7854         /* now duplicate across the lanes */
7855         imm = dup_const(MO_16, imm);
7856     } else {
7857         imm = asimd_imm_const(abcdefgh, cmode, is_neg);
7858     }
7859 
7860     if (!((cmode & 0x9) == 0x1 || (cmode & 0xd) == 0x9)) {
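         /* cmode 0xx1 (32-bit) and 10x1 (16-bit) are the ORR/BIC forms;
          * every other cmode is a MOVI/MVNI/FMOV variant.
          */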
7861         /* MOVI or MVNI, with MVNI negation handled above.  */
7862         tcg_gen_gvec_dup_imm(MO_64, vec_full_reg_offset(s, rd), is_q ? 16 : 8,
7863                              vec_full_reg_size(s), imm);
7864     } else {
7865         /* ORR or BIC, with BIC negation to AND handled above.  */
7866         if (is_neg) {
7867             gen_gvec_fn2i(s, is_q, rd, rd, imm, tcg_gen_gvec_andi, MO_64);
7868         } else {
7869             gen_gvec_fn2i(s, is_q, rd, rd, imm, tcg_gen_gvec_ori, MO_64);
7870         }
7871     }
7872 }
7873 
7874 /* AdvSIMD scalar copy
7875  *  31 30  29  28             21 20  16 15  14  11 10  9    5 4    0
7876  * +-----+----+-----------------+------+---+------+---+------+------+
7877  * | 0 1 | op | 1 1 1 1 0 0 0 0 | imm5 | 0 | imm4 | 1 |  Rn  |  Rd  |
7878  * +-----+----+-----------------+------+---+------+---+------+------+
7879  */
7880 static void disas_simd_scalar_copy(DisasContext *s, uint32_t insn)
7881 {
7882     int rd = extract32(insn, 0, 5);
7883     int rn = extract32(insn, 5, 5);
7884     int imm4 = extract32(insn, 11, 4);
7885     int imm5 = extract32(insn, 16, 5);
7886     int op = extract32(insn, 29, 1);
7887 
7888     if (op != 0 || imm4 != 0) {
7889         unallocated_encoding(s);
7890         return;
7891     }
7892 
7893     /* DUP (element, scalar) */
7894     handle_simd_dupes(s, rd, rn, imm5);
7895 }
7896 
7897 /* AdvSIMD scalar pairwise
7898  *  31 30  29 28       24 23  22 21       17 16    12 11 10 9    5 4    0
7899  * +-----+---+-----------+------+-----------+--------+-----+------+------+
7900  * | 0 1 | U | 1 1 1 1 0 | size | 1 1 0 0 0 | opcode | 1 0 |  Rn  |  Rd  |
7901  * +-----+---+-----------+------+-----------+--------+-----+------+------+
7902  */
7903 static void disas_simd_scalar_pairwise(DisasContext *s, uint32_t insn)
7904 {
7905     int u = extract32(insn, 29, 1);
7906     int size = extract32(insn, 22, 2);
7907     int opcode = extract32(insn, 12, 5);
7908     int rn = extract32(insn, 5, 5);
7909     int rd = extract32(insn, 0, 5);
7910     TCGv_ptr fpst;
7911 
7912     /* For some ops (the FP ones), size[1] is part of the encoding.
7913      * For ADDP strictly it is not but size[1] is always 1 for valid
7914      * encodings.
7915      */
7916     opcode |= (extract32(size, 1, 1) << 5);
7917 
7918     switch (opcode) {
7919     case 0x3b: /* ADDP */
7920         if (u || size != 3) {
7921             unallocated_encoding(s);
7922             return;
7923         }
7924         if (!fp_access_check(s)) {
7925             return;
7926         }
7927 
7928         fpst = NULL;
7929         break;
7930     case 0xc: /* FMAXNMP */
7931     case 0xd: /* FADDP */
7932     case 0xf: /* FMAXP */
7933     case 0x2c: /* FMINNMP */
7934     case 0x2f: /* FMINP */
7935         /* FP op, size[0] is 32 or 64 bit*/
7936         if (!u) {
7937             if (!dc_isar_feature(aa64_fp16, s)) {
7938                 unallocated_encoding(s);
7939                 return;
7940             } else {
7941                 size = MO_16;
7942             }
7943         } else {
7944             size = extract32(size, 0, 1) ? MO_64 : MO_32;
7945         }
7946 
7947         if (!fp_access_check(s)) {
7948             return;
7949         }
7950 
7951         fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
7952         break;
7953     default:
7954         unallocated_encoding(s);
7955         return;
7956     }
7957 
7958     if (size == MO_64) {
7959         TCGv_i64 tcg_op1 = tcg_temp_new_i64();
7960         TCGv_i64 tcg_op2 = tcg_temp_new_i64();
7961         TCGv_i64 tcg_res = tcg_temp_new_i64();
7962 
7963         read_vec_element(s, tcg_op1, rn, 0, MO_64);
7964         read_vec_element(s, tcg_op2, rn, 1, MO_64);
7965 
7966         switch (opcode) {
7967         case 0x3b: /* ADDP */
7968             tcg_gen_add_i64(tcg_res, tcg_op1, tcg_op2);
7969             break;
7970         case 0xc: /* FMAXNMP */
7971             gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
7972             break;
7973         case 0xd: /* FADDP */
7974             gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
7975             break;
7976         case 0xf: /* FMAXP */
7977             gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
7978             break;
7979         case 0x2c: /* FMINNMP */
7980             gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
7981             break;
7982         case 0x2f: /* FMINP */
7983             gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
7984             break;
7985         default:
7986             g_assert_not_reached();
7987         }
7988 
7989         write_fp_dreg(s, rd, tcg_res);
7990     } else {
7991         TCGv_i32 tcg_op1 = tcg_temp_new_i32();
7992         TCGv_i32 tcg_op2 = tcg_temp_new_i32();
7993         TCGv_i32 tcg_res = tcg_temp_new_i32();
7994 
7995         read_vec_element_i32(s, tcg_op1, rn, 0, size);
7996         read_vec_element_i32(s, tcg_op2, rn, 1, size);
7997 
7998         if (size == MO_16) {
7999             switch (opcode) {
8000             case 0xc: /* FMAXNMP */
8001                 gen_helper_advsimd_maxnumh(tcg_res, tcg_op1, tcg_op2, fpst);
8002                 break;
8003             case 0xd: /* FADDP */
8004                 gen_helper_advsimd_addh(tcg_res, tcg_op1, tcg_op2, fpst);
8005                 break;
8006             case 0xf: /* FMAXP */
8007                 gen_helper_advsimd_maxh(tcg_res, tcg_op1, tcg_op2, fpst);
8008                 break;
8009             case 0x2c: /* FMINNMP */
8010                 gen_helper_advsimd_minnumh(tcg_res, tcg_op1, tcg_op2, fpst);
8011                 break;
8012             case 0x2f: /* FMINP */
8013                 gen_helper_advsimd_minh(tcg_res, tcg_op1, tcg_op2, fpst);
8014                 break;
8015             default:
8016                 g_assert_not_reached();
8017             }
8018         } else {
8019             switch (opcode) {
8020             case 0xc: /* FMAXNMP */
8021                 gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
8022                 break;
8023             case 0xd: /* FADDP */
8024                 gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
8025                 break;
8026             case 0xf: /* FMAXP */
8027                 gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
8028                 break;
8029             case 0x2c: /* FMINNMP */
8030                 gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
8031                 break;
8032             case 0x2f: /* FMINP */
8033                 gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
8034                 break;
8035             default:
8036                 g_assert_not_reached();
8037             }
8038         }
8039 
8040         write_fp_sreg(s, rd, tcg_res);
8041     }
8042 }
8043 
8044 /*
8045  * Common SSHR[RA]/USHR[RA] - Shift right (optional rounding/accumulate)
8046  *
8047  * This handles the common shifting logic and is used by both
8048  * the vector and scalar code.
8049  */
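     /*
      * For example, SRSHR with shift == 2 computes
      *     result = (src + (1 << 1)) >> 2
      * i.e. the rounding constant 1 << (shift - 1) is added before the
      * shift.  For size == 3 the 64-bit addition of that constant can
      * carry out of bit 63 (e.g. src == UINT64_MAX, shift == 1), which
      * is why the extended_result path below keeps a 65-bit
      * intermediate in the {tcg_src_hi, tcg_src} pair.
      */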
8050 static void handle_shri_with_rndacc(TCGv_i64 tcg_res, TCGv_i64 tcg_src,
8051                                     TCGv_i64 tcg_rnd, bool accumulate,
8052                                     bool is_u, int size, int shift)
8053 {
8054     bool extended_result = false;
8055     bool round = tcg_rnd != NULL;
8056     int ext_lshift = 0;
8057     TCGv_i64 tcg_src_hi;
8058 
8059     if (round && size == 3) {
8060         extended_result = true;
8061         ext_lshift = 64 - shift;
8062         tcg_src_hi = tcg_temp_new_i64();
8063     } else if (shift == 64) {
8064         if (!accumulate && is_u) {
8065             /* result is zero */
8066             tcg_gen_movi_i64(tcg_res, 0);
8067             return;
8068         }
8069     }
8070 
8071     /* Deal with the rounding step */
8072     if (round) {
8073         if (extended_result) {
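                 /*
                  * tcg_gen_add2_i64 computes the 128-bit sum of the
                  * (sign- or zero-extended) source and the rounding
                  * constant, so the carry out of the low 64 bits is
                  * captured in tcg_src_hi.
                  */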
8074             TCGv_i64 tcg_zero = tcg_constant_i64(0);
8075             if (!is_u) {
8076                 /* take care of sign extending tcg_res */
8077                 tcg_gen_sari_i64(tcg_src_hi, tcg_src, 63);
8078                 tcg_gen_add2_i64(tcg_src, tcg_src_hi,
8079                                  tcg_src, tcg_src_hi,
8080                                  tcg_rnd, tcg_zero);
8081             } else {
8082                 tcg_gen_add2_i64(tcg_src, tcg_src_hi,
8083                                  tcg_src, tcg_zero,
8084                                  tcg_rnd, tcg_zero);
8085             }
8086         } else {
8087             tcg_gen_add_i64(tcg_src, tcg_src, tcg_rnd);
8088         }
8089     }
8090 
8091     /* Now do the shift right */
8092     if (round && extended_result) {
8093         /* extended case, >64 bit precision required */
8094         if (ext_lshift == 0) {
8095             /* special case, only high bits matter */
8096             tcg_gen_mov_i64(tcg_src, tcg_src_hi);
8097         } else {
8098             tcg_gen_shri_i64(tcg_src, tcg_src, shift);
8099             tcg_gen_shli_i64(tcg_src_hi, tcg_src_hi, ext_lshift);
8100             tcg_gen_or_i64(tcg_src, tcg_src, tcg_src_hi);
8101         }
8102     } else {
8103         if (is_u) {
8104             if (shift == 64) {
8105                 /* essentially shifting in 64 zeros */
8106                 tcg_gen_movi_i64(tcg_src, 0);
8107             } else {
8108                 tcg_gen_shri_i64(tcg_src, tcg_src, shift);
8109             }
8110         } else {
8111             if (shift == 64) {
8112                 /* effectively extending the sign-bit */
8113                 tcg_gen_sari_i64(tcg_src, tcg_src, 63);
8114             } else {
8115                 tcg_gen_sari_i64(tcg_src, tcg_src, shift);
8116             }
8117         }
8118     }
8119 
8120     if (accumulate) {
8121         tcg_gen_add_i64(tcg_res, tcg_res, tcg_src);
8122     } else {
8123         tcg_gen_mov_i64(tcg_res, tcg_src);
8124     }
8125 }
8126 
8127 /* SSHR[RA]/USHR[RA] - Scalar shift right (optional rounding/accumulate) */
8128 static void handle_scalar_simd_shri(DisasContext *s,
8129                                     bool is_u, int immh, int immb,
8130                                     int opcode, int rn, int rd)
8131 {
8132     const int size = 3;
8133     int immhb = immh << 3 | immb;
8134     int shift = 2 * (8 << size) - immhb;
8135     bool accumulate = false;
8136     bool round = false;
8137     bool insert = false;
8138     TCGv_i64 tcg_rn;
8139     TCGv_i64 tcg_rd;
8140     TCGv_i64 tcg_round;
8141 
8142     if (!extract32(immh, 3, 1)) {
8143         unallocated_encoding(s);
8144         return;
8145     }
8146 
8147     if (!fp_access_check(s)) {
8148         return;
8149     }
8150 
8151     switch (opcode) {
8152     case 0x02: /* SSRA / USRA (accumulate) */
8153         accumulate = true;
8154         break;
8155     case 0x04: /* SRSHR / URSHR (rounding) */
8156         round = true;
8157         break;
8158     case 0x06: /* SRSRA / URSRA (accum + rounding) */
8159         accumulate = round = true;
8160         break;
8161     case 0x08: /* SRI */
8162         insert = true;
8163         break;
8164     }
8165 
8166     if (round) {
8167         tcg_round = tcg_constant_i64(1ULL << (shift - 1));
8168     } else {
8169         tcg_round = NULL;
8170     }
8171 
8172     tcg_rn = read_fp_dreg(s, rn);
8173     tcg_rd = (accumulate || insert) ? read_fp_dreg(s, rd) : tcg_temp_new_i64();
8174 
8175     if (insert) {
8176         /* A shift count equal to the element size is valid but does
8177          * nothing; special case to avoid a potential shift by 64.
8178          */
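             /*
              * SRI shifts Rn right and inserts the result into the low
              * (esize - shift) bits of Rd; the deposit below leaves the
              * top 'shift' bits of Rd unchanged.
              */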
8179         int esize = 8 << size;
8180         if (shift != esize) {
8181             tcg_gen_shri_i64(tcg_rn, tcg_rn, shift);
8182             tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, 0, esize - shift);
8183         }
8184     } else {
8185         handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
8186                                 accumulate, is_u, size, shift);
8187     }
8188 
8189     write_fp_dreg(s, rd, tcg_rd);
8190 }
8191 
8192 /* SHL/SLI - Scalar shift left */
8193 static void handle_scalar_simd_shli(DisasContext *s, bool insert,
8194                                     int immh, int immb, int opcode,
8195                                     int rn, int rd)
8196 {
8197     int size = 32 - clz32(immh) - 1;
8198     int immhb = immh << 3 | immb;
8199     int shift = immhb - (8 << size);
8200     TCGv_i64 tcg_rn;
8201     TCGv_i64 tcg_rd;
8202 
8203     if (!extract32(immh, 3, 1)) {
8204         unallocated_encoding(s);
8205         return;
8206     }
8207 
8208     if (!fp_access_check(s)) {
8209         return;
8210     }
8211 
8212     tcg_rn = read_fp_dreg(s, rn);
8213     tcg_rd = insert ? read_fp_dreg(s, rd) : tcg_temp_new_i64();
8214 
8215     if (insert) {
8216         tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, shift, 64 - shift);
8217     } else {
8218         tcg_gen_shli_i64(tcg_rd, tcg_rn, shift);
8219     }
8220 
8221     write_fp_dreg(s, rd, tcg_rd);
8222 }
8223 
8224 /* SQSHRN/SQSHRUN - Saturating (signed/unsigned) shift right with
8225  * (signed/unsigned) narrowing */
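     /*
      * For example, SQSHRN with size == 1 shifts each signed 32-bit
      * source element right, optionally rounds, and saturates the
      * result to the signed 16-bit range [-0x8000, 0x7fff] before
      * packing the narrowed elements into the destination.
      */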
8226 static void handle_vec_simd_sqshrn(DisasContext *s, bool is_scalar, bool is_q,
8227                                    bool is_u_shift, bool is_u_narrow,
8228                                    int immh, int immb, int opcode,
8229                                    int rn, int rd)
8230 {
8231     int immhb = immh << 3 | immb;
8232     int size = 32 - clz32(immh) - 1;
8233     int esize = 8 << size;
8234     int shift = (2 * esize) - immhb;
8235     int elements = is_scalar ? 1 : (64 / esize);
8236     bool round = extract32(opcode, 0, 1);
8237     MemOp ldop = (size + 1) | (is_u_shift ? 0 : MO_SIGN);
8238     TCGv_i64 tcg_rn, tcg_rd, tcg_round;
8239     TCGv_i32 tcg_rd_narrowed;
8240     TCGv_i64 tcg_final;
8241 
8242     static NeonGenNarrowEnvFn * const signed_narrow_fns[4][2] = {
8243         { gen_helper_neon_narrow_sat_s8,
8244           gen_helper_neon_unarrow_sat8 },
8245         { gen_helper_neon_narrow_sat_s16,
8246           gen_helper_neon_unarrow_sat16 },
8247         { gen_helper_neon_narrow_sat_s32,
8248           gen_helper_neon_unarrow_sat32 },
8249         { NULL, NULL },
8250     };
8251     static NeonGenNarrowEnvFn * const unsigned_narrow_fns[4] = {
8252         gen_helper_neon_narrow_sat_u8,
8253         gen_helper_neon_narrow_sat_u16,
8254         gen_helper_neon_narrow_sat_u32,
8255         NULL
8256     };
8257     NeonGenNarrowEnvFn *narrowfn;
8258 
8259     int i;
8260 
8261     assert(size < 4);
8262 
8263     if (extract32(immh, 3, 1)) {
8264         unallocated_encoding(s);
8265         return;
8266     }
8267 
8268     if (!fp_access_check(s)) {
8269         return;
8270     }
8271 
8272     if (is_u_shift) {
8273         narrowfn = unsigned_narrow_fns[size];
8274     } else {
8275         narrowfn = signed_narrow_fns[size][is_u_narrow ? 1 : 0];
8276     }
8277 
8278     tcg_rn = tcg_temp_new_i64();
8279     tcg_rd = tcg_temp_new_i64();
8280     tcg_rd_narrowed = tcg_temp_new_i32();
8281     tcg_final = tcg_temp_new_i64();
8282 
8283     if (round) {
8284         tcg_round = tcg_constant_i64(1ULL << (shift - 1));
8285     } else {
8286         tcg_round = NULL;
8287     }
8288 
8289     for (i = 0; i < elements; i++) {
8290         read_vec_element(s, tcg_rn, rn, i, ldop);
8291         handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
8292                                 false, is_u_shift, size+1, shift);
8293         narrowfn(tcg_rd_narrowed, cpu_env, tcg_rd);
8294         tcg_gen_extu_i32_i64(tcg_rd, tcg_rd_narrowed);
8295         if (i == 0) {
8296             tcg_gen_mov_i64(tcg_final, tcg_rd);
8297         } else {
8298             tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize);
8299         }
8300     }
8301 
8302     if (!is_q) {
8303         write_vec_element(s, tcg_final, rd, 0, MO_64);
8304     } else {
8305         write_vec_element(s, tcg_final, rd, 1, MO_64);
8306     }
8307     clear_vec_high(s, is_q, rd);
8308 }
8309 
8310 /* SQSHLU, UQSHL, SQSHL: saturating left shifts */
8311 static void handle_simd_qshl(DisasContext *s, bool scalar, bool is_q,
8312                              bool src_unsigned, bool dst_unsigned,
8313                              int immh, int immb, int rn, int rd)
8314 {
8315     int immhb = immh << 3 | immb;
8316     int size = 32 - clz32(immh) - 1;
8317     int shift = immhb - (8 << size);
8318     int pass;
8319 
8320     assert(immh != 0);
8321     assert(!(scalar && is_q));
8322 
8323     if (!scalar) {
8324         if (!is_q && extract32(immh, 3, 1)) {
8325             unallocated_encoding(s);
8326             return;
8327         }
8328 
8329         /* Since we use the variable-shift helpers we must
8330          * replicate the shift count into each element of
8331          * the tcg_shift value.
8332          */
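             /* e.g. for size == 0 a byte shift count of 3 becomes
              * 0x03030303: one copy per byte of the 32-bit value.
              */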
8333         switch (size) {
8334         case 0:
8335             shift |= shift << 8;
8336             /* fall through */
8337         case 1:
8338             shift |= shift << 16;
8339             break;
8340         case 2:
8341         case 3:
8342             break;
8343         default:
8344             g_assert_not_reached();
8345         }
8346     }
8347 
8348     if (!fp_access_check(s)) {
8349         return;
8350     }
8351 
8352     if (size == 3) {
8353         TCGv_i64 tcg_shift = tcg_constant_i64(shift);
8354         static NeonGenTwo64OpEnvFn * const fns[2][2] = {
8355             { gen_helper_neon_qshl_s64, gen_helper_neon_qshlu_s64 },
8356             { NULL, gen_helper_neon_qshl_u64 },
8357         };
8358         NeonGenTwo64OpEnvFn *genfn = fns[src_unsigned][dst_unsigned];
8359         int maxpass = is_q ? 2 : 1;
8360 
8361         for (pass = 0; pass < maxpass; pass++) {
8362             TCGv_i64 tcg_op = tcg_temp_new_i64();
8363 
8364             read_vec_element(s, tcg_op, rn, pass, MO_64);
8365             genfn(tcg_op, cpu_env, tcg_op, tcg_shift);
8366             write_vec_element(s, tcg_op, rd, pass, MO_64);
8367         }
8368         clear_vec_high(s, is_q, rd);
8369     } else {
8370         TCGv_i32 tcg_shift = tcg_constant_i32(shift);
8371         static NeonGenTwoOpEnvFn * const fns[2][2][3] = {
8372             {
8373                 { gen_helper_neon_qshl_s8,
8374                   gen_helper_neon_qshl_s16,
8375                   gen_helper_neon_qshl_s32 },
8376                 { gen_helper_neon_qshlu_s8,
8377                   gen_helper_neon_qshlu_s16,
8378                   gen_helper_neon_qshlu_s32 }
8379             }, {
8380                 { NULL, NULL, NULL },
8381                 { gen_helper_neon_qshl_u8,
8382                   gen_helper_neon_qshl_u16,
8383                   gen_helper_neon_qshl_u32 }
8384             }
8385         };
8386         NeonGenTwoOpEnvFn *genfn = fns[src_unsigned][dst_unsigned][size];
8387         MemOp memop = scalar ? size : MO_32;
8388         int maxpass = scalar ? 1 : is_q ? 4 : 2;
8389 
8390         for (pass = 0; pass < maxpass; pass++) {
8391             TCGv_i32 tcg_op = tcg_temp_new_i32();
8392 
8393             read_vec_element_i32(s, tcg_op, rn, pass, memop);
8394             genfn(tcg_op, cpu_env, tcg_op, tcg_shift);
8395             if (scalar) {
8396                 switch (size) {
8397                 case 0:
8398                     tcg_gen_ext8u_i32(tcg_op, tcg_op);
8399                     break;
8400                 case 1:
8401                     tcg_gen_ext16u_i32(tcg_op, tcg_op);
8402                     break;
8403                 case 2:
8404                     break;
8405                 default:
8406                     g_assert_not_reached();
8407                 }
8408                 write_fp_sreg(s, rd, tcg_op);
8409             } else {
8410                 write_vec_element_i32(s, tcg_op, rd, pass, MO_32);
8411             }
8412         }
8413 
8414         if (!scalar) {
8415             clear_vec_high(s, is_q, rd);
8416         }
8417     }
8418 }
8419 
8420 /* Common vector code for handling integer to FP conversion */
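     /*
      * A non-zero fracbits requests a fixed-point conversion: the source
      * integer is treated as having that many fraction bits, so the
      * result is int_value * 2^-fracbits converted to the FP format.
      */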
8421 static void handle_simd_intfp_conv(DisasContext *s, int rd, int rn,
8422                                    int elements, int is_signed,
8423                                    int fracbits, int size)
8424 {
8425     TCGv_ptr tcg_fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
8426     TCGv_i32 tcg_shift = NULL;
8427 
8428     MemOp mop = size | (is_signed ? MO_SIGN : 0);
8429     int pass;
8430 
8431     if (fracbits || size == MO_64) {
8432         tcg_shift = tcg_constant_i32(fracbits);
8433     }
8434 
8435     if (size == MO_64) {
8436         TCGv_i64 tcg_int64 = tcg_temp_new_i64();
8437         TCGv_i64 tcg_double = tcg_temp_new_i64();
8438 
8439         for (pass = 0; pass < elements; pass++) {
8440             read_vec_element(s, tcg_int64, rn, pass, mop);
8441 
8442             if (is_signed) {
8443                 gen_helper_vfp_sqtod(tcg_double, tcg_int64,
8444                                      tcg_shift, tcg_fpst);
8445             } else {
8446                 gen_helper_vfp_uqtod(tcg_double, tcg_int64,
8447                                      tcg_shift, tcg_fpst);
8448             }
8449             if (elements == 1) {
8450                 write_fp_dreg(s, rd, tcg_double);
8451             } else {
8452                 write_vec_element(s, tcg_double, rd, pass, MO_64);
8453             }
8454         }
8455     } else {
8456         TCGv_i32 tcg_int32 = tcg_temp_new_i32();
8457         TCGv_i32 tcg_float = tcg_temp_new_i32();
8458 
8459         for (pass = 0; pass < elements; pass++) {
8460             read_vec_element_i32(s, tcg_int32, rn, pass, mop);
8461 
8462             switch (size) {
8463             case MO_32:
8464                 if (fracbits) {
8465                     if (is_signed) {
8466                         gen_helper_vfp_sltos(tcg_float, tcg_int32,
8467                                              tcg_shift, tcg_fpst);
8468                     } else {
8469                         gen_helper_vfp_ultos(tcg_float, tcg_int32,
8470                                              tcg_shift, tcg_fpst);
8471                     }
8472                 } else {
8473                     if (is_signed) {
8474                         gen_helper_vfp_sitos(tcg_float, tcg_int32, tcg_fpst);
8475                     } else {
8476                         gen_helper_vfp_uitos(tcg_float, tcg_int32, tcg_fpst);
8477                     }
8478                 }
8479                 break;
8480             case MO_16:
8481                 if (fracbits) {
8482                     if (is_signed) {
8483                         gen_helper_vfp_sltoh(tcg_float, tcg_int32,
8484                                              tcg_shift, tcg_fpst);
8485                     } else {
8486                         gen_helper_vfp_ultoh(tcg_float, tcg_int32,
8487                                              tcg_shift, tcg_fpst);
8488                     }
8489                 } else {
8490                     if (is_signed) {
8491                         gen_helper_vfp_sitoh(tcg_float, tcg_int32, tcg_fpst);
8492                     } else {
8493                         gen_helper_vfp_uitoh(tcg_float, tcg_int32, tcg_fpst);
8494                     }
8495                 }
8496                 break;
8497             default:
8498                 g_assert_not_reached();
8499             }
8500 
8501             if (elements == 1) {
8502                 write_fp_sreg(s, rd, tcg_float);
8503             } else {
8504                 write_vec_element_i32(s, tcg_float, rd, pass, size);
8505             }
8506         }
8507     }
8508 
8509     clear_vec_high(s, elements << size == 16, rd);
8510 }
8511 
8512 /* UCVTF/SCVTF - Integer to FP conversion */
8513 static void handle_simd_shift_intfp_conv(DisasContext *s, bool is_scalar,
8514                                          bool is_q, bool is_u,
8515                                          int immh, int immb, int opcode,
8516                                          int rn, int rd)
8517 {
8518     int size, elements, fracbits;
8519     int immhb = immh << 3 | immb;
8520 
8521     if (immh & 8) {
8522         size = MO_64;
8523         if (!is_scalar && !is_q) {
8524             unallocated_encoding(s);
8525             return;
8526         }
8527     } else if (immh & 4) {
8528         size = MO_32;
8529     } else if (immh & 2) {
8530         size = MO_16;
8531         if (!dc_isar_feature(aa64_fp16, s)) {
8532             unallocated_encoding(s);
8533             return;
8534         }
8535     } else {
8536         /* immh == 0 would be a failure of the decode logic */
8537         g_assert(immh == 1);
8538         unallocated_encoding(s);
8539         return;
8540     }
8541 
8542     if (is_scalar) {
8543         elements = 1;
8544     } else {
8545         elements = (8 << is_q) >> size;
8546     }
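         /* e.g. for MO_32, immhb lies in [32, 63], so fracbits is in [1, 32]. */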
8547     fracbits = (16 << size) - immhb;
8548 
8549     if (!fp_access_check(s)) {
8550         return;
8551     }
8552 
8553     handle_simd_intfp_conv(s, rd, rn, elements, !is_u, fracbits, size);
8554 }
8555 
8556 /* FCVTZS, FCVTZU - FP to fixed-point conversion */
8557 static void handle_simd_shift_fpint_conv(DisasContext *s, bool is_scalar,
8558                                          bool is_q, bool is_u,
8559                                          int immh, int immb, int rn, int rd)
8560 {
8561     int immhb = immh << 3 | immb;
8562     int pass, size, fracbits;
8563     TCGv_ptr tcg_fpstatus;
8564     TCGv_i32 tcg_rmode, tcg_shift;
8565 
8566     if (immh & 0x8) {
8567         size = MO_64;
8568         if (!is_scalar && !is_q) {
8569             unallocated_encoding(s);
8570             return;
8571         }
8572     } else if (immh & 0x4) {
8573         size = MO_32;
8574     } else if (immh & 0x2) {
8575         size = MO_16;
8576         if (!dc_isar_feature(aa64_fp16, s)) {
8577             unallocated_encoding(s);
8578             return;
8579         }
8580     } else {
8581         /* Should have split out AdvSIMD modified immediate earlier.  */
8582         assert(immh == 1);
8583         unallocated_encoding(s);
8584         return;
8585     }
8586 
8587     if (!fp_access_check(s)) {
8588         return;
8589     }
8590 
8591     assert(!(is_scalar && is_q));
8592 
8593     tcg_fpstatus = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
8594     tcg_rmode = gen_set_rmode(FPROUNDING_ZERO, tcg_fpstatus);
8595     fracbits = (16 << size) - immhb;
8596     tcg_shift = tcg_constant_i32(fracbits);
8597 
8598     if (size == MO_64) {
8599         int maxpass = is_scalar ? 1 : 2;
8600 
8601         for (pass = 0; pass < maxpass; pass++) {
8602             TCGv_i64 tcg_op = tcg_temp_new_i64();
8603 
8604             read_vec_element(s, tcg_op, rn, pass, MO_64);
8605             if (is_u) {
8606                 gen_helper_vfp_touqd(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
8607             } else {
8608                 gen_helper_vfp_tosqd(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
8609             }
8610             write_vec_element(s, tcg_op, rd, pass, MO_64);
8611         }
8612         clear_vec_high(s, is_q, rd);
8613     } else {
8614         void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);
8615         int maxpass = is_scalar ? 1 : ((8 << is_q) >> size);
8616 
8617         switch (size) {
8618         case MO_16:
8619             if (is_u) {
8620                 fn = gen_helper_vfp_touhh;
8621             } else {
8622                 fn = gen_helper_vfp_toshh;
8623             }
8624             break;
8625         case MO_32:
8626             if (is_u) {
8627                 fn = gen_helper_vfp_touls;
8628             } else {
8629                 fn = gen_helper_vfp_tosls;
8630             }
8631             break;
8632         default:
8633             g_assert_not_reached();
8634         }
8635 
8636         for (pass = 0; pass < maxpass; pass++) {
8637             TCGv_i32 tcg_op = tcg_temp_new_i32();
8638 
8639             read_vec_element_i32(s, tcg_op, rn, pass, size);
8640             fn(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
8641             if (is_scalar) {
8642                 write_fp_sreg(s, rd, tcg_op);
8643             } else {
8644                 write_vec_element_i32(s, tcg_op, rd, pass, size);
8645             }
8646         }
8647         if (!is_scalar) {
8648             clear_vec_high(s, is_q, rd);
8649         }
8650     }
8651 
8652     gen_restore_rmode(tcg_rmode, tcg_fpstatus);
8653 }
8654 
8655 /* AdvSIMD scalar shift by immediate
8656  *  31 30  29 28         23 22  19 18  16 15    11  10 9    5 4    0
8657  * +-----+---+-------------+------+------+--------+---+------+------+
8658  * | 0 1 | U | 1 1 1 1 1 0 | immh | immb | opcode | 1 |  Rn  |  Rd  |
8659  * +-----+---+-------------+------+------+--------+---+------+------+
8660  *
8661  * This is the scalar version, so it works on fixed-size registers.
8662  */
8663 static void disas_simd_scalar_shift_imm(DisasContext *s, uint32_t insn)
8664 {
8665     int rd = extract32(insn, 0, 5);
8666     int rn = extract32(insn, 5, 5);
8667     int opcode = extract32(insn, 11, 5);
8668     int immb = extract32(insn, 16, 3);
8669     int immh = extract32(insn, 19, 4);
8670     bool is_u = extract32(insn, 29, 1);
8671 
8672     if (immh == 0) {
8673         unallocated_encoding(s);
8674         return;
8675     }
8676 
8677     switch (opcode) {
8678     case 0x08: /* SRI */
8679         if (!is_u) {
8680             unallocated_encoding(s);
8681             return;
8682         }
8683         /* fall through */
8684     case 0x00: /* SSHR / USHR */
8685     case 0x02: /* SSRA / USRA */
8686     case 0x04: /* SRSHR / URSHR */
8687     case 0x06: /* SRSRA / URSRA */
8688         handle_scalar_simd_shri(s, is_u, immh, immb, opcode, rn, rd);
8689         break;
8690     case 0x0a: /* SHL / SLI */
8691         handle_scalar_simd_shli(s, is_u, immh, immb, opcode, rn, rd);
8692         break;
8693     case 0x1c: /* SCVTF, UCVTF */
8694         handle_simd_shift_intfp_conv(s, true, false, is_u, immh, immb,
8695                                      opcode, rn, rd);
8696         break;
8697     case 0x10: /* SQSHRUN, SQSHRUN2 */
8698     case 0x11: /* SQRSHRUN, SQRSHRUN2 */
8699         if (!is_u) {
8700             unallocated_encoding(s);
8701             return;
8702         }
8703         handle_vec_simd_sqshrn(s, true, false, false, true,
8704                                immh, immb, opcode, rn, rd);
8705         break;
8706     case 0x12: /* SQSHRN, SQSHRN2, UQSHRN, UQSHRN2 */
8707     case 0x13: /* SQRSHRN, SQRSHRN2, UQRSHRN, UQRSHRN2 */
8708         handle_vec_simd_sqshrn(s, true, false, is_u, is_u,
8709                                immh, immb, opcode, rn, rd);
8710         break;
8711     case 0xc: /* SQSHLU */
8712         if (!is_u) {
8713             unallocated_encoding(s);
8714             return;
8715         }
8716         handle_simd_qshl(s, true, false, false, true, immh, immb, rn, rd);
8717         break;
8718     case 0xe: /* SQSHL, UQSHL */
8719         handle_simd_qshl(s, true, false, is_u, is_u, immh, immb, rn, rd);
8720         break;
8721     case 0x1f: /* FCVTZS, FCVTZU */
8722         handle_simd_shift_fpint_conv(s, true, false, is_u, immh, immb, rn, rd);
8723         break;
8724     default:
8725         unallocated_encoding(s);
8726         break;
8727     }
8728 }
8729 
8730 /* AdvSIMD scalar three different
8731  *  31 30  29 28       24 23  22  21 20  16 15    12 11 10 9    5 4    0
8732  * +-----+---+-----------+------+---+------+--------+-----+------+------+
8733  * | 0 1 | U | 1 1 1 1 0 | size | 1 |  Rm  | opcode | 0 0 |  Rn  |  Rd  |
8734  * +-----+---+-----------+------+---+------+--------+-----+------+------+
8735  */
8736 static void disas_simd_scalar_three_reg_diff(DisasContext *s, uint32_t insn)
8737 {
8738     bool is_u = extract32(insn, 29, 1);
8739     int size = extract32(insn, 22, 2);
8740     int opcode = extract32(insn, 12, 4);
8741     int rm = extract32(insn, 16, 5);
8742     int rn = extract32(insn, 5, 5);
8743     int rd = extract32(insn, 0, 5);
8744 
8745     if (is_u) {
8746         unallocated_encoding(s);
8747         return;
8748     }
8749 
8750     switch (opcode) {
8751     case 0x9: /* SQDMLAL, SQDMLAL2 */
8752     case 0xb: /* SQDMLSL, SQDMLSL2 */
8753     case 0xd: /* SQDMULL, SQDMULL2 */
8754         if (size == 0 || size == 3) {
8755             unallocated_encoding(s);
8756             return;
8757         }
8758         break;
8759     default:
8760         unallocated_encoding(s);
8761         return;
8762     }
8763 
8764     if (!fp_access_check(s)) {
8765         return;
8766     }
8767 
8768     if (size == 2) {
8769         TCGv_i64 tcg_op1 = tcg_temp_new_i64();
8770         TCGv_i64 tcg_op2 = tcg_temp_new_i64();
8771         TCGv_i64 tcg_res = tcg_temp_new_i64();
8772 
8773         read_vec_element(s, tcg_op1, rn, 0, MO_32 | MO_SIGN);
8774         read_vec_element(s, tcg_op2, rm, 0, MO_32 | MO_SIGN);
8775 
8776         tcg_gen_mul_i64(tcg_res, tcg_op1, tcg_op2);
8777         gen_helper_neon_addl_saturate_s64(tcg_res, cpu_env, tcg_res, tcg_res);
8778 
8779         switch (opcode) {
8780         case 0xd: /* SQDMULL, SQDMULL2 */
8781             break;
8782         case 0xb: /* SQDMLSL, SQDMLSL2 */
8783             tcg_gen_neg_i64(tcg_res, tcg_res);
8784             /* fall through */
8785         case 0x9: /* SQDMLAL, SQDMLAL2 */
8786             read_vec_element(s, tcg_op1, rd, 0, MO_64);
8787             gen_helper_neon_addl_saturate_s64(tcg_res, cpu_env,
8788                                               tcg_res, tcg_op1);
8789             break;
8790         default:
8791             g_assert_not_reached();
8792         }
8793 
8794         write_fp_dreg(s, rd, tcg_res);
8795     } else {
8796         TCGv_i32 tcg_op1 = read_fp_hreg(s, rn);
8797         TCGv_i32 tcg_op2 = read_fp_hreg(s, rm);
8798         TCGv_i64 tcg_res = tcg_temp_new_i64();
8799 
8800         gen_helper_neon_mull_s16(tcg_res, tcg_op1, tcg_op2);
8801         gen_helper_neon_addl_saturate_s32(tcg_res, cpu_env, tcg_res, tcg_res);
8802 
8803         switch (opcode) {
8804         case 0xd: /* SQDMULL, SQDMULL2 */
8805             break;
8806         case 0xb: /* SQDMLSL, SQDMLSL2 */
8807             gen_helper_neon_negl_u32(tcg_res, tcg_res);
8808             /* fall through */
8809         case 0x9: /* SQDMLAL, SQDMLAL2 */
8810         {
8811             TCGv_i64 tcg_op3 = tcg_temp_new_i64();
8812             read_vec_element(s, tcg_op3, rd, 0, MO_32);
8813             gen_helper_neon_addl_saturate_s32(tcg_res, cpu_env,
8814                                               tcg_res, tcg_op3);
8815             break;
8816         }
8817         default:
8818             g_assert_not_reached();
8819         }
8820 
8821         tcg_gen_ext32u_i64(tcg_res, tcg_res);
8822         write_fp_dreg(s, rd, tcg_res);
8823     }
8824 }
8825 
8826 static void handle_3same_64(DisasContext *s, int opcode, bool u,
8827                             TCGv_i64 tcg_rd, TCGv_i64 tcg_rn, TCGv_i64 tcg_rm)
8828 {
8829     /* Handle 64x64->64 opcodes which are shared between the scalar
8830      * and vector 3-same groups. We cover every opcode where size == 3
8831      * is valid in either the three-reg-same (integer, not pairwise)
8832      * or scalar-three-reg-same groups.
8833      */
8834     TCGCond cond;
8835 
8836     switch (opcode) {
8837     case 0x1: /* SQADD */
8838         if (u) {
8839             gen_helper_neon_qadd_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
8840         } else {
8841             gen_helper_neon_qadd_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
8842         }
8843         break;
8844     case 0x5: /* SQSUB */
8845         if (u) {
8846             gen_helper_neon_qsub_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
8847         } else {
8848             gen_helper_neon_qsub_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
8849         }
8850         break;
8851     case 0x6: /* CMGT, CMHI */
8852         /* 64 bit integer comparison, result = test ? (2^64 - 1) : 0.
8853          * We implement this using setcond (test) and then negating.
8854          */
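             /* setcond produces 0 or 1; negating 1 yields all-ones. */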
8855         cond = u ? TCG_COND_GTU : TCG_COND_GT;
8856     do_cmop:
8857         tcg_gen_setcond_i64(cond, tcg_rd, tcg_rn, tcg_rm);
8858         tcg_gen_neg_i64(tcg_rd, tcg_rd);
8859         break;
8860     case 0x7: /* CMGE, CMHS */
8861         cond = u ? TCG_COND_GEU : TCG_COND_GE;
8862         goto do_cmop;
8863     case 0x11: /* CMTST, CMEQ */
8864         if (u) {
8865             cond = TCG_COND_EQ;
8866             goto do_cmop;
8867         }
8868         gen_cmtst_i64(tcg_rd, tcg_rn, tcg_rm);
8869         break;
8870     case 0x8: /* SSHL, USHL */
8871         if (u) {
8872             gen_ushl_i64(tcg_rd, tcg_rn, tcg_rm);
8873         } else {
8874             gen_sshl_i64(tcg_rd, tcg_rn, tcg_rm);
8875         }
8876         break;
8877     case 0x9: /* SQSHL, UQSHL */
8878         if (u) {
8879             gen_helper_neon_qshl_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
8880         } else {
8881             gen_helper_neon_qshl_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
8882         }
8883         break;
8884     case 0xa: /* SRSHL, URSHL */
8885         if (u) {
8886             gen_helper_neon_rshl_u64(tcg_rd, tcg_rn, tcg_rm);
8887         } else {
8888             gen_helper_neon_rshl_s64(tcg_rd, tcg_rn, tcg_rm);
8889         }
8890         break;
8891     case 0xb: /* SQRSHL, UQRSHL */
8892         if (u) {
8893             gen_helper_neon_qrshl_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
8894         } else {
8895             gen_helper_neon_qrshl_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
8896         }
8897         break;
8898     case 0x10: /* ADD, SUB */
8899         if (u) {
8900             tcg_gen_sub_i64(tcg_rd, tcg_rn, tcg_rm);
8901         } else {
8902             tcg_gen_add_i64(tcg_rd, tcg_rn, tcg_rm);
8903         }
8904         break;
8905     default:
8906         g_assert_not_reached();
8907     }
8908 }
8909 
8910 /* Handle the 3-same-operands float operations; shared by the scalar
8911  * and vector encodings. The caller must filter out any encodings
8912  * not allocated for the group it is dealing with.
8913  */
8914 static void handle_3same_float(DisasContext *s, int size, int elements,
8915                                int fpopcode, int rd, int rn, int rm)
8916 {
8917     int pass;
8918     TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
8919 
8920     for (pass = 0; pass < elements; pass++) {
8921         if (size) {
8922             /* Double */
8923             TCGv_i64 tcg_op1 = tcg_temp_new_i64();
8924             TCGv_i64 tcg_op2 = tcg_temp_new_i64();
8925             TCGv_i64 tcg_res = tcg_temp_new_i64();
8926 
8927             read_vec_element(s, tcg_op1, rn, pass, MO_64);
8928             read_vec_element(s, tcg_op2, rm, pass, MO_64);
8929 
8930             switch (fpopcode) {
8931             case 0x39: /* FMLS */
8932                 /* As usual for ARM, separate negation for fused multiply-add */
8933                 gen_helper_vfp_negd(tcg_op1, tcg_op1);
8934                 /* fall through */
8935             case 0x19: /* FMLA */
8936                 read_vec_element(s, tcg_res, rd, pass, MO_64);
8937                 gen_helper_vfp_muladdd(tcg_res, tcg_op1, tcg_op2,
8938                                        tcg_res, fpst);
8939                 break;
8940             case 0x18: /* FMAXNM */
8941                 gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
8942                 break;
8943             case 0x1a: /* FADD */
8944                 gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
8945                 break;
8946             case 0x1b: /* FMULX */
8947                 gen_helper_vfp_mulxd(tcg_res, tcg_op1, tcg_op2, fpst);
8948                 break;
8949             case 0x1c: /* FCMEQ */
8950                 gen_helper_neon_ceq_f64(tcg_res, tcg_op1, tcg_op2, fpst);
8951                 break;
8952             case 0x1e: /* FMAX */
8953                 gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
8954                 break;
8955             case 0x1f: /* FRECPS */
8956                 gen_helper_recpsf_f64(tcg_res, tcg_op1, tcg_op2, fpst);
8957                 break;
8958             case 0x38: /* FMINNM */
8959                 gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
8960                 break;
8961             case 0x3a: /* FSUB */
8962                 gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
8963                 break;
8964             case 0x3e: /* FMIN */
8965                 gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
8966                 break;
8967             case 0x3f: /* FRSQRTS */
8968                 gen_helper_rsqrtsf_f64(tcg_res, tcg_op1, tcg_op2, fpst);
8969                 break;
8970             case 0x5b: /* FMUL */
8971                 gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
8972                 break;
8973             case 0x5c: /* FCMGE */
8974                 gen_helper_neon_cge_f64(tcg_res, tcg_op1, tcg_op2, fpst);
8975                 break;
8976             case 0x5d: /* FACGE */
8977                 gen_helper_neon_acge_f64(tcg_res, tcg_op1, tcg_op2, fpst);
8978                 break;
8979             case 0x5f: /* FDIV */
8980                 gen_helper_vfp_divd(tcg_res, tcg_op1, tcg_op2, fpst);
8981                 break;
8982             case 0x7a: /* FABD */
8983                 gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
8984                 gen_helper_vfp_absd(tcg_res, tcg_res);
8985                 break;
8986             case 0x7c: /* FCMGT */
8987                 gen_helper_neon_cgt_f64(tcg_res, tcg_op1, tcg_op2, fpst);
8988                 break;
8989             case 0x7d: /* FACGT */
8990                 gen_helper_neon_acgt_f64(tcg_res, tcg_op1, tcg_op2, fpst);
8991                 break;
8992             default:
8993                 g_assert_not_reached();
8994             }
8995 
8996             write_vec_element(s, tcg_res, rd, pass, MO_64);
8997         } else {
8998             /* Single */
8999             TCGv_i32 tcg_op1 = tcg_temp_new_i32();
9000             TCGv_i32 tcg_op2 = tcg_temp_new_i32();
9001             TCGv_i32 tcg_res = tcg_temp_new_i32();
9002 
9003             read_vec_element_i32(s, tcg_op1, rn, pass, MO_32);
9004             read_vec_element_i32(s, tcg_op2, rm, pass, MO_32);
9005 
9006             switch (fpopcode) {
9007             case 0x39: /* FMLS */
9008                 /* As usual for ARM, separate negation for fused multiply-add */
9009                 gen_helper_vfp_negs(tcg_op1, tcg_op1);
9010                 /* fall through */
9011             case 0x19: /* FMLA */
9012                 read_vec_element_i32(s, tcg_res, rd, pass, MO_32);
9013                 gen_helper_vfp_muladds(tcg_res, tcg_op1, tcg_op2,
9014                                        tcg_res, fpst);
9015                 break;
9016             case 0x1a: /* FADD */
9017                 gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
9018                 break;
9019             case 0x1b: /* FMULX */
9020                 gen_helper_vfp_mulxs(tcg_res, tcg_op1, tcg_op2, fpst);
9021                 break;
9022             case 0x1c: /* FCMEQ */
9023                 gen_helper_neon_ceq_f32(tcg_res, tcg_op1, tcg_op2, fpst);
9024                 break;
9025             case 0x1e: /* FMAX */
9026                 gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
9027                 break;
9028             case 0x1f: /* FRECPS */
9029                 gen_helper_recpsf_f32(tcg_res, tcg_op1, tcg_op2, fpst);
9030                 break;
9031             case 0x18: /* FMAXNM */
9032                 gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
9033                 break;
9034             case 0x38: /* FMINNM */
9035                 gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
9036                 break;
9037             case 0x3a: /* FSUB */
9038                 gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
9039                 break;
9040             case 0x3e: /* FMIN */
9041                 gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
9042                 break;
9043             case 0x3f: /* FRSQRTS */
9044                 gen_helper_rsqrtsf_f32(tcg_res, tcg_op1, tcg_op2, fpst);
9045                 break;
9046             case 0x5b: /* FMUL */
9047                 gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
9048                 break;
9049             case 0x5c: /* FCMGE */
9050                 gen_helper_neon_cge_f32(tcg_res, tcg_op1, tcg_op2, fpst);
9051                 break;
9052             case 0x5d: /* FACGE */
9053                 gen_helper_neon_acge_f32(tcg_res, tcg_op1, tcg_op2, fpst);
9054                 break;
9055             case 0x5f: /* FDIV */
9056                 gen_helper_vfp_divs(tcg_res, tcg_op1, tcg_op2, fpst);
9057                 break;
9058             case 0x7a: /* FABD */
9059                 gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
9060                 gen_helper_vfp_abss(tcg_res, tcg_res);
9061                 break;
9062             case 0x7c: /* FCMGT */
9063                 gen_helper_neon_cgt_f32(tcg_res, tcg_op1, tcg_op2, fpst);
9064                 break;
9065             case 0x7d: /* FACGT */
9066                 gen_helper_neon_acgt_f32(tcg_res, tcg_op1, tcg_op2, fpst);
9067                 break;
9068             default:
9069                 g_assert_not_reached();
9070             }
9071 
9072             if (elements == 1) {
9073                 /* scalar single so clear high part */
9074                 TCGv_i64 tcg_tmp = tcg_temp_new_i64();
9075 
9076                 tcg_gen_extu_i32_i64(tcg_tmp, tcg_res);
9077                 write_vec_element(s, tcg_tmp, rd, pass, MO_64);
9078             } else {
9079                 write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
9080             }
9081         }
9082     }
9083 
9084     clear_vec_high(s, elements * (size ? 8 : 4) > 8, rd);
9085 }
9086 
9087 /* AdvSIMD scalar three same
9088  *  31 30  29 28       24 23  22  21 20  16 15    11  10 9    5 4    0
9089  * +-----+---+-----------+------+---+------+--------+---+------+------+
9090  * | 0 1 | U | 1 1 1 1 0 | size | 1 |  Rm  | opcode | 1 |  Rn  |  Rd  |
9091  * +-----+---+-----------+------+---+------+--------+---+------+------+
9092  */
9093 static void disas_simd_scalar_three_reg_same(DisasContext *s, uint32_t insn)
9094 {
9095     int rd = extract32(insn, 0, 5);
9096     int rn = extract32(insn, 5, 5);
9097     int opcode = extract32(insn, 11, 5);
9098     int rm = extract32(insn, 16, 5);
9099     int size = extract32(insn, 22, 2);
9100     bool u = extract32(insn, 29, 1);
9101     TCGv_i64 tcg_rd;
9102 
9103     if (opcode >= 0x18) {
9104         /* Floating point: U, size[1] and opcode indicate operation */
9105         int fpopcode = opcode | (extract32(size, 1, 1) << 5) | (u << 6);
9106         switch (fpopcode) {
9107         case 0x1b: /* FMULX */
9108         case 0x1f: /* FRECPS */
9109         case 0x3f: /* FRSQRTS */
9110         case 0x5d: /* FACGE */
9111         case 0x7d: /* FACGT */
9112         case 0x1c: /* FCMEQ */
9113         case 0x5c: /* FCMGE */
9114         case 0x7c: /* FCMGT */
9115         case 0x7a: /* FABD */
9116             break;
9117         default:
9118             unallocated_encoding(s);
9119             return;
9120         }
9121 
9122         if (!fp_access_check(s)) {
9123             return;
9124         }
9125 
9126         handle_3same_float(s, extract32(size, 0, 1), 1, fpopcode, rd, rn, rm);
9127         return;
9128     }
9129 
9130     switch (opcode) {
9131     case 0x1: /* SQADD, UQADD */
9132     case 0x5: /* SQSUB, UQSUB */
9133     case 0x9: /* SQSHL, UQSHL */
9134     case 0xb: /* SQRSHL, UQRSHL */
9135         break;
9136     case 0x8: /* SSHL, USHL */
9137     case 0xa: /* SRSHL, URSHL */
9138     case 0x6: /* CMGT, CMHI */
9139     case 0x7: /* CMGE, CMHS */
9140     case 0x11: /* CMTST, CMEQ */
9141     case 0x10: /* ADD, SUB (vector) */
9142         if (size != 3) {
9143             unallocated_encoding(s);
9144             return;
9145         }
9146         break;
9147     case 0x16: /* SQDMULH, SQRDMULH (vector) */
9148         if (size != 1 && size != 2) {
9149             unallocated_encoding(s);
9150             return;
9151         }
9152         break;
9153     default:
9154         unallocated_encoding(s);
9155         return;
9156     }
9157 
9158     if (!fp_access_check(s)) {
9159         return;
9160     }
9161 
9162     tcg_rd = tcg_temp_new_i64();
9163 
9164     if (size == 3) {
9165         TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
9166         TCGv_i64 tcg_rm = read_fp_dreg(s, rm);
9167 
9168         handle_3same_64(s, opcode, u, tcg_rd, tcg_rn, tcg_rm);
9169     } else {
9170         /* Do a single operation on the lowest element in the vector.
9171          * We use the standard Neon helpers and rely on 0 OP 0 == 0 with
9172          * no side effects for all these operations.
9173          * OPTME: special-purpose helpers would avoid doing some
9174          * unnecessary work in the helper for the 8 and 16 bit cases.
9175          */
9176         NeonGenTwoOpEnvFn *genenvfn;
9177         TCGv_i32 tcg_rn = tcg_temp_new_i32();
9178         TCGv_i32 tcg_rm = tcg_temp_new_i32();
9179         TCGv_i32 tcg_rd32 = tcg_temp_new_i32();
9180 
9181         read_vec_element_i32(s, tcg_rn, rn, 0, size);
9182         read_vec_element_i32(s, tcg_rm, rm, 0, size);
9183 
9184         switch (opcode) {
9185         case 0x1: /* SQADD, UQADD */
9186         {
9187             static NeonGenTwoOpEnvFn * const fns[3][2] = {
9188                 { gen_helper_neon_qadd_s8, gen_helper_neon_qadd_u8 },
9189                 { gen_helper_neon_qadd_s16, gen_helper_neon_qadd_u16 },
9190                 { gen_helper_neon_qadd_s32, gen_helper_neon_qadd_u32 },
9191             };
9192             genenvfn = fns[size][u];
9193             break;
9194         }
9195         case 0x5: /* SQSUB, UQSUB */
9196         {
9197             static NeonGenTwoOpEnvFn * const fns[3][2] = {
9198                 { gen_helper_neon_qsub_s8, gen_helper_neon_qsub_u8 },
9199                 { gen_helper_neon_qsub_s16, gen_helper_neon_qsub_u16 },
9200                 { gen_helper_neon_qsub_s32, gen_helper_neon_qsub_u32 },
9201             };
9202             genenvfn = fns[size][u];
9203             break;
9204         }
9205         case 0x9: /* SQSHL, UQSHL */
9206         {
9207             static NeonGenTwoOpEnvFn * const fns[3][2] = {
9208                 { gen_helper_neon_qshl_s8, gen_helper_neon_qshl_u8 },
9209                 { gen_helper_neon_qshl_s16, gen_helper_neon_qshl_u16 },
9210                 { gen_helper_neon_qshl_s32, gen_helper_neon_qshl_u32 },
9211             };
9212             genenvfn = fns[size][u];
9213             break;
9214         }
9215         case 0xb: /* SQRSHL, UQRSHL */
9216         {
9217             static NeonGenTwoOpEnvFn * const fns[3][2] = {
9218                 { gen_helper_neon_qrshl_s8, gen_helper_neon_qrshl_u8 },
9219                 { gen_helper_neon_qrshl_s16, gen_helper_neon_qrshl_u16 },
9220                 { gen_helper_neon_qrshl_s32, gen_helper_neon_qrshl_u32 },
9221             };
9222             genenvfn = fns[size][u];
9223             break;
9224         }
9225         case 0x16: /* SQDMULH, SQRDMULH */
9226         {
9227             static NeonGenTwoOpEnvFn * const fns[2][2] = {
9228                 { gen_helper_neon_qdmulh_s16, gen_helper_neon_qrdmulh_s16 },
9229                 { gen_helper_neon_qdmulh_s32, gen_helper_neon_qrdmulh_s32 },
9230             };
9231             assert(size == 1 || size == 2);
9232             genenvfn = fns[size - 1][u];
9233             break;
9234         }
9235         default:
9236             g_assert_not_reached();
9237         }
9238 
9239         genenvfn(tcg_rd32, cpu_env, tcg_rn, tcg_rm);
9240         tcg_gen_extu_i32_i64(tcg_rd, tcg_rd32);
9241     }
9242 
9243     write_fp_dreg(s, rd, tcg_rd);
9244 }
9245 
9246 /* AdvSIMD scalar three same FP16
9247  *  31 30  29 28       24 23  22 21 20  16 15 14 13    11 10  9  5 4  0
9248  * +-----+---+-----------+---+-----+------+-----+--------+---+----+----+
9249  * | 0 1 | U | 1 1 1 1 0 | a | 1 0 |  Rm  | 0 0 | opcode | 1 | Rn | Rd |
9250  * +-----+---+-----------+---+-----+------+-----+--------+---+----+----+
9251  * v: 0101 1110 0100 0000 0000 0100 0000 0000 => 5e400400
9252  * m: 1101 1111 0110 0000 1100 0100 0000 0000 => df60c400
9253  */
9254 static void disas_simd_scalar_three_reg_same_fp16(DisasContext *s,
9255                                                   uint32_t insn)
9256 {
9257     int rd = extract32(insn, 0, 5);
9258     int rn = extract32(insn, 5, 5);
9259     int opcode = extract32(insn, 11, 3);
9260     int rm = extract32(insn, 16, 5);
9261     bool u = extract32(insn, 29, 1);
9262     bool a = extract32(insn, 23, 1);
9263     int fpopcode = opcode | (a << 3) |  (u << 4);
9264     TCGv_ptr fpst;
9265     TCGv_i32 tcg_op1;
9266     TCGv_i32 tcg_op2;
9267     TCGv_i32 tcg_res;
9268 
9269     switch (fpopcode) {
9270     case 0x03: /* FMULX */
9271     case 0x04: /* FCMEQ (reg) */
9272     case 0x07: /* FRECPS */
9273     case 0x0f: /* FRSQRTS */
9274     case 0x14: /* FCMGE (reg) */
9275     case 0x15: /* FACGE */
9276     case 0x1a: /* FABD */
9277     case 0x1c: /* FCMGT (reg) */
9278     case 0x1d: /* FACGT */
9279         break;
9280     default:
9281         unallocated_encoding(s);
9282         return;
9283     }
9284 
9285     if (!dc_isar_feature(aa64_fp16, s)) {
9286         unallocated_encoding(s);
             return;
9287     }
9288 
9289     if (!fp_access_check(s)) {
9290         return;
9291     }
9292 
9293     fpst = fpstatus_ptr(FPST_FPCR_F16);
9294 
9295     tcg_op1 = read_fp_hreg(s, rn);
9296     tcg_op2 = read_fp_hreg(s, rm);
9297     tcg_res = tcg_temp_new_i32();
9298 
9299     switch (fpopcode) {
9300     case 0x03: /* FMULX */
9301         gen_helper_advsimd_mulxh(tcg_res, tcg_op1, tcg_op2, fpst);
9302         break;
9303     case 0x04: /* FCMEQ (reg) */
9304         gen_helper_advsimd_ceq_f16(tcg_res, tcg_op1, tcg_op2, fpst);
9305         break;
9306     case 0x07: /* FRECPS */
9307         gen_helper_recpsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
9308         break;
9309     case 0x0f: /* FRSQRTS */
9310         gen_helper_rsqrtsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
9311         break;
9312     case 0x14: /* FCMGE (reg) */
9313         gen_helper_advsimd_cge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
9314         break;
9315     case 0x15: /* FACGE */
9316         gen_helper_advsimd_acge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
9317         break;
9318     case 0x1a: /* FABD */
9319         gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
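             /* Clear the sign bit of the float16 difference: |a - b|. */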
9320         tcg_gen_andi_i32(tcg_res, tcg_res, 0x7fff);
9321         break;
9322     case 0x1c: /* FCMGT (reg) */
9323         gen_helper_advsimd_cgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
9324         break;
9325     case 0x1d: /* FACGT */
9326         gen_helper_advsimd_acgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
9327         break;
9328     default:
9329         g_assert_not_reached();
9330     }
9331 
9332     write_fp_sreg(s, rd, tcg_res);
9333 }
9334 
9335 /* AdvSIMD scalar three same extra
9336  *  31 30  29 28       24 23  22  21 20  16  15 14    11  10 9  5 4  0
9337  * +-----+---+-----------+------+---+------+---+--------+---+----+----+
9338  * | 0 1 | U | 1 1 1 1 0 | size | 0 |  Rm  | 1 | opcode | 1 | Rn | Rd |
9339  * +-----+---+-----------+------+---+------+---+--------+---+----+----+
9340  */
9341 static void disas_simd_scalar_three_reg_same_extra(DisasContext *s,
9342                                                    uint32_t insn)
9343 {
9344     int rd = extract32(insn, 0, 5);
9345     int rn = extract32(insn, 5, 5);
9346     int opcode = extract32(insn, 11, 4);
9347     int rm = extract32(insn, 16, 5);
9348     int size = extract32(insn, 22, 2);
9349     bool u = extract32(insn, 29, 1);
9350     TCGv_i32 ele1, ele2, ele3;
9351     TCGv_i64 res;
9352     bool feature;
9353 
9354     switch (u * 16 + opcode) {
9355     case 0x10: /* SQRDMLAH (vector) */
9356     case 0x11: /* SQRDMLSH (vector) */
9357         if (size != 1 && size != 2) {
9358             unallocated_encoding(s);
9359             return;
9360         }
9361         feature = dc_isar_feature(aa64_rdm, s);
9362         break;
9363     default:
9364         unallocated_encoding(s);
9365         return;
9366     }
9367     if (!feature) {
9368         unallocated_encoding(s);
9369         return;
9370     }
9371     if (!fp_access_check(s)) {
9372         return;
9373     }
9374 
9375     /* Do a single operation on the lowest element in the vector.
9376      * We use the standard Neon helpers and rely on 0 OP 0 == 0
9377      * with no side effects for all these operations.
9378      * OPTME: special-purpose helpers would avoid doing some
9379      * unnecessary work in the helper for the 16 bit cases.
9380      */
9381     ele1 = tcg_temp_new_i32();
9382     ele2 = tcg_temp_new_i32();
9383     ele3 = tcg_temp_new_i32();
9384 
9385     read_vec_element_i32(s, ele1, rn, 0, size);
9386     read_vec_element_i32(s, ele2, rm, 0, size);
9387     read_vec_element_i32(s, ele3, rd, 0, size);
9388 
9389     switch (opcode) {
9390     case 0x0: /* SQRDMLAH */
9391         if (size == 1) {
9392             gen_helper_neon_qrdmlah_s16(ele3, cpu_env, ele1, ele2, ele3);
9393         } else {
9394             gen_helper_neon_qrdmlah_s32(ele3, cpu_env, ele1, ele2, ele3);
9395         }
9396         break;
9397     case 0x1: /* SQRDMLSH */
9398         if (size == 1) {
9399             gen_helper_neon_qrdmlsh_s16(ele3, cpu_env, ele1, ele2, ele3);
9400         } else {
9401             gen_helper_neon_qrdmlsh_s32(ele3, cpu_env, ele1, ele2, ele3);
9402         }
9403         break;
9404     default:
9405         g_assert_not_reached();
9406     }
9407 
9408     res = tcg_temp_new_i64();
9409     tcg_gen_extu_i32_i64(res, ele3);
9410     write_fp_dreg(s, rd, res);
9411 }
9412 
9413 static void handle_2misc_64(DisasContext *s, int opcode, bool u,
9414                             TCGv_i64 tcg_rd, TCGv_i64 tcg_rn,
9415                             TCGv_i32 tcg_rmode, TCGv_ptr tcg_fpstatus)
9416 {
9417     /* Handle 64->64 opcodes which are shared between the scalar and
9418      * vector 2-reg-misc groups. We cover every integer opcode where size == 3
9419      * is valid in either group and also the double-precision fp ops.
9420      * The caller need only provide tcg_rmode and tcg_fpstatus if the op
9421      * requires them.
9422      */
9423     TCGCond cond;
9424 
9425     switch (opcode) {
9426     case 0x4: /* CLS, CLZ */
9427         if (u) {
9428             tcg_gen_clzi_i64(tcg_rd, tcg_rn, 64);
9429         } else {
9430             tcg_gen_clrsb_i64(tcg_rd, tcg_rn);
9431         }
9432         break;
9433     case 0x5: /* NOT */
9434         /* This opcode is shared with CNT and RBIT but we have earlier
9435          * enforced that size == 3 if and only if this is the NOT insn.
9436          */
9437         tcg_gen_not_i64(tcg_rd, tcg_rn);
9438         break;
9439     case 0x7: /* SQABS, SQNEG */
9440         if (u) {
9441             gen_helper_neon_qneg_s64(tcg_rd, cpu_env, tcg_rn);
9442         } else {
9443             gen_helper_neon_qabs_s64(tcg_rd, cpu_env, tcg_rn);
9444         }
9445         break;
9446     case 0xa: /* CMLT */
9447         /* 64 bit integer comparison against zero, result is
9448          * test ? (2^64 - 1) : 0. We implement via setcond(test)
9449          * and then negating, as in handle_3same_64() above.
9450          */
9451         cond = TCG_COND_LT;
9452     do_cmop:
9453         tcg_gen_setcondi_i64(cond, tcg_rd, tcg_rn, 0);
9454         tcg_gen_neg_i64(tcg_rd, tcg_rd);
9455         break;
9456     case 0x8: /* CMGT, CMGE */
9457         cond = u ? TCG_COND_GE : TCG_COND_GT;
9458         goto do_cmop;
9459     case 0x9: /* CMEQ, CMLE */
9460         cond = u ? TCG_COND_LE : TCG_COND_EQ;
9461         goto do_cmop;
9462     case 0xb: /* ABS, NEG */
9463         if (u) {
9464             tcg_gen_neg_i64(tcg_rd, tcg_rn);
9465         } else {
9466             tcg_gen_abs_i64(tcg_rd, tcg_rn);
9467         }
9468         break;
9469     case 0x2f: /* FABS */
9470         gen_helper_vfp_absd(tcg_rd, tcg_rn);
9471         break;
9472     case 0x6f: /* FNEG */
9473         gen_helper_vfp_negd(tcg_rd, tcg_rn);
9474         break;
9475     case 0x7f: /* FSQRT */
9476         gen_helper_vfp_sqrtd(tcg_rd, tcg_rn, cpu_env);
9477         break;
9478     case 0x1a: /* FCVTNS */
9479     case 0x1b: /* FCVTMS */
9480     case 0x1c: /* FCVTAS */
9481     case 0x3a: /* FCVTPS */
9482     case 0x3b: /* FCVTZS */
9483         gen_helper_vfp_tosqd(tcg_rd, tcg_rn, tcg_constant_i32(0), tcg_fpstatus);
9484         break;
9485     case 0x5a: /* FCVTNU */
9486     case 0x5b: /* FCVTMU */
9487     case 0x5c: /* FCVTAU */
9488     case 0x7a: /* FCVTPU */
9489     case 0x7b: /* FCVTZU */
9490         gen_helper_vfp_touqd(tcg_rd, tcg_rn, tcg_constant_i32(0), tcg_fpstatus);
9491         break;
9492     case 0x18: /* FRINTN */
9493     case 0x19: /* FRINTM */
9494     case 0x38: /* FRINTP */
9495     case 0x39: /* FRINTZ */
9496     case 0x58: /* FRINTA */
9497     case 0x79: /* FRINTI */
9498         gen_helper_rintd(tcg_rd, tcg_rn, tcg_fpstatus);
9499         break;
9500     case 0x59: /* FRINTX */
9501         gen_helper_rintd_exact(tcg_rd, tcg_rn, tcg_fpstatus);
9502         break;
9503     case 0x1e: /* FRINT32Z */
9504     case 0x5e: /* FRINT32X */
9505         gen_helper_frint32_d(tcg_rd, tcg_rn, tcg_fpstatus);
9506         break;
9507     case 0x1f: /* FRINT64Z */
9508     case 0x5f: /* FRINT64X */
9509         gen_helper_frint64_d(tcg_rd, tcg_rn, tcg_fpstatus);
9510         break;
9511     default:
9512         g_assert_not_reached();
9513     }
9514 }
9515 
9516 static void handle_2misc_fcmp_zero(DisasContext *s, int opcode,
9517                                    bool is_scalar, bool is_u, bool is_q,
9518                                    int size, int rn, int rd)
9519 {
9520     bool is_double = (size == MO_64);
9521     TCGv_ptr fpst;
9522 
9523     if (!fp_access_check(s)) {
9524         return;
9525     }
9526 
9527     fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
9528 
9529     if (is_double) {
9530         TCGv_i64 tcg_op = tcg_temp_new_i64();
9531         TCGv_i64 tcg_zero = tcg_constant_i64(0);
9532         TCGv_i64 tcg_res = tcg_temp_new_i64();
9533         NeonGenTwoDoubleOpFn *genfn;
9534         bool swap = false;
9535         int pass;
9536 
9537         switch (opcode) {
9538         case 0x2e: /* FCMLT (zero) */
9539             swap = true;
9540             /* fallthrough */
9541         case 0x2c: /* FCMGT (zero) */
9542             genfn = gen_helper_neon_cgt_f64;
9543             break;
9544         case 0x2d: /* FCMEQ (zero) */
9545             genfn = gen_helper_neon_ceq_f64;
9546             break;
9547         case 0x6d: /* FCMLE (zero) */
9548             swap = true;
9549             /* fall through */
9550         case 0x6c: /* FCMGE (zero) */
9551             genfn = gen_helper_neon_cge_f64;
9552             break;
9553         default:
9554             g_assert_not_reached();
9555         }
9556 
9557         for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
9558             read_vec_element(s, tcg_op, rn, pass, MO_64);
9559             if (swap) {
9560                 genfn(tcg_res, tcg_zero, tcg_op, fpst);
9561             } else {
9562                 genfn(tcg_res, tcg_op, tcg_zero, fpst);
9563             }
9564             write_vec_element(s, tcg_res, rd, pass, MO_64);
9565         }
9566 
9567         clear_vec_high(s, !is_scalar, rd);
9568     } else {
9569         TCGv_i32 tcg_op = tcg_temp_new_i32();
9570         TCGv_i32 tcg_zero = tcg_constant_i32(0);
9571         TCGv_i32 tcg_res = tcg_temp_new_i32();
9572         NeonGenTwoSingleOpFn *genfn;
9573         bool swap = false;
9574         int pass, maxpasses;
9575 
9576         if (size == MO_16) {
9577             switch (opcode) {
9578             case 0x2e: /* FCMLT (zero) */
9579                 swap = true;
9580                 /* fall through */
9581             case 0x2c: /* FCMGT (zero) */
9582                 genfn = gen_helper_advsimd_cgt_f16;
9583                 break;
9584             case 0x2d: /* FCMEQ (zero) */
9585                 genfn = gen_helper_advsimd_ceq_f16;
9586                 break;
9587             case 0x6d: /* FCMLE (zero) */
9588                 swap = true;
9589                 /* fall through */
9590             case 0x6c: /* FCMGE (zero) */
9591                 genfn = gen_helper_advsimd_cge_f16;
9592                 break;
9593             default:
9594                 g_assert_not_reached();
9595             }
9596         } else {
9597             switch (opcode) {
9598             case 0x2e: /* FCMLT (zero) */
9599                 swap = true;
9600                 /* fall through */
9601             case 0x2c: /* FCMGT (zero) */
9602                 genfn = gen_helper_neon_cgt_f32;
9603                 break;
9604             case 0x2d: /* FCMEQ (zero) */
9605                 genfn = gen_helper_neon_ceq_f32;
9606                 break;
9607             case 0x6d: /* FCMLE (zero) */
9608                 swap = true;
9609                 /* fall through */
9610             case 0x6c: /* FCMGE (zero) */
9611                 genfn = gen_helper_neon_cge_f32;
9612                 break;
9613             default:
9614                 g_assert_not_reached();
9615             }
9616         }
9617 
9618         if (is_scalar) {
9619             maxpasses = 1;
9620         } else {
9621             int vector_size = 8 << is_q;
9622             maxpasses = vector_size >> size;
9623         }
9624 
9625         for (pass = 0; pass < maxpasses; pass++) {
9626             read_vec_element_i32(s, tcg_op, rn, pass, size);
9627             if (swap) {
9628                 genfn(tcg_res, tcg_zero, tcg_op, fpst);
9629             } else {
9630                 genfn(tcg_res, tcg_op, tcg_zero, fpst);
9631             }
9632             if (is_scalar) {
9633                 write_fp_sreg(s, rd, tcg_res);
9634             } else {
9635                 write_vec_element_i32(s, tcg_res, rd, pass, size);
9636             }
9637         }
9638 
9639         if (!is_scalar) {
9640             clear_vec_high(s, is_q, rd);
9641         }
9642     }
9643 }
9644 
9645 static void handle_2misc_reciprocal(DisasContext *s, int opcode,
9646                                     bool is_scalar, bool is_u, bool is_q,
9647                                     int size, int rn, int rd)
9648 {
9649     bool is_double = (size == 3);
9650     TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
9651 
9652     if (is_double) {
9653         TCGv_i64 tcg_op = tcg_temp_new_i64();
9654         TCGv_i64 tcg_res = tcg_temp_new_i64();
9655         int pass;
9656 
9657         for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
9658             read_vec_element(s, tcg_op, rn, pass, MO_64);
9659             switch (opcode) {
9660             case 0x3d: /* FRECPE */
9661                 gen_helper_recpe_f64(tcg_res, tcg_op, fpst);
9662                 break;
9663             case 0x3f: /* FRECPX */
9664                 gen_helper_frecpx_f64(tcg_res, tcg_op, fpst);
9665                 break;
9666             case 0x7d: /* FRSQRTE */
9667                 gen_helper_rsqrte_f64(tcg_res, tcg_op, fpst);
9668                 break;
9669             default:
9670                 g_assert_not_reached();
9671             }
9672             write_vec_element(s, tcg_res, rd, pass, MO_64);
9673         }
9674         clear_vec_high(s, !is_scalar, rd);
9675     } else {
9676         TCGv_i32 tcg_op = tcg_temp_new_i32();
9677         TCGv_i32 tcg_res = tcg_temp_new_i32();
9678         int pass, maxpasses;
9679 
9680         if (is_scalar) {
9681             maxpasses = 1;
9682         } else {
9683             maxpasses = is_q ? 4 : 2;
9684         }
9685 
9686         for (pass = 0; pass < maxpasses; pass++) {
9687             read_vec_element_i32(s, tcg_op, rn, pass, MO_32);
9688 
9689             switch (opcode) {
9690             case 0x3c: /* URECPE */
9691                 gen_helper_recpe_u32(tcg_res, tcg_op);
9692                 break;
9693             case 0x3d: /* FRECPE */
9694                 gen_helper_recpe_f32(tcg_res, tcg_op, fpst);
9695                 break;
9696             case 0x3f: /* FRECPX */
9697                 gen_helper_frecpx_f32(tcg_res, tcg_op, fpst);
9698                 break;
9699             case 0x7d: /* FRSQRTE */
9700                 gen_helper_rsqrte_f32(tcg_res, tcg_op, fpst);
9701                 break;
9702             default:
9703                 g_assert_not_reached();
9704             }
9705 
9706             if (is_scalar) {
9707                 write_fp_sreg(s, rd, tcg_res);
9708             } else {
9709                 write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
9710             }
9711         }
9712         if (!is_scalar) {
9713             clear_vec_high(s, is_q, rd);
9714         }
9715     }
9716 }
9717 
9718 static void handle_2misc_narrow(DisasContext *s, bool scalar,
9719                                 int opcode, bool u, bool is_q,
9720                                 int size, int rn, int rd)
9721 {
9722     /* Handle 2-reg-misc ops which are narrowing (so each 2*size element
9723      * in the source becomes a size element in the destination).
9724      */
9725     int pass;
9726     TCGv_i32 tcg_res[2];
9727     int destelt = is_q ? 2 : 0;
9728     int passes = scalar ? 1 : 2;
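    /*
     * With Q set (the "2" forms, e.g. XTN2) destelt steers the results
     * into the high half of Vd and the low half is left unchanged.
     */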
9729 
9730     if (scalar) {
9731         tcg_res[1] = tcg_constant_i32(0);
9732     }
9733 
9734     for (pass = 0; pass < passes; pass++) {
9735         TCGv_i64 tcg_op = tcg_temp_new_i64();
9736         NeonGenNarrowFn *genfn = NULL;
9737         NeonGenNarrowEnvFn *genenvfn = NULL;
9738 
9739         if (scalar) {
9740             read_vec_element(s, tcg_op, rn, pass, size + 1);
9741         } else {
9742             read_vec_element(s, tcg_op, rn, pass, MO_64);
9743         }
9744         tcg_res[pass] = tcg_temp_new_i32();
9745 
9746         switch (opcode) {
9747         case 0x12: /* XTN, SQXTUN */
9748         {
9749             static NeonGenNarrowFn * const xtnfns[3] = {
9750                 gen_helper_neon_narrow_u8,
9751                 gen_helper_neon_narrow_u16,
9752                 tcg_gen_extrl_i64_i32,
9753             };
9754             static NeonGenNarrowEnvFn * const sqxtunfns[3] = {
9755                 gen_helper_neon_unarrow_sat8,
9756                 gen_helper_neon_unarrow_sat16,
9757                 gen_helper_neon_unarrow_sat32,
9758             };
9759             if (u) {
9760                 genenvfn = sqxtunfns[size];
9761             } else {
9762                 genfn = xtnfns[size];
9763             }
9764             break;
9765         }
9766         case 0x14: /* SQXTN, UQXTN */
9767         {
9768             static NeonGenNarrowEnvFn * const fns[3][2] = {
9769                 { gen_helper_neon_narrow_sat_s8,
9770                   gen_helper_neon_narrow_sat_u8 },
9771                 { gen_helper_neon_narrow_sat_s16,
9772                   gen_helper_neon_narrow_sat_u16 },
9773                 { gen_helper_neon_narrow_sat_s32,
9774                   gen_helper_neon_narrow_sat_u32 },
9775             };
9776             genenvfn = fns[size][u];
9777             break;
9778         }
9779         case 0x16: /* FCVTN, FCVTN2 */
9780             /* 32 bit to 16 bit or 64 bit to 32 bit float conversion */
9781             if (size == 2) {
9782                 gen_helper_vfp_fcvtsd(tcg_res[pass], tcg_op, cpu_env);
9783             } else {
9784                 TCGv_i32 tcg_lo = tcg_temp_new_i32();
9785                 TCGv_i32 tcg_hi = tcg_temp_new_i32();
9786                 TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
9787                 TCGv_i32 ahp = get_ahp_flag();
9788 
9789                 tcg_gen_extr_i64_i32(tcg_lo, tcg_hi, tcg_op);
9790                 gen_helper_vfp_fcvt_f32_to_f16(tcg_lo, tcg_lo, fpst, ahp);
9791                 gen_helper_vfp_fcvt_f32_to_f16(tcg_hi, tcg_hi, fpst, ahp);
9792                 tcg_gen_deposit_i32(tcg_res[pass], tcg_lo, tcg_hi, 16, 16);
9793             }
9794             break;
9795         case 0x36: /* BFCVTN, BFCVTN2 */
9796             {
9797                 TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
9798                 gen_helper_bfcvt_pair(tcg_res[pass], tcg_op, fpst);
9799             }
9800             break;
9801         case 0x56:  /* FCVTXN, FCVTXN2 */
9802             /* 64 bit to 32 bit float conversion
9803              * with von Neumann rounding (round to odd)
9804              */
9805             assert(size == 2);
9806             gen_helper_fcvtx_f64_to_f32(tcg_res[pass], tcg_op, cpu_env);
9807             break;
9808         default:
9809             g_assert_not_reached();
9810         }
9811 
9812         if (genfn) {
9813             genfn(tcg_res[pass], tcg_op);
9814         } else if (genenvfn) {
9815             genenvfn(tcg_res[pass], cpu_env, tcg_op);
9816         }
9817     }
9818 
9819     for (pass = 0; pass < 2; pass++) {
9820         write_vec_element_i32(s, tcg_res[pass], rd, destelt + pass, MO_32);
9821     }
9822     clear_vec_high(s, is_q, rd);
9823 }
9824 
9825 /* Remaining saturating accumulating ops */
9826 static void handle_2misc_satacc(DisasContext *s, bool is_scalar, bool is_u,
9827                                 bool is_q, int size, int rn, int rd)
9828 {
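    /*
     * USQADD is unsigned Vd += signed Vn with unsigned saturation;
     * SUQADD is signed Vd += unsigned Vn with signed saturation.
     */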
9829     bool is_double = (size == 3);
9830 
9831     if (is_double) {
9832         TCGv_i64 tcg_rn = tcg_temp_new_i64();
9833         TCGv_i64 tcg_rd = tcg_temp_new_i64();
9834         int pass;
9835 
9836         for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
9837             read_vec_element(s, tcg_rn, rn, pass, MO_64);
9838             read_vec_element(s, tcg_rd, rd, pass, MO_64);
9839 
9840             if (is_u) { /* USQADD */
9841                 gen_helper_neon_uqadd_s64(tcg_rd, cpu_env, tcg_rn, tcg_rd);
9842             } else { /* SUQADD */
9843                 gen_helper_neon_sqadd_u64(tcg_rd, cpu_env, tcg_rn, tcg_rd);
9844             }
9845             write_vec_element(s, tcg_rd, rd, pass, MO_64);
9846         }
9847         clear_vec_high(s, !is_scalar, rd);
9848     } else {
9849         TCGv_i32 tcg_rn = tcg_temp_new_i32();
9850         TCGv_i32 tcg_rd = tcg_temp_new_i32();
9851         int pass, maxpasses;
9852 
9853         if (is_scalar) {
9854             maxpasses = 1;
9855         } else {
9856             maxpasses = is_q ? 4 : 2;
9857         }
9858 
9859         for (pass = 0; pass < maxpasses; pass++) {
9860             if (is_scalar) {
9861                 read_vec_element_i32(s, tcg_rn, rn, pass, size);
9862                 read_vec_element_i32(s, tcg_rd, rd, pass, size);
9863             } else {
9864                 read_vec_element_i32(s, tcg_rn, rn, pass, MO_32);
9865                 read_vec_element_i32(s, tcg_rd, rd, pass, MO_32);
9866             }
9867 
9868             if (is_u) { /* USQADD */
9869                 switch (size) {
9870                 case 0:
9871                     gen_helper_neon_uqadd_s8(tcg_rd, cpu_env, tcg_rn, tcg_rd);
9872                     break;
9873                 case 1:
9874                     gen_helper_neon_uqadd_s16(tcg_rd, cpu_env, tcg_rn, tcg_rd);
9875                     break;
9876                 case 2:
9877                     gen_helper_neon_uqadd_s32(tcg_rd, cpu_env, tcg_rn, tcg_rd);
9878                     break;
9879                 default:
9880                     g_assert_not_reached();
9881                 }
9882             } else { /* SUQADD */
9883                 switch (size) {
9884                 case 0:
9885                     gen_helper_neon_sqadd_u8(tcg_rd, cpu_env, tcg_rn, tcg_rd);
9886                     break;
9887                 case 1:
9888                     gen_helper_neon_sqadd_u16(tcg_rd, cpu_env, tcg_rn, tcg_rd);
9889                     break;
9890                 case 2:
9891                     gen_helper_neon_sqadd_u32(tcg_rd, cpu_env, tcg_rn, tcg_rd);
9892                     break;
9893                 default:
9894                     g_assert_not_reached();
9895                 }
9896             }
9897 
9898             if (is_scalar) {
9899                 write_vec_element(s, tcg_constant_i64(0), rd, 0, MO_64);
9900             }
9901             write_vec_element_i32(s, tcg_rd, rd, pass, MO_32);
9902         }
9903         clear_vec_high(s, is_q, rd);
9904     }
9905 }
9906 
9907 /* AdvSIMD scalar two reg misc
9908  *  31 30  29 28       24 23  22 21       17 16    12 11 10 9    5 4    0
9909  * +-----+---+-----------+------+-----------+--------+-----+------+------+
9910  * | 0 1 | U | 1 1 1 1 0 | size | 1 0 0 0 0 | opcode | 1 0 |  Rn  |  Rd  |
9911  * +-----+---+-----------+------+-----------+--------+-----+------+------+
9912  */
9913 static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn)
9914 {
9915     int rd = extract32(insn, 0, 5);
9916     int rn = extract32(insn, 5, 5);
9917     int opcode = extract32(insn, 12, 5);
9918     int size = extract32(insn, 22, 2);
9919     bool u = extract32(insn, 29, 1);
9920     bool is_fcvt = false;
9921     int rmode;
9922     TCGv_i32 tcg_rmode;
9923     TCGv_ptr tcg_fpstatus;
9924 
9925     switch (opcode) {
9926     case 0x3: /* USQADD / SUQADD */
9927         if (!fp_access_check(s)) {
9928             return;
9929         }
9930         handle_2misc_satacc(s, true, u, false, size, rn, rd);
9931         return;
9932     case 0x7: /* SQABS / SQNEG */
9933         break;
9934     case 0xa: /* CMLT */
9935         if (u) {
9936             unallocated_encoding(s);
9937             return;
9938         }
9939         /* fall through */
9940     case 0x8: /* CMGT, CMGE */
9941     case 0x9: /* CMEQ, CMLE */
9942     case 0xb: /* ABS, NEG */
9943         if (size != 3) {
9944             unallocated_encoding(s);
9945             return;
9946         }
9947         break;
9948     case 0x12: /* SQXTUN */
9949         if (!u) {
9950             unallocated_encoding(s);
9951             return;
9952         }
9953         /* fall through */
9954     case 0x14: /* SQXTN, UQXTN */
9955         if (size == 3) {
9956             unallocated_encoding(s);
9957             return;
9958         }
9959         if (!fp_access_check(s)) {
9960             return;
9961         }
9962         handle_2misc_narrow(s, true, opcode, u, false, size, rn, rd);
9963         return;
9964     case 0xc ... 0xf:
9965     case 0x16 ... 0x1d:
9966     case 0x1f:
9967         /* Floating point: U, size[1] and opcode indicate operation;
9968          * size[0] indicates single or double precision.
9969          */
9970         opcode |= (extract32(size, 1, 1) << 5) | (u << 6);
9971         size = extract32(size, 0, 1) ? 3 : 2;
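        /* e.g. FCMGT (zero): U 0, size 1x, opcode 0xc -> 0x2c below */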
9972         switch (opcode) {
9973         case 0x2c: /* FCMGT (zero) */
9974         case 0x2d: /* FCMEQ (zero) */
9975         case 0x2e: /* FCMLT (zero) */
9976         case 0x6c: /* FCMGE (zero) */
9977         case 0x6d: /* FCMLE (zero) */
9978             handle_2misc_fcmp_zero(s, opcode, true, u, true, size, rn, rd);
9979             return;
9980         case 0x1d: /* SCVTF */
9981         case 0x5d: /* UCVTF */
9982         {
9983             bool is_signed = (opcode == 0x1d);
9984             if (!fp_access_check(s)) {
9985                 return;
9986             }
9987             handle_simd_intfp_conv(s, rd, rn, 1, is_signed, 0, size);
9988             return;
9989         }
9990         case 0x3d: /* FRECPE */
9991         case 0x3f: /* FRECPX */
9992         case 0x7d: /* FRSQRTE */
9993             if (!fp_access_check(s)) {
9994                 return;
9995             }
9996             handle_2misc_reciprocal(s, opcode, true, u, true, size, rn, rd);
9997             return;
9998         case 0x1a: /* FCVTNS */
9999         case 0x1b: /* FCVTMS */
10000         case 0x3a: /* FCVTPS */
10001         case 0x3b: /* FCVTZS */
10002         case 0x5a: /* FCVTNU */
10003         case 0x5b: /* FCVTMU */
10004         case 0x7a: /* FCVTPU */
10005         case 0x7b: /* FCVTZU */
10006             is_fcvt = true;
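            /*
             * opcode<5> and opcode<0> map to the FPRounding encoding:
             * N -> TIEEVEN, P -> POSINF, M -> NEGINF, Z -> ZERO.
             */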
10007             rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
10008             break;
10009         case 0x1c: /* FCVTAS */
10010         case 0x5c: /* FCVTAU */
10011             /* TIEAWAY doesn't fit in the usual rounding mode encoding */
10012             is_fcvt = true;
10013             rmode = FPROUNDING_TIEAWAY;
10014             break;
10015         case 0x56: /* FCVTXN, FCVTXN2 */
10016             if (size == 2) {
10017                 unallocated_encoding(s);
10018                 return;
10019             }
10020             if (!fp_access_check(s)) {
10021                 return;
10022             }
10023             handle_2misc_narrow(s, true, opcode, u, false, size - 1, rn, rd);
10024             return;
10025         default:
10026             unallocated_encoding(s);
10027             return;
10028         }
10029         break;
10030     default:
10031         unallocated_encoding(s);
10032         return;
10033     }
10034 
10035     if (!fp_access_check(s)) {
10036         return;
10037     }
10038 
10039     if (is_fcvt) {
10040         tcg_fpstatus = fpstatus_ptr(FPST_FPCR);
10041         tcg_rmode = gen_set_rmode(rmode, tcg_fpstatus);
10042     } else {
10043         tcg_fpstatus = NULL;
10044         tcg_rmode = NULL;
10045     }
10046 
10047     if (size == 3) {
10048         TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
10049         TCGv_i64 tcg_rd = tcg_temp_new_i64();
10050 
10051         handle_2misc_64(s, opcode, u, tcg_rd, tcg_rn, tcg_rmode, tcg_fpstatus);
10052         write_fp_dreg(s, rd, tcg_rd);
10053     } else {
10054         TCGv_i32 tcg_rn = tcg_temp_new_i32();
10055         TCGv_i32 tcg_rd = tcg_temp_new_i32();
10056 
10057         read_vec_element_i32(s, tcg_rn, rn, 0, size);
10058 
10059         switch (opcode) {
10060         case 0x7: /* SQABS, SQNEG */
10061         {
10062             NeonGenOneOpEnvFn *genfn;
10063             static NeonGenOneOpEnvFn * const fns[3][2] = {
10064                 { gen_helper_neon_qabs_s8, gen_helper_neon_qneg_s8 },
10065                 { gen_helper_neon_qabs_s16, gen_helper_neon_qneg_s16 },
10066                 { gen_helper_neon_qabs_s32, gen_helper_neon_qneg_s32 },
10067             };
10068             genfn = fns[size][u];
10069             genfn(tcg_rd, cpu_env, tcg_rn);
10070             break;
10071         }
10072         case 0x1a: /* FCVTNS */
10073         case 0x1b: /* FCVTMS */
10074         case 0x1c: /* FCVTAS */
10075         case 0x3a: /* FCVTPS */
10076         case 0x3b: /* FCVTZS */
10077             gen_helper_vfp_tosls(tcg_rd, tcg_rn, tcg_constant_i32(0),
10078                                  tcg_fpstatus);
10079             break;
10080         case 0x5a: /* FCVTNU */
10081         case 0x5b: /* FCVTMU */
10082         case 0x5c: /* FCVTAU */
10083         case 0x7a: /* FCVTPU */
10084         case 0x7b: /* FCVTZU */
10085             gen_helper_vfp_touls(tcg_rd, tcg_rn, tcg_constant_i32(0),
10086                                  tcg_fpstatus);
10087             break;
10088         default:
10089             g_assert_not_reached();
10090         }
10091 
10092         write_fp_sreg(s, rd, tcg_rd);
10093     }
10094 
10095     if (is_fcvt) {
10096         gen_restore_rmode(tcg_rmode, tcg_fpstatus);
10097     }
10098 }
10099 
10100 /* SSHR[RA]/USHR[RA] - Vector shift right (optional rounding/accumulate) */
10101 static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u,
10102                                  int immh, int immb, int opcode, int rn, int rd)
10103 {
10104     int size = 32 - clz32(immh) - 1;
10105     int immhb = immh << 3 | immb;
10106     int shift = 2 * (8 << size) - immhb;
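    /*
     * The position of the leading 1 in immh gives the element size,
     * e.g. immh = 0001 is 8-bit elements, and immh:immb = 0001:010
     * then encodes a right shift of 2 * 8 - 10 = 6.
     */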
10107     GVecGen2iFn *gvec_fn;
10108 
10109     if (extract32(immh, 3, 1) && !is_q) {
10110         unallocated_encoding(s);
10111         return;
10112     }
10113     tcg_debug_assert(size <= 3);
10114 
10115     if (!fp_access_check(s)) {
10116         return;
10117     }
10118 
10119     switch (opcode) {
10120     case 0x02: /* SSRA / USRA (accumulate) */
10121         gvec_fn = is_u ? gen_gvec_usra : gen_gvec_ssra;
10122         break;
10123 
10124     case 0x08: /* SRI */
10125         gvec_fn = gen_gvec_sri;
10126         break;
10127 
10128     case 0x00: /* SSHR / USHR */
10129         if (is_u) {
10130             if (shift == 8 << size) {
10131                 /* Shift count the same size as element size produces zero.  */
10132                 tcg_gen_gvec_dup_imm(size, vec_full_reg_offset(s, rd),
10133                                      is_q ? 16 : 8, vec_full_reg_size(s), 0);
10134                 return;
10135             }
10136             gvec_fn = tcg_gen_gvec_shri;
10137         } else {
10138             /* Shift count the same size as element size produces all sign.  */
10139             if (shift == 8 << size) {
10140                 shift -= 1;
10141             }
10142             gvec_fn = tcg_gen_gvec_sari;
10143         }
10144         break;
10145 
10146     case 0x04: /* SRSHR / URSHR (rounding) */
10147         gvec_fn = is_u ? gen_gvec_urshr : gen_gvec_srshr;
10148         break;
10149 
10150     case 0x06: /* SRSRA / URSRA (accum + rounding) */
10151         gvec_fn = is_u ? gen_gvec_ursra : gen_gvec_srsra;
10152         break;
10153 
10154     default:
10155         g_assert_not_reached();
10156     }
10157 
10158     gen_gvec_fn2i(s, is_q, rd, rn, shift, gvec_fn, size);
10159 }
10160 
10161 /* SHL/SLI - Vector shift left */
10162 static void handle_vec_simd_shli(DisasContext *s, bool is_q, bool insert,
10163                                  int immh, int immb, int opcode, int rn, int rd)
10164 {
10165     int size = 32 - clz32(immh) - 1;
10166     int immhb = immh << 3 | immb;
10167     int shift = immhb - (8 << size);
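    /*
     * The left shift amount is immh:immb minus the element size in
     * bits, e.g. immh:immb = 0010:011 shifts 16-bit elements left by 3.
     */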
10168 
10169     /* Range of size is limited by decode: immh is a non-zero 4 bit field */
10170     assert(size >= 0 && size <= 3);
10171 
10172     if (extract32(immh, 3, 1) && !is_q) {
10173         unallocated_encoding(s);
10174         return;
10175     }
10176 
10177     if (!fp_access_check(s)) {
10178         return;
10179     }
10180 
10181     if (insert) {
10182         gen_gvec_fn2i(s, is_q, rd, rn, shift, gen_gvec_sli, size);
10183     } else {
10184         gen_gvec_fn2i(s, is_q, rd, rn, shift, tcg_gen_gvec_shli, size);
10185     }
10186 }
10187 
10188 /* SSHLL/USHLL - Vector shift left with widening */
10189 static void handle_vec_simd_wshli(DisasContext *s, bool is_q, bool is_u,
10190                                  int immh, int immb, int opcode, int rn, int rd)
10191 {
10192     int size = 32 - clz32(immh) - 1;
10193     int immhb = immh << 3 | immb;
10194     int shift = immhb - (8 << size);
10195     int dsize = 64;
10196     int esize = 8 << size;
10197     int elements = dsize / esize;
10198     TCGv_i64 tcg_rn = tcg_temp_new_i64();
10199     TCGv_i64 tcg_rd = tcg_temp_new_i64();
10200     int i;
10201 
10202     if (size >= 3) {
10203         unallocated_encoding(s);
10204         return;
10205     }
10206 
10207     if (!fp_access_check(s)) {
10208         return;
10209     }
10210 
10211     /* For the LL variants the store is larger than the load,
10212      * so if rd == rn we would overwrite parts of our input;
10213      * load everything up front and use shifts in the main loop.
10214      */
10215     read_vec_element(s, tcg_rn, rn, is_q ? 1 : 0, MO_64);
10216 
10217     for (i = 0; i < elements; i++) {
10218         tcg_gen_shri_i64(tcg_rd, tcg_rn, i * esize);
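        /*
         * ext_and_shift_reg options 0..3 are UXTB..UXTX and 4..7 are
         * SXTB..SXTX, so size | (!is_u << 2) gives the widening
         * extension of the correct signedness.
         */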
10219         ext_and_shift_reg(tcg_rd, tcg_rd, size | (!is_u << 2), 0);
10220         tcg_gen_shli_i64(tcg_rd, tcg_rd, shift);
10221         write_vec_element(s, tcg_rd, rd, i, size + 1);
10222     }
10223 }
10224 
10225 /* SHRN/RSHRN - Shift right with narrowing (and potential rounding) */
10226 static void handle_vec_simd_shrn(DisasContext *s, bool is_q,
10227                                  int immh, int immb, int opcode, int rn, int rd)
10228 {
10229     int immhb = immh << 3 | immb;
10230     int size = 32 - clz32(immh) - 1;
10231     int dsize = 64;
10232     int esize = 8 << size;
10233     int elements = dsize / esize;
10234     int shift = (2 * esize) - immhb;
10235     bool round = extract32(opcode, 0, 1);
10236     TCGv_i64 tcg_rn, tcg_rd, tcg_final;
10237     TCGv_i64 tcg_round;
10238     int i;
10239 
10240     if (extract32(immh, 3, 1)) {
10241         unallocated_encoding(s);
10242         return;
10243     }
10244 
10245     if (!fp_access_check(s)) {
10246         return;
10247     }
10248 
10249     tcg_rn = tcg_temp_new_i64();
10250     tcg_rd = tcg_temp_new_i64();
10251     tcg_final = tcg_temp_new_i64();
10252     read_vec_element(s, tcg_final, rd, is_q ? 1 : 0, MO_64);
10253 
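    /* Rounding adds 1 << (shift - 1) to each element before shifting. */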
10254     if (round) {
10255         tcg_round = tcg_constant_i64(1ULL << (shift - 1));
10256     } else {
10257         tcg_round = NULL;
10258     }
10259 
10260     for (i = 0; i < elements; i++) {
10261         read_vec_element(s, tcg_rn, rn, i, size + 1);
10262         handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
10263                                 false, true, size + 1, shift);
10264 
10265         tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize);
10266     }
10267 
10268     if (!is_q) {
10269         write_vec_element(s, tcg_final, rd, 0, MO_64);
10270     } else {
10271         write_vec_element(s, tcg_final, rd, 1, MO_64);
10272     }
10273 
10274     clear_vec_high(s, is_q, rd);
10275 }
10276 
10277 
10278 /* AdvSIMD shift by immediate
10279  *  31  30   29 28         23 22  19 18  16 15    11  10 9    5 4    0
10280  * +---+---+---+-------------+------+------+--------+---+------+------+
10281  * | 0 | Q | U | 0 1 1 1 1 0 | immh | immb | opcode | 1 |  Rn  |  Rd  |
10282  * +---+---+---+-------------+------+------+--------+---+------+------+
10283  */
10284 static void disas_simd_shift_imm(DisasContext *s, uint32_t insn)
10285 {
10286     int rd = extract32(insn, 0, 5);
10287     int rn = extract32(insn, 5, 5);
10288     int opcode = extract32(insn, 11, 5);
10289     int immb = extract32(insn, 16, 3);
10290     int immh = extract32(insn, 19, 4);
10291     bool is_u = extract32(insn, 29, 1);
10292     bool is_q = extract32(insn, 30, 1);
10293 
10294     /* data_proc_simd[] has sent immh == 0 to disas_simd_mod_imm. */
10295     assert(immh != 0);
10296 
10297     switch (opcode) {
10298     case 0x08: /* SRI */
10299         if (!is_u) {
10300             unallocated_encoding(s);
10301             return;
10302         }
10303         /* fall through */
10304     case 0x00: /* SSHR / USHR */
10305     case 0x02: /* SSRA / USRA (accumulate) */
10306     case 0x04: /* SRSHR / URSHR (rounding) */
10307     case 0x06: /* SRSRA / URSRA (accum + rounding) */
10308         handle_vec_simd_shri(s, is_q, is_u, immh, immb, opcode, rn, rd);
10309         break;
10310     case 0x0a: /* SHL / SLI */
10311         handle_vec_simd_shli(s, is_q, is_u, immh, immb, opcode, rn, rd);
10312         break;
10313     case 0x10: /* SHRN */
10314     case 0x11: /* RSHRN / SQRSHRUN */
10315         if (is_u) {
10316             handle_vec_simd_sqshrn(s, false, is_q, false, true, immh, immb,
10317                                    opcode, rn, rd);
10318         } else {
10319             handle_vec_simd_shrn(s, is_q, immh, immb, opcode, rn, rd);
10320         }
10321         break;
10322     case 0x12: /* SQSHRN / UQSHRN */
10323     case 0x13: /* SQRSHRN / UQRSHRN */
10324         handle_vec_simd_sqshrn(s, false, is_q, is_u, is_u, immh, immb,
10325                                opcode, rn, rd);
10326         break;
10327     case 0x14: /* SSHLL / USHLL */
10328         handle_vec_simd_wshli(s, is_q, is_u, immh, immb, opcode, rn, rd);
10329         break;
10330     case 0x1c: /* SCVTF / UCVTF */
10331         handle_simd_shift_intfp_conv(s, false, is_q, is_u, immh, immb,
10332                                      opcode, rn, rd);
10333         break;
10334     case 0xc: /* SQSHLU */
10335         if (!is_u) {
10336             unallocated_encoding(s);
10337             return;
10338         }
10339         handle_simd_qshl(s, false, is_q, false, true, immh, immb, rn, rd);
10340         break;
10341     case 0xe: /* SQSHL, UQSHL */
10342         handle_simd_qshl(s, false, is_q, is_u, is_u, immh, immb, rn, rd);
10343         break;
10344     case 0x1f: /* FCVTZS/ FCVTZU */
10345         handle_simd_shift_fpint_conv(s, false, is_q, is_u, immh, immb, rn, rd);
10346         return;
10347     default:
10348         unallocated_encoding(s);
10349         return;
10350     }
10351 }
10352 
10353 /* Generate code to do a "long" addition or subtraction, i.e. one done in
10354  * TCGv_i64 on vector lanes twice the width specified by size.
10355  */
10356 static void gen_neon_addl(int size, bool is_sub, TCGv_i64 tcg_res,
10357                           TCGv_i64 tcg_op1, TCGv_i64 tcg_op2)
10358 {
10359     static NeonGenTwo64OpFn * const fns[3][2] = {
10360         { gen_helper_neon_addl_u16, gen_helper_neon_subl_u16 },
10361         { gen_helper_neon_addl_u32, gen_helper_neon_subl_u32 },
10362         { tcg_gen_add_i64, tcg_gen_sub_i64 },
10363     };
10364     NeonGenTwo64OpFn *genfn;
10365     assert(size < 3);
10366 
10367     genfn = fns[size][is_sub];
10368     genfn(tcg_res, tcg_op1, tcg_op2);
10369 }
10370 
10371 static void handle_3rd_widening(DisasContext *s, int is_q, int is_u, int size,
10372                                 int opcode, int rd, int rn, int rm)
10373 {
10374     /* 3-reg-different widening insns: 64 x 64 -> 128 */
10375     TCGv_i64 tcg_res[2];
10376     int pass, accop;
10377 
10378     tcg_res[0] = tcg_temp_new_i64();
10379     tcg_res[1] = tcg_temp_new_i64();
10380 
10381     /* Does this op do an adding accumulate, a subtracting accumulate,
10382      * or no accumulate at all?
10383      */
10384     switch (opcode) {
10385     case 5:
10386     case 8:
10387     case 9:
10388         accop = 1;
10389         break;
10390     case 10:
10391     case 11:
10392         accop = -1;
10393         break;
10394     default:
10395         accop = 0;
10396         break;
10397     }
10398 
10399     if (accop != 0) {
10400         read_vec_element(s, tcg_res[0], rd, 0, MO_64);
10401         read_vec_element(s, tcg_res[1], rd, 1, MO_64);
10402     }
10403 
10404     /* size == 2 means two 32x32->64 operations; this is worth special
10405      * casing because we can generally handle it inline.
10406      */
10407     if (size == 2) {
10408         for (pass = 0; pass < 2; pass++) {
10409             TCGv_i64 tcg_op1 = tcg_temp_new_i64();
10410             TCGv_i64 tcg_op2 = tcg_temp_new_i64();
10411             TCGv_i64 tcg_passres;
10412             MemOp memop = MO_32 | (is_u ? 0 : MO_SIGN);
10413 
10414             int elt = pass + is_q * 2;
10415 
10416             read_vec_element(s, tcg_op1, rn, elt, memop);
10417             read_vec_element(s, tcg_op2, rm, elt, memop);
10418 
10419             if (accop == 0) {
10420                 tcg_passres = tcg_res[pass];
10421             } else {
10422                 tcg_passres = tcg_temp_new_i64();
10423             }
10424 
10425             switch (opcode) {
10426             case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
10427                 tcg_gen_add_i64(tcg_passres, tcg_op1, tcg_op2);
10428                 break;
10429             case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
10430                 tcg_gen_sub_i64(tcg_passres, tcg_op1, tcg_op2);
10431                 break;
10432             case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
10433             case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
10434             {
10435                 TCGv_i64 tcg_tmp1 = tcg_temp_new_i64();
10436                 TCGv_i64 tcg_tmp2 = tcg_temp_new_i64();
10437 
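                /*
                 * Absolute difference: compute both differences and
                 * use movcond to pick the non-negative one.
                 */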
10438                 tcg_gen_sub_i64(tcg_tmp1, tcg_op1, tcg_op2);
10439                 tcg_gen_sub_i64(tcg_tmp2, tcg_op2, tcg_op1);
10440                 tcg_gen_movcond_i64(is_u ? TCG_COND_GEU : TCG_COND_GE,
10441                                     tcg_passres,
10442                                     tcg_op1, tcg_op2, tcg_tmp1, tcg_tmp2);
10443                 break;
10444             }
10445             case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
10446             case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
10447             case 12: /* UMULL, UMULL2, SMULL, SMULL2 */
10448                 tcg_gen_mul_i64(tcg_passres, tcg_op1, tcg_op2);
10449                 break;
10450             case 9: /* SQDMLAL, SQDMLAL2 */
10451             case 11: /* SQDMLSL, SQDMLSL2 */
10452             case 13: /* SQDMULL, SQDMULL2 */
10453                 tcg_gen_mul_i64(tcg_passres, tcg_op1, tcg_op2);
10454                 gen_helper_neon_addl_saturate_s64(tcg_passres, cpu_env,
10455                                                   tcg_passres, tcg_passres);
10456                 break;
10457             default:
10458                 g_assert_not_reached();
10459             }
10460 
10461             if (opcode == 9 || opcode == 11) {
10462                 /* saturating accumulate ops */
10463                 if (accop < 0) {
10464                     tcg_gen_neg_i64(tcg_passres, tcg_passres);
10465                 }
10466                 gen_helper_neon_addl_saturate_s64(tcg_res[pass], cpu_env,
10467                                                   tcg_res[pass], tcg_passres);
10468             } else if (accop > 0) {
10469                 tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
10470             } else if (accop < 0) {
10471                 tcg_gen_sub_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
10472             }
10473         }
10474     } else {
10475         /* size 0 or 1, generally helper functions */
10476         for (pass = 0; pass < 2; pass++) {
10477             TCGv_i32 tcg_op1 = tcg_temp_new_i32();
10478             TCGv_i32 tcg_op2 = tcg_temp_new_i32();
10479             TCGv_i64 tcg_passres;
10480             int elt = pass + is_q * 2;
10481 
10482             read_vec_element_i32(s, tcg_op1, rn, elt, MO_32);
10483             read_vec_element_i32(s, tcg_op2, rm, elt, MO_32);
10484 
10485             if (accop == 0) {
10486                 tcg_passres = tcg_res[pass];
10487             } else {
10488                 tcg_passres = tcg_temp_new_i64();
10489             }
10490 
10491             switch (opcode) {
10492             case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
10493             case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
10494             {
10495                 TCGv_i64 tcg_op2_64 = tcg_temp_new_i64();
10496                 static NeonGenWidenFn * const widenfns[2][2] = {
10497                     { gen_helper_neon_widen_s8, gen_helper_neon_widen_u8 },
10498                     { gen_helper_neon_widen_s16, gen_helper_neon_widen_u16 },
10499                 };
10500                 NeonGenWidenFn *widenfn = widenfns[size][is_u];
10501 
10502                 widenfn(tcg_op2_64, tcg_op2);
10503                 widenfn(tcg_passres, tcg_op1);
10504                 gen_neon_addl(size, (opcode == 2), tcg_passres,
10505                               tcg_passres, tcg_op2_64);
10506                 break;
10507             }
10508             case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
10509             case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
10510                 if (size == 0) {
10511                     if (is_u) {
10512                         gen_helper_neon_abdl_u16(tcg_passres, tcg_op1, tcg_op2);
10513                     } else {
10514                         gen_helper_neon_abdl_s16(tcg_passres, tcg_op1, tcg_op2);
10515                     }
10516                 } else {
10517                     if (is_u) {
10518                         gen_helper_neon_abdl_u32(tcg_passres, tcg_op1, tcg_op2);
10519                     } else {
10520                         gen_helper_neon_abdl_s32(tcg_passres, tcg_op1, tcg_op2);
10521                     }
10522                 }
10523                 break;
10524             case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
10525             case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
10526             case 12: /* UMULL, UMULL2, SMULL, SMULL2 */
10527                 if (size == 0) {
10528                     if (is_u) {
10529                         gen_helper_neon_mull_u8(tcg_passres, tcg_op1, tcg_op2);
10530                     } else {
10531                         gen_helper_neon_mull_s8(tcg_passres, tcg_op1, tcg_op2);
10532                     }
10533                 } else {
10534                     if (is_u) {
10535                         gen_helper_neon_mull_u16(tcg_passres, tcg_op1, tcg_op2);
10536                     } else {
10537                         gen_helper_neon_mull_s16(tcg_passres, tcg_op1, tcg_op2);
10538                     }
10539                 }
10540                 break;
10541             case 9: /* SQDMLAL, SQDMLAL2 */
10542             case 11: /* SQDMLSL, SQDMLSL2 */
10543             case 13: /* SQDMULL, SQDMULL2 */
10544                 assert(size == 1);
10545                 gen_helper_neon_mull_s16(tcg_passres, tcg_op1, tcg_op2);
10546                 gen_helper_neon_addl_saturate_s32(tcg_passres, cpu_env,
10547                                                   tcg_passres, tcg_passres);
10548                 break;
10549             default:
10550                 g_assert_not_reached();
10551             }
10552 
10553             if (accop != 0) {
10554                 if (opcode == 9 || opcode == 11) {
10555                     /* saturating accumulate ops */
10556                     if (accop < 0) {
10557                         gen_helper_neon_negl_u32(tcg_passres, tcg_passres);
10558                     }
10559                     gen_helper_neon_addl_saturate_s32(tcg_res[pass], cpu_env,
10560                                                       tcg_res[pass],
10561                                                       tcg_passres);
10562                 } else {
10563                     gen_neon_addl(size, (accop < 0), tcg_res[pass],
10564                                   tcg_res[pass], tcg_passres);
10565                 }
10566             }
10567         }
10568     }
10569 
10570     write_vec_element(s, tcg_res[0], rd, 0, MO_64);
10571     write_vec_element(s, tcg_res[1], rd, 1, MO_64);
10572 }
10573 
10574 static void handle_3rd_wide(DisasContext *s, int is_q, int is_u, int size,
10575                             int opcode, int rd, int rn, int rm)
10576 {
10577     TCGv_i64 tcg_res[2];
10578     int part = is_q ? 2 : 0;
10579     int pass;
10580 
10581     for (pass = 0; pass < 2; pass++) {
10582         TCGv_i64 tcg_op1 = tcg_temp_new_i64();
10583         TCGv_i32 tcg_op2 = tcg_temp_new_i32();
10584         TCGv_i64 tcg_op2_wide = tcg_temp_new_i64();
10585         static NeonGenWidenFn * const widenfns[3][2] = {
10586             { gen_helper_neon_widen_s8, gen_helper_neon_widen_u8 },
10587             { gen_helper_neon_widen_s16, gen_helper_neon_widen_u16 },
10588             { tcg_gen_ext_i32_i64, tcg_gen_extu_i32_i64 },
10589         };
10590         NeonGenWidenFn *widenfn = widenfns[size][is_u];
10591 
10592         read_vec_element(s, tcg_op1, rn, pass, MO_64);
10593         read_vec_element_i32(s, tcg_op2, rm, part + pass, MO_32);
10594         widenfn(tcg_op2_wide, tcg_op2);
10595         tcg_res[pass] = tcg_temp_new_i64();
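        /* opcode 1 is SADDW/UADDW, opcode 3 is SSUBW/USUBW */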
10596         gen_neon_addl(size, (opcode == 3),
10597                       tcg_res[pass], tcg_op1, tcg_op2_wide);
10598     }
10599 
10600     for (pass = 0; pass < 2; pass++) {
10601         write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
10602     }
10603 }
10604 
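/* Narrow a 64-bit input to its rounded high 32 bits. */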
10605 static void do_narrow_round_high_u32(TCGv_i32 res, TCGv_i64 in)
10606 {
10607     tcg_gen_addi_i64(in, in, 1U << 31);
10608     tcg_gen_extrh_i64_i32(res, in);
10609 }
10610 
10611 static void handle_3rd_narrowing(DisasContext *s, int is_q, int is_u, int size,
10612                                  int opcode, int rd, int rn, int rm)
10613 {
10614     TCGv_i32 tcg_res[2];
10615     int part = is_q ? 2 : 0;
10616     int pass;
10617 
10618     for (pass = 0; pass < 2; pass++) {
10619         TCGv_i64 tcg_op1 = tcg_temp_new_i64();
10620         TCGv_i64 tcg_op2 = tcg_temp_new_i64();
10621         TCGv_i64 tcg_wideres = tcg_temp_new_i64();
10622         static NeonGenNarrowFn * const narrowfns[3][2] = {
10623             { gen_helper_neon_narrow_high_u8,
10624               gen_helper_neon_narrow_round_high_u8 },
10625             { gen_helper_neon_narrow_high_u16,
10626               gen_helper_neon_narrow_round_high_u16 },
10627             { tcg_gen_extrh_i64_i32, do_narrow_round_high_u32 },
10628         };
10629         NeonGenNarrowFn *gennarrow = narrowfns[size][is_u];
10630 
10631         read_vec_element(s, tcg_op1, rn, pass, MO_64);
10632         read_vec_element(s, tcg_op2, rm, pass, MO_64);
10633 
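        /* opcode 4 is ADDHN/RADDHN, opcode 6 is SUBHN/RSUBHN */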
10634         gen_neon_addl(size, (opcode == 6), tcg_wideres, tcg_op1, tcg_op2);
10635 
10636         tcg_res[pass] = tcg_temp_new_i32();
10637         gennarrow(tcg_res[pass], tcg_wideres);
10638     }
10639 
10640     for (pass = 0; pass < 2; pass++) {
10641         write_vec_element_i32(s, tcg_res[pass], rd, pass + part, MO_32);
10642     }
10643     clear_vec_high(s, is_q, rd);
10644 }
10645 
10646 /* AdvSIMD three different
10647  *   31  30  29 28       24 23  22  21 20  16 15    12 11 10 9    5 4    0
10648  * +---+---+---+-----------+------+---+------+--------+-----+------+------+
10649  * | 0 | Q | U | 0 1 1 1 0 | size | 1 |  Rm  | opcode | 0 0 |  Rn  |  Rd  |
10650  * +---+---+---+-----------+------+---+------+--------+-----+------+------+
10651  */
10652 static void disas_simd_three_reg_diff(DisasContext *s, uint32_t insn)
10653 {
10654     /* Instructions in this group fall into three basic classes
10655      * (in each case with the operation working on each element in
10656      * the input vectors):
10657      * (1) widening 64 x 64 -> 128 (with possibly Vd as an extra
10658      *     128 bit input)
10659      * (2) wide 64 x 128 -> 128
10660      * (3) narrowing 128 x 128 -> 64
10661      * Here we do initial decode, catch unallocated cases and
10662      * dispatch to separate functions for each class.
10663      */
10664     int is_q = extract32(insn, 30, 1);
10665     int is_u = extract32(insn, 29, 1);
10666     int size = extract32(insn, 22, 2);
10667     int opcode = extract32(insn, 12, 4);
10668     int rm = extract32(insn, 16, 5);
10669     int rn = extract32(insn, 5, 5);
10670     int rd = extract32(insn, 0, 5);
10671 
10672     switch (opcode) {
10673     case 1: /* SADDW, SADDW2, UADDW, UADDW2 */
10674     case 3: /* SSUBW, SSUBW2, USUBW, USUBW2 */
10675         /* 64 x 128 -> 128 */
10676         if (size == 3) {
10677             unallocated_encoding(s);
10678             return;
10679         }
10680         if (!fp_access_check(s)) {
10681             return;
10682         }
10683         handle_3rd_wide(s, is_q, is_u, size, opcode, rd, rn, rm);
10684         break;
10685     case 4: /* ADDHN, ADDHN2, RADDHN, RADDHN2 */
10686     case 6: /* SUBHN, SUBHN2, RSUBHN, RSUBHN2 */
10687         /* 128 x 128 -> 64 */
10688         if (size == 3) {
10689             unallocated_encoding(s);
10690             return;
10691         }
10692         if (!fp_access_check(s)) {
10693             return;
10694         }
10695         handle_3rd_narrowing(s, is_q, is_u, size, opcode, rd, rn, rm);
10696         break;
10697     case 14: /* PMULL, PMULL2 */
10698         if (is_u) {
10699             unallocated_encoding(s);
10700             return;
10701         }
10702         switch (size) {
10703         case 0: /* PMULL.P8 */
10704             if (!fp_access_check(s)) {
10705                 return;
10706             }
10707             /* The Q field specifies lo/hi half input for this insn.  */
10708             gen_gvec_op3_ool(s, true, rd, rn, rm, is_q,
10709                              gen_helper_neon_pmull_h);
10710             break;
10711 
10712         case 3: /* PMULL.P64 */
10713             if (!dc_isar_feature(aa64_pmull, s)) {
10714                 unallocated_encoding(s);
10715                 return;
10716             }
10717             if (!fp_access_check(s)) {
10718                 return;
10719             }
10720             /* The Q field specifies lo/hi half input for this insn.  */
10721             gen_gvec_op3_ool(s, true, rd, rn, rm, is_q,
10722                              gen_helper_gvec_pmull_q);
10723             break;
10724 
10725         default:
10726             unallocated_encoding(s);
10727             break;
10728         }
10729         return;
10730     case 9: /* SQDMLAL, SQDMLAL2 */
10731     case 11: /* SQDMLSL, SQDMLSL2 */
10732     case 13: /* SQDMULL, SQDMULL2 */
10733         if (is_u || size == 0) {
10734             unallocated_encoding(s);
10735             return;
10736         }
10737         /* fall through */
10738     case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
10739     case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
10740     case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
10741     case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
10742     case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
10743     case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
10744     case 12: /* SMULL, SMULL2, UMULL, UMULL2 */
10745         /* 64 x 64 -> 128 */
10746         if (size == 3) {
10747             unallocated_encoding(s);
10748             return;
10749         }
10750         if (!fp_access_check(s)) {
10751             return;
10752         }
10753 
10754         handle_3rd_widening(s, is_q, is_u, size, opcode, rd, rn, rm);
10755         break;
10756     default:
10757         /* opcode 15 not allocated */
10758         unallocated_encoding(s);
10759         break;
10760     }
10761 }
10762 
10763 /* Logic op (opcode == 3) subgroup of C3.6.16. */
10764 static void disas_simd_3same_logic(DisasContext *s, uint32_t insn)
10765 {
10766     int rd = extract32(insn, 0, 5);
10767     int rn = extract32(insn, 5, 5);
10768     int rm = extract32(insn, 16, 5);
10769     int size = extract32(insn, 22, 2);
10770     bool is_u = extract32(insn, 29, 1);
10771     bool is_q = extract32(insn, 30, 1);
10772 
10773     if (!fp_access_check(s)) {
10774         return;
10775     }
10776 
10777     switch (size + 4 * is_u) {
10778     case 0: /* AND */
10779         gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_and, 0);
10780         return;
10781     case 1: /* BIC */
10782         gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_andc, 0);
10783         return;
10784     case 2: /* ORR */
10785         gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_or, 0);
10786         return;
10787     case 3: /* ORN */
10788         gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_orc, 0);
10789         return;
10790     case 4: /* EOR */
10791         gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_xor, 0);
10792         return;
10793 
10794     case 5: /* BSL bitwise select */
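    /*
     * The remaining ops are bit selects: bitsel(d, m, t, f) computes
     * d = (t & m) | (f & ~m), so BSL, BIT and BIF differ only in
     * which operand supplies the mask.
     */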
10795         gen_gvec_fn4(s, is_q, rd, rd, rn, rm, tcg_gen_gvec_bitsel, 0);
10796         return;
10797     case 6: /* BIT, bitwise insert if true */
10798         gen_gvec_fn4(s, is_q, rd, rm, rn, rd, tcg_gen_gvec_bitsel, 0);
10799         return;
10800     case 7: /* BIF, bitwise insert if false */
10801         gen_gvec_fn4(s, is_q, rd, rm, rd, rn, tcg_gen_gvec_bitsel, 0);
10802         return;
10803 
10804     default:
10805         g_assert_not_reached();
10806     }
10807 }
10808 
10809 /* Pairwise op subgroup of C3.6.16.
10810  *
10811  * This is called either directly or via handle_3same_float for float
10812  * pairwise operations, where the opcode and size are calculated differently.
10813  */
10814 static void handle_simd_3same_pair(DisasContext *s, int is_q, int u, int opcode,
10815                                    int size, int rn, int rm, int rd)
10816 {
10817     TCGv_ptr fpst;
10818     int pass;
10819 
10820     /* Floating point operations need fpst */
10821     if (opcode >= 0x58) {
10822         fpst = fpstatus_ptr(FPST_FPCR);
10823     } else {
10824         fpst = NULL;
10825     }
10826 
10827     if (!fp_access_check(s)) {
10828         return;
10829     }
10830 
10831     /* These operations work on the concatenated rm:rn, with each pair of
10832      * adjacent elements being operated on to produce an element in the result.
10833      */
10834     if (size == 3) {
10835         TCGv_i64 tcg_res[2];
10836 
10837         for (pass = 0; pass < 2; pass++) {
10838             TCGv_i64 tcg_op1 = tcg_temp_new_i64();
10839             TCGv_i64 tcg_op2 = tcg_temp_new_i64();
10840             int passreg = (pass == 0) ? rn : rm;
10841 
10842             read_vec_element(s, tcg_op1, passreg, 0, MO_64);
10843             read_vec_element(s, tcg_op2, passreg, 1, MO_64);
10844             tcg_res[pass] = tcg_temp_new_i64();
10845 
10846             switch (opcode) {
10847             case 0x17: /* ADDP */
10848                 tcg_gen_add_i64(tcg_res[pass], tcg_op1, tcg_op2);
10849                 break;
10850             case 0x58: /* FMAXNMP */
10851                 gen_helper_vfp_maxnumd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10852                 break;
10853             case 0x5a: /* FADDP */
10854                 gen_helper_vfp_addd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10855                 break;
10856             case 0x5e: /* FMAXP */
10857                 gen_helper_vfp_maxd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10858                 break;
10859             case 0x78: /* FMINNMP */
10860                 gen_helper_vfp_minnumd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10861                 break;
10862             case 0x7e: /* FMINP */
10863                 gen_helper_vfp_mind(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10864                 break;
10865             default:
10866                 g_assert_not_reached();
10867             }
10868         }
10869 
10870         for (pass = 0; pass < 2; pass++) {
10871             write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
10872         }
10873     } else {
10874         int maxpass = is_q ? 4 : 2;
10875         TCGv_i32 tcg_res[4];
10876 
10877         for (pass = 0; pass < maxpass; pass++) {
10878             TCGv_i32 tcg_op1 = tcg_temp_new_i32();
10879             TCGv_i32 tcg_op2 = tcg_temp_new_i32();
10880             NeonGenTwoOpFn *genfn = NULL;
10881             int passreg = pass < (maxpass / 2) ? rn : rm;
10882             int passelt = (is_q && (pass & 1)) ? 2 : 0;
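            /*
             * The first half of the passes reads rn and the second half
             * rm; with Q the odd passes take the high pair of elements.
             */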
10883 
10884             read_vec_element_i32(s, tcg_op1, passreg, passelt, MO_32);
10885             read_vec_element_i32(s, tcg_op2, passreg, passelt + 1, MO_32);
10886             tcg_res[pass] = tcg_temp_new_i32();
10887 
10888             switch (opcode) {
10889             case 0x17: /* ADDP */
10890             {
10891                 static NeonGenTwoOpFn * const fns[3] = {
10892                     gen_helper_neon_padd_u8,
10893                     gen_helper_neon_padd_u16,
10894                     tcg_gen_add_i32,
10895                 };
10896                 genfn = fns[size];
10897                 break;
10898             }
10899             case 0x14: /* SMAXP, UMAXP */
10900             {
10901                 static NeonGenTwoOpFn * const fns[3][2] = {
10902                     { gen_helper_neon_pmax_s8, gen_helper_neon_pmax_u8 },
10903                     { gen_helper_neon_pmax_s16, gen_helper_neon_pmax_u16 },
10904                     { tcg_gen_smax_i32, tcg_gen_umax_i32 },
10905                 };
10906                 genfn = fns[size][u];
10907                 break;
10908             }
10909             case 0x15: /* SMINP, UMINP */
10910             {
10911                 static NeonGenTwoOpFn * const fns[3][2] = {
10912                     { gen_helper_neon_pmin_s8, gen_helper_neon_pmin_u8 },
10913                     { gen_helper_neon_pmin_s16, gen_helper_neon_pmin_u16 },
10914                     { tcg_gen_smin_i32, tcg_gen_umin_i32 },
10915                 };
10916                 genfn = fns[size][u];
10917                 break;
10918             }
10919             /* The FP operations are all on single floats (32 bit) */
10920             case 0x58: /* FMAXNMP */
10921                 gen_helper_vfp_maxnums(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10922                 break;
10923             case 0x5a: /* FADDP */
10924                 gen_helper_vfp_adds(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10925                 break;
10926             case 0x5e: /* FMAXP */
10927                 gen_helper_vfp_maxs(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10928                 break;
10929             case 0x78: /* FMINNMP */
10930                 gen_helper_vfp_minnums(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10931                 break;
10932             case 0x7e: /* FMINP */
10933                 gen_helper_vfp_mins(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10934                 break;
10935             default:
10936                 g_assert_not_reached();
10937             }
10938 
10939             /* The FP ops were called directly above; call genfn for the rest */
10940             if (genfn) {
10941                 genfn(tcg_res[pass], tcg_op1, tcg_op2);
10942             }
10943         }
10944 
10945         for (pass = 0; pass < maxpass; pass++) {
10946             write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_32);
10947         }
10948         clear_vec_high(s, is_q, rd);
10949     }
10950 }
10951 
10952 /* Floating point op subgroup of C3.6.16. */
10953 static void disas_simd_3same_float(DisasContext *s, uint32_t insn)
10954 {
10955     /* For floating point ops, the U, size[1] and opcode bits
10956      * together indicate the operation. size[0] indicates single
10957      * or double.
10958      */
10959     int fpopcode = extract32(insn, 11, 5)
10960         | (extract32(insn, 23, 1) << 5)
10961         | (extract32(insn, 29, 1) << 6);
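    /* e.g. FABD: U 1, size 1x, opcode 0x1a -> fpopcode 0x7a */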
10962     int is_q = extract32(insn, 30, 1);
10963     int size = extract32(insn, 22, 1);
10964     int rm = extract32(insn, 16, 5);
10965     int rn = extract32(insn, 5, 5);
10966     int rd = extract32(insn, 0, 5);
10967 
10968     int datasize = is_q ? 128 : 64;
10969     int esize = 32 << size;
10970     int elements = datasize / esize;
10971 
10972     if (size == 1 && !is_q) {
10973         unallocated_encoding(s);
10974         return;
10975     }
10976 
10977     switch (fpopcode) {
10978     case 0x58: /* FMAXNMP */
10979     case 0x5a: /* FADDP */
10980     case 0x5e: /* FMAXP */
10981     case 0x78: /* FMINNMP */
10982     case 0x7e: /* FMINP */
10983         if (size && !is_q) {
10984             unallocated_encoding(s);
10985             return;
10986         }
10987         handle_simd_3same_pair(s, is_q, 0, fpopcode, size ? MO_64 : MO_32,
10988                                rn, rm, rd);
10989         return;
10990     case 0x1b: /* FMULX */
10991     case 0x1f: /* FRECPS */
10992     case 0x3f: /* FRSQRTS */
10993     case 0x5d: /* FACGE */
10994     case 0x7d: /* FACGT */
10995     case 0x19: /* FMLA */
10996     case 0x39: /* FMLS */
10997     case 0x18: /* FMAXNM */
10998     case 0x1a: /* FADD */
10999     case 0x1c: /* FCMEQ */
11000     case 0x1e: /* FMAX */
11001     case 0x38: /* FMINNM */
11002     case 0x3a: /* FSUB */
11003     case 0x3e: /* FMIN */
11004     case 0x5b: /* FMUL */
11005     case 0x5c: /* FCMGE */
11006     case 0x5f: /* FDIV */
11007     case 0x7a: /* FABD */
11008     case 0x7c: /* FCMGT */
11009         if (!fp_access_check(s)) {
11010             return;
11011         }
11012         handle_3same_float(s, size, elements, fpopcode, rd, rn, rm);
11013         return;
11014 
11015     case 0x1d: /* FMLAL  */
11016     case 0x3d: /* FMLSL  */
11017     case 0x59: /* FMLAL2 */
11018     case 0x79: /* FMLSL2 */
11019         if (size & 1 || !dc_isar_feature(aa64_fhm, s)) {
11020             unallocated_encoding(s);
11021             return;
11022         }
11023         if (fp_access_check(s)) {
11024             int is_s = extract32(insn, 23, 1);
11025             int is_2 = extract32(insn, 29, 1);
11026             int data = (is_2 << 1) | is_s;
11027             tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
11028                                vec_full_reg_offset(s, rn),
11029                                vec_full_reg_offset(s, rm), cpu_env,
11030                                is_q ? 16 : 8, vec_full_reg_size(s),
11031                                data, gen_helper_gvec_fmlal_a64);
11032         }
11033         return;
11034 
11035     default:
11036         unallocated_encoding(s);
11037         return;
11038     }
11039 }
11040 
11041 /* Integer op subgroup of C3.6.16. */
11042 static void disas_simd_3same_int(DisasContext *s, uint32_t insn)
11043 {
11044     int is_q = extract32(insn, 30, 1);
11045     int u = extract32(insn, 29, 1);
11046     int size = extract32(insn, 22, 2);
11047     int opcode = extract32(insn, 11, 5);
11048     int rm = extract32(insn, 16, 5);
11049     int rn = extract32(insn, 5, 5);
11050     int rd = extract32(insn, 0, 5);
11051     int pass;
11052     TCGCond cond;
11053 
11054     switch (opcode) {
11055     case 0x13: /* MUL, PMUL */
11056         if (u && size != 0) {
11057             unallocated_encoding(s);
11058             return;
11059         }
11060         /* fall through */
11061     case 0x0: /* SHADD, UHADD */
11062     case 0x2: /* SRHADD, URHADD */
11063     case 0x4: /* SHSUB, UHSUB */
11064     case 0xc: /* SMAX, UMAX */
11065     case 0xd: /* SMIN, UMIN */
11066     case 0xe: /* SABD, UABD */
11067     case 0xf: /* SABA, UABA */
11068     case 0x12: /* MLA, MLS */
11069         if (size == 3) {
11070             unallocated_encoding(s);
11071             return;
11072         }
11073         break;
11074     case 0x16: /* SQDMULH, SQRDMULH */
11075         if (size == 0 || size == 3) {
11076             unallocated_encoding(s);
11077             return;
11078         }
11079         break;
11080     default:
11081         if (size == 3 && !is_q) {
11082             unallocated_encoding(s);
11083             return;
11084         }
11085         break;
11086     }
11087 
11088     if (!fp_access_check(s)) {
11089         return;
11090     }
11091 
11092     switch (opcode) {
11093     case 0x01: /* SQADD, UQADD */
11094         if (u) {
11095             gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_uqadd_qc, size);
11096         } else {
11097             gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sqadd_qc, size);
11098         }
11099         return;
11100     case 0x05: /* SQSUB, UQSUB */
11101         if (u) {
11102             gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_uqsub_qc, size);
11103         } else {
11104             gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sqsub_qc, size);
11105         }
11106         return;
11107     case 0x08: /* SSHL, USHL */
11108         if (u) {
11109             gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_ushl, size);
11110         } else {
11111             gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sshl, size);
11112         }
11113         return;
11114     case 0x0c: /* SMAX, UMAX */
11115         if (u) {
11116             gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_umax, size);
11117         } else {
11118             gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_smax, size);
11119         }
11120         return;
11121     case 0x0d: /* SMIN, UMIN */
11122         if (u) {
11123             gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_umin, size);
11124         } else {
11125             gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_smin, size);
11126         }
11127         return;
11128     case 0xe: /* SABD, UABD */
11129         if (u) {
11130             gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_uabd, size);
11131         } else {
11132             gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sabd, size);
11133         }
11134         return;
11135     case 0xf: /* SABA, UABA */
11136         if (u) {
11137             gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_uaba, size);
11138         } else {
11139             gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_saba, size);
11140         }
11141         return;
11142     case 0x10: /* ADD, SUB */
11143         if (u) {
11144             gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_sub, size);
11145         } else {
11146             gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_add, size);
11147         }
11148         return;
11149     case 0x13: /* MUL, PMUL */
11150         if (!u) { /* MUL */
11151             gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_mul, size);
11152         } else {  /* PMUL */
11153             gen_gvec_op3_ool(s, is_q, rd, rn, rm, 0, gen_helper_gvec_pmul_b);
11154         }
11155         return;
11156     case 0x12: /* MLA, MLS */
11157         if (u) {
11158             gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_mls, size);
11159         } else {
11160             gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_mla, size);
11161         }
11162         return;
11163     case 0x16: /* SQDMULH, SQRDMULH */
11164         {
11165             static gen_helper_gvec_3_ptr * const fns[2][2] = {
11166                 { gen_helper_neon_sqdmulh_h, gen_helper_neon_sqrdmulh_h },
11167                 { gen_helper_neon_sqdmulh_s, gen_helper_neon_sqrdmulh_s },
11168             };
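            /*
             * Only sizes 1 (MO_16) and 2 (MO_32) reach here, so size - 1
             * selects the row; u picks the rounding (SQRDMULH) variant.
             */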
11169             gen_gvec_op3_qc(s, is_q, rd, rn, rm, fns[size - 1][u]);
11170         }
11171         return;
11172     case 0x11:
11173         if (!u) { /* CMTST */
11174             gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_cmtst, size);
11175             return;
11176         }
11177         /* else CMEQ */
11178         cond = TCG_COND_EQ;
11179         goto do_gvec_cmp;
11180     case 0x06: /* CMGT, CMHI */
11181         cond = u ? TCG_COND_GTU : TCG_COND_GT;
11182         goto do_gvec_cmp;
11183     case 0x07: /* CMGE, CMHS */
11184         cond = u ? TCG_COND_GEU : TCG_COND_GE;
11185     do_gvec_cmp:
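        /*
         * The gvec compare writes all-ones to a destination element when
         * the comparison is true and all-zeros when it is false, which
         * is exactly the AdvSIMD compare result format.
         */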
11186         tcg_gen_gvec_cmp(cond, size, vec_full_reg_offset(s, rd),
11187                          vec_full_reg_offset(s, rn),
11188                          vec_full_reg_offset(s, rm),
11189                          is_q ? 16 : 8, vec_full_reg_size(s));
11190         return;
11191     }
11192 
11193     if (size == 3) {
11194         assert(is_q);
11195         for (pass = 0; pass < 2; pass++) {
11196             TCGv_i64 tcg_op1 = tcg_temp_new_i64();
11197             TCGv_i64 tcg_op2 = tcg_temp_new_i64();
11198             TCGv_i64 tcg_res = tcg_temp_new_i64();
11199 
11200             read_vec_element(s, tcg_op1, rn, pass, MO_64);
11201             read_vec_element(s, tcg_op2, rm, pass, MO_64);
11202 
11203             handle_3same_64(s, opcode, u, tcg_res, tcg_op1, tcg_op2);
11204 
11205             write_vec_element(s, tcg_res, rd, pass, MO_64);
11206         }
11207     } else {
11208         for (pass = 0; pass < (is_q ? 4 : 2); pass++) {
11209             TCGv_i32 tcg_op1 = tcg_temp_new_i32();
11210             TCGv_i32 tcg_op2 = tcg_temp_new_i32();
11211             TCGv_i32 tcg_res = tcg_temp_new_i32();
11212             NeonGenTwoOpFn *genfn = NULL;
11213             NeonGenTwoOpEnvFn *genenvfn = NULL;
11214 
11215             read_vec_element_i32(s, tcg_op1, rn, pass, MO_32);
11216             read_vec_element_i32(s, tcg_op2, rm, pass, MO_32);
11217 
11218             switch (opcode) {
11219             case 0x0: /* SHADD, UHADD */
11220             {
11221                 static NeonGenTwoOpFn * const fns[3][2] = {
11222                     { gen_helper_neon_hadd_s8, gen_helper_neon_hadd_u8 },
11223                     { gen_helper_neon_hadd_s16, gen_helper_neon_hadd_u16 },
11224                     { gen_helper_neon_hadd_s32, gen_helper_neon_hadd_u32 },
11225                 };
11226                 genfn = fns[size][u];
11227                 break;
11228             }
11229             case 0x2: /* SRHADD, URHADD */
11230             {
11231                 static NeonGenTwoOpFn * const fns[3][2] = {
11232                     { gen_helper_neon_rhadd_s8, gen_helper_neon_rhadd_u8 },
11233                     { gen_helper_neon_rhadd_s16, gen_helper_neon_rhadd_u16 },
11234                     { gen_helper_neon_rhadd_s32, gen_helper_neon_rhadd_u32 },
11235                 };
11236                 genfn = fns[size][u];
11237                 break;
11238             }
11239             case 0x4: /* SHSUB, UHSUB */
11240             {
11241                 static NeonGenTwoOpFn * const fns[3][2] = {
11242                     { gen_helper_neon_hsub_s8, gen_helper_neon_hsub_u8 },
11243                     { gen_helper_neon_hsub_s16, gen_helper_neon_hsub_u16 },
11244                     { gen_helper_neon_hsub_s32, gen_helper_neon_hsub_u32 },
11245                 };
11246                 genfn = fns[size][u];
11247                 break;
11248             }
11249             case 0x9: /* SQSHL, UQSHL */
11250             {
11251                 static NeonGenTwoOpEnvFn * const fns[3][2] = {
11252                     { gen_helper_neon_qshl_s8, gen_helper_neon_qshl_u8 },
11253                     { gen_helper_neon_qshl_s16, gen_helper_neon_qshl_u16 },
11254                     { gen_helper_neon_qshl_s32, gen_helper_neon_qshl_u32 },
11255                 };
11256                 genenvfn = fns[size][u];
11257                 break;
11258             }
11259             case 0xa: /* SRSHL, URSHL */
11260             {
11261                 static NeonGenTwoOpFn * const fns[3][2] = {
11262                     { gen_helper_neon_rshl_s8, gen_helper_neon_rshl_u8 },
11263                     { gen_helper_neon_rshl_s16, gen_helper_neon_rshl_u16 },
11264                     { gen_helper_neon_rshl_s32, gen_helper_neon_rshl_u32 },
11265                 };
11266                 genfn = fns[size][u];
11267                 break;
11268             }
11269             case 0xb: /* SQRSHL, UQRSHL */
11270             {
11271                 static NeonGenTwoOpEnvFn * const fns[3][2] = {
11272                     { gen_helper_neon_qrshl_s8, gen_helper_neon_qrshl_u8 },
11273                     { gen_helper_neon_qrshl_s16, gen_helper_neon_qrshl_u16 },
11274                     { gen_helper_neon_qrshl_s32, gen_helper_neon_qrshl_u32 },
11275                 };
11276                 genenvfn = fns[size][u];
11277                 break;
11278             }
11279             default:
11280                 g_assert_not_reached();
11281             }
11282 
11283             if (genenvfn) {
11284                 genenvfn(tcg_res, cpu_env, tcg_op1, tcg_op2);
11285             } else {
11286                 genfn(tcg_res, tcg_op1, tcg_op2);
11287             }
11288 
11289             write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
11290         }
11291     }
11292     clear_vec_high(s, is_q, rd);
11293 }
11294 
11295 /* AdvSIMD three same
11296  *  31  30  29  28       24 23  22  21 20  16 15    11  10 9    5 4    0
11297  * +---+---+---+-----------+------+---+------+--------+---+------+------+
11298  * | 0 | Q | U | 0 1 1 1 0 | size | 1 |  Rm  | opcode | 1 |  Rn  |  Rd  |
11299  * +---+---+---+-----------+------+---+------+--------+---+------+------+
11300  */
11301 static void disas_simd_three_reg_same(DisasContext *s, uint32_t insn)
11302 {
11303     int opcode = extract32(insn, 11, 5);
11304 
11305     switch (opcode) {
11306     case 0x3: /* logic ops */
11307         disas_simd_3same_logic(s, insn);
11308         break;
11309     case 0x17: /* ADDP */
11310     case 0x14: /* SMAXP, UMAXP */
11311     case 0x15: /* SMINP, UMINP */
11312     {
11313         /* Pairwise operations */
11314         int is_q = extract32(insn, 30, 1);
11315         int u = extract32(insn, 29, 1);
11316         int size = extract32(insn, 22, 2);
11317         int rm = extract32(insn, 16, 5);
11318         int rn = extract32(insn, 5, 5);
11319         int rd = extract32(insn, 0, 5);
11320         if (opcode == 0x17) {
11321             if (u || (size == 3 && !is_q)) {
11322                 unallocated_encoding(s);
11323                 return;
11324             }
11325         } else {
11326             if (size == 3) {
11327                 unallocated_encoding(s);
11328                 return;
11329             }
11330         }
11331         handle_simd_3same_pair(s, is_q, u, opcode, size, rn, rm, rd);
11332         break;
11333     }
11334     case 0x18 ... 0x31:
11335         /* floating point ops, size[1] and U are part of the opcode */
11336         disas_simd_3same_float(s, insn);
11337         break;
11338     default:
11339         disas_simd_3same_int(s, insn);
11340         break;
11341     }
11342 }
11343 
11344 /*
11345  * Advanced SIMD three same (ARMv8.2 FP16 variants)
11346  *
11347  *  31  30  29  28       24 23  22 21 20  16 15 14 13    11 10  9    5 4    0
11348  * +---+---+---+-----------+---------+------+-----+--------+---+------+------+
11349  * | 0 | Q | U | 0 1 1 1 0 | a | 1 0 |  Rm  | 0 0 | opcode | 1 |  Rn  |  Rd  |
11350  * +---+---+---+-----------+---------+------+-----+--------+---+------+------+
11351  *
11352  * This includes FMULX, FCMEQ (register), FRECPS, FRSQRTS, FCMGE
11353  * (register), FACGE, FABD, FCMGT (register) and FACGT.
11354  *
11355  */
11356 static void disas_simd_three_reg_same_fp16(DisasContext *s, uint32_t insn)
11357 {
11358     int opcode = extract32(insn, 11, 3);
11359     int u = extract32(insn, 29, 1);
11360     int a = extract32(insn, 23, 1);
11361     int is_q = extract32(insn, 30, 1);
11362     int rm = extract32(insn, 16, 5);
11363     int rn = extract32(insn, 5, 5);
11364     int rd = extract32(insn, 0, 5);
11365     /*
11366      * For these floating point ops, the U, a and opcode bits
11367      * together indicate the operation.
11368      */
11369     int fpopcode = opcode | (a << 3) | (u << 4);
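    /*
     * For example FADDP (U=1, a=0, opcode=2) gives fpopcode 0x12, and
     * FACGT (U=1, a=1, opcode=5) gives 0x1d.
     */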
11370     int datasize = is_q ? 128 : 64;
11371     int elements = datasize / 16;
11372     bool pairwise;
11373     TCGv_ptr fpst;
11374     int pass;
11375 
11376     switch (fpopcode) {
11377     case 0x0: /* FMAXNM */
11378     case 0x1: /* FMLA */
11379     case 0x2: /* FADD */
11380     case 0x3: /* FMULX */
11381     case 0x4: /* FCMEQ */
11382     case 0x6: /* FMAX */
11383     case 0x7: /* FRECPS */
11384     case 0x8: /* FMINNM */
11385     case 0x9: /* FMLS */
11386     case 0xa: /* FSUB */
11387     case 0xe: /* FMIN */
11388     case 0xf: /* FRSQRTS */
11389     case 0x13: /* FMUL */
11390     case 0x14: /* FCMGE */
11391     case 0x15: /* FACGE */
11392     case 0x17: /* FDIV */
11393     case 0x1a: /* FABD */
11394     case 0x1c: /* FCMGT */
11395     case 0x1d: /* FACGT */
11396         pairwise = false;
11397         break;
11398     case 0x10: /* FMAXNMP */
11399     case 0x12: /* FADDP */
11400     case 0x16: /* FMAXP */
11401     case 0x18: /* FMINNMP */
11402     case 0x1e: /* FMINP */
11403         pairwise = true;
11404         break;
11405     default:
11406         unallocated_encoding(s);
11407         return;
11408     }
11409 
11410     if (!dc_isar_feature(aa64_fp16, s)) {
11411         unallocated_encoding(s);
11412         return;
11413     }
11414 
11415     if (!fp_access_check(s)) {
11416         return;
11417     }
11418 
11419     fpst = fpstatus_ptr(FPST_FPCR_F16);
11420 
11421     if (pairwise) {
11422         int maxpass = is_q ? 8 : 4;
11423         TCGv_i32 tcg_op1 = tcg_temp_new_i32();
11424         TCGv_i32 tcg_op2 = tcg_temp_new_i32();
11425         TCGv_i32 tcg_res[8];
11426 
11427         for (pass = 0; pass < maxpass; pass++) {
11428             int passreg = pass < (maxpass / 2) ? rn : rm;
11429             int passelt = (pass << 1) & (maxpass - 1);
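            /*
             * The first maxpass/2 passes pair adjacent elements of Rn,
             * the remainder pair elements of Rm; passelt wraps, so for
             * a 128-bit op pass 4 reads elements 0 and 1 of Rm.
             */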
11430 
11431             read_vec_element_i32(s, tcg_op1, passreg, passelt, MO_16);
11432             read_vec_element_i32(s, tcg_op2, passreg, passelt + 1, MO_16);
11433             tcg_res[pass] = tcg_temp_new_i32();
11434 
11435             switch (fpopcode) {
11436             case 0x10: /* FMAXNMP */
11437                 gen_helper_advsimd_maxnumh(tcg_res[pass], tcg_op1, tcg_op2,
11438                                            fpst);
11439                 break;
11440             case 0x12: /* FADDP */
11441                 gen_helper_advsimd_addh(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11442                 break;
11443             case 0x16: /* FMAXP */
11444                 gen_helper_advsimd_maxh(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11445                 break;
11446             case 0x18: /* FMINNMP */
11447                 gen_helper_advsimd_minnumh(tcg_res[pass], tcg_op1, tcg_op2,
11448                                            fpst);
11449                 break;
11450             case 0x1e: /* FMINP */
11451                 gen_helper_advsimd_minh(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11452                 break;
11453             default:
11454                 g_assert_not_reached();
11455             }
11456         }
11457 
11458         for (pass = 0; pass < maxpass; pass++) {
11459             write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_16);
11460         }
11461     } else {
11462         for (pass = 0; pass < elements; pass++) {
11463             TCGv_i32 tcg_op1 = tcg_temp_new_i32();
11464             TCGv_i32 tcg_op2 = tcg_temp_new_i32();
11465             TCGv_i32 tcg_res = tcg_temp_new_i32();
11466 
11467             read_vec_element_i32(s, tcg_op1, rn, pass, MO_16);
11468             read_vec_element_i32(s, tcg_op2, rm, pass, MO_16);
11469 
11470             switch (fpopcode) {
11471             case 0x0: /* FMAXNM */
11472                 gen_helper_advsimd_maxnumh(tcg_res, tcg_op1, tcg_op2, fpst);
11473                 break;
11474             case 0x1: /* FMLA */
11475                 read_vec_element_i32(s, tcg_res, rd, pass, MO_16);
11476                 gen_helper_advsimd_muladdh(tcg_res, tcg_op1, tcg_op2, tcg_res,
11477                                            fpst);
11478                 break;
11479             case 0x2: /* FADD */
11480                 gen_helper_advsimd_addh(tcg_res, tcg_op1, tcg_op2, fpst);
11481                 break;
11482             case 0x3: /* FMULX */
11483                 gen_helper_advsimd_mulxh(tcg_res, tcg_op1, tcg_op2, fpst);
11484                 break;
11485             case 0x4: /* FCMEQ */
11486                 gen_helper_advsimd_ceq_f16(tcg_res, tcg_op1, tcg_op2, fpst);
11487                 break;
11488             case 0x6: /* FMAX */
11489                 gen_helper_advsimd_maxh(tcg_res, tcg_op1, tcg_op2, fpst);
11490                 break;
11491             case 0x7: /* FRECPS */
11492                 gen_helper_recpsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
11493                 break;
11494             case 0x8: /* FMINNM */
11495                 gen_helper_advsimd_minnumh(tcg_res, tcg_op1, tcg_op2, fpst);
11496                 break;
11497             case 0x9: /* FMLS */
11498                 /* As usual for ARM, separate negation for fused multiply-add */
11499                 tcg_gen_xori_i32(tcg_op1, tcg_op1, 0x8000);
11500                 read_vec_element_i32(s, tcg_res, rd, pass, MO_16);
11501                 gen_helper_advsimd_muladdh(tcg_res, tcg_op1, tcg_op2, tcg_res,
11502                                            fpst);
11503                 break;
11504             case 0xa: /* FSUB */
11505                 gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
11506                 break;
11507             case 0xe: /* FMIN */
11508                 gen_helper_advsimd_minh(tcg_res, tcg_op1, tcg_op2, fpst);
11509                 break;
11510             case 0xf: /* FRSQRTS */
11511                 gen_helper_rsqrtsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
11512                 break;
11513             case 0x13: /* FMUL */
11514                 gen_helper_advsimd_mulh(tcg_res, tcg_op1, tcg_op2, fpst);
11515                 break;
11516             case 0x14: /* FCMGE */
11517                 gen_helper_advsimd_cge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
11518                 break;
11519             case 0x15: /* FACGE */
11520                 gen_helper_advsimd_acge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
11521                 break;
11522             case 0x17: /* FDIV */
11523                 gen_helper_advsimd_divh(tcg_res, tcg_op1, tcg_op2, fpst);
11524                 break;
11525             case 0x1a: /* FABD */
11526                 gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
11527                 tcg_gen_andi_i32(tcg_res, tcg_res, 0x7fff);
11528                 break;
11529             case 0x1c: /* FCMGT */
11530                 gen_helper_advsimd_cgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
11531                 break;
11532             case 0x1d: /* FACGT */
11533                 gen_helper_advsimd_acgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
11534                 break;
11535             default:
11536                 g_assert_not_reached();
11537             }
11538 
11539             write_vec_element_i32(s, tcg_res, rd, pass, MO_16);
11540         }
11541     }
11542 
11543     clear_vec_high(s, is_q, rd);
11544 }
11545 
11546 /* AdvSIMD three same extra
11547  *  31   30  29 28       24 23  22  21 20  16  15 14    11  10 9  5 4  0
11548  * +---+---+---+-----------+------+---+------+---+--------+---+----+----+
11549  * | 0 | Q | U | 0 1 1 1 0 | size | 0 |  Rm  | 1 | opcode | 1 | Rn | Rd |
11550  * +---+---+---+-----------+------+---+------+---+--------+---+----+----+
11551  */
11552 static void disas_simd_three_reg_same_extra(DisasContext *s, uint32_t insn)
11553 {
11554     int rd = extract32(insn, 0, 5);
11555     int rn = extract32(insn, 5, 5);
11556     int opcode = extract32(insn, 11, 4);
11557     int rm = extract32(insn, 16, 5);
11558     int size = extract32(insn, 22, 2);
11559     bool u = extract32(insn, 29, 1);
11560     bool is_q = extract32(insn, 30, 1);
11561     bool feature;
11562     int rot;
11563 
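    /*
     * U and the 4-bit opcode are combined into one 5-bit key so that
     * encodings that differ only in U share a single switch.
     */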
11564     switch (u * 16 + opcode) {
11565     case 0x10: /* SQRDMLAH (vector) */
11566     case 0x11: /* SQRDMLSH (vector) */
11567         if (size != 1 && size != 2) {
11568             unallocated_encoding(s);
11569             return;
11570         }
11571         feature = dc_isar_feature(aa64_rdm, s);
11572         break;
11573     case 0x02: /* SDOT (vector) */
11574     case 0x12: /* UDOT (vector) */
11575         if (size != MO_32) {
11576             unallocated_encoding(s);
11577             return;
11578         }
11579         feature = dc_isar_feature(aa64_dp, s);
11580         break;
11581     case 0x03: /* USDOT */
11582         if (size != MO_32) {
11583             unallocated_encoding(s);
11584             return;
11585         }
11586         feature = dc_isar_feature(aa64_i8mm, s);
11587         break;
11588     case 0x04: /* SMMLA */
11589     case 0x14: /* UMMLA */
11590     case 0x05: /* USMMLA */
11591         if (!is_q || size != MO_32) {
11592             unallocated_encoding(s);
11593             return;
11594         }
11595         feature = dc_isar_feature(aa64_i8mm, s);
11596         break;
11597     case 0x18: /* FCMLA, #0 */
11598     case 0x19: /* FCMLA, #90 */
11599     case 0x1a: /* FCMLA, #180 */
11600     case 0x1b: /* FCMLA, #270 */
11601     case 0x1c: /* FCADD, #90 */
11602     case 0x1e: /* FCADD, #270 */
11603         if (size == 0
11604             || (size == 1 && !dc_isar_feature(aa64_fp16, s))
11605             || (size == 3 && !is_q)) {
11606             unallocated_encoding(s);
11607             return;
11608         }
11609         feature = dc_isar_feature(aa64_fcma, s);
11610         break;
11611     case 0x1d: /* BFMMLA */
11612         if (size != MO_16 || !is_q) {
11613             unallocated_encoding(s);
11614             return;
11615         }
11616         feature = dc_isar_feature(aa64_bf16, s);
11617         break;
11618     case 0x1f:
11619         switch (size) {
11620         case 1: /* BFDOT */
11621         case 3: /* BFMLAL{B,T} */
11622             feature = dc_isar_feature(aa64_bf16, s);
11623             break;
11624         default:
11625             unallocated_encoding(s);
11626             return;
11627         }
11628         break;
11629     default:
11630         unallocated_encoding(s);
11631         return;
11632     }
11633     if (!feature) {
11634         unallocated_encoding(s);
11635         return;
11636     }
11637     if (!fp_access_check(s)) {
11638         return;
11639     }
11640 
11641     switch (opcode) {
11642     case 0x0: /* SQRDMLAH (vector) */
11643         gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sqrdmlah_qc, size);
11644         return;
11645 
11646     case 0x1: /* SQRDMLSH (vector) */
11647         gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sqrdmlsh_qc, size);
11648         return;
11649 
11650     case 0x2: /* SDOT / UDOT */
11651         gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, 0,
11652                          u ? gen_helper_gvec_udot_b : gen_helper_gvec_sdot_b);
11653         return;
11654 
11655     case 0x3: /* USDOT */
11656         gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, 0, gen_helper_gvec_usdot_b);
11657         return;
11658 
11659     case 0x04: /* SMMLA, UMMLA */
11660         gen_gvec_op4_ool(s, 1, rd, rn, rm, rd, 0,
11661                          u ? gen_helper_gvec_ummla_b
11662                          : gen_helper_gvec_smmla_b);
11663         return;
11664     case 0x05: /* USMMLA */
11665         gen_gvec_op4_ool(s, 1, rd, rn, rm, rd, 0, gen_helper_gvec_usmmla_b);
11666         return;
11667 
11668     case 0x8: /* FCMLA, #0 */
11669     case 0x9: /* FCMLA, #90 */
11670     case 0xa: /* FCMLA, #180 */
11671     case 0xb: /* FCMLA, #270 */
11672         rot = extract32(opcode, 0, 2);
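        /* opcode bits [1:0] select the rotation: #0, #90, #180 or #270 */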
11673         switch (size) {
11674         case 1:
11675             gen_gvec_op4_fpst(s, is_q, rd, rn, rm, rd, true, rot,
11676                               gen_helper_gvec_fcmlah);
11677             break;
11678         case 2:
11679             gen_gvec_op4_fpst(s, is_q, rd, rn, rm, rd, false, rot,
11680                               gen_helper_gvec_fcmlas);
11681             break;
11682         case 3:
11683             gen_gvec_op4_fpst(s, is_q, rd, rn, rm, rd, false, rot,
11684                               gen_helper_gvec_fcmlad);
11685             break;
11686         default:
11687             g_assert_not_reached();
11688         }
11689         return;
11690 
11691     case 0xc: /* FCADD, #90 */
11692     case 0xe: /* FCADD, #270 */
11693         rot = extract32(opcode, 1, 1);
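        /* opcode bit 1 selects the rotation: 0 for #90, 1 for #270 */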
11694         switch (size) {
11695         case 1:
11696             gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot,
11697                               gen_helper_gvec_fcaddh);
11698             break;
11699         case 2:
11700             gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot,
11701                               gen_helper_gvec_fcadds);
11702             break;
11703         case 3:
11704             gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot,
11705                               gen_helper_gvec_fcaddd);
11706             break;
11707         default:
11708             g_assert_not_reached();
11709         }
11710         return;
11711 
11712     case 0xd: /* BFMMLA */
11713         gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, 0, gen_helper_gvec_bfmmla);
11714         return;
11715     case 0xf:
11716         switch (size) {
11717         case 1: /* BFDOT */
11718             gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, 0, gen_helper_gvec_bfdot);
11719             break;
11720         case 3: /* BFMLAL{B,T} */
11721             gen_gvec_op4_fpst(s, 1, rd, rn, rm, rd, false, is_q,
11722                               gen_helper_gvec_bfmlal);
11723             break;
11724         default:
11725             g_assert_not_reached();
11726         }
11727         return;
11728 
11729     default:
11730         g_assert_not_reached();
11731     }
11732 }
11733 
11734 static void handle_2misc_widening(DisasContext *s, int opcode, bool is_q,
11735                                   int size, int rn, int rd)
11736 {
11737     /* Handle 2-reg-misc ops which are widening (so each size element
11738      * in the source becomes a 2*size element in the destination).
11739      * The only instruction like this is FCVTL.
11740      */
11741     int pass;
11742 
11743     if (size == 3) {
11744         /* 32 -> 64 bit fp conversion */
11745         TCGv_i64 tcg_res[2];
11746         int srcelt = is_q ? 2 : 0;
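        /*
         * FCVTL2 (is_q) widens the high half of the source, so start
         * at element 2 here (and at element 4 in the half-precision
         * case below).
         */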
11747 
11748         for (pass = 0; pass < 2; pass++) {
11749             TCGv_i32 tcg_op = tcg_temp_new_i32();
11750             tcg_res[pass] = tcg_temp_new_i64();
11751 
11752             read_vec_element_i32(s, tcg_op, rn, srcelt + pass, MO_32);
11753             gen_helper_vfp_fcvtds(tcg_res[pass], tcg_op, cpu_env);
11754         }
11755         for (pass = 0; pass < 2; pass++) {
11756             write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
11757         }
11758     } else {
11759         /* 16 -> 32 bit fp conversion */
11760         int srcelt = is_q ? 4 : 0;
11761         TCGv_i32 tcg_res[4];
11762         TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
11763         TCGv_i32 ahp = get_ahp_flag();
11764 
11765         for (pass = 0; pass < 4; pass++) {
11766             tcg_res[pass] = tcg_temp_new_i32();
11767 
11768             read_vec_element_i32(s, tcg_res[pass], rn, srcelt + pass, MO_16);
11769             gen_helper_vfp_fcvt_f16_to_f32(tcg_res[pass], tcg_res[pass],
11770                                            fpst, ahp);
11771         }
11772         for (pass = 0; pass < 4; pass++) {
11773             write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_32);
11774         }
11775     }
11776 }
11777 
11778 static void handle_rev(DisasContext *s, int opcode, bool u,
11779                        bool is_q, int size, int rn, int rd)
11780 {
11781     int op = (opcode << 1) | u;
11782     int opsz = op + size;
11783     int grp_size = 3 - opsz;
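    /*
     * grp_size is log2 of the number of elements in each reversed
     * group: e.g. REV32 on halfwords swaps pairs (grp_size 1), while
     * REV64 on bytes reverses groups of eight (grp_size 3).
     */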
11784     int dsize = is_q ? 128 : 64;
11785     int i;
11786 
11787     if (opsz >= 3) {
11788         unallocated_encoding(s);
11789         return;
11790     }
11791 
11792     if (!fp_access_check(s)) {
11793         return;
11794     }
11795 
11796     if (size == 0) {
11797         /* Special case bytes, use bswap op on each group of elements */
11798         int groups = dsize / (8 << grp_size);
11799 
11800         for (i = 0; i < groups; i++) {
11801             TCGv_i64 tcg_tmp = tcg_temp_new_i64();
11802 
11803             read_vec_element(s, tcg_tmp, rn, i, grp_size);
11804             switch (grp_size) {
11805             case MO_16:
11806                 tcg_gen_bswap16_i64(tcg_tmp, tcg_tmp, TCG_BSWAP_IZ);
11807                 break;
11808             case MO_32:
11809                 tcg_gen_bswap32_i64(tcg_tmp, tcg_tmp, TCG_BSWAP_IZ);
11810                 break;
11811             case MO_64:
11812                 tcg_gen_bswap64_i64(tcg_tmp, tcg_tmp);
11813                 break;
11814             default:
11815                 g_assert_not_reached();
11816             }
11817             write_vec_element(s, tcg_tmp, rd, i, grp_size);
11818         }
11819         clear_vec_high(s, is_q, rd);
11820     } else {
11821         int revmask = (1 << grp_size) - 1;
11822         int esize = 8 << size;
11823         int elements = dsize / esize;
11824         TCGv_i64 tcg_rn = tcg_temp_new_i64();
11825         TCGv_i64 tcg_rd[2];
11826 
11827         for (i = 0; i < 2; i++) {
11828             tcg_rd[i] = tcg_temp_new_i64();
11829             tcg_gen_movi_i64(tcg_rd[i], 0);
11830         }
11831 
11832         for (i = 0; i < elements; i++) {
11833             int e_rev = (i & 0xf) ^ revmask;
11834             int w = (e_rev * esize) / 64;
11835             int o = (e_rev * esize) % 64;
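            /*
             * e_rev is the element index with its position within the
             * group reversed; w and o locate the destination 64-bit
             * word and the bit offset within it.
             */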
11836 
11837             read_vec_element(s, tcg_rn, rn, i, size);
11838             tcg_gen_deposit_i64(tcg_rd[w], tcg_rd[w], tcg_rn, o, esize);
11839         }
11840 
11841         for (i = 0; i < 2; i++) {
11842             write_vec_element(s, tcg_rd[i], rd, i, MO_64);
11843         }
11844         clear_vec_high(s, true, rd);
11845     }
11846 }
11847 
11848 static void handle_2misc_pairwise(DisasContext *s, int opcode, bool u,
11849                                   bool is_q, int size, int rn, int rd)
11850 {
11851     /* Implement the pairwise operations from 2-misc:
11852      * SADDLP, UADDLP, SADALP, UADALP.
11853      * These all add pairs of elements in the input to produce a
11854      * double-width result element in the output (possibly accumulating).
11855      */
11856     bool accum = (opcode == 0x6);
11857     int maxpass = is_q ? 2 : 1;
11858     int pass;
11859     TCGv_i64 tcg_res[2];
11860 
11861     if (size == 2) {
11862         /* 32 + 32 -> 64 op */
11863         MemOp memop = size + (u ? 0 : MO_SIGN);
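        /* u selects UADDLP/UADALP (zero-extend) vs SADDLP/SADALP (sign-extend) */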
11864 
11865         for (pass = 0; pass < maxpass; pass++) {
11866             TCGv_i64 tcg_op1 = tcg_temp_new_i64();
11867             TCGv_i64 tcg_op2 = tcg_temp_new_i64();
11868 
11869             tcg_res[pass] = tcg_temp_new_i64();
11870 
11871             read_vec_element(s, tcg_op1, rn, pass * 2, memop);
11872             read_vec_element(s, tcg_op2, rn, pass * 2 + 1, memop);
11873             tcg_gen_add_i64(tcg_res[pass], tcg_op1, tcg_op2);
11874             if (accum) {
11875                 read_vec_element(s, tcg_op1, rd, pass, MO_64);
11876                 tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
11877             }
11878         }
11879     } else {
11880         for (pass = 0; pass < maxpass; pass++) {
11881             TCGv_i64 tcg_op = tcg_temp_new_i64();
11882             NeonGenOne64OpFn *genfn;
11883             static NeonGenOne64OpFn * const fns[2][2] = {
11884                 { gen_helper_neon_addlp_s8,  gen_helper_neon_addlp_u8 },
11885                 { gen_helper_neon_addlp_s16, gen_helper_neon_addlp_u16 },
11886             };
11887 
11888             genfn = fns[size][u];
11889 
11890             tcg_res[pass] = tcg_temp_new_i64();
11891 
11892             read_vec_element(s, tcg_op, rn, pass, MO_64);
11893             genfn(tcg_res[pass], tcg_op);
11894 
11895             if (accum) {
11896                 read_vec_element(s, tcg_op, rd, pass, MO_64);
11897                 if (size == 0) {
11898                     gen_helper_neon_addl_u16(tcg_res[pass],
11899                                              tcg_res[pass], tcg_op);
11900                 } else {
11901                     gen_helper_neon_addl_u32(tcg_res[pass],
11902                                              tcg_res[pass], tcg_op);
11903                 }
11904             }
11905         }
11906     }
11907     if (!is_q) {
11908         tcg_res[1] = tcg_constant_i64(0);
11909     }
11910     for (pass = 0; pass < 2; pass++) {
11911         write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
11912     }
11913 }
11914 
11915 static void handle_shll(DisasContext *s, bool is_q, int size, int rn, int rd)
11916 {
11917     /* Implement SHLL and SHLL2 */
11918     int pass;
11919     int part = is_q ? 2 : 0;
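    /*
     * SHLL2 (is_q) widens the high half of the source; the shift
     * amount is fixed at the element width, 8 << size bits.
     */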
11920     TCGv_i64 tcg_res[2];
11921 
11922     for (pass = 0; pass < 2; pass++) {
11923         static NeonGenWidenFn * const widenfns[3] = {
11924             gen_helper_neon_widen_u8,
11925             gen_helper_neon_widen_u16,
11926             tcg_gen_extu_i32_i64,
11927         };
11928         NeonGenWidenFn *widenfn = widenfns[size];
11929         TCGv_i32 tcg_op = tcg_temp_new_i32();
11930 
11931         read_vec_element_i32(s, tcg_op, rn, part + pass, MO_32);
11932         tcg_res[pass] = tcg_temp_new_i64();
11933         widenfn(tcg_res[pass], tcg_op);
11934         tcg_gen_shli_i64(tcg_res[pass], tcg_res[pass], 8 << size);
11935     }
11936 
11937     for (pass = 0; pass < 2; pass++) {
11938         write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
11939     }
11940 }
11941 
11942 /* AdvSIMD two reg misc
11943  *   31  30  29 28       24 23  22 21       17 16    12 11 10 9    5 4    0
11944  * +---+---+---+-----------+------+-----------+--------+-----+------+------+
11945  * | 0 | Q | U | 0 1 1 1 0 | size | 1 0 0 0 0 | opcode | 1 0 |  Rn  |  Rd  |
11946  * +---+---+---+-----------+------+-----------+--------+-----+------+------+
11947  */
11948 static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn)
11949 {
11950     int size = extract32(insn, 22, 2);
11951     int opcode = extract32(insn, 12, 5);
11952     bool u = extract32(insn, 29, 1);
11953     bool is_q = extract32(insn, 30, 1);
11954     int rn = extract32(insn, 5, 5);
11955     int rd = extract32(insn, 0, 5);
11956     bool need_fpstatus = false;
11957     int rmode = -1;
11958     TCGv_i32 tcg_rmode;
11959     TCGv_ptr tcg_fpstatus;
11960 
11961     switch (opcode) {
11962     case 0x0: /* REV64, REV32 */
11963     case 0x1: /* REV16 */
11964         handle_rev(s, opcode, u, is_q, size, rn, rd);
11965         return;
11966     case 0x5: /* CNT, NOT, RBIT */
11967         if (u && size == 0) {
11968             /* NOT */
11969             break;
11970         } else if (u && size == 1) {
11971             /* RBIT */
11972             break;
11973         } else if (!u && size == 0) {
11974             /* CNT */
11975             break;
11976         }
11977         unallocated_encoding(s);
11978         return;
11979     case 0x12: /* XTN, XTN2, SQXTUN, SQXTUN2 */
11980     case 0x14: /* SQXTN, SQXTN2, UQXTN, UQXTN2 */
11981         if (size == 3) {
11982             unallocated_encoding(s);
11983             return;
11984         }
11985         if (!fp_access_check(s)) {
11986             return;
11987         }
11988 
11989         handle_2misc_narrow(s, false, opcode, u, is_q, size, rn, rd);
11990         return;
11991     case 0x4: /* CLS, CLZ */
11992         if (size == 3) {
11993             unallocated_encoding(s);
11994             return;
11995         }
11996         break;
11997     case 0x2: /* SADDLP, UADDLP */
11998     case 0x6: /* SADALP, UADALP */
11999         if (size == 3) {
12000             unallocated_encoding(s);
12001             return;
12002         }
12003         if (!fp_access_check(s)) {
12004             return;
12005         }
12006         handle_2misc_pairwise(s, opcode, u, is_q, size, rn, rd);
12007         return;
12008     case 0x13: /* SHLL, SHLL2 */
12009         if (u == 0 || size == 3) {
12010             unallocated_encoding(s);
12011             return;
12012         }
12013         if (!fp_access_check(s)) {
12014             return;
12015         }
12016         handle_shll(s, is_q, size, rn, rd);
12017         return;
12018     case 0xa: /* CMLT (zero) */
12019         if (u == 1) {
12020             unallocated_encoding(s);
12021             return;
12022         }
12023         /* fall through */
12024     case 0x8: /* CMGT, CMGE */
12025     case 0x9: /* CMEQ, CMLE */
12026     case 0xb: /* ABS, NEG */
12027         if (size == 3 && !is_q) {
12028             unallocated_encoding(s);
12029             return;
12030         }
12031         break;
12032     case 0x3: /* SUQADD, USQADD */
12033         if (size == 3 && !is_q) {
12034             unallocated_encoding(s);
12035             return;
12036         }
12037         if (!fp_access_check(s)) {
12038             return;
12039         }
12040         handle_2misc_satacc(s, false, u, is_q, size, rn, rd);
12041         return;
12042     case 0x7: /* SQABS, SQNEG */
12043         if (size == 3 && !is_q) {
12044             unallocated_encoding(s);
12045             return;
12046         }
12047         break;
12048     case 0xc ... 0xf:
12049     case 0x16 ... 0x1f:
12050     {
12051         /* Floating point: U, size[1] and opcode indicate operation;
12052          * size[0] indicates single or double precision.
12053          */
12054         int is_double = extract32(size, 0, 1);
12055         opcode |= (extract32(size, 1, 1) << 5) | (u << 6);
12056         size = is_double ? 3 : 2;
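        /*
         * For example FNEG is opcode=0xf with size[1]=1 and U=1, giving
         * a combined opcode of 0xf | 0x20 | 0x40 = 0x6f.
         */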
12057         switch (opcode) {
12058         case 0x2f: /* FABS */
12059         case 0x6f: /* FNEG */
12060             if (size == 3 && !is_q) {
12061                 unallocated_encoding(s);
12062                 return;
12063             }
12064             break;
12065         case 0x1d: /* SCVTF */
12066         case 0x5d: /* UCVTF */
12067         {
12068             bool is_signed = (opcode == 0x1d);
12069             int elements = is_double ? 2 : is_q ? 4 : 2;
12070             if (is_double && !is_q) {
12071                 unallocated_encoding(s);
12072                 return;
12073             }
12074             if (!fp_access_check(s)) {
12075                 return;
12076             }
12077             handle_simd_intfp_conv(s, rd, rn, elements, is_signed, 0, size);
12078             return;
12079         }
12080         case 0x2c: /* FCMGT (zero) */
12081         case 0x2d: /* FCMEQ (zero) */
12082         case 0x2e: /* FCMLT (zero) */
12083         case 0x6c: /* FCMGE (zero) */
12084         case 0x6d: /* FCMLE (zero) */
12085             if (size == 3 && !is_q) {
12086                 unallocated_encoding(s);
12087                 return;
12088             }
12089             handle_2misc_fcmp_zero(s, opcode, false, u, is_q, size, rn, rd);
12090             return;
12091         case 0x7f: /* FSQRT */
12092             if (size == 3 && !is_q) {
12093                 unallocated_encoding(s);
12094                 return;
12095             }
12096             break;
12097         case 0x1a: /* FCVTNS */
12098         case 0x1b: /* FCVTMS */
12099         case 0x3a: /* FCVTPS */
12100         case 0x3b: /* FCVTZS */
12101         case 0x5a: /* FCVTNU */
12102         case 0x5b: /* FCVTMU */
12103         case 0x7a: /* FCVTPU */
12104         case 0x7b: /* FCVTZU */
12105             need_fpstatus = true;
12106             rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
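            /*
             * Bits 5 and 0 of the combined opcode distinguish the
             * FCVT{N,M,P,Z} variants; assuming the usual FPROUNDING_*
             * values this maps N->TIEEVEN, M->NEGINF, P->POSINF and
             * Z->ZERO.
             */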
12107             if (size == 3 && !is_q) {
12108                 unallocated_encoding(s);
12109                 return;
12110             }
12111             break;
12112         case 0x5c: /* FCVTAU */
12113         case 0x1c: /* FCVTAS */
12114             need_fpstatus = true;
12115             rmode = FPROUNDING_TIEAWAY;
12116             if (size == 3 && !is_q) {
12117                 unallocated_encoding(s);
12118                 return;
12119             }
12120             break;
12121         case 0x3c: /* URECPE */
12122             if (size == 3) {
12123                 unallocated_encoding(s);
12124                 return;
12125             }
12126             /* fall through */
12127         case 0x3d: /* FRECPE */
12128         case 0x7d: /* FRSQRTE */
12129             if (size == 3 && !is_q) {
12130                 unallocated_encoding(s);
12131                 return;
12132             }
12133             if (!fp_access_check(s)) {
12134                 return;
12135             }
12136             handle_2misc_reciprocal(s, opcode, false, u, is_q, size, rn, rd);
12137             return;
12138         case 0x56: /* FCVTXN, FCVTXN2 */
12139             if (size == 2) {
12140                 unallocated_encoding(s);
12141                 return;
12142             }
12143             /* fall through */
12144         case 0x16: /* FCVTN, FCVTN2 */
12145             /* handle_2misc_narrow does a 2*size -> size operation, but these
12146              * instructions encode the source size rather than dest size.
12147              */
12148             if (!fp_access_check(s)) {
12149                 return;
12150             }
12151             handle_2misc_narrow(s, false, opcode, 0, is_q, size - 1, rn, rd);
12152             return;
12153         case 0x36: /* BFCVTN, BFCVTN2 */
12154             if (!dc_isar_feature(aa64_bf16, s) || size != 2) {
12155                 unallocated_encoding(s);
12156                 return;
12157             }
12158             if (!fp_access_check(s)) {
12159                 return;
12160             }
12161             handle_2misc_narrow(s, false, opcode, 0, is_q, size - 1, rn, rd);
12162             return;
12163         case 0x17: /* FCVTL, FCVTL2 */
12164             if (!fp_access_check(s)) {
12165                 return;
12166             }
12167             handle_2misc_widening(s, opcode, is_q, size, rn, rd);
12168             return;
12169         case 0x18: /* FRINTN */
12170         case 0x19: /* FRINTM */
12171         case 0x38: /* FRINTP */
12172         case 0x39: /* FRINTZ */
12173             rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
12174             /* fall through */
12175         case 0x59: /* FRINTX */
12176         case 0x79: /* FRINTI */
12177             need_fpstatus = true;
12178             if (size == 3 && !is_q) {
12179                 unallocated_encoding(s);
12180                 return;
12181             }
12182             break;
12183         case 0x58: /* FRINTA */
12184             rmode = FPROUNDING_TIEAWAY;
12185             need_fpstatus = true;
12186             if (size == 3 && !is_q) {
12187                 unallocated_encoding(s);
12188                 return;
12189             }
12190             break;
12191         case 0x7c: /* URSQRTE */
12192             if (size == 3) {
12193                 unallocated_encoding(s);
12194                 return;
12195             }
12196             break;
12197         case 0x1e: /* FRINT32Z */
12198         case 0x1f: /* FRINT64Z */
12199             rmode = FPROUNDING_ZERO;
12200             /* fall through */
12201         case 0x5e: /* FRINT32X */
12202         case 0x5f: /* FRINT64X */
12203             need_fpstatus = true;
12204             if ((size == 3 && !is_q) || !dc_isar_feature(aa64_frint, s)) {
12205                 unallocated_encoding(s);
12206                 return;
12207             }
12208             break;
12209         default:
12210             unallocated_encoding(s);
12211             return;
12212         }
12213         break;
12214     }
12215     default:
12216         unallocated_encoding(s);
12217         return;
12218     }
12219 
12220     if (!fp_access_check(s)) {
12221         return;
12222     }
12223 
12224     if (need_fpstatus || rmode >= 0) {
12225         tcg_fpstatus = fpstatus_ptr(FPST_FPCR);
12226     } else {
12227         tcg_fpstatus = NULL;
12228     }
12229     if (rmode >= 0) {
12230         tcg_rmode = gen_set_rmode(rmode, tcg_fpstatus);
12231     } else {
12232         tcg_rmode = NULL;
12233     }
12234 
12235     switch (opcode) {
12236     case 0x5:
12237         if (u && size == 0) { /* NOT */
12238             gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_not, 0);
12239             return;
12240         }
12241         break;
12242     case 0x8: /* CMGT, CMGE */
12243         if (u) {
12244             gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_cge0, size);
12245         } else {
12246             gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_cgt0, size);
12247         }
12248         return;
12249     case 0x9: /* CMEQ, CMLE */
12250         if (u) {
12251             gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_cle0, size);
12252         } else {
12253             gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_ceq0, size);
12254         }
12255         return;
12256     case 0xa: /* CMLT */
12257         gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_clt0, size);
12258         return;
12259     case 0xb:
12260         if (u) { /* ABS, NEG */
12261             gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_neg, size);
12262         } else {
12263             gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_abs, size);
12264         }
12265         return;
12266     }
12267 
12268     if (size == 3) {
12269         /* All 64-bit element operations can be shared with scalar 2misc */
12270         int pass;
12271 
12272         /* Coverity claims (size == 3 && !is_q) has been eliminated
12273          * from all paths leading to here.
12274          */
12275         tcg_debug_assert(is_q);
12276         for (pass = 0; pass < 2; pass++) {
12277             TCGv_i64 tcg_op = tcg_temp_new_i64();
12278             TCGv_i64 tcg_res = tcg_temp_new_i64();
12279 
12280             read_vec_element(s, tcg_op, rn, pass, MO_64);
12281 
12282             handle_2misc_64(s, opcode, u, tcg_res, tcg_op,
12283                             tcg_rmode, tcg_fpstatus);
12284 
12285             write_vec_element(s, tcg_res, rd, pass, MO_64);
12286         }
12287     } else {
12288         int pass;
12289 
12290         for (pass = 0; pass < (is_q ? 4 : 2); pass++) {
12291             TCGv_i32 tcg_op = tcg_temp_new_i32();
12292             TCGv_i32 tcg_res = tcg_temp_new_i32();
12293 
12294             read_vec_element_i32(s, tcg_op, rn, pass, MO_32);
12295 
12296             if (size == 2) {
12297                 /* Special cases for 32 bit elements */
12298                 switch (opcode) {
12299                 case 0x4: /* CLS */
12300                     if (u) {
12301                         tcg_gen_clzi_i32(tcg_res, tcg_op, 32);
12302                     } else {
12303                         tcg_gen_clrsb_i32(tcg_res, tcg_op);
12304                     }
12305                     break;
12306                 case 0x7: /* SQABS, SQNEG */
12307                     if (u) {
12308                         gen_helper_neon_qneg_s32(tcg_res, cpu_env, tcg_op);
12309                     } else {
12310                         gen_helper_neon_qabs_s32(tcg_res, cpu_env, tcg_op);
12311                     }
12312                     break;
12313                 case 0x2f: /* FABS */
12314                     gen_helper_vfp_abss(tcg_res, tcg_op);
12315                     break;
12316                 case 0x6f: /* FNEG */
12317                     gen_helper_vfp_negs(tcg_res, tcg_op);
12318                     break;
12319                 case 0x7f: /* FSQRT */
12320                     gen_helper_vfp_sqrts(tcg_res, tcg_op, cpu_env);
12321                     break;
12322                 case 0x1a: /* FCVTNS */
12323                 case 0x1b: /* FCVTMS */
12324                 case 0x1c: /* FCVTAS */
12325                 case 0x3a: /* FCVTPS */
12326                 case 0x3b: /* FCVTZS */
12327                     gen_helper_vfp_tosls(tcg_res, tcg_op,
12328                                          tcg_constant_i32(0), tcg_fpstatus);
12329                     break;
12330                 case 0x5a: /* FCVTNU */
12331                 case 0x5b: /* FCVTMU */
12332                 case 0x5c: /* FCVTAU */
12333                 case 0x7a: /* FCVTPU */
12334                 case 0x7b: /* FCVTZU */
12335                     gen_helper_vfp_touls(tcg_res, tcg_op,
12336                                          tcg_constant_i32(0), tcg_fpstatus);
12337                     break;
12338                 case 0x18: /* FRINTN */
12339                 case 0x19: /* FRINTM */
12340                 case 0x38: /* FRINTP */
12341                 case 0x39: /* FRINTZ */
12342                 case 0x58: /* FRINTA */
12343                 case 0x79: /* FRINTI */
12344                     gen_helper_rints(tcg_res, tcg_op, tcg_fpstatus);
12345                     break;
12346                 case 0x59: /* FRINTX */
12347                     gen_helper_rints_exact(tcg_res, tcg_op, tcg_fpstatus);
12348                     break;
12349                 case 0x7c: /* URSQRTE */
12350                     gen_helper_rsqrte_u32(tcg_res, tcg_op);
12351                     break;
12352                 case 0x1e: /* FRINT32Z */
12353                 case 0x5e: /* FRINT32X */
12354                     gen_helper_frint32_s(tcg_res, tcg_op, tcg_fpstatus);
12355                     break;
12356                 case 0x1f: /* FRINT64Z */
12357                 case 0x5f: /* FRINT64X */
12358                     gen_helper_frint64_s(tcg_res, tcg_op, tcg_fpstatus);
12359                     break;
12360                 default:
12361                     g_assert_not_reached();
12362                 }
12363             } else {
12364                 /* Use helpers for 8 and 16 bit elements */
12365                 switch (opcode) {
12366                 case 0x5: /* CNT, RBIT */
12367                     /* For these two insns size is part of the opcode specifier
12368                      * (handled earlier); they always operate on byte elements.
12369                      */
12370                     if (u) {
12371                         gen_helper_neon_rbit_u8(tcg_res, tcg_op);
12372                     } else {
12373                         gen_helper_neon_cnt_u8(tcg_res, tcg_op);
12374                     }
12375                     break;
12376                 case 0x7: /* SQABS, SQNEG */
12377                 {
12378                     NeonGenOneOpEnvFn *genfn;
12379                     static NeonGenOneOpEnvFn * const fns[2][2] = {
12380                         { gen_helper_neon_qabs_s8, gen_helper_neon_qneg_s8 },
12381                         { gen_helper_neon_qabs_s16, gen_helper_neon_qneg_s16 },
12382                     };
12383                     genfn = fns[size][u];
12384                     genfn(tcg_res, cpu_env, tcg_op);
12385                     break;
12386                 }
12387                 case 0x4: /* CLS, CLZ */
12388                     if (u) {
12389                         if (size == 0) {
12390                             gen_helper_neon_clz_u8(tcg_res, tcg_op);
12391                         } else {
12392                             gen_helper_neon_clz_u16(tcg_res, tcg_op);
12393                         }
12394                     } else {
12395                         if (size == 0) {
12396                             gen_helper_neon_cls_s8(tcg_res, tcg_op);
12397                         } else {
12398                             gen_helper_neon_cls_s16(tcg_res, tcg_op);
12399                         }
12400                     }
12401                     break;
12402                 default:
12403                     g_assert_not_reached();
12404                 }
12405             }
12406 
12407             write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
12408         }
12409     }
12410     clear_vec_high(s, is_q, rd);
12411 
12412     if (tcg_rmode) {
12413         gen_restore_rmode(tcg_rmode, tcg_fpstatus);
12414     }
12415 }
12416 
12417 /* AdvSIMD [scalar] two register miscellaneous (FP16)
12418  *
12419  *   31  30  29 28  27     24  23 22 21       17 16    12 11 10 9    5 4    0
12420  * +---+---+---+---+---------+---+-------------+--------+-----+------+------+
12421  * | 0 | Q | U | S | 1 1 1 0 | a | 1 1 1 1 0 0 | opcode | 1 0 |  Rn  |  Rd  |
12422  * +---+---+---+---+---------+---+-------------+--------+-----+------+------+
12423  *   mask: 1000 1111 0111 1110 0000 1100 0000 0000 0x8f7e 0c00
12424  *   val:  0000 1110 0111 1000 0000 1000 0000 0000 0x0e78 0800
12425  *
12426  * This actually covers two groups where scalar access is governed by
11427  * bit 28. Several of the instructions (the float round-to-integral
11428  * ops) exist only in the vector form and are unallocated in the
11429  * scalar decode. Also in the scalar decode Q is always 1.
12430  */
12431 static void disas_simd_two_reg_misc_fp16(DisasContext *s, uint32_t insn)
12432 {
12433     int fpop, opcode, a, u;
12434     int rn, rd;
12435     bool is_q;
12436     bool is_scalar;
12437     bool only_in_vector = false;
12438 
12439     int pass;
12440     TCGv_i32 tcg_rmode = NULL;
12441     TCGv_ptr tcg_fpstatus = NULL;
12442     bool need_fpst = true;
12443     int rmode = -1;
12444 
12445     if (!dc_isar_feature(aa64_fp16, s)) {
12446         unallocated_encoding(s);
12447         return;
12448     }
12449 
12450     rd = extract32(insn, 0, 5);
12451     rn = extract32(insn, 5, 5);
12452 
12453     a = extract32(insn, 23, 1);
12454     u = extract32(insn, 29, 1);
12455     is_scalar = extract32(insn, 28, 1);
12456     is_q = extract32(insn, 30, 1);
12457 
12458     opcode = extract32(insn, 12, 5);
12459     fpop = deposit32(opcode, 5, 1, a);
12460     fpop = deposit32(fpop, 6, 1, u);
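    /*
     * For example FRECPE (a=1, U=0, opcode=0x1d) gives fpop 0x3d, and
     * FCVTZU (a=1, U=1, opcode=0x1b) gives 0x7b.
     */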
12461 
12462     switch (fpop) {
12463     case 0x1d: /* SCVTF */
12464     case 0x5d: /* UCVTF */
12465     {
12466         int elements;
12467 
12468         if (is_scalar) {
12469             elements = 1;
12470         } else {
12471             elements = (is_q ? 8 : 4);
12472         }
12473 
12474         if (!fp_access_check(s)) {
12475             return;
12476         }
12477         handle_simd_intfp_conv(s, rd, rn, elements, !u, 0, MO_16);
12478         return;
12479     }
12481     case 0x2c: /* FCMGT (zero) */
12482     case 0x2d: /* FCMEQ (zero) */
12483     case 0x2e: /* FCMLT (zero) */
12484     case 0x6c: /* FCMGE (zero) */
12485     case 0x6d: /* FCMLE (zero) */
12486         handle_2misc_fcmp_zero(s, fpop, is_scalar, 0, is_q, MO_16, rn, rd);
12487         return;
12488     case 0x3d: /* FRECPE */
12489     case 0x3f: /* FRECPX */
12490         break;
12491     case 0x18: /* FRINTN */
12492         only_in_vector = true;
12493         rmode = FPROUNDING_TIEEVEN;
12494         break;
12495     case 0x19: /* FRINTM */
12496         only_in_vector = true;
12497         rmode = FPROUNDING_NEGINF;
12498         break;
12499     case 0x38: /* FRINTP */
12500         only_in_vector = true;
12501         rmode = FPROUNDING_POSINF;
12502         break;
12503     case 0x39: /* FRINTZ */
12504         only_in_vector = true;
12505         rmode = FPROUNDING_ZERO;
12506         break;
12507     case 0x58: /* FRINTA */
12508         only_in_vector = true;
12509         rmode = FPROUNDING_TIEAWAY;
12510         break;
12511     case 0x59: /* FRINTX */
12512     case 0x79: /* FRINTI */
12513         only_in_vector = true;
12514         /* current rounding mode */
12515         break;
12516     case 0x1a: /* FCVTNS */
12517         rmode = FPROUNDING_TIEEVEN;
12518         break;
12519     case 0x1b: /* FCVTMS */
12520         rmode = FPROUNDING_NEGINF;
12521         break;
12522     case 0x1c: /* FCVTAS */
12523         rmode = FPROUNDING_TIEAWAY;
12524         break;
12525     case 0x3a: /* FCVTPS */
12526         rmode = FPROUNDING_POSINF;
12527         break;
12528     case 0x3b: /* FCVTZS */
12529         rmode = FPROUNDING_ZERO;
12530         break;
12531     case 0x5a: /* FCVTNU */
12532         rmode = FPROUNDING_TIEEVEN;
12533         break;
12534     case 0x5b: /* FCVTMU */
12535         rmode = FPROUNDING_NEGINF;
12536         break;
12537     case 0x5c: /* FCVTAU */
12538         rmode = FPROUNDING_TIEAWAY;
12539         break;
12540     case 0x7a: /* FCVTPU */
12541         rmode = FPROUNDING_POSINF;
12542         break;
12543     case 0x7b: /* FCVTZU */
12544         rmode = FPROUNDING_ZERO;
12545         break;
12546     case 0x2f: /* FABS */
12547     case 0x6f: /* FNEG */
12548         need_fpst = false;
12549         break;
12550     case 0x7d: /* FRSQRTE */
12551     case 0x7f: /* FSQRT (vector) */
12552         break;
12553     default:
12554         unallocated_encoding(s);
12555         return;
12556     }
12557 
12559     /* Check additional constraints for the scalar encoding */
12560     if (is_scalar) {
12561         if (!is_q) {
12562             unallocated_encoding(s);
12563             return;
12564         }
12565         /* FRINTxx is only in the vector form */
12566         if (only_in_vector) {
12567             unallocated_encoding(s);
12568             return;
12569         }
12570     }
12571 
12572     if (!fp_access_check(s)) {
12573         return;
12574     }
12575 
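    /*
     * Half-precision ops use the FP16 status block, which honours the
     * separate FPCR.FZ16 flush-to-zero control, hence FPST_FPCR_F16
     * rather than the usual FPST_FPCR.
     */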
12576     if (rmode >= 0 || need_fpst) {
12577         tcg_fpstatus = fpstatus_ptr(FPST_FPCR_F16);
12578     }
12579 
12580     if (rmode >= 0) {
12581         tcg_rmode = gen_set_rmode(rmode, tcg_fpstatus);
12582     }
12583 
12584     if (is_scalar) {
12585         TCGv_i32 tcg_op = read_fp_hreg(s, rn);
12586         TCGv_i32 tcg_res = tcg_temp_new_i32();
12587 
12588         switch (fpop) {
12589         case 0x1a: /* FCVTNS */
12590         case 0x1b: /* FCVTMS */
12591         case 0x1c: /* FCVTAS */
12592         case 0x3a: /* FCVTPS */
12593         case 0x3b: /* FCVTZS */
12594             gen_helper_advsimd_f16tosinth(tcg_res, tcg_op, tcg_fpstatus);
12595             break;
12596         case 0x3d: /* FRECPE */
12597             gen_helper_recpe_f16(tcg_res, tcg_op, tcg_fpstatus);
12598             break;
12599         case 0x3f: /* FRECPX */
12600             gen_helper_frecpx_f16(tcg_res, tcg_op, tcg_fpstatus);
12601             break;
12602         case 0x5a: /* FCVTNU */
12603         case 0x5b: /* FCVTMU */
12604         case 0x5c: /* FCVTAU */
12605         case 0x7a: /* FCVTPU */
12606         case 0x7b: /* FCVTZU */
12607             gen_helper_advsimd_f16touinth(tcg_res, tcg_op, tcg_fpstatus);
12608             break;
12609         case 0x6f: /* FNEG */
12610             tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000);
12611             break;
12612         case 0x7d: /* FRSQRTE */
12613             gen_helper_rsqrte_f16(tcg_res, tcg_op, tcg_fpstatus);
12614             break;
12615         default:
12616             g_assert_not_reached();
12617         }
12618 
12619         /* limit any sign extension going on */
12620         tcg_gen_andi_i32(tcg_res, tcg_res, 0xffff);
12621         write_fp_sreg(s, rd, tcg_res);
12622     } else {
12623         for (pass = 0; pass < (is_q ? 8 : 4); pass++) {
12624             TCGv_i32 tcg_op = tcg_temp_new_i32();
12625             TCGv_i32 tcg_res = tcg_temp_new_i32();
12626 
12627             read_vec_element_i32(s, tcg_op, rn, pass, MO_16);
12628 
12629             switch (fpop) {
12630             case 0x1a: /* FCVTNS */
12631             case 0x1b: /* FCVTMS */
12632             case 0x1c: /* FCVTAS */
12633             case 0x3a: /* FCVTPS */
12634             case 0x3b: /* FCVTZS */
12635                 gen_helper_advsimd_f16tosinth(tcg_res, tcg_op, tcg_fpstatus);
12636                 break;
12637             case 0x3d: /* FRECPE */
12638                 gen_helper_recpe_f16(tcg_res, tcg_op, tcg_fpstatus);
12639                 break;
12640             case 0x5a: /* FCVTNU */
12641             case 0x5b: /* FCVTMU */
12642             case 0x5c: /* FCVTAU */
12643             case 0x7a: /* FCVTPU */
12644             case 0x7b: /* FCVTZU */
12645                 gen_helper_advsimd_f16touinth(tcg_res, tcg_op, tcg_fpstatus);
12646                 break;
12647             case 0x18: /* FRINTN */
12648             case 0x19: /* FRINTM */
12649             case 0x38: /* FRINTP */
12650             case 0x39: /* FRINTZ */
12651             case 0x58: /* FRINTA */
12652             case 0x79: /* FRINTI */
12653                 gen_helper_advsimd_rinth(tcg_res, tcg_op, tcg_fpstatus);
12654                 break;
12655             case 0x59: /* FRINTX */
12656                 gen_helper_advsimd_rinth_exact(tcg_res, tcg_op, tcg_fpstatus);
12657                 break;
12658             case 0x2f: /* FABS */
12659                 tcg_gen_andi_i32(tcg_res, tcg_op, 0x7fff);
12660                 break;
12661             case 0x6f: /* FNEG */
12662                 tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000);
12663                 break;
12664             case 0x7d: /* FRSQRTE */
12665                 gen_helper_rsqrte_f16(tcg_res, tcg_op, tcg_fpstatus);
12666                 break;
12667             case 0x7f: /* FSQRT */
12668                 gen_helper_sqrt_f16(tcg_res, tcg_op, tcg_fpstatus);
12669                 break;
12670             default:
12671                 g_assert_not_reached();
12672             }
12673 
12674             write_vec_element_i32(s, tcg_res, rd, pass, MO_16);
12675         }
12676 
12677         clear_vec_high(s, is_q, rd);
12678     }
12679 
12680     if (tcg_rmode) {
12681         gen_restore_rmode(tcg_rmode, tcg_fpstatus);
12682     }
12683 }
12684 
12685 /* AdvSIMD scalar x indexed element
12686  *  31 30  29 28       24 23  22 21  20  19  16 15 12  11  10 9    5 4    0
12687  * +-----+---+-----------+------+---+---+------+-----+---+---+------+------+
12688  * | 0 1 | U | 1 1 1 1 1 | size | L | M |  Rm  | opc | H | 0 |  Rn  |  Rd  |
12689  * +-----+---+-----------+------+---+---+------+-----+---+---+------+------+
12690  * AdvSIMD vector x indexed element
12691  *   31  30  29 28       24 23  22 21  20  19  16 15 12  11  10 9    5 4    0
12692  * +---+---+---+-----------+------+---+---+------+-----+---+---+------+------+
12693  * | 0 | Q | U | 0 1 1 1 1 | size | L | M |  Rm  | opc | H | 0 |  Rn  |  Rd  |
12694  * +---+---+---+-----------+------+---+---+------+-----+---+---+------+------+
12695  */
12696 static void disas_simd_indexed(DisasContext *s, uint32_t insn)
12697 {
12698     /* This encoding has two kinds of instruction:
12699      *  normal, where we perform elt x idxelt => elt for each
12700      *     element in the vector
12701      *  long, where we perform elt x idxelt and generate a result of
12702      *     double the width of the input element
12703      * The long ops have a 'part' specifier (i.e. they come in INSN, INSN2 pairs).
12704      */
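    /*
     * For example, SMULL V0.4S, V1.4H, V2.H[0] is a "long" op: it
     * widens four 16-bit elements into four 32-bit products, while
     * the SMULL2 form reads the upper half of Vn instead.
     */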
12705     bool is_scalar = extract32(insn, 28, 1);
12706     bool is_q = extract32(insn, 30, 1);
12707     bool u = extract32(insn, 29, 1);
12708     int size = extract32(insn, 22, 2);
12709     int l = extract32(insn, 21, 1);
12710     int m = extract32(insn, 20, 1);
12711     /* Note that the Rm field here is only 4 bits, not 5 as it usually is */
12712     int rm = extract32(insn, 16, 4);
12713     int opcode = extract32(insn, 12, 4);
12714     int h = extract32(insn, 11, 1);
12715     int rn = extract32(insn, 5, 5);
12716     int rd = extract32(insn, 0, 5);
12717     bool is_long = false;
12718     int is_fp = 0;
12719     bool is_fp16 = false;
12720     int index;
12721     TCGv_ptr fpst;
12722 
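    /*
     * The switch key is (u << 4) | opcode; e.g. UMULL has u = 1 and
     * opcode = 0xa, giving case 0x1a.
     */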
12723     switch (16 * u + opcode) {
12724     case 0x08: /* MUL */
12725     case 0x10: /* MLA */
12726     case 0x14: /* MLS */
12727         if (is_scalar) {
12728             unallocated_encoding(s);
12729             return;
12730         }
12731         break;
12732     case 0x02: /* SMLAL, SMLAL2 */
12733     case 0x12: /* UMLAL, UMLAL2 */
12734     case 0x06: /* SMLSL, SMLSL2 */
12735     case 0x16: /* UMLSL, UMLSL2 */
12736     case 0x0a: /* SMULL, SMULL2 */
12737     case 0x1a: /* UMULL, UMULL2 */
12738         if (is_scalar) {
12739             unallocated_encoding(s);
12740             return;
12741         }
12742         is_long = true;
12743         break;
12744     case 0x03: /* SQDMLAL, SQDMLAL2 */
12745     case 0x07: /* SQDMLSL, SQDMLSL2 */
12746     case 0x0b: /* SQDMULL, SQDMULL2 */
12747         is_long = true;
12748         break;
12749     case 0x0c: /* SQDMULH */
12750     case 0x0d: /* SQRDMULH */
12751         break;
12752     case 0x01: /* FMLA */
12753     case 0x05: /* FMLS */
12754     case 0x09: /* FMUL */
12755     case 0x19: /* FMULX */
12756         is_fp = 1;
12757         break;
12758     case 0x1d: /* SQRDMLAH */
12759     case 0x1f: /* SQRDMLSH */
12760         if (!dc_isar_feature(aa64_rdm, s)) {
12761             unallocated_encoding(s);
12762             return;
12763         }
12764         break;
12765     case 0x0e: /* SDOT */
12766     case 0x1e: /* UDOT */
12767         if (is_scalar || size != MO_32 || !dc_isar_feature(aa64_dp, s)) {
12768             unallocated_encoding(s);
12769             return;
12770         }
12771         break;
12772     case 0x0f:
12773         switch (size) {
12774         case 0: /* SUDOT */
12775         case 2: /* USDOT */
12776             if (is_scalar || !dc_isar_feature(aa64_i8mm, s)) {
12777                 unallocated_encoding(s);
12778                 return;
12779             }
12780             size = MO_32;
12781             break;
12782         case 1: /* BFDOT */
12783             if (is_scalar || !dc_isar_feature(aa64_bf16, s)) {
12784                 unallocated_encoding(s);
12785                 return;
12786             }
12787             size = MO_32;
12788             break;
12789         case 3: /* BFMLAL{B,T} */
12790             if (is_scalar || !dc_isar_feature(aa64_bf16, s)) {
12791                 unallocated_encoding(s);
12792                 return;
12793             }
12794             /* setting is_fp would trigger FP size checks that are wrong here */
12795             size = MO_16;
12796             break;
12797         default:
12798             unallocated_encoding(s);
12799             return;
12800         }
12801         break;
12802     case 0x11: /* FCMLA #0 */
12803     case 0x13: /* FCMLA #90 */
12804     case 0x15: /* FCMLA #180 */
12805     case 0x17: /* FCMLA #270 */
12806         if (is_scalar || !dc_isar_feature(aa64_fcma, s)) {
12807             unallocated_encoding(s);
12808             return;
12809         }
12810         is_fp = 2;
12811         break;
12812     case 0x00: /* FMLAL */
12813     case 0x04: /* FMLSL */
12814     case 0x18: /* FMLAL2 */
12815     case 0x1c: /* FMLSL2 */
12816         if (is_scalar || size != MO_32 || !dc_isar_feature(aa64_fhm, s)) {
12817             unallocated_encoding(s);
12818             return;
12819         }
12820         size = MO_16;
12821         /* is_fp, but we pass cpu_env not fp_status.  */
12822         break;
12823     default:
12824         unallocated_encoding(s);
12825         return;
12826     }
12827 
12828     switch (is_fp) {
12829     case 1: /* normal fp */
12830         /* convert insn encoded size to MemOp size */
12831         switch (size) {
12832         case 0: /* half-precision */
12833             size = MO_16;
12834             is_fp16 = true;
12835             break;
12836         case MO_32: /* single precision */
12837         case MO_64: /* double precision */
12838             break;
12839         default:
12840             unallocated_encoding(s);
12841             return;
12842         }
12843         break;
12844 
12845     case 2: /* complex fp */
12846         /* Each indexable element is a complex pair.  */
12847         size += 1;
12848         switch (size) {
12849         case MO_32:
12850             if (h && !is_q) {
12851                 unallocated_encoding(s);
12852                 return;
12853             }
12854             is_fp16 = true;
12855             break;
12856         case MO_64:
12857             break;
12858         default:
12859             unallocated_encoding(s);
12860             return;
12861         }
12862         break;
12863 
12864     default: /* integer */
12865         switch (size) {
12866         case MO_8:
12867         case MO_64:
12868             unallocated_encoding(s);
12869             return;
12870         }
12871         break;
12872     }
12873     if (is_fp16 && !dc_isar_feature(aa64_fp16, s)) {
12874         unallocated_encoding(s);
12875         return;
12876     }
12877 
12878     /* Given MemOp size, adjust register and indexing.  */
12879     switch (size) {
12880     case MO_16:
12881         index = h << 2 | l << 1 | m;
12882         break;
12883     case MO_32:
12884         index = h << 1 | l;
12885         rm |= m << 4;
12886         break;
12887     case MO_64:
12888         if (l || !is_q) {
12889             unallocated_encoding(s);
12890             return;
12891         }
12892         index = h;
12893         rm |= m << 4;
12894         break;
12895     default:
12896         g_assert_not_reached();
12897     }
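    /*
     * For MO_16 the H:L:M bits select one of eight half-word elements;
     * e.g. FMLA H0, H1, V2.H[5] encodes H = 1, L = 0, M = 1.
     */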
12898 
12899     if (!fp_access_check(s)) {
12900         return;
12901     }
12902 
12903     if (is_fp) {
12904         fpst = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR);
12905     } else {
12906         fpst = NULL;
12907     }
12908 
12909     switch (16 * u + opcode) {
12910     case 0x0e: /* SDOT */
12911     case 0x1e: /* UDOT */
12912         gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, index,
12913                          u ? gen_helper_gvec_udot_idx_b
12914                          : gen_helper_gvec_sdot_idx_b);
12915         return;
12916     case 0x0f:
12917         switch (extract32(insn, 22, 2)) {
12918         case 0: /* SUDOT */
12919             gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, index,
12920                              gen_helper_gvec_sudot_idx_b);
12921             return;
12922         case 1: /* BFDOT */
12923             gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, index,
12924                              gen_helper_gvec_bfdot_idx);
12925             return;
12926         case 2: /* USDOT */
12927             gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, index,
12928                              gen_helper_gvec_usdot_idx_b);
12929             return;
12930         case 3: /* BFMLAL{B,T} */
12931             gen_gvec_op4_fpst(s, 1, rd, rn, rm, rd, 0, (index << 1) | is_q,
12932                               gen_helper_gvec_bfmlal_idx);
12933             return;
12934         }
12935         g_assert_not_reached();
12936     case 0x11: /* FCMLA #0 */
12937     case 0x13: /* FCMLA #90 */
12938     case 0x15: /* FCMLA #180 */
12939     case 0x17: /* FCMLA #270 */
12940         {
12941             int rot = extract32(insn, 13, 2);
12942             int data = (index << 2) | rot;
12943             tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, rd),
12944                                vec_full_reg_offset(s, rn),
12945                                vec_full_reg_offset(s, rm),
12946                                vec_full_reg_offset(s, rd), fpst,
12947                                is_q ? 16 : 8, vec_full_reg_size(s), data,
12948                                size == MO_64
12949                                ? gen_helper_gvec_fcmlas_idx
12950                                : gen_helper_gvec_fcmlah_idx);
12951         }
12952         return;
12953 
12954     case 0x00: /* FMLAL */
12955     case 0x04: /* FMLSL */
12956     case 0x18: /* FMLAL2 */
12957     case 0x1c: /* FMLSL2 */
12958         {
12959             int is_s = extract32(opcode, 2, 1);
12960             int is_2 = u;
12961             int data = (index << 2) | (is_2 << 1) | is_s;
12962             tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
12963                                vec_full_reg_offset(s, rn),
12964                                vec_full_reg_offset(s, rm), cpu_env,
12965                                is_q ? 16 : 8, vec_full_reg_size(s),
12966                                data, gen_helper_gvec_fmlal_idx_a64);
12967         }
12968         return;
12969 
12970     case 0x08: /* MUL */
12971         if (!is_long && !is_scalar) {
12972             static gen_helper_gvec_3 * const fns[3] = {
12973                 gen_helper_gvec_mul_idx_h,
12974                 gen_helper_gvec_mul_idx_s,
12975                 gen_helper_gvec_mul_idx_d,
12976             };
12977             tcg_gen_gvec_3_ool(vec_full_reg_offset(s, rd),
12978                                vec_full_reg_offset(s, rn),
12979                                vec_full_reg_offset(s, rm),
12980                                is_q ? 16 : 8, vec_full_reg_size(s),
12981                                index, fns[size - 1]);
12982             return;
12983         }
12984         break;
12985 
12986     case 0x10: /* MLA */
12987         if (!is_long && !is_scalar) {
12988             static gen_helper_gvec_4 * const fns[3] = {
12989                 gen_helper_gvec_mla_idx_h,
12990                 gen_helper_gvec_mla_idx_s,
12991                 gen_helper_gvec_mla_idx_d,
12992             };
12993             tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd),
12994                                vec_full_reg_offset(s, rn),
12995                                vec_full_reg_offset(s, rm),
12996                                vec_full_reg_offset(s, rd),
12997                                is_q ? 16 : 8, vec_full_reg_size(s),
12998                                index, fns[size - 1]);
12999             return;
13000         }
13001         break;
13002 
13003     case 0x14: /* MLS */
13004         if (!is_long && !is_scalar) {
13005             static gen_helper_gvec_4 * const fns[3] = {
13006                 gen_helper_gvec_mls_idx_h,
13007                 gen_helper_gvec_mls_idx_s,
13008                 gen_helper_gvec_mls_idx_d,
13009             };
13010             tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd),
13011                                vec_full_reg_offset(s, rn),
13012                                vec_full_reg_offset(s, rm),
13013                                vec_full_reg_offset(s, rd),
13014                                is_q ? 16 : 8, vec_full_reg_size(s),
13015                                index, fns[size - 1]);
13016             return;
13017         }
13018         break;
13019     }
13020 
13021     if (size == 3) {
13022         TCGv_i64 tcg_idx = tcg_temp_new_i64();
13023         int pass;
13024 
13025         assert(is_fp && is_q && !is_long);
13026 
13027         read_vec_element(s, tcg_idx, rm, index, MO_64);
13028 
13029         for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
13030             TCGv_i64 tcg_op = tcg_temp_new_i64();
13031             TCGv_i64 tcg_res = tcg_temp_new_i64();
13032 
13033             read_vec_element(s, tcg_op, rn, pass, MO_64);
13034 
13035             switch (16 * u + opcode) {
13036             case 0x05: /* FMLS */
13037                 /* As usual for ARM, separate negation for fused multiply-add */
13038                 gen_helper_vfp_negd(tcg_op, tcg_op);
13039                 /* fall through */
13040             case 0x01: /* FMLA */
13041                 read_vec_element(s, tcg_res, rd, pass, MO_64);
13042                 gen_helper_vfp_muladdd(tcg_res, tcg_op, tcg_idx, tcg_res, fpst);
13043                 break;
13044             case 0x09: /* FMUL */
13045                 gen_helper_vfp_muld(tcg_res, tcg_op, tcg_idx, fpst);
13046                 break;
13047             case 0x19: /* FMULX */
13048                 gen_helper_vfp_mulxd(tcg_res, tcg_op, tcg_idx, fpst);
13049                 break;
13050             default:
13051                 g_assert_not_reached();
13052             }
13053 
13054             write_vec_element(s, tcg_res, rd, pass, MO_64);
13055         }
13056 
13057         clear_vec_high(s, !is_scalar, rd);
13058     } else if (!is_long) {
13059         /* 32 bit floating point, or 16 or 32 bit integer.
13060          * For the 16 bit scalar case we use the usual Neon helpers and
13061          * rely on the fact that 0 op 0 == 0 with no side effects.
13062          */
13063         TCGv_i32 tcg_idx = tcg_temp_new_i32();
13064         int pass, maxpasses;
13065 
13066         if (is_scalar) {
13067             maxpasses = 1;
13068         } else {
13069             maxpasses = is_q ? 4 : 2;
13070         }
13071 
13072         read_vec_element_i32(s, tcg_idx, rm, index, size);
13073 
13074         if (size == 1 && !is_scalar) {
13075             /* The simplest way to handle the 16x16 indexed ops is to duplicate
13076              * the index into both halves of the 32 bit tcg_idx and then use
13077              * the usual Neon helpers.
13078              */
13079             tcg_gen_deposit_i32(tcg_idx, tcg_idx, tcg_idx, 16, 16);
13080         }
13081 
13082         for (pass = 0; pass < maxpasses; pass++) {
13083             TCGv_i32 tcg_op = tcg_temp_new_i32();
13084             TCGv_i32 tcg_res = tcg_temp_new_i32();
13085 
13086             read_vec_element_i32(s, tcg_op, rn, pass, is_scalar ? size : MO_32);
13087 
13088             switch (16 * u + opcode) {
13089             case 0x08: /* MUL */
13090             case 0x10: /* MLA */
13091             case 0x14: /* MLS */
13092             {
13093                 static NeonGenTwoOpFn * const fns[2][2] = {
13094                     { gen_helper_neon_add_u16, gen_helper_neon_sub_u16 },
13095                     { tcg_gen_add_i32, tcg_gen_sub_i32 },
13096                 };
13097                 NeonGenTwoOpFn *genfn;
13098                 bool is_sub = opcode == 0x4;
13099 
13100                 if (size == 1) {
13101                     gen_helper_neon_mul_u16(tcg_res, tcg_op, tcg_idx);
13102                 } else {
13103                     tcg_gen_mul_i32(tcg_res, tcg_op, tcg_idx);
13104                 }
13105                 if (opcode == 0x8) {
13106                     break;
13107                 }
13108                 read_vec_element_i32(s, tcg_op, rd, pass, MO_32);
13109                 genfn = fns[size - 1][is_sub];
13110                 genfn(tcg_res, tcg_op, tcg_res);
13111                 break;
13112             }
13113             case 0x05: /* FMLS */
13114             case 0x01: /* FMLA */
13115                 read_vec_element_i32(s, tcg_res, rd, pass,
13116                                      is_scalar ? size : MO_32);
13117                 switch (size) {
13118                 case 1:
13119                     if (opcode == 0x5) {
13120                         /* As usual for ARM, separate negation for fused
13121                          * multiply-add */
13122                         tcg_gen_xori_i32(tcg_op, tcg_op, 0x80008000);
13123                     }
13124                     if (is_scalar) {
13125                         gen_helper_advsimd_muladdh(tcg_res, tcg_op, tcg_idx,
13126                                                    tcg_res, fpst);
13127                     } else {
13128                         gen_helper_advsimd_muladd2h(tcg_res, tcg_op, tcg_idx,
13129                                                     tcg_res, fpst);
13130                     }
13131                     break;
13132                 case 2:
13133                     if (opcode == 0x5) {
13134                         /* As usual for ARM, separate negation for
13135                          * fused multiply-add */
13136                         tcg_gen_xori_i32(tcg_op, tcg_op, 0x80000000);
13137                     }
13138                     gen_helper_vfp_muladds(tcg_res, tcg_op, tcg_idx,
13139                                            tcg_res, fpst);
13140                     break;
13141                 default:
13142                     g_assert_not_reached();
13143                 }
13144                 break;
13145             case 0x09: /* FMUL */
13146                 switch (size) {
13147                 case 1:
13148                     if (is_scalar) {
13149                         gen_helper_advsimd_mulh(tcg_res, tcg_op,
13150                                                 tcg_idx, fpst);
13151                     } else {
13152                         gen_helper_advsimd_mul2h(tcg_res, tcg_op,
13153                                                  tcg_idx, fpst);
13154                     }
13155                     break;
13156                 case 2:
13157                     gen_helper_vfp_muls(tcg_res, tcg_op, tcg_idx, fpst);
13158                     break;
13159                 default:
13160                     g_assert_not_reached();
13161                 }
13162                 break;
13163             case 0x19: /* FMULX */
13164                 switch (size) {
13165                 case 1:
13166                     if (is_scalar) {
13167                         gen_helper_advsimd_mulxh(tcg_res, tcg_op,
13168                                                  tcg_idx, fpst);
13169                     } else {
13170                         gen_helper_advsimd_mulx2h(tcg_res, tcg_op,
13171                                                   tcg_idx, fpst);
13172                     }
13173                     break;
13174                 case 2:
13175                     gen_helper_vfp_mulxs(tcg_res, tcg_op, tcg_idx, fpst);
13176                     break;
13177                 default:
13178                     g_assert_not_reached();
13179                 }
13180                 break;
13181             case 0x0c: /* SQDMULH */
13182                 if (size == 1) {
13183                     gen_helper_neon_qdmulh_s16(tcg_res, cpu_env,
13184                                                tcg_op, tcg_idx);
13185                 } else {
13186                     gen_helper_neon_qdmulh_s32(tcg_res, cpu_env,
13187                                                tcg_op, tcg_idx);
13188                 }
13189                 break;
13190             case 0x0d: /* SQRDMULH */
13191                 if (size == 1) {
13192                     gen_helper_neon_qrdmulh_s16(tcg_res, cpu_env,
13193                                                 tcg_op, tcg_idx);
13194                 } else {
13195                     gen_helper_neon_qrdmulh_s32(tcg_res, cpu_env,
13196                                                 tcg_op, tcg_idx);
13197                 }
13198                 break;
13199             case 0x1d: /* SQRDMLAH */
13200                 read_vec_element_i32(s, tcg_res, rd, pass,
13201                                      is_scalar ? size : MO_32);
13202                 if (size == 1) {
13203                     gen_helper_neon_qrdmlah_s16(tcg_res, cpu_env,
13204                                                 tcg_op, tcg_idx, tcg_res);
13205                 } else {
13206                     gen_helper_neon_qrdmlah_s32(tcg_res, cpu_env,
13207                                                 tcg_op, tcg_idx, tcg_res);
13208                 }
13209                 break;
13210             case 0x1f: /* SQRDMLSH */
13211                 read_vec_element_i32(s, tcg_res, rd, pass,
13212                                      is_scalar ? size : MO_32);
13213                 if (size == 1) {
13214                     gen_helper_neon_qrdmlsh_s16(tcg_res, cpu_env,
13215                                                 tcg_op, tcg_idx, tcg_res);
13216                 } else {
13217                     gen_helper_neon_qrdmlsh_s32(tcg_res, cpu_env,
13218                                                 tcg_op, tcg_idx, tcg_res);
13219                 }
13220                 break;
13221             default:
13222                 g_assert_not_reached();
13223             }
13224 
13225             if (is_scalar) {
13226                 write_fp_sreg(s, rd, tcg_res);
13227             } else {
13228                 write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
13229             }
13230         }
13231 
13232         clear_vec_high(s, is_q, rd);
13233     } else {
13234         /* long ops: 16x16->32 or 32x32->64 */
13235         TCGv_i64 tcg_res[2];
13236         int pass;
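        /*
         * Opcode bit 0 is set only for the saturating-doubling
         * (SQDMLAL/SQDMLSL/SQDMULL) forms.
         */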
13237         bool satop = extract32(opcode, 0, 1);
13238         MemOp memop = MO_32;
13239 
13240         if (satop || !u) {
13241             memop |= MO_SIGN;
13242         }
13243 
13244         if (size == 2) {
13245             TCGv_i64 tcg_idx = tcg_temp_new_i64();
13246 
13247             read_vec_element(s, tcg_idx, rm, index, memop);
13248 
13249             for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
13250                 TCGv_i64 tcg_op = tcg_temp_new_i64();
13251                 TCGv_i64 tcg_passres;
13252                 int passelt;
13253 
13254                 if (is_scalar) {
13255                     passelt = 0;
13256                 } else {
13257                     passelt = pass + (is_q * 2);
13258                 }
13259 
13260                 read_vec_element(s, tcg_op, rn, passelt, memop);
13261 
13262                 tcg_res[pass] = tcg_temp_new_i64();
13263 
13264                 if (opcode == 0xa || opcode == 0xb) {
13265                     /* Non-accumulating ops */
13266                     tcg_passres = tcg_res[pass];
13267                 } else {
13268                     tcg_passres = tcg_temp_new_i64();
13269                 }
13270 
13271                 tcg_gen_mul_i64(tcg_passres, tcg_op, tcg_idx);
13272 
13273                 if (satop) {
13274                     /* saturating, doubling */
13275                     gen_helper_neon_addl_saturate_s64(tcg_passres, cpu_env,
13276                                                       tcg_passres, tcg_passres);
13277                 }
13278 
13279                 if (opcode == 0xa || opcode == 0xb) {
13280                     continue;
13281                 }
13282 
13283                 /* Accumulating op: handle accumulate step */
13284                 read_vec_element(s, tcg_res[pass], rd, pass, MO_64);
13285 
13286                 switch (opcode) {
13287                 case 0x2: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
13288                     tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
13289                     break;
13290                 case 0x6: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
13291                     tcg_gen_sub_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
13292                     break;
13293                 case 0x7: /* SQDMLSL, SQDMLSL2 */
13294                     tcg_gen_neg_i64(tcg_passres, tcg_passres);
13295                     /* fall through */
13296                 case 0x3: /* SQDMLAL, SQDMLAL2 */
13297                     gen_helper_neon_addl_saturate_s64(tcg_res[pass], cpu_env,
13298                                                       tcg_res[pass],
13299                                                       tcg_passres);
13300                     break;
13301                 default:
13302                     g_assert_not_reached();
13303                 }
13304             }
13305 
13306             clear_vec_high(s, !is_scalar, rd);
13307         } else {
13308             TCGv_i32 tcg_idx = tcg_temp_new_i32();
13309 
13310             assert(size == 1);
13311             read_vec_element_i32(s, tcg_idx, rm, index, size);
13312 
13313             if (!is_scalar) {
13314                 /* The simplest way to handle the 16x16 indexed ops is to
13315                  * duplicate the index into both halves of the 32 bit tcg_idx
13316                  * and then use the usual Neon helpers.
13317                  */
13318                 tcg_gen_deposit_i32(tcg_idx, tcg_idx, tcg_idx, 16, 16);
13319             }
13320 
13321             for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
13322                 TCGv_i32 tcg_op = tcg_temp_new_i32();
13323                 TCGv_i64 tcg_passres;
13324 
13325                 if (is_scalar) {
13326                     read_vec_element_i32(s, tcg_op, rn, pass, size);
13327                 } else {
13328                     read_vec_element_i32(s, tcg_op, rn,
13329                                          pass + (is_q * 2), MO_32);
13330                 }
13331 
13332                 tcg_res[pass] = tcg_temp_new_i64();
13333 
13334                 if (opcode == 0xa || opcode == 0xb) {
13335                     /* Non-accumulating ops */
13336                     tcg_passres = tcg_res[pass];
13337                 } else {
13338                     tcg_passres = tcg_temp_new_i64();
13339                 }
13340 
13341                 if (memop & MO_SIGN) {
13342                     gen_helper_neon_mull_s16(tcg_passres, tcg_op, tcg_idx);
13343                 } else {
13344                     gen_helper_neon_mull_u16(tcg_passres, tcg_op, tcg_idx);
13345                 }
13346                 if (satop) {
13347                     gen_helper_neon_addl_saturate_s32(tcg_passres, cpu_env,
13348                                                       tcg_passres, tcg_passres);
13349                 }
13350 
13351                 if (opcode == 0xa || opcode == 0xb) {
13352                     continue;
13353                 }
13354 
13355                 /* Accumulating op: handle accumulate step */
13356                 read_vec_element(s, tcg_res[pass], rd, pass, MO_64);
13357 
13358                 switch (opcode) {
13359                 case 0x2: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
13360                     gen_helper_neon_addl_u32(tcg_res[pass], tcg_res[pass],
13361                                              tcg_passres);
13362                     break;
13363                 case 0x6: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
13364                     gen_helper_neon_subl_u32(tcg_res[pass], tcg_res[pass],
13365                                              tcg_passres);
13366                     break;
13367                 case 0x7: /* SQDMLSL, SQDMLSL2 */
13368                     gen_helper_neon_negl_u32(tcg_passres, tcg_passres);
13369                     /* fall through */
13370                 case 0x3: /* SQDMLAL, SQDMLAL2 */
13371                     gen_helper_neon_addl_saturate_s32(tcg_res[pass], cpu_env,
13372                                                       tcg_res[pass],
13373                                                       tcg_passres);
13374                     break;
13375                 default:
13376                     g_assert_not_reached();
13377                 }
13378             }
13379 
13380             if (is_scalar) {
13381                 tcg_gen_ext32u_i64(tcg_res[0], tcg_res[0]);
13382             }
13383         }
13384 
13385         if (is_scalar) {
13386             tcg_res[1] = tcg_constant_i64(0);
13387         }
13388 
13389         for (pass = 0; pass < 2; pass++) {
13390             write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
13391         }
13392     }
13393 }
13394 
13395 /* Crypto AES
13396  *  31             24 23  22 21       17 16    12 11 10 9    5 4    0
13397  * +-----------------+------+-----------+--------+-----+------+------+
13398  * | 0 1 0 0 1 1 1 0 | size | 1 0 1 0 0 | opcode | 1 0 |  Rn  |  Rd  |
13399  * +-----------------+------+-----------+--------+-----+------+------+
13400  */
13401 static void disas_crypto_aes(DisasContext *s, uint32_t insn)
13402 {
13403     int size = extract32(insn, 22, 2);
13404     int opcode = extract32(insn, 12, 5);
13405     int rn = extract32(insn, 5, 5);
13406     int rd = extract32(insn, 0, 5);
13407     int decrypt;
13408     gen_helper_gvec_2 *genfn2 = NULL;
13409     gen_helper_gvec_3 *genfn3 = NULL;
13410 
13411     if (!dc_isar_feature(aa64_aes, s) || size != 0) {
13412         unallocated_encoding(s);
13413         return;
13414     }
13415 
13416     switch (opcode) {
13417     case 0x4: /* AESE */
13418         decrypt = 0;
13419         genfn3 = gen_helper_crypto_aese;
13420         break;
13421     case 0x6: /* AESMC */
13422         decrypt = 0;
13423         genfn2 = gen_helper_crypto_aesmc;
13424         break;
13425     case 0x5: /* AESD */
13426         decrypt = 1;
13427         genfn3 = gen_helper_crypto_aese;
13428         break;
13429     case 0x7: /* AESIMC */
13430         decrypt = 1;
13431         genfn2 = gen_helper_crypto_aesmc;
13432         break;
13433     default:
13434         unallocated_encoding(s);
13435         return;
13436     }
13437 
13438     if (!fp_access_check(s)) {
13439         return;
13440     }
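    /*
     * The aese and aesmc helpers implement both directions; the
     * 'decrypt' immediate selects the AESD/AESIMC behaviour.
     */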
13441     if (genfn2) {
13442         gen_gvec_op2_ool(s, true, rd, rn, decrypt, genfn2);
13443     } else {
13444         gen_gvec_op3_ool(s, true, rd, rd, rn, decrypt, genfn3);
13445     }
13446 }
13447 
13448 /* Crypto three-reg SHA
13449  *  31             24 23  22  21 20  16  15 14    12 11 10 9    5 4    0
13450  * +-----------------+------+---+------+---+--------+-----+------+------+
13451  * | 0 1 0 1 1 1 1 0 | size | 0 |  Rm  | 0 | opcode | 0 0 |  Rn  |  Rd  |
13452  * +-----------------+------+---+------+---+--------+-----+------+------+
13453  */
13454 static void disas_crypto_three_reg_sha(DisasContext *s, uint32_t insn)
13455 {
13456     int size = extract32(insn, 22, 2);
13457     int opcode = extract32(insn, 12, 3);
13458     int rm = extract32(insn, 16, 5);
13459     int rn = extract32(insn, 5, 5);
13460     int rd = extract32(insn, 0, 5);
13461     gen_helper_gvec_3 *genfn;
13462     bool feature;
13463 
13464     if (size != 0) {
13465         unallocated_encoding(s);
13466         return;
13467     }
13468 
13469     switch (opcode) {
13470     case 0: /* SHA1C */
13471         genfn = gen_helper_crypto_sha1c;
13472         feature = dc_isar_feature(aa64_sha1, s);
13473         break;
13474     case 1: /* SHA1P */
13475         genfn = gen_helper_crypto_sha1p;
13476         feature = dc_isar_feature(aa64_sha1, s);
13477         break;
13478     case 2: /* SHA1M */
13479         genfn = gen_helper_crypto_sha1m;
13480         feature = dc_isar_feature(aa64_sha1, s);
13481         break;
13482     case 3: /* SHA1SU0 */
13483         genfn = gen_helper_crypto_sha1su0;
13484         feature = dc_isar_feature(aa64_sha1, s);
13485         break;
13486     case 4: /* SHA256H */
13487         genfn = gen_helper_crypto_sha256h;
13488         feature = dc_isar_feature(aa64_sha256, s);
13489         break;
13490     case 5: /* SHA256H2 */
13491         genfn = gen_helper_crypto_sha256h2;
13492         feature = dc_isar_feature(aa64_sha256, s);
13493         break;
13494     case 6: /* SHA256SU1 */
13495         genfn = gen_helper_crypto_sha256su1;
13496         feature = dc_isar_feature(aa64_sha256, s);
13497         break;
13498     default:
13499         unallocated_encoding(s);
13500         return;
13501     }
13502 
13503     if (!feature) {
13504         unallocated_encoding(s);
13505         return;
13506     }
13507 
13508     if (!fp_access_check(s)) {
13509         return;
13510     }
13511     gen_gvec_op3_ool(s, true, rd, rn, rm, 0, genfn);
13512 }
13513 
13514 /* Crypto two-reg SHA
13515  *  31             24 23  22 21       17 16    12 11 10 9    5 4    0
13516  * +-----------------+------+-----------+--------+-----+------+------+
13517  * | 0 1 0 1 1 1 1 0 | size | 1 0 1 0 0 | opcode | 1 0 |  Rn  |  Rd  |
13518  * +-----------------+------+-----------+--------+-----+------+------+
13519  */
13520 static void disas_crypto_two_reg_sha(DisasContext *s, uint32_t insn)
13521 {
13522     int size = extract32(insn, 22, 2);
13523     int opcode = extract32(insn, 12, 5);
13524     int rn = extract32(insn, 5, 5);
13525     int rd = extract32(insn, 0, 5);
13526     gen_helper_gvec_2 *genfn;
13527     bool feature;
13528 
13529     if (size != 0) {
13530         unallocated_encoding(s);
13531         return;
13532     }
13533 
13534     switch (opcode) {
13535     case 0: /* SHA1H */
13536         feature = dc_isar_feature(aa64_sha1, s);
13537         genfn = gen_helper_crypto_sha1h;
13538         break;
13539     case 1: /* SHA1SU1 */
13540         feature = dc_isar_feature(aa64_sha1, s);
13541         genfn = gen_helper_crypto_sha1su1;
13542         break;
13543     case 2: /* SHA256SU0 */
13544         feature = dc_isar_feature(aa64_sha256, s);
13545         genfn = gen_helper_crypto_sha256su0;
13546         break;
13547     default:
13548         unallocated_encoding(s);
13549         return;
13550     }
13551 
13552     if (!feature) {
13553         unallocated_encoding(s);
13554         return;
13555     }
13556 
13557     if (!fp_access_check(s)) {
13558         return;
13559     }
13560     gen_gvec_op2_ool(s, true, rd, rn, 0, genfn);
13561 }
13562 
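/*
 * RAX1 (from the SHA-3 extension) computes d = n ^ rol64(m, 1).  The
 * expanders below implement it for 64-bit scalar lanes and for host
 * vectors respectively.
 */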
13563 static void gen_rax1_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m)
13564 {
13565     tcg_gen_rotli_i64(d, m, 1);
13566     tcg_gen_xor_i64(d, d, n);
13567 }
13568 
13569 static void gen_rax1_vec(unsigned vece, TCGv_vec d, TCGv_vec n, TCGv_vec m)
13570 {
13571     tcg_gen_rotli_vec(vece, d, m, 1);
13572     tcg_gen_xor_vec(vece, d, d, n);
13573 }
13574 
13575 void gen_gvec_rax1(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
13576                    uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
13577 {
13578     static const TCGOpcode vecop_list[] = { INDEX_op_rotli_vec, 0 };
13579     static const GVecGen3 op = {
13580         .fni8 = gen_rax1_i64,
13581         .fniv = gen_rax1_vec,
13582         .opt_opc = vecop_list,
13583         .fno = gen_helper_crypto_rax1,
13584         .vece = MO_64,
13585     };
13586     tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &op);
13587 }
13588 
13589 /* Crypto three-reg SHA512
13590  *  31                   21 20  16 15  14  13 12  11  10  9    5 4    0
13591  * +-----------------------+------+---+---+-----+--------+------+------+
13592  * | 1 1 0 0 1 1 1 0 0 1 1 |  Rm  | 1 | O | 0 0 | opcode |  Rn  |  Rd  |
13593  * +-----------------------+------+---+---+-----+--------+------+------+
13594  */
13595 static void disas_crypto_three_reg_sha512(DisasContext *s, uint32_t insn)
13596 {
13597     int opcode = extract32(insn, 10, 2);
13598     int o = extract32(insn, 14, 1);
13599     int rm = extract32(insn, 16, 5);
13600     int rn = extract32(insn, 5, 5);
13601     int rd = extract32(insn, 0, 5);
13602     bool feature;
13603     gen_helper_gvec_3 *oolfn = NULL;
13604     GVecGen3Fn *gvecfn = NULL;
13605 
13606     if (o == 0) {
13607         switch (opcode) {
13608         case 0: /* SHA512H */
13609             feature = dc_isar_feature(aa64_sha512, s);
13610             oolfn = gen_helper_crypto_sha512h;
13611             break;
13612         case 1: /* SHA512H2 */
13613             feature = dc_isar_feature(aa64_sha512, s);
13614             oolfn = gen_helper_crypto_sha512h2;
13615             break;
13616         case 2: /* SHA512SU1 */
13617             feature = dc_isar_feature(aa64_sha512, s);
13618             oolfn = gen_helper_crypto_sha512su1;
13619             break;
13620         case 3: /* RAX1 */
13621             feature = dc_isar_feature(aa64_sha3, s);
13622             gvecfn = gen_gvec_rax1;
13623             break;
13624         default:
13625             g_assert_not_reached();
13626         }
13627     } else {
13628         switch (opcode) {
13629         case 0: /* SM3PARTW1 */
13630             feature = dc_isar_feature(aa64_sm3, s);
13631             oolfn = gen_helper_crypto_sm3partw1;
13632             break;
13633         case 1: /* SM3PARTW2 */
13634             feature = dc_isar_feature(aa64_sm3, s);
13635             oolfn = gen_helper_crypto_sm3partw2;
13636             break;
13637         case 2: /* SM4EKEY */
13638             feature = dc_isar_feature(aa64_sm4, s);
13639             oolfn = gen_helper_crypto_sm4ekey;
13640             break;
13641         default:
13642             unallocated_encoding(s);
13643             return;
13644         }
13645     }
13646 
13647     if (!feature) {
13648         unallocated_encoding(s);
13649         return;
13650     }
13651 
13652     if (!fp_access_check(s)) {
13653         return;
13654     }
13655 
13656     if (oolfn) {
13657         gen_gvec_op3_ool(s, true, rd, rn, rm, 0, oolfn);
13658     } else {
13659         gen_gvec_fn3(s, true, rd, rn, rm, gvecfn, MO_64);
13660     }
13661 }
13662 
13663 /* Crypto two-reg SHA512
13664  *  31                                     12  11  10  9    5 4    0
13665  * +-----------------------------------------+--------+------+------+
13666  * | 1 1 0 0 1 1 1 0 1 1 0 0 0 0 0 0 1 0 0 0 | opcode |  Rn  |  Rd  |
13667  * +-----------------------------------------+--------+------+------+
13668  */
13669 static void disas_crypto_two_reg_sha512(DisasContext *s, uint32_t insn)
13670 {
13671     int opcode = extract32(insn, 10, 2);
13672     int rn = extract32(insn, 5, 5);
13673     int rd = extract32(insn, 0, 5);
13674     bool feature;
13675 
13676     switch (opcode) {
13677     case 0: /* SHA512SU0 */
13678         feature = dc_isar_feature(aa64_sha512, s);
13679         break;
13680     case 1: /* SM4E */
13681         feature = dc_isar_feature(aa64_sm4, s);
13682         break;
13683     default:
13684         unallocated_encoding(s);
13685         return;
13686     }
13687 
13688     if (!feature) {
13689         unallocated_encoding(s);
13690         return;
13691     }
13692 
13693     if (!fp_access_check(s)) {
13694         return;
13695     }
13696 
13697     switch (opcode) {
13698     case 0: /* SHA512SU0 */
13699         gen_gvec_op2_ool(s, true, rd, rn, 0, gen_helper_crypto_sha512su0);
13700         break;
13701     case 1: /* SM4E */
13702         gen_gvec_op3_ool(s, true, rd, rd, rn, 0, gen_helper_crypto_sm4e);
13703         break;
13704     default:
13705         g_assert_not_reached();
13706     }
13707 }
13708 
13709 /* Crypto four-register
13710  *  31               23 22 21 20  16 15  14  10 9    5 4    0
13711  * +-------------------+-----+------+---+------+------+------+
13712  * | 1 1 0 0 1 1 1 0 0 | Op0 |  Rm  | 0 |  Ra  |  Rn  |  Rd  |
13713  * +-------------------+-----+------+---+------+------+------+
13714  */
13715 static void disas_crypto_four_reg(DisasContext *s, uint32_t insn)
13716 {
13717     int op0 = extract32(insn, 21, 2);
13718     int rm = extract32(insn, 16, 5);
13719     int ra = extract32(insn, 10, 5);
13720     int rn = extract32(insn, 5, 5);
13721     int rd = extract32(insn, 0, 5);
13722     bool feature;
13723 
13724     switch (op0) {
13725     case 0: /* EOR3 */
13726     case 1: /* BCAX */
13727         feature = dc_isar_feature(aa64_sha3, s);
13728         break;
13729     case 2: /* SM3SS1 */
13730         feature = dc_isar_feature(aa64_sm3, s);
13731         break;
13732     default:
13733         unallocated_encoding(s);
13734         return;
13735     }
13736 
13737     if (!feature) {
13738         unallocated_encoding(s);
13739         return;
13740     }
13741 
13742     if (!fp_access_check(s)) {
13743         return;
13744     }
13745 
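    /* EOR3: Vd = Vn ^ Vm ^ Va.  BCAX: Vd = Vn ^ (Vm & ~Va). */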
13746     if (op0 < 2) {
13747         TCGv_i64 tcg_op1, tcg_op2, tcg_op3, tcg_res[2];
13748         int pass;
13749 
13750         tcg_op1 = tcg_temp_new_i64();
13751         tcg_op2 = tcg_temp_new_i64();
13752         tcg_op3 = tcg_temp_new_i64();
13753         tcg_res[0] = tcg_temp_new_i64();
13754         tcg_res[1] = tcg_temp_new_i64();
13755 
13756         for (pass = 0; pass < 2; pass++) {
13757             read_vec_element(s, tcg_op1, rn, pass, MO_64);
13758             read_vec_element(s, tcg_op2, rm, pass, MO_64);
13759             read_vec_element(s, tcg_op3, ra, pass, MO_64);
13760 
13761             if (op0 == 0) {
13762                 /* EOR3 */
13763                 tcg_gen_xor_i64(tcg_res[pass], tcg_op2, tcg_op3);
13764             } else {
13765                 /* BCAX */
13766                 tcg_gen_andc_i64(tcg_res[pass], tcg_op2, tcg_op3);
13767             }
13768             tcg_gen_xor_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
13769         }
13770         write_vec_element(s, tcg_res[0], rd, 0, MO_64);
13771         write_vec_element(s, tcg_res[1], rd, 1, MO_64);
13772     } else {
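        /*
         * SM3SS1: lane 3 = ROL(ROL(Vn[3], 12) + Vm[3] + Va[3], 7),
         * with the other three lanes zeroed; the ROR immediates below
         * are the equivalent left rotations.
         */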
13773         TCGv_i32 tcg_op1, tcg_op2, tcg_op3, tcg_res, tcg_zero;
13774 
13775         tcg_op1 = tcg_temp_new_i32();
13776         tcg_op2 = tcg_temp_new_i32();
13777         tcg_op3 = tcg_temp_new_i32();
13778         tcg_res = tcg_temp_new_i32();
13779         tcg_zero = tcg_constant_i32(0);
13780 
13781         read_vec_element_i32(s, tcg_op1, rn, 3, MO_32);
13782         read_vec_element_i32(s, tcg_op2, rm, 3, MO_32);
13783         read_vec_element_i32(s, tcg_op3, ra, 3, MO_32);
13784 
13785         tcg_gen_rotri_i32(tcg_res, tcg_op1, 20);
13786         tcg_gen_add_i32(tcg_res, tcg_res, tcg_op2);
13787         tcg_gen_add_i32(tcg_res, tcg_res, tcg_op3);
13788         tcg_gen_rotri_i32(tcg_res, tcg_res, 25);
13789 
13790         write_vec_element_i32(s, tcg_zero, rd, 0, MO_32);
13791         write_vec_element_i32(s, tcg_zero, rd, 1, MO_32);
13792         write_vec_element_i32(s, tcg_zero, rd, 2, MO_32);
13793         write_vec_element_i32(s, tcg_res, rd, 3, MO_32);
13794     }
13795 }
13796 
13797 /* Crypto XAR
13798  *  31                   21 20  16 15    10 9    5 4    0
13799  * +-----------------------+------+--------+------+------+
13800  * | 1 1 0 0 1 1 1 0 1 0 0 |  Rm  |  imm6  |  Rn  |  Rd  |
13801  * +-----------------------+------+--------+------+------+
13802  */
13803 static void disas_crypto_xar(DisasContext *s, uint32_t insn)
13804 {
13805     int rm = extract32(insn, 16, 5);
13806     int imm6 = extract32(insn, 10, 6);
13807     int rn = extract32(insn, 5, 5);
13808     int rd = extract32(insn, 0, 5);
13809 
13810     if (!dc_isar_feature(aa64_sha3, s)) {
13811         unallocated_encoding(s);
13812         return;
13813     }
13814 
13815     if (!fp_access_check(s)) {
13816         return;
13817     }
13818 
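    /* XAR: Vd.2D = (Vn.2D ^ Vm.2D) ROR #imm6 */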
13819     gen_gvec_xar(MO_64, vec_full_reg_offset(s, rd),
13820                  vec_full_reg_offset(s, rn),
13821                  vec_full_reg_offset(s, rm), imm6, 16,
13822                  vec_full_reg_size(s));
13823 }
13824 
13825 /* Crypto three-reg imm2
13826  *  31                   21 20  16 15  14 13 12  11  10  9    5 4    0
13827  * +-----------------------+------+-----+------+--------+------+------+
13828  * | 1 1 0 0 1 1 1 0 0 1 0 |  Rm  | 1 0 | imm2 | opcode |  Rn  |  Rd  |
13829  * +-----------------------+------+-----+------+--------+------+------+
13830  */
13831 static void disas_crypto_three_reg_imm2(DisasContext *s, uint32_t insn)
13832 {
13833     static gen_helper_gvec_3 * const fns[4] = {
13834         gen_helper_crypto_sm3tt1a, gen_helper_crypto_sm3tt1b,
13835         gen_helper_crypto_sm3tt2a, gen_helper_crypto_sm3tt2b,
13836     };
13837     int opcode = extract32(insn, 10, 2);
13838     int imm2 = extract32(insn, 12, 2);
13839     int rm = extract32(insn, 16, 5);
13840     int rn = extract32(insn, 5, 5);
13841     int rd = extract32(insn, 0, 5);
13842 
13843     if (!dc_isar_feature(aa64_sm3, s)) {
13844         unallocated_encoding(s);
13845         return;
13846     }
13847 
13848     if (!fp_access_check(s)) {
13849         return;
13850     }
13851 
13852     gen_gvec_op3_ool(s, true, rd, rn, rm, imm2, fns[opcode]);
13853 }
13854 
13855 /* C3.6 Data processing - SIMD, inc Crypto
13856  *
13857  * As the decode gets a little complex we use a table-based
13858  * approach for this part of the decode.
13859  */
13860 static const AArch64DecodeTable data_proc_simd[] = {
13861     /* pattern  ,  mask     ,  fn                        */
13862     { 0x0e200400, 0x9f200400, disas_simd_three_reg_same },
13863     { 0x0e008400, 0x9f208400, disas_simd_three_reg_same_extra },
13864     { 0x0e200000, 0x9f200c00, disas_simd_three_reg_diff },
13865     { 0x0e200800, 0x9f3e0c00, disas_simd_two_reg_misc },
13866     { 0x0e300800, 0x9f3e0c00, disas_simd_across_lanes },
13867     { 0x0e000400, 0x9fe08400, disas_simd_copy },
13868     { 0x0f000000, 0x9f000400, disas_simd_indexed }, /* vector indexed */
13869     /* simd_mod_imm decode is a subset of simd_shift_imm, so must precede it */
13870     { 0x0f000400, 0x9ff80400, disas_simd_mod_imm },
13871     { 0x0f000400, 0x9f800400, disas_simd_shift_imm },
13872     { 0x0e000000, 0xbf208c00, disas_simd_tb },
13873     { 0x0e000800, 0xbf208c00, disas_simd_zip_trn },
13874     { 0x2e000000, 0xbf208400, disas_simd_ext },
13875     { 0x5e200400, 0xdf200400, disas_simd_scalar_three_reg_same },
13876     { 0x5e008400, 0xdf208400, disas_simd_scalar_three_reg_same_extra },
13877     { 0x5e200000, 0xdf200c00, disas_simd_scalar_three_reg_diff },
13878     { 0x5e200800, 0xdf3e0c00, disas_simd_scalar_two_reg_misc },
13879     { 0x5e300800, 0xdf3e0c00, disas_simd_scalar_pairwise },
13880     { 0x5e000400, 0xdfe08400, disas_simd_scalar_copy },
13881     { 0x5f000000, 0xdf000400, disas_simd_indexed }, /* scalar indexed */
13882     { 0x5f000400, 0xdf800400, disas_simd_scalar_shift_imm },
13883     { 0x4e280800, 0xff3e0c00, disas_crypto_aes },
13884     { 0x5e000000, 0xff208c00, disas_crypto_three_reg_sha },
13885     { 0x5e280800, 0xff3e0c00, disas_crypto_two_reg_sha },
13886     { 0xce608000, 0xffe0b000, disas_crypto_three_reg_sha512 },
13887     { 0xcec08000, 0xfffff000, disas_crypto_two_reg_sha512 },
13888     { 0xce000000, 0xff808000, disas_crypto_four_reg },
13889     { 0xce800000, 0xffe00000, disas_crypto_xar },
13890     { 0xce408000, 0xffe0c000, disas_crypto_three_reg_imm2 },
13891     { 0x0e400400, 0x9f60c400, disas_simd_three_reg_same_fp16 },
13892     { 0x0e780800, 0x8f7e0c00, disas_simd_two_reg_misc_fp16 },
13893     { 0x5e400400, 0xdf60c400, disas_simd_scalar_three_reg_same_fp16 },
13894     { 0x00000000, 0x00000000, NULL }
13895 };
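
/*
 * lookup_disas_fn() scans this table in order and returns the first
 * entry for which (insn & mask) == pattern, roughly:
 *
 *     for (p = table; p->disas_fn; p++) {
 *         if ((insn & p->mask) == p->pattern) {
 *             return p->disas_fn;
 *         }
 *     }
 *
 * which is why more specific patterns (e.g. simd_mod_imm) must precede
 * the broader ones they overlap with.
 */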
13896 
13897 static void disas_data_proc_simd(DisasContext *s, uint32_t insn)
13898 {
13899     /* Note that this is called with all non-FP cases from
13900      * table C3-6 so it must UNDEF for entries not specifically
13901      * allocated to instructions in that table.
13902      */
13903     AArch64DecodeFn *fn = lookup_disas_fn(&data_proc_simd[0], insn);
13904     if (fn) {
13905         fn(s, insn);
13906     } else {
13907         unallocated_encoding(s);
13908     }
13909 }
13910 
13911 /* C3.6 Data processing - SIMD and floating point */
13912 static void disas_data_proc_simd_fp(DisasContext *s, uint32_t insn)
13913 {
13914     if (extract32(insn, 28, 1) == 1 && extract32(insn, 30, 1) == 0) {
13915         disas_data_proc_fp(s, insn);
13916     } else {
13917         /* SIMD, including crypto */
13918         disas_data_proc_simd(s, insn);
13919     }
13920 }
13921 
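/*
 * Stubs for the generated SME FA64 decoder included above: encodings
 * that remain legal in Streaming SVE mode decode to trans_OK, while
 * trans_FAIL marks the insn as non-streaming so that it can be
 * rejected later if the CPU is in streaming mode without FEAT_SME_FA64.
 */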
13922 static bool trans_OK(DisasContext *s, arg_OK *a)
13923 {
13924     return true;
13925 }
13926 
13927 static bool trans_FAIL(DisasContext *s, arg_OK *a)
13928 {
13929     s->is_nonstreaming = true;
13930     return true;
13931 }
13932 
13933 /**
13934  * is_guarded_page:
13935  * @env: The cpu environment
13936  * @s: The DisasContext
13937  *
13938  * Return true if the page is guarded.
13939  */
13940 static bool is_guarded_page(CPUARMState *env, DisasContext *s)
13941 {
13942     uint64_t addr = s->base.pc_first;
13943 #ifdef CONFIG_USER_ONLY
13944     return page_get_flags(addr) & PAGE_BTI;
13945 #else
13946     CPUTLBEntryFull *full;
13947     void *host;
13948     int mmu_idx = arm_to_core_mmu_idx(s->mmu_idx);
13949     int flags;
13950 
13951     /*
13952      * We test this immediately after reading an insn, which means
13953      * that the TLB entry must be present and valid, and thus this
13954      * access will never raise an exception.
13955      */
13956     flags = probe_access_full(env, addr, 0, MMU_INST_FETCH, mmu_idx,
13957                               false, &host, &full, 0);
13958     assert(!(flags & TLB_INVALID_MASK));
13959 
13960     return full->guarded;
13961 #endif
13962 }
13963 
13964 /**
13965  * btype_destination_ok:
13966  * @insn: The instruction at the branch destination
13967  * @bt: SCTLR_ELx.BT
13968  * @btype: PSTATE.BTYPE, which is known to be non-zero
13969  *
13970  * On a guarded page, only a limited set of insns may be
13971  * present at the branch target:
13972  *   - branch target identifiers (BTI),
13973  *   - PACIASP, PACIBSP,
13974  *   - the BRK insn,
13975  *   - the HLT insn.
13976  * Anything else causes a Branch Target Exception.
13977  *
13978  * Return true if the branch is compatible, false to raise BTITRAP.
13979  */
13980 static bool btype_destination_ok(uint32_t insn, bool bt, int btype)
13981 {
13982     if ((insn & 0xfffff01fu) == 0xd503201fu) {
13983         /* HINT space */
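              /*
               * Illustrative example: BTI c encodes as 0xd503245f, i.e. the
               * HINT pattern 0xd503201f with op 0b100010 in bits [11:5].
               */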
13984         switch (extract32(insn, 5, 7)) {
13985         case 0b011001: /* PACIASP */
13986         case 0b011011: /* PACIBSP */
13987             /*
13988              * If SCTLR_ELx.BT, then PACI*SP are not compatible
13989              * with btype == 3.  Otherwise all btype are ok.
13990              */
13991             return !bt || btype != 3;
13992         case 0b100000: /* BTI */
13993             /* Not compatible with any btype.  */
13994             return false;
13995         case 0b100010: /* BTI c */
13996             /* Not compatible with btype == 3 */
13997             return btype != 3;
13998         case 0b100100: /* BTI j */
13999             /* Not compatible with btype == 2 */
14000             return btype != 2;
14001         case 0b100110: /* BTI jc */
14002             /* Compatible with any btype.  */
14003             return true;
14004         }
14005     } else {
14006         switch (insn & 0xffe0001fu) {
14007         case 0xd4200000u: /* BRK */
14008         case 0xd4400000u: /* HLT */
14009             /* Give priority to the breakpoint exception.  */
14010             return true;
14011         }
14012     }
14013     return false;
14014 }
14015 
14016 /* C3.1 A64 instruction index by encoding */
14017 static void disas_a64_legacy(DisasContext *s, uint32_t insn)
14018 {
14019     switch (extract32(insn, 25, 4)) {
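          /*
           * Decode on op0, insn bits [28:25].  Encodings already claimed by
           * the decodetree decoders (disas_a64, disas_sme, disas_sve) never
           * reach this function; anything not matched below is unallocated.
           */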
14020     case 0xa: case 0xb: /* Branch, exception generation and system insns */
14021         disas_b_exc_sys(s, insn);
14022         break;
14023     case 0x4:
14024     case 0x6:
14025     case 0xc:
14026     case 0xe:      /* Loads and stores */
14027         disas_ldst(s, insn);
14028         break;
14029     case 0x5:
14030     case 0xd:      /* Data processing - register */
14031         disas_data_proc_reg(s, insn);
14032         break;
14033     case 0x7:
14034     case 0xf:      /* Data processing - SIMD and floating point */
14035         disas_data_proc_simd_fp(s, insn);
14036         break;
14037     default:
14038         unallocated_encoding(s);
14039         break;
14040     }
14041 }
14042 
14043 static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
14044                                           CPUState *cpu)
14045 {
14046     DisasContext *dc = container_of(dcbase, DisasContext, base);
14047     CPUARMState *env = cpu->env_ptr;
14048     ARMCPU *arm_cpu = env_archcpu(env);
14049     CPUARMTBFlags tb_flags = arm_tbflags_from_tb(dc->base.tb);
14050     int bound, core_mmu_idx;
14051 
14052     dc->isar = &arm_cpu->isar;
14053     dc->condjmp = 0;
14054     dc->pc_save = dc->base.pc_first;
14055     dc->aarch64 = true;
14056     dc->thumb = false;
14057     dc->sctlr_b = 0;
14058     dc->be_data = EX_TBFLAG_ANY(tb_flags, BE_DATA) ? MO_BE : MO_LE;
14059     dc->condexec_mask = 0;
14060     dc->condexec_cond = 0;
14061     core_mmu_idx = EX_TBFLAG_ANY(tb_flags, MMUIDX);
14062     dc->mmu_idx = core_to_aa64_mmu_idx(core_mmu_idx);
14063     dc->tbii = EX_TBFLAG_A64(tb_flags, TBII);
14064     dc->tbid = EX_TBFLAG_A64(tb_flags, TBID);
14065     dc->tcma = EX_TBFLAG_A64(tb_flags, TCMA);
14066     dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
14067 #if !defined(CONFIG_USER_ONLY)
14068     dc->user = (dc->current_el == 0);
14069 #endif
14070     dc->fp_excp_el = EX_TBFLAG_ANY(tb_flags, FPEXC_EL);
14071     dc->align_mem = EX_TBFLAG_ANY(tb_flags, ALIGN_MEM);
14072     dc->pstate_il = EX_TBFLAG_ANY(tb_flags, PSTATE__IL);
14073     dc->fgt_active = EX_TBFLAG_ANY(tb_flags, FGT_ACTIVE);
14074     dc->fgt_svc = EX_TBFLAG_ANY(tb_flags, FGT_SVC);
14075     dc->fgt_eret = EX_TBFLAG_A64(tb_flags, FGT_ERET);
14076     dc->sve_excp_el = EX_TBFLAG_A64(tb_flags, SVEEXC_EL);
14077     dc->sme_excp_el = EX_TBFLAG_A64(tb_flags, SMEEXC_EL);
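          /* The VL/SVL fields hold (vector length in bytes / 16) - 1 */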
14078     dc->vl = (EX_TBFLAG_A64(tb_flags, VL) + 1) * 16;
14079     dc->svl = (EX_TBFLAG_A64(tb_flags, SVL) + 1) * 16;
14080     dc->pauth_active = EX_TBFLAG_A64(tb_flags, PAUTH_ACTIVE);
14081     dc->bt = EX_TBFLAG_A64(tb_flags, BT);
14082     dc->btype = EX_TBFLAG_A64(tb_flags, BTYPE);
14083     dc->unpriv = EX_TBFLAG_A64(tb_flags, UNPRIV);
14084     dc->ata = EX_TBFLAG_A64(tb_flags, ATA);
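          /* mte_active[] is indexed by the unprivileged-access flag; [1],
           * from MTE0_ACTIVE, covers LDTR/STTR-style unprivileged accesses.
           */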
14085     dc->mte_active[0] = EX_TBFLAG_A64(tb_flags, MTE_ACTIVE);
14086     dc->mte_active[1] = EX_TBFLAG_A64(tb_flags, MTE0_ACTIVE);
14087     dc->pstate_sm = EX_TBFLAG_A64(tb_flags, PSTATE_SM);
14088     dc->pstate_za = EX_TBFLAG_A64(tb_flags, PSTATE_ZA);
14089     dc->sme_trap_nonstreaming = EX_TBFLAG_A64(tb_flags, SME_TRAP_NONSTREAMING);
14090     dc->vec_len = 0;
14091     dc->vec_stride = 0;
14092     dc->cp_regs = arm_cpu->cp_regs;
14093     dc->features = env->features;
14094     dc->dcz_blocksize = arm_cpu->dcz_blocksize;
14095 
14096 #ifdef CONFIG_USER_ONLY
14097     /* In sve_probe_page, we assume TBI is enabled. */
14098     tcg_debug_assert(dc->tbid & 1);
14099 #endif
14100 
14101     /* Single step state. The code-generation logic here is:
14102      *  SS_ACTIVE == 0:
14103      *   generate code with no special handling for single-stepping (except
14104      *   that anything that can make us go to SS_ACTIVE == 1 must end the TB;
14105      *   this happens anyway because those changes are all system register or
14106      *   PSTATE writes).
14107      *  SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
14108      *   emit code for one insn
14109      *   emit code to clear PSTATE.SS
14110      *   emit code to generate software step exception for completed step
14111      *   end TB (as usual for having generated an exception)
14112      *  SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
14113      *   emit code to generate a software step exception
14114      *   end the TB
14115      */
14116     dc->ss_active = EX_TBFLAG_ANY(tb_flags, SS_ACTIVE);
14117     dc->pstate_ss = EX_TBFLAG_ANY(tb_flags, PSTATE__SS);
14118     dc->is_ldex = false;
14119 
14120     /* Bound the number of insns to execute to those left on the page.  */
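          /*
           * Illustrative: with 4KiB pages and pc_first at page offset 0xff8,
           * -(pc_first | TARGET_PAGE_MASK) == 8, so bound == 2 insns.
           */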
14121     bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
14122 
14123     /* If architectural single step active, limit to 1.  */
14124     if (dc->ss_active) {
14125         bound = 1;
14126     }
14127     dc->base.max_insns = MIN(dc->base.max_insns, bound);
14128 }
14129 
14130 static void aarch64_tr_tb_start(DisasContextBase *db, CPUState *cpu)
14131 {
14132 }
14133 
14134 static void aarch64_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
14135 {
14136     DisasContext *dc = container_of(dcbase, DisasContext, base);
14137     target_ulong pc_arg = dc->base.pc_next;
14138 
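          /*
           * For PC-relative TBs, only the page offset of the PC is recorded
           * in the insn_start op; the page base is recovered from the CPU
           * state when restoring state from the TB.
           */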
14139     if (tb_cflags(dcbase->tb) & CF_PCREL) {
14140         pc_arg &= ~TARGET_PAGE_MASK;
14141     }
14142     tcg_gen_insn_start(pc_arg, 0, 0);
14143     dc->insn_start = tcg_last_op();
14144 }
14145 
14146 static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
14147 {
14148     DisasContext *s = container_of(dcbase, DisasContext, base);
14149     CPUARMState *env = cpu->env_ptr;
14150     uint64_t pc = s->base.pc_next;
14151     uint32_t insn;
14152 
14153     /* Singlestep exceptions have the highest priority. */
14154     if (s->ss_active && !s->pstate_ss) {
14155         /* Singlestep state is Active-pending.
14156          * If we're in this state at the start of a TB then either
14157          *  a) we just took an exception to an EL which is being debugged
14158          *     and this is the first insn in the exception handler
14159          *  b) debug exceptions were masked and we just unmasked them
14160          *     without changing EL (eg by clearing PSTATE.D)
14161          * In either case we're going to take a swstep exception in the
14162          * "did not step an insn" case, and so the syndrome ISV and EX
14163          * bits should be zero.
14164          */
14165         assert(s->base.num_insns == 1);
14166         gen_swstep_exception(s, 0, 0);
14167         s->base.is_jmp = DISAS_NORETURN;
14168         s->base.pc_next = pc + 4;
14169         return;
14170     }
14171 
14172     if (pc & 3) {
14173         /*
14174          * PC alignment fault.  This has priority over the instruction abort
14175          * that we would receive from a translation fault via arm_ldl_code.
14176          * This should only be possible after an indirect branch, at the
14177          * start of the TB.
14178          */
14179         assert(s->base.num_insns == 1);
14180         gen_helper_exception_pc_alignment(cpu_env, tcg_constant_tl(pc));
14181         s->base.is_jmp = DISAS_NORETURN;
14182         s->base.pc_next = QEMU_ALIGN_UP(pc, 4);
14183         return;
14184     }
14185 
14186     s->pc_curr = pc;
14187     insn = arm_ldl_code(env, &s->base, pc, s->sctlr_b);
14188     s->insn = insn;
14189     s->base.pc_next = pc + 4;
14190 
14191     s->fp_access_checked = false;
14192     s->sve_access_checked = false;
14193 
14194     if (s->pstate_il) {
14195         /*
14196          * Illegal execution state. This has priority over BTI
14197          * exceptions, but comes after instruction abort exceptions.
14198          */
14199         gen_exception_insn(s, 0, EXCP_UDEF, syn_illegalstate());
14200         return;
14201     }
14202 
14203     if (dc_isar_feature(aa64_bti, s)) {
14204         if (s->base.num_insns == 1) {
14205             /*
14206              * At the first insn of the TB, compute s->guarded_page.
14207              * We delayed computing this until successfully reading
14208              * the first insn of the TB, above.  This (mostly) ensures
14209              * that the softmmu tlb entry has been populated, and the
14210              * page table GP bit is available.
14211              *
14212              * Note that we need to compute this even if btype == 0,
14213              * because this value is used for BR instructions later
14214              * where ENV is not available.
14215              */
14216             s->guarded_page = is_guarded_page(env, s);
14217 
14218             /* First insn can have btype set to non-zero.  */
14219             tcg_debug_assert(s->btype >= 0);
14220 
14221             /*
14222              * Note that the Branch Target Exception has fairly high
14223              * priority -- below debugging exceptions but above almost
14224              * everything else.  This allows us to handle this now
14225              * instead of waiting until the insn is otherwise decoded.
14226              */
14227             if (s->btype != 0
14228                 && s->guarded_page
14229                 && !btype_destination_ok(insn, s->bt, s->btype)) {
14230                 gen_exception_insn(s, 0, EXCP_UDEF, syn_btitrap(s->btype));
14231                 return;
14232             }
14233         } else {
14234             /* Not the first insn: btype must be 0.  */
14235             tcg_debug_assert(s->btype == 0);
14236         }
14237     }
14238 
14239     s->is_nonstreaming = false;
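          /*
           * The generated FA64 decoder flags insns that are illegal in
           * streaming SVE mode by setting s->is_nonstreaming via
           * trans_FAIL() above.
           */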
14240     if (s->sme_trap_nonstreaming) {
14241         disas_sme_fa64(s, insn);
14242     }
14243 
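          /*
           * Try the generated decodetree decoders first; fall back to the
           * legacy hand-written decoder for insns not yet converted.
           */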
14244     if (!disas_a64(s, insn) &&
14245         !disas_sme(s, insn) &&
14246         !disas_sve(s, insn)) {
14247         disas_a64_legacy(s, insn);
14248     }
14249 
14250     /*
14251      * After execution of most insns, btype is reset to 0.
14252      * Note that we set btype == -1 when the insn sets btype.
14253      */
14254     if (s->btype > 0 && s->base.is_jmp != DISAS_NORETURN) {
14255         reset_btype(s);
14256     }
14257 }
14258 
14259 static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
14260 {
14261     DisasContext *dc = container_of(dcbase, DisasContext, base);
14262 
14263     if (unlikely(dc->ss_active)) {
14264         /* Note that this means single-stepping a WFI doesn't halt the CPU.
14265          * For conditional branch insns this is harmless unreachable code, as
14266          * gen_goto_tb() has already handled emitting the debug exception
14267          * (and thus a tb-jump is not possible when single-stepping).
14268          */
14269         switch (dc->base.is_jmp) {
14270         default:
14271             gen_a64_update_pc(dc, 4);
14272             /* fall through */
14273         case DISAS_EXIT:
14274         case DISAS_JUMP:
14275             gen_step_complete_exception(dc);
14276             break;
14277         case DISAS_NORETURN:
14278             break;
14279         }
14280     } else {
14281         switch (dc->base.is_jmp) {
14282         case DISAS_NEXT:
14283         case DISAS_TOO_MANY:
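                  /* Chain to the next TB, advancing the PC past this insn */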
14284             gen_goto_tb(dc, 1, 4);
14285             break;
14286         default:
14287         case DISAS_UPDATE_EXIT:
14288             gen_a64_update_pc(dc, 4);
14289             /* fall through */
14290         case DISAS_EXIT:
14291             tcg_gen_exit_tb(NULL, 0);
14292             break;
14293         case DISAS_UPDATE_NOCHAIN:
14294             gen_a64_update_pc(dc, 4);
14295             /* fall through */
14296         case DISAS_JUMP:
14297             tcg_gen_lookup_and_goto_ptr();
14298             break;
14299         case DISAS_NORETURN:
14300         case DISAS_SWI:
14301             break;
14302         case DISAS_WFE:
14303             gen_a64_update_pc(dc, 4);
14304             gen_helper_wfe(cpu_env);
14305             break;
14306         case DISAS_YIELD:
14307             gen_a64_update_pc(dc, 4);
14308             gen_helper_yield(cpu_env);
14309             break;
14310         case DISAS_WFI:
14311             /*
14312              * This is a special case because we don't want to just halt
14313              * the CPU if trying to debug across a WFI.
14314              */
14315             gen_a64_update_pc(dc, 4);
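                  /*
                   * The helper argument is the insn length, used when
                   * constructing the syndrome if the WFI traps.
                   */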
14316             gen_helper_wfi(cpu_env, tcg_constant_i32(4));
14317             /*
14318              * The helper doesn't necessarily throw an exception, but we
14319              * must go back to the main loop to check for interrupts anyway.
14320              */
14321             tcg_gen_exit_tb(NULL, 0);
14322             break;
14323         }
14324     }
14325 }
14326 
14327 static void aarch64_tr_disas_log(const DisasContextBase *dcbase,
14328                                  CPUState *cpu, FILE *logfile)
14329 {
14330     DisasContext *dc = container_of(dcbase, DisasContext, base);
14331 
14332     fprintf(logfile, "IN: %s\n", lookup_symbol(dc->base.pc_first));
14333     target_disas(logfile, cpu, dc->base.pc_first, dc->base.tb->size);
14334 }
14335 
14336 const TranslatorOps aarch64_translator_ops = {
14337     .init_disas_context = aarch64_tr_init_disas_context,
14338     .tb_start           = aarch64_tr_tb_start,
14339     .insn_start         = aarch64_tr_insn_start,
14340     .translate_insn     = aarch64_tr_translate_insn,
14341     .tb_stop            = aarch64_tr_tb_stop,
14342     .disas_log          = aarch64_tr_disas_log,
14343 };
14344