/*
 * Initial TCG Implementation for aarch64
 *
 * Copyright (c) 2013 Huawei Technologies Duesseldorf GmbH
 * Written by Claudio Fontana
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * (at your option) any later version.
 *
 * See the COPYING file in the top-level directory for details.
 */

#include "qemu/bitops.h"

/* Used for function call generation. */
#define TCG_REG_CALL_STACK              TCG_REG_SP
#define TCG_TARGET_STACK_ALIGN          16
#define TCG_TARGET_CALL_STACK_OFFSET    0
#define TCG_TARGET_CALL_ARG_I32         TCG_CALL_ARG_NORMAL
#define TCG_TARGET_CALL_ARG_I64         TCG_CALL_ARG_NORMAL
#ifdef CONFIG_DARWIN
# define TCG_TARGET_CALL_ARG_I128       TCG_CALL_ARG_NORMAL
#else
# define TCG_TARGET_CALL_ARG_I128       TCG_CALL_ARG_EVEN
#endif
#define TCG_TARGET_CALL_RET_I128        TCG_CALL_RET_NORMAL

/* We're going to re-use TCGType in setting the SF bit, which controls
   the size of the operation performed.  If we know the values match, it
   makes things much cleaner.  */
QEMU_BUILD_BUG_ON(TCG_TYPE_I32 != 0 || TCG_TYPE_I64 != 1);

#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
    "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
    "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23",
    "x24", "x25", "x26", "x27", "x28", "fp", "x30", "sp",

    "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7",
    "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15",
    "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23",
43    "v24", "v25", "v26", "v27", "v28", "fp", "v30", "v31",
};
#endif /* CONFIG_DEBUG_TCG */

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_X20, TCG_REG_X21, TCG_REG_X22, TCG_REG_X23,
    TCG_REG_X24, TCG_REG_X25, TCG_REG_X26, TCG_REG_X27,
    TCG_REG_X28, /* we will reserve this for guest_base if configured */

    TCG_REG_X8, TCG_REG_X9, TCG_REG_X10, TCG_REG_X11,
    TCG_REG_X12, TCG_REG_X13, TCG_REG_X14, TCG_REG_X15,

    TCG_REG_X0, TCG_REG_X1, TCG_REG_X2, TCG_REG_X3,
    TCG_REG_X4, TCG_REG_X5, TCG_REG_X6, TCG_REG_X7,

    /* X16 reserved as temporary */
    /* X17 reserved as temporary */
    /* X18 reserved by system */
    /* X19 reserved for AREG0 */
    /* X29 reserved as fp */
    /* X30 reserved as temporary */

    TCG_REG_V0, TCG_REG_V1, TCG_REG_V2, TCG_REG_V3,
    TCG_REG_V4, TCG_REG_V5, TCG_REG_V6, TCG_REG_V7,
    /* V8 - V15 are call-saved, and skipped.  */
    TCG_REG_V16, TCG_REG_V17, TCG_REG_V18, TCG_REG_V19,
    TCG_REG_V20, TCG_REG_V21, TCG_REG_V22, TCG_REG_V23,
    TCG_REG_V24, TCG_REG_V25, TCG_REG_V26, TCG_REG_V27,
    TCG_REG_V28, TCG_REG_V29, TCG_REG_V30, TCG_REG_V31,
};

static const int tcg_target_call_iarg_regs[8] = {
    TCG_REG_X0, TCG_REG_X1, TCG_REG_X2, TCG_REG_X3,
    TCG_REG_X4, TCG_REG_X5, TCG_REG_X6, TCG_REG_X7
};

static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
{
    tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
    tcg_debug_assert(slot >= 0 && slot <= 1);
    return TCG_REG_X0 + slot;
}

#define TCG_REG_TMP0 TCG_REG_X16
#define TCG_REG_TMP1 TCG_REG_X17
#define TCG_REG_TMP2 TCG_REG_X30
#define TCG_VEC_TMP0 TCG_REG_V31

#define TCG_REG_GUEST_BASE TCG_REG_X28

static bool reloc_pc26(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t offset = target - src_rx;

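    /* tcg_insn_unit is 4 bytes wide on aarch64, so this pointer
       difference is already in units of instructions, which is
       exactly what the branch imm26/imm19/imm14 fields encode.  */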
    if (offset == sextract64(offset, 0, 26)) {
        /* read instruction, mask away previous PC_REL26 parameter contents,
           set the proper offset, then write back the instruction. */
        *src_rw = deposit32(*src_rw, 0, 26, offset);
        return true;
    }
    return false;
}

static bool reloc_pc19(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t offset = target - src_rx;

    if (offset == sextract64(offset, 0, 19)) {
        *src_rw = deposit32(*src_rw, 5, 19, offset);
        return true;
    }
    return false;
}

static bool reloc_pc14(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t offset = target - src_rx;

    if (offset == sextract64(offset, 0, 14)) {
        *src_rw = deposit32(*src_rw, 5, 14, offset);
        return true;
    }
    return false;
}

static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    tcg_debug_assert(addend == 0);
    switch (type) {
    case R_AARCH64_JUMP26:
    case R_AARCH64_CALL26:
        return reloc_pc26(code_ptr, (const tcg_insn_unit *)value);
    case R_AARCH64_CONDBR19:
        return reloc_pc19(code_ptr, (const tcg_insn_unit *)value);
    case R_AARCH64_TSTBR14:
        return reloc_pc14(code_ptr, (const tcg_insn_unit *)value);
    default:
        g_assert_not_reached();
    }
}

#define TCG_CT_CONST_AIMM 0x100
#define TCG_CT_CONST_LIMM 0x200
#define TCG_CT_CONST_ZERO 0x400
#define TCG_CT_CONST_MONE 0x800
#define TCG_CT_CONST_ORRI 0x1000
#define TCG_CT_CONST_ANDI 0x2000
#define TCG_CT_CONST_CMP  0x4000

#define ALL_GENERAL_REGS  0xffffffffu
#define ALL_VECTOR_REGS   0xffffffff00000000ull

/* Match a constant valid for addition (12-bit, optionally shifted).  */
static inline bool is_aimm(uint64_t val)
{
    return (val & ~0xfff) == 0 || (val & ~0xfff000) == 0;
}

/* Match a constant valid for logical operations.  */
static inline bool is_limm(uint64_t val)
{
    /* Taking a simplified view of the logical immediates for now, ignoring
       the replication that can happen across the field.  Match bit patterns
       of the forms
           0....01....1
           0..01..10..0
       and their inverses.  */
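    /* For example, 0x0ff0 is accepted: adding its lowest set bit
       (val += val & -val) yields 0x1000, a power of 2.  By contrast,
       0x0f0f has two runs of ones, and the same step yields 0x0f10,
       which is not a power of 2.  */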

    /* Make things easier below, by testing the form with msb clear. */
    if ((int64_t)val < 0) {
        val = ~val;
    }
    if (val == 0) {
        return false;
    }
    val += val & -val;
    return (val & (val - 1)) == 0;
}

/* Return true if v16 is a valid 16-bit shifted immediate.  */
static bool is_shimm16(uint16_t v16, int *cmode, int *imm8)
{
    if (v16 == (v16 & 0xff)) {
        *cmode = 0x8;
        *imm8 = v16 & 0xff;
        return true;
    } else if (v16 == (v16 & 0xff00)) {
        *cmode = 0xa;
        *imm8 = v16 >> 8;
        return true;
    }
    return false;
}

/* Return true if v32 is a valid 32-bit shifted immediate.  */
static bool is_shimm32(uint32_t v32, int *cmode, int *imm8)
{
    if (v32 == (v32 & 0xff)) {
        *cmode = 0x0;
        *imm8 = v32 & 0xff;
        return true;
    } else if (v32 == (v32 & 0xff00)) {
        *cmode = 0x2;
        *imm8 = (v32 >> 8) & 0xff;
        return true;
    } else if (v32 == (v32 & 0xff0000)) {
        *cmode = 0x4;
        *imm8 = (v32 >> 16) & 0xff;
        return true;
    } else if (v32 == (v32 & 0xff000000)) {
        *cmode = 0x6;
        *imm8 = v32 >> 24;
        return true;
    }
    return false;
}

/* Return true if v32 is a valid 32-bit shifting ones immediate.  */
static bool is_soimm32(uint32_t v32, int *cmode, int *imm8)
{
    if ((v32 & 0xffff00ff) == 0xff) {
        *cmode = 0xc;
        *imm8 = (v32 >> 8) & 0xff;
        return true;
    } else if ((v32 & 0xff00ffff) == 0xffff) {
        *cmode = 0xd;
        *imm8 = (v32 >> 16) & 0xff;
        return true;
    }
    return false;
}

/* Return true if v32 is a valid float32 immediate.  */
static bool is_fimm32(uint32_t v32, int *cmode, int *imm8)
{
    if (extract32(v32, 0, 19) == 0
        && (extract32(v32, 25, 6) == 0x20
            || extract32(v32, 25, 6) == 0x1f)) {
        *cmode = 0xf;
        *imm8 = (extract32(v32, 31, 1) << 7)
              | (extract32(v32, 25, 1) << 6)
              | extract32(v32, 19, 6);
        return true;
    }
    return false;
}

/* Return true if v64 is a valid float64 immediate.  */
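/* These are the FMOV immediates +/-(16..31)/16 * 2^(-3..4); e.g. 1.0
   (0x3ff0000000000000) matches and yields imm8 0x70.  */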
static bool is_fimm64(uint64_t v64, int *cmode, int *imm8)
{
    if (extract64(v64, 0, 48) == 0
        && (extract64(v64, 54, 9) == 0x100
            || extract64(v64, 54, 9) == 0x0ff)) {
        *cmode = 0xf;
        *imm8 = (extract64(v64, 63, 1) << 7)
              | (extract64(v64, 54, 1) << 6)
              | extract64(v64, 48, 6);
        return true;
    }
    return false;
}

/*
 * Return non-zero if v32 can be formed by MOVI+ORR.
 * Place the parameters for MOVI in (cmode, imm8).
 * Return the cmode for ORR; the imm8 can be had via extraction from v32.
 */
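/*
 * For example, v32 = 0x00ff00ff: masking out the byte at bits 16..23
 * leaves 0x000000ff, which MOVI loads with cmode 0x0; the caller then
 * emits ORR with cmode 4 and imm8 0xff to restore bits 16..23.
 */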
static int is_shimm32_pair(uint32_t v32, int *cmode, int *imm8)
{
    int i;

    for (i = 6; i > 0; i -= 2) {
        /* Mask out one byte we can add with ORR.  */
        uint32_t tmp = v32 & ~(0xffu << (i * 4));
        if (is_shimm32(tmp, cmode, imm8) ||
            is_soimm32(tmp, cmode, imm8)) {
            break;
        }
    }
    return i;
}

/* Return true if V is a valid 16-bit or 32-bit shifted immediate.  */
static bool is_shimm1632(uint32_t v32, int *cmode, int *imm8)
{
    if (v32 == deposit32(v32, 16, 16, v32)) {
        return is_shimm16(v32, cmode, imm8);
    } else {
        return is_shimm32(v32, cmode, imm8);
    }
}

static bool tcg_target_const_match(int64_t val, int ct,
                                   TCGType type, TCGCond cond, int vece)
{
    if (ct & TCG_CT_CONST) {
        return 1;
    }
    if (type == TCG_TYPE_I32) {
        val = (int32_t)val;
    }

    if (ct & TCG_CT_CONST_CMP) {
        if (is_tst_cond(cond)) {
            ct |= TCG_CT_CONST_LIMM;
        } else {
            ct |= TCG_CT_CONST_AIMM;
        }
    }

    if ((ct & TCG_CT_CONST_AIMM) && (is_aimm(val) || is_aimm(-val))) {
        return 1;
    }
    if ((ct & TCG_CT_CONST_LIMM) && is_limm(val)) {
        return 1;
    }
    if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return 1;
    }
    if ((ct & TCG_CT_CONST_MONE) && val == -1) {
        return 1;
    }

    switch (ct & (TCG_CT_CONST_ORRI | TCG_CT_CONST_ANDI)) {
    case 0:
        break;
    case TCG_CT_CONST_ANDI:
        val = ~val;
        /* fallthru */
    case TCG_CT_CONST_ORRI:
        if (val == deposit64(val, 32, 32, val)) {
            int cmode, imm8;
            return is_shimm1632(val, &cmode, &imm8);
        }
        break;
    default:
        /* Both bits should not be set for the same insn.  */
        g_assert_not_reached();
    }

    return 0;
}

enum aarch64_cond_code {
    COND_EQ = 0x0,
    COND_NE = 0x1,
    COND_CS = 0x2,     /* Unsigned greater or equal */
    COND_HS = COND_CS, /* ALIAS greater or equal */
    COND_CC = 0x3,     /* Unsigned less than */
    COND_LO = COND_CC, /* ALIAS Lower */
    COND_MI = 0x4,     /* Negative */
    COND_PL = 0x5,     /* Zero or greater */
    COND_VS = 0x6,     /* Overflow */
    COND_VC = 0x7,     /* No overflow */
    COND_HI = 0x8,     /* Unsigned greater than */
    COND_LS = 0x9,     /* Unsigned less or equal */
    COND_GE = 0xa,
    COND_LT = 0xb,
    COND_GT = 0xc,
    COND_LE = 0xd,
    COND_AL = 0xe,
    COND_NV = 0xf, /* behaves like COND_AL here */
};

static const enum aarch64_cond_code tcg_cond_to_aarch64[] = {
    [TCG_COND_EQ] = COND_EQ,
    [TCG_COND_NE] = COND_NE,
    [TCG_COND_LT] = COND_LT,
    [TCG_COND_GE] = COND_GE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_GT,
    /* unsigned */
    [TCG_COND_LTU] = COND_LO,
    [TCG_COND_GTU] = COND_HI,
    [TCG_COND_GEU] = COND_HS,
    [TCG_COND_LEU] = COND_LS,
    /* bit test */
    [TCG_COND_TSTEQ] = COND_EQ,
    [TCG_COND_TSTNE] = COND_NE,
};

typedef enum {
    LDST_ST = 0,    /* store */
    LDST_LD = 1,    /* load */
    LDST_LD_S_X = 2,  /* load and sign-extend into Xt */
    LDST_LD_S_W = 3,  /* load and sign-extend into Wt */
} AArch64LdstType;

/* We encode the format of the insn into the beginning of the name, so that
   we can have the preprocessor help "typecheck" the insn vs the output
   function.  Arm didn't provide us with nice names for the formats, so we
   use the section number of the architecture reference manual in which the
   instruction group is described.  */
typedef enum {
    /* Compare and branch (immediate).  */
    I3201_CBZ       = 0x34000000,
    I3201_CBNZ      = 0x35000000,

    /* Conditional branch (immediate).  */
    I3202_B_C       = 0x54000000,

    /* Test and branch (immediate).  */
    I3205_TBZ       = 0x36000000,
    I3205_TBNZ      = 0x37000000,

    /* Unconditional branch (immediate).  */
    I3206_B         = 0x14000000,
    I3206_BL        = 0x94000000,

    /* Unconditional branch (register).  */
    I3207_BR        = 0xd61f0000,
    I3207_BLR       = 0xd63f0000,
    I3207_RET       = 0xd65f0000,

    /* AdvSIMD load/store single structure.  */
    I3303_LD1R      = 0x0d40c000,

    /* Load register (literal), for loading a value at a pc-relative offset */
    I3305_LDR       = 0x58000000,
    I3305_LDR_v64   = 0x5c000000,
    I3305_LDR_v128  = 0x9c000000,

    /* Load/store exclusive. */
    I3306_LDXP      = 0xc8600000,
    I3306_STXP      = 0xc8200000,

    /* Load/store register.  Described here as 3.3.12, but the helper
       that emits them can transform to 3.3.10 or 3.3.13.  */
    I3312_STRB      = 0x38000000 | LDST_ST << 22 | MO_8 << 30,
    I3312_STRH      = 0x38000000 | LDST_ST << 22 | MO_16 << 30,
    I3312_STRW      = 0x38000000 | LDST_ST << 22 | MO_32 << 30,
    I3312_STRX      = 0x38000000 | LDST_ST << 22 | MO_64 << 30,

    I3312_LDRB      = 0x38000000 | LDST_LD << 22 | MO_8 << 30,
    I3312_LDRH      = 0x38000000 | LDST_LD << 22 | MO_16 << 30,
    I3312_LDRW      = 0x38000000 | LDST_LD << 22 | MO_32 << 30,
    I3312_LDRX      = 0x38000000 | LDST_LD << 22 | MO_64 << 30,

    I3312_LDRSBW    = 0x38000000 | LDST_LD_S_W << 22 | MO_8 << 30,
    I3312_LDRSHW    = 0x38000000 | LDST_LD_S_W << 22 | MO_16 << 30,

    I3312_LDRSBX    = 0x38000000 | LDST_LD_S_X << 22 | MO_8 << 30,
    I3312_LDRSHX    = 0x38000000 | LDST_LD_S_X << 22 | MO_16 << 30,
    I3312_LDRSWX    = 0x38000000 | LDST_LD_S_X << 22 | MO_32 << 30,

    I3312_LDRVS     = 0x3c000000 | LDST_LD << 22 | MO_32 << 30,
    I3312_STRVS     = 0x3c000000 | LDST_ST << 22 | MO_32 << 30,

    I3312_LDRVD     = 0x3c000000 | LDST_LD << 22 | MO_64 << 30,
    I3312_STRVD     = 0x3c000000 | LDST_ST << 22 | MO_64 << 30,

    I3312_LDRVQ     = 0x3c000000 | 3 << 22 | 0 << 30,
    I3312_STRVQ     = 0x3c000000 | 2 << 22 | 0 << 30,

    I3312_TO_I3310  = 0x00200800,
    I3312_TO_I3313  = 0x01000000,

    /* Load/store register pair instructions.  */
    I3314_LDP       = 0x28400000,
    I3314_STP       = 0x28000000,

    /* Add/subtract immediate instructions.  */
    I3401_ADDI      = 0x11000000,
    I3401_ADDSI     = 0x31000000,
    I3401_SUBI      = 0x51000000,
    I3401_SUBSI     = 0x71000000,

    /* Bitfield instructions.  */
    I3402_BFM       = 0x33000000,
    I3402_SBFM      = 0x13000000,
    I3402_UBFM      = 0x53000000,

    /* Extract instruction.  */
    I3403_EXTR      = 0x13800000,

    /* Logical immediate instructions.  */
    I3404_ANDI      = 0x12000000,
    I3404_ORRI      = 0x32000000,
    I3404_EORI      = 0x52000000,
    I3404_ANDSI     = 0x72000000,

    /* Move wide immediate instructions.  */
    I3405_MOVN      = 0x12800000,
    I3405_MOVZ      = 0x52800000,
    I3405_MOVK      = 0x72800000,

    /* PC relative addressing instructions.  */
    I3406_ADR       = 0x10000000,
    I3406_ADRP      = 0x90000000,

    /* Add/subtract extended register instructions. */
    I3501_ADD       = 0x0b200000,

    /* Add/subtract shifted register instructions (without a shift).  */
    I3502_ADD       = 0x0b000000,
    I3502_ADDS      = 0x2b000000,
    I3502_SUB       = 0x4b000000,
    I3502_SUBS      = 0x6b000000,

    /* Add/subtract shifted register instructions (with a shift).  */
    I3502S_ADD_LSL  = I3502_ADD,

    /* Add/subtract with carry instructions.  */
    I3503_ADC       = 0x1a000000,
    I3503_ADCS      = 0x3a000000,
    I3503_SBC       = 0x5a000000,
    I3503_SBCS      = 0x7a000000,

    /* Conditional select instructions.  */
    I3506_CSEL      = 0x1a800000,
    I3506_CSINC     = 0x1a800400,
    I3506_CSINV     = 0x5a800000,
    I3506_CSNEG     = 0x5a800400,

    /* Data-processing (1 source) instructions.  */
    I3507_CLZ       = 0x5ac01000,
    I3507_RBIT      = 0x5ac00000,
    I3507_REV       = 0x5ac00000, /* + size << 10 */

    /* Data-processing (2 source) instructions.  */
    I3508_LSLV      = 0x1ac02000,
    I3508_LSRV      = 0x1ac02400,
    I3508_ASRV      = 0x1ac02800,
    I3508_RORV      = 0x1ac02c00,
    I3508_SMULH     = 0x9b407c00,
    I3508_UMULH     = 0x9bc07c00,
    I3508_UDIV      = 0x1ac00800,
    I3508_SDIV      = 0x1ac00c00,

    /* Data-processing (3 source) instructions.  */
    I3509_MADD      = 0x1b000000,
    I3509_MSUB      = 0x1b008000,

    /* Logical shifted register instructions (without a shift).  */
    I3510_AND       = 0x0a000000,
    I3510_BIC       = 0x0a200000,
    I3510_ORR       = 0x2a000000,
    I3510_ORN       = 0x2a200000,
    I3510_EOR       = 0x4a000000,
    I3510_EON       = 0x4a200000,
    I3510_ANDS      = 0x6a000000,

    /* Logical shifted register instructions (with a shift).  */
    I3502S_AND_LSR  = I3510_AND | (1 << 22),

    /* AdvSIMD copy */
    I3605_DUP      = 0x0e000400,
    I3605_INS      = 0x4e001c00,
    I3605_UMOV     = 0x0e003c00,

    /* AdvSIMD modified immediate */
    I3606_MOVI      = 0x0f000400,
    I3606_MVNI      = 0x2f000400,
    I3606_BIC       = 0x2f001400,
    I3606_ORR       = 0x0f001400,

    /* AdvSIMD scalar shift by immediate */
    I3609_SSHR      = 0x5f000400,
    I3609_SSRA      = 0x5f001400,
    I3609_SHL       = 0x5f005400,
    I3609_USHR      = 0x7f000400,
    I3609_USRA      = 0x7f001400,
    I3609_SLI       = 0x7f005400,

    /* AdvSIMD scalar three same */
    I3611_SQADD     = 0x5e200c00,
    I3611_SQSUB     = 0x5e202c00,
    I3611_CMGT      = 0x5e203400,
    I3611_CMGE      = 0x5e203c00,
    I3611_SSHL      = 0x5e204400,
    I3611_ADD       = 0x5e208400,
    I3611_CMTST     = 0x5e208c00,
    I3611_UQADD     = 0x7e200c00,
    I3611_UQSUB     = 0x7e202c00,
    I3611_CMHI      = 0x7e203400,
    I3611_CMHS      = 0x7e203c00,
    I3611_USHL      = 0x7e204400,
    I3611_SUB       = 0x7e208400,
    I3611_CMEQ      = 0x7e208c00,

    /* AdvSIMD scalar two-reg misc */
    I3612_CMGT0     = 0x5e208800,
    I3612_CMEQ0     = 0x5e209800,
    I3612_CMLT0     = 0x5e20a800,
    I3612_ABS       = 0x5e20b800,
    I3612_CMGE0     = 0x7e208800,
    I3612_CMLE0     = 0x7e209800,
    I3612_NEG       = 0x7e20b800,

    /* AdvSIMD shift by immediate */
    I3614_SSHR      = 0x0f000400,
    I3614_SSRA      = 0x0f001400,
    I3614_SHL       = 0x0f005400,
    I3614_SLI       = 0x2f005400,
    I3614_USHR      = 0x2f000400,
    I3614_USRA      = 0x2f001400,

    /* AdvSIMD three same.  */
    I3616_ADD       = 0x0e208400,
    I3616_AND       = 0x0e201c00,
    I3616_BIC       = 0x0e601c00,
    I3616_BIF       = 0x2ee01c00,
    I3616_BIT       = 0x2ea01c00,
    I3616_BSL       = 0x2e601c00,
    I3616_EOR       = 0x2e201c00,
    I3616_MUL       = 0x0e209c00,
    I3616_ORR       = 0x0ea01c00,
    I3616_ORN       = 0x0ee01c00,
    I3616_SUB       = 0x2e208400,
    I3616_CMGT      = 0x0e203400,
    I3616_CMGE      = 0x0e203c00,
    I3616_CMTST     = 0x0e208c00,
    I3616_CMHI      = 0x2e203400,
    I3616_CMHS      = 0x2e203c00,
    I3616_CMEQ      = 0x2e208c00,
    I3616_SMAX      = 0x0e206400,
    I3616_SMIN      = 0x0e206c00,
    I3616_SSHL      = 0x0e204400,
    I3616_SQADD     = 0x0e200c00,
    I3616_SQSUB     = 0x0e202c00,
    I3616_UMAX      = 0x2e206400,
    I3616_UMIN      = 0x2e206c00,
    I3616_UQADD     = 0x2e200c00,
    I3616_UQSUB     = 0x2e202c00,
    I3616_USHL      = 0x2e204400,

    /* AdvSIMD two-reg misc.  */
    I3617_CMGT0     = 0x0e208800,
    I3617_CMEQ0     = 0x0e209800,
    I3617_CMLT0     = 0x0e20a800,
    I3617_CMGE0     = 0x2e208800,
    I3617_CMLE0     = 0x2e209800,
    I3617_NOT       = 0x2e205800,
    I3617_ABS       = 0x0e20b800,
    I3617_NEG       = 0x2e20b800,

    /* System instructions.  */
    NOP             = 0xd503201f,
    DMB_ISH         = 0xd50338bf,
    DMB_LD          = 0x00000100,
    DMB_ST          = 0x00000200,

    BTI_C           = 0xd503245f,
    BTI_J           = 0xd503249f,
    BTI_JC          = 0xd50324df,
} AArch64Insn;

static inline uint32_t tcg_in32(TCGContext *s)
{
    uint32_t v = *(uint32_t *)s->code_ptr;
    return v;
}

/* Emit an opcode with "type-checking" of the format.  */
#define tcg_out_insn(S, FMT, OP, ...) \
    glue(tcg_out_insn_,FMT)(S, glue(glue(glue(I,FMT),_),OP), ## __VA_ARGS__)
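/* E.g. tcg_out_insn(s, 3401, ADDI, ...) pastes to
   tcg_out_insn_3401(s, I3401_ADDI, ...), so a mismatched format
   and opcode fails to compile.  */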

static void tcg_out_insn_3303(TCGContext *s, AArch64Insn insn, bool q,
                              TCGReg rt, TCGReg rn, unsigned size)
{
    tcg_out32(s, insn | (rt & 0x1f) | (rn << 5) | (size << 10) | (q << 30));
}

static void tcg_out_insn_3305(TCGContext *s, AArch64Insn insn,
                              int imm19, TCGReg rt)
{
    tcg_out32(s, insn | (imm19 & 0x7ffff) << 5 | rt);
}

static void tcg_out_insn_3306(TCGContext *s, AArch64Insn insn, TCGReg rs,
                              TCGReg rt, TCGReg rt2, TCGReg rn)
{
    tcg_out32(s, insn | rs << 16 | rt2 << 10 | rn << 5 | rt);
}

static void tcg_out_insn_3201(TCGContext *s, AArch64Insn insn, TCGType ext,
                              TCGReg rt, int imm19)
{
    tcg_out32(s, insn | ext << 31 | (imm19 & 0x7ffff) << 5 | rt);
}

static void tcg_out_insn_3202(TCGContext *s, AArch64Insn insn,
                              TCGCond c, int imm19)
{
    tcg_out32(s, insn | tcg_cond_to_aarch64[c] | (imm19 & 0x7ffff) << 5);
}

static void tcg_out_insn_3205(TCGContext *s, AArch64Insn insn,
                              TCGReg rt, int imm6, int imm14)
{
    insn |= (imm6 & 0x20) << (31 - 5);
    insn |= (imm6 & 0x1f) << 19;
    tcg_out32(s, insn | (imm14 & 0x3fff) << 5 | rt);
}

static void tcg_out_insn_3206(TCGContext *s, AArch64Insn insn, int imm26)
{
    tcg_out32(s, insn | (imm26 & 0x03ffffff));
}

static void tcg_out_insn_3207(TCGContext *s, AArch64Insn insn, TCGReg rn)
{
    tcg_out32(s, insn | rn << 5);
}

static void tcg_out_insn_3314(TCGContext *s, AArch64Insn insn,
                              TCGReg r1, TCGReg r2, TCGReg rn,
                              tcg_target_long ofs, bool pre, bool w)
{
    insn |= 1u << 31; /* ext */
    insn |= pre << 24;
    insn |= w << 23;

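    /* OFS is a byte offset; the imm7 field of LDP/STP holds ofs / 8.  */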
    tcg_debug_assert(ofs >= -0x200 && ofs < 0x200 && (ofs & 7) == 0);
    insn |= (ofs & (0x7f << 3)) << (15 - 3);

    tcg_out32(s, insn | r2 << 10 | rn << 5 | r1);
}

static void tcg_out_insn_3401(TCGContext *s, AArch64Insn insn, TCGType ext,
                              TCGReg rd, TCGReg rn, uint64_t aimm)
{
    if (aimm > 0xfff) {
        tcg_debug_assert((aimm & 0xfff) == 0);
        aimm >>= 12;
        tcg_debug_assert(aimm <= 0xfff);
        aimm |= 1 << 12;  /* apply LSL 12 */
    }
    tcg_out32(s, insn | ext << 31 | aimm << 10 | rn << 5 | rd);
}

/* This function can be used for both 3.4.2 (Bitfield) and 3.4.4
   (Logical immediate).  Both insn groups have N, IMMR and IMMS fields
   that feed the DecodeBitMasks pseudo function.  */
static void tcg_out_insn_3402(TCGContext *s, AArch64Insn insn, TCGType ext,
                              TCGReg rd, TCGReg rn, int n, int immr, int imms)
{
    tcg_out32(s, insn | ext << 31 | n << 22 | immr << 16 | imms << 10
              | rn << 5 | rd);
}

#define tcg_out_insn_3404  tcg_out_insn_3402

static void tcg_out_insn_3403(TCGContext *s, AArch64Insn insn, TCGType ext,
                              TCGReg rd, TCGReg rn, TCGReg rm, int imms)
{
    tcg_out32(s, insn | ext << 31 | ext << 22 | rm << 16 | imms << 10
              | rn << 5 | rd);
}

/* This function is used for the Move (wide immediate) instruction group.
   Note that SHIFT is a full shift count, not the 2 bit HW field. */
static void tcg_out_insn_3405(TCGContext *s, AArch64Insn insn, TCGType ext,
                              TCGReg rd, uint16_t half, unsigned shift)
{
    tcg_debug_assert((shift & ~0x30) == 0);
    tcg_out32(s, insn | ext << 31 | shift << (21 - 4) | half << 5 | rd);
}

static void tcg_out_insn_3406(TCGContext *s, AArch64Insn insn,
                              TCGReg rd, int64_t disp)
{
    tcg_out32(s, insn | (disp & 3) << 29 | (disp & 0x1ffffc) << (5 - 2) | rd);
}

static inline void tcg_out_insn_3501(TCGContext *s, AArch64Insn insn,
                                     TCGType sf, TCGReg rd, TCGReg rn,
                                     TCGReg rm, int opt, int imm3)
{
    tcg_out32(s, insn | sf << 31 | rm << 16 | opt << 13 |
              imm3 << 10 | rn << 5 | rd);
}

/* This function is for 3.5.2 (Add/subtract shifted register), for
   the rare occasion when we actually want to supply a shift amount.  */
static inline void tcg_out_insn_3502S(TCGContext *s, AArch64Insn insn,
                                      TCGType ext, TCGReg rd, TCGReg rn,
                                      TCGReg rm, int imm6)
{
    tcg_out32(s, insn | ext << 31 | rm << 16 | imm6 << 10 | rn << 5 | rd);
}

/* This function is for 3.5.2 (Add/subtract shifted register),
   and 3.5.10 (Logical shifted register), for the vast majority of cases
   when we don't want to apply a shift.  Thus it can also be used for
   3.5.3 (Add/subtract with carry) and 3.5.8 (Data processing 2 source).  */
static void tcg_out_insn_3502(TCGContext *s, AArch64Insn insn, TCGType ext,
                              TCGReg rd, TCGReg rn, TCGReg rm)
{
    tcg_out32(s, insn | ext << 31 | rm << 16 | rn << 5 | rd);
}

#define tcg_out_insn_3503  tcg_out_insn_3502
#define tcg_out_insn_3508  tcg_out_insn_3502
#define tcg_out_insn_3510  tcg_out_insn_3502

static void tcg_out_insn_3506(TCGContext *s, AArch64Insn insn, TCGType ext,
                              TCGReg rd, TCGReg rn, TCGReg rm, TCGCond c)
{
    tcg_out32(s, insn | ext << 31 | rm << 16 | rn << 5 | rd
              | tcg_cond_to_aarch64[c] << 12);
}

static void tcg_out_insn_3507(TCGContext *s, AArch64Insn insn, TCGType ext,
                              TCGReg rd, TCGReg rn)
{
    tcg_out32(s, insn | ext << 31 | rn << 5 | rd);
}

static void tcg_out_insn_3509(TCGContext *s, AArch64Insn insn, TCGType ext,
                              TCGReg rd, TCGReg rn, TCGReg rm, TCGReg ra)
{
    tcg_out32(s, insn | ext << 31 | rm << 16 | ra << 10 | rn << 5 | rd);
}

static void tcg_out_insn_3605(TCGContext *s, AArch64Insn insn, bool q,
                              TCGReg rd, TCGReg rn, int dst_idx, int src_idx)
{
    /* Note that bit 11 set means general register input.  Therefore
       we can handle both register sets with one function.  */
    tcg_out32(s, insn | q << 30 | (dst_idx << 16) | (src_idx << 11)
              | (rd & 0x1f) | (~rn & 0x20) << 6 | (rn & 0x1f) << 5);
}

static void tcg_out_insn_3606(TCGContext *s, AArch64Insn insn, bool q,
                              TCGReg rd, bool op, int cmode, uint8_t imm8)
{
    tcg_out32(s, insn | q << 30 | op << 29 | cmode << 12 | (rd & 0x1f)
              | (imm8 & 0xe0) << (16 - 5) | (imm8 & 0x1f) << 5);
}

static void tcg_out_insn_3609(TCGContext *s, AArch64Insn insn,
                              TCGReg rd, TCGReg rn, unsigned immhb)
{
    tcg_out32(s, insn | immhb << 16 | (rn & 0x1f) << 5 | (rd & 0x1f));
}

static void tcg_out_insn_3611(TCGContext *s, AArch64Insn insn,
                              unsigned size, TCGReg rd, TCGReg rn, TCGReg rm)
{
    tcg_out32(s, insn | (size << 22) | (rm & 0x1f) << 16
              | (rn & 0x1f) << 5 | (rd & 0x1f));
}

static void tcg_out_insn_3612(TCGContext *s, AArch64Insn insn,
                              unsigned size, TCGReg rd, TCGReg rn)
{
    tcg_out32(s, insn | (size << 22) | (rn & 0x1f) << 5 | (rd & 0x1f));
}

static void tcg_out_insn_3614(TCGContext *s, AArch64Insn insn, bool q,
                              TCGReg rd, TCGReg rn, unsigned immhb)
{
    tcg_out32(s, insn | q << 30 | immhb << 16
              | (rn & 0x1f) << 5 | (rd & 0x1f));
}

static void tcg_out_insn_3616(TCGContext *s, AArch64Insn insn, bool q,
                              unsigned size, TCGReg rd, TCGReg rn, TCGReg rm)
{
    tcg_out32(s, insn | q << 30 | (size << 22) | (rm & 0x1f) << 16
              | (rn & 0x1f) << 5 | (rd & 0x1f));
}

static void tcg_out_insn_3617(TCGContext *s, AArch64Insn insn, bool q,
                              unsigned size, TCGReg rd, TCGReg rn)
{
    tcg_out32(s, insn | q << 30 | (size << 22)
              | (rn & 0x1f) << 5 | (rd & 0x1f));
}

static void tcg_out_insn_3310(TCGContext *s, AArch64Insn insn,
                              TCGReg rd, TCGReg base, TCGType ext,
                              TCGReg regoff)
{
    /* Note the AArch64Insn constants above are for C3.3.12.  Adjust.  */
    tcg_out32(s, insn | I3312_TO_I3310 | regoff << 16 |
              0x4000 | ext << 13 | base << 5 | (rd & 0x1f));
}

static void tcg_out_insn_3312(TCGContext *s, AArch64Insn insn,
                              TCGReg rd, TCGReg rn, intptr_t offset)
{
    tcg_out32(s, insn | (offset & 0x1ff) << 12 | rn << 5 | (rd & 0x1f));
}

static void tcg_out_insn_3313(TCGContext *s, AArch64Insn insn,
                              TCGReg rd, TCGReg rn, uintptr_t scaled_uimm)
{
    /* Note the AArch64Insn constants above are for C3.3.12.  Adjust.  */
    tcg_out32(s, insn | I3312_TO_I3313 | scaled_uimm << 10
              | rn << 5 | (rd & 0x1f));
}

static void tcg_out_bti(TCGContext *s, AArch64Insn insn)
{
    /*
     * While BTI insns are nops on hosts without FEAT_BTI,
     * there is no point in emitting them in that case either.
     */
    if (cpuinfo & CPUINFO_BTI) {
        tcg_out32(s, insn);
    }
}

/* Register to register move using ORR (shifted register with no shift). */
static void tcg_out_movr(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rm)
{
    tcg_out_insn(s, 3510, ORR, ext, rd, TCG_REG_XZR, rm);
}

/* Register to register move using ADDI (move to/from SP).  */
static void tcg_out_movr_sp(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rn)
{
    tcg_out_insn(s, 3401, ADDI, ext, rd, rn, 0);
}

/* This function is used for the Logical (immediate) instruction group.
   The value of LIMM must satisfy IS_LIMM.  See the comment above about
   only supporting simplified logical immediates.  */
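/* For example, limm = 0xff0 gives h = 52, l = 4, hence r = 60 and
   c = 7: an 8-bit run of ones rotated right by 60, and indeed
   ror64(0xff, 60) == 0xff0.  */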
static void tcg_out_logicali(TCGContext *s, AArch64Insn insn, TCGType ext,
                             TCGReg rd, TCGReg rn, uint64_t limm)
{
    unsigned h, l, r, c;

    tcg_debug_assert(is_limm(limm));

    h = clz64(limm);
    l = ctz64(limm);
    if (l == 0) {
        r = 0;                  /* form 0....01....1 */
        c = ctz64(~limm) - 1;
        if (h == 0) {
            r = clz64(~limm);   /* form 1..10..01..1 */
            c += r;
        }
    } else {
        r = 64 - l;             /* form 1....10....0 or 0..01..10..0 */
        c = r - h - 1;
    }
    if (ext == TCG_TYPE_I32) {
        r &= 31;
        c &= 31;
    }

    tcg_out_insn_3404(s, insn, ext, rd, rn, ext, r, c);
}

static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg rd, int64_t v64)
{
    bool q = type == TCG_TYPE_V128;
    int cmode, imm8, i;

    /* Test all bytes equal first.  */
    if (vece == MO_8) {
        imm8 = (uint8_t)v64;
        tcg_out_insn(s, 3606, MOVI, q, rd, 0, 0xe, imm8);
        return;
    }

    /*
     * Test all bytes 0x00 or 0xff second.  This can match cases that
     * might otherwise take 2 or 3 insns for MO_16 or MO_32 below.
     */
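    /* E.g. 0x00ff00ff00ff00ff becomes a single MOVI with imm8 0x55.  */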
    for (i = imm8 = 0; i < 8; i++) {
        uint8_t byte = v64 >> (i * 8);
        if (byte == 0xff) {
            imm8 |= 1 << i;
        } else if (byte != 0) {
            goto fail_bytes;
        }
    }
    tcg_out_insn(s, 3606, MOVI, q, rd, 1, 0xe, imm8);
    return;
 fail_bytes:

    /*
     * Tests for various replications.  For each element width, if we
     * cannot find an expansion there's no point checking a larger
     * width because we already know by replication it cannot match.
     */
    if (vece == MO_16) {
        uint16_t v16 = v64;

        if (is_shimm16(v16, &cmode, &imm8)) {
            tcg_out_insn(s, 3606, MOVI, q, rd, 0, cmode, imm8);
            return;
        }
        if (is_shimm16(~v16, &cmode, &imm8)) {
            tcg_out_insn(s, 3606, MVNI, q, rd, 0, cmode, imm8);
            return;
        }

        /*
         * Otherwise, all remaining constants can be loaded in two insns:
         * rd = v16 & 0xff, rd |= v16 & 0xff00.
         */
        tcg_out_insn(s, 3606, MOVI, q, rd, 0, 0x8, v16 & 0xff);
        tcg_out_insn(s, 3606, ORR, q, rd, 0, 0xa, v16 >> 8);
        return;
    } else if (vece == MO_32) {
        uint32_t v32 = v64;
        uint32_t n32 = ~v32;

        if (is_shimm32(v32, &cmode, &imm8) ||
            is_soimm32(v32, &cmode, &imm8) ||
            is_fimm32(v32, &cmode, &imm8)) {
            tcg_out_insn(s, 3606, MOVI, q, rd, 0, cmode, imm8);
            return;
        }
        if (is_shimm32(n32, &cmode, &imm8) ||
            is_soimm32(n32, &cmode, &imm8)) {
            tcg_out_insn(s, 3606, MVNI, q, rd, 0, cmode, imm8);
            return;
        }

        /*
         * Restrict the set of constants to those we can load with
         * two instructions.  Others we load from the pool.
         */
        i = is_shimm32_pair(v32, &cmode, &imm8);
        if (i) {
            tcg_out_insn(s, 3606, MOVI, q, rd, 0, cmode, imm8);
            tcg_out_insn(s, 3606, ORR, q, rd, 0, i, extract32(v32, i * 4, 8));
            return;
        }
        i = is_shimm32_pair(n32, &cmode, &imm8);
        if (i) {
            tcg_out_insn(s, 3606, MVNI, q, rd, 0, cmode, imm8);
            tcg_out_insn(s, 3606, BIC, q, rd, 0, i, extract32(n32, i * 4, 8));
            return;
        }
    } else if (is_fimm64(v64, &cmode, &imm8)) {
        tcg_out_insn(s, 3606, MOVI, q, rd, 1, cmode, imm8);
        return;
    }

    /*
     * As a last resort, load from the constant pool.  Sadly there
     * is no LD1R (literal), so store the full 16-byte vector.
     */
    if (type == TCG_TYPE_V128) {
        new_pool_l2(s, R_AARCH64_CONDBR19, s->code_ptr, 0, v64, v64);
        tcg_out_insn(s, 3305, LDR_v128, 0, rd);
    } else {
        new_pool_label(s, v64, R_AARCH64_CONDBR19, s->code_ptr, 0);
        tcg_out_insn(s, 3305, LDR_v64, 0, rd);
    }
}

static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
                            TCGReg rd, TCGReg rs)
{
    int is_q = type - TCG_TYPE_V64;
    tcg_out_insn(s, 3605, DUP, is_q, rd, rs, 1 << vece, 0);
    return true;
}

static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg r, TCGReg base, intptr_t offset)
{
    TCGReg temp = TCG_REG_TMP0;

    if (offset < -0xffffff || offset > 0xffffff) {
        tcg_out_movi(s, TCG_TYPE_PTR, temp, offset);
        tcg_out_insn(s, 3502, ADD, 1, temp, temp, base);
        base = temp;
    } else {
        AArch64Insn add_insn = I3401_ADDI;

        if (offset < 0) {
            add_insn = I3401_SUBI;
            offset = -offset;
        }
        if (offset & 0xfff000) {
            tcg_out_insn_3401(s, add_insn, 1, temp, base, offset & 0xfff000);
            base = temp;
        }
        if (offset & 0xfff) {
            tcg_out_insn_3401(s, add_insn, 1, temp, base, offset & 0xfff);
            base = temp;
        }
    }
    tcg_out_insn(s, 3303, LD1R, type == TCG_TYPE_V128, r, base, vece);
    return true;
}

static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd,
                         tcg_target_long value)
{
    tcg_target_long svalue = value;
    tcg_target_long ivalue = ~value;
    tcg_target_long t0, t1, t2;
    int s0, s1;
    AArch64Insn opc;

    switch (type) {
    case TCG_TYPE_I32:
    case TCG_TYPE_I64:
        tcg_debug_assert(rd < 32);
        break;
    default:
        g_assert_not_reached();
    }

    /* For 32-bit values, discard potential garbage in value.  For 64-bit
       values within [2**31, 2**32-1], we can create smaller sequences by
       interpreting this as a negative 32-bit number, while ensuring that
       the high 32 bits are cleared by setting SF=0.  */
    if (type == TCG_TYPE_I32 || (value & ~0xffffffffull) == 0) {
        svalue = (int32_t)value;
        value = (uint32_t)value;
        ivalue = (uint32_t)ivalue;
        type = TCG_TYPE_I32;
    }

    /* Speed things up by handling the common case of small positive
       and negative values specially.  */
    if ((value & ~0xffffull) == 0) {
        tcg_out_insn(s, 3405, MOVZ, type, rd, value, 0);
        return;
    } else if ((ivalue & ~0xffffull) == 0) {
        tcg_out_insn(s, 3405, MOVN, type, rd, ivalue, 0);
        return;
    }

    /* Check for bitfield immediates.  For the benefit of 32-bit quantities,
       use the sign-extended value.  That lets us match rotated values such
       as 0xff0000ff with the same 64-bit logic matching 0xffffffffff0000ff. */
    if (is_limm(svalue)) {
        tcg_out_logicali(s, I3404_ORRI, type, rd, TCG_REG_XZR, svalue);
        return;
    }

    /* Look for host pointer values within 4G of the PC.  This happens
       often when loading pointers to QEMU's own data structures.  */
    if (type == TCG_TYPE_I64) {
        intptr_t src_rx = (intptr_t)tcg_splitwx_to_rx(s->code_ptr);
        tcg_target_long disp = value - src_rx;
        if (disp == sextract64(disp, 0, 21)) {
            tcg_out_insn(s, 3406, ADR, rd, disp);
            return;
        }
        disp = (value >> 12) - (src_rx >> 12);
        if (disp == sextract64(disp, 0, 21)) {
            tcg_out_insn(s, 3406, ADRP, rd, disp);
            if (value & 0xfff) {
                tcg_out_insn(s, 3401, ADDI, type, rd, rd, value & 0xfff);
            }
            return;
        }
    }

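    /*
     * Decompose into MOVZ/MOVN plus at most one MOVK.  For example,
     * value = 0xdead0000beef emits MOVZ rd, #0xbeef followed by
     * MOVK rd, #0xdead, lsl #32.
     */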
    /* Would it take fewer insns to begin with MOVN?  */
    if (ctpop64(value) >= 32) {
        t0 = ivalue;
        opc = I3405_MOVN;
    } else {
        t0 = value;
        opc = I3405_MOVZ;
    }
    s0 = ctz64(t0) & (63 & -16);
    t1 = t0 & ~(0xffffull << s0);
    s1 = ctz64(t1) & (63 & -16);
    t2 = t1 & ~(0xffffull << s1);
    if (t2 == 0) {
        tcg_out_insn_3405(s, opc, type, rd, t0 >> s0, s0);
        if (t1 != 0) {
            tcg_out_insn(s, 3405, MOVK, type, rd, value >> s1, s1);
        }
        return;
    }

    /* For more than 2 insns, dump it into the constant pool.  */
    new_pool_label(s, value, R_AARCH64_CONDBR19, s->code_ptr, 0);
    tcg_out_insn(s, 3305, LDR, 0, rd);
}

static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
{
    return false;
}

static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                             tcg_target_long imm)
{
    /* This function is only used for passing structs by reference. */
    g_assert_not_reached();
}

/* Define something more legible for general use.  */
#define tcg_out_ldst_r  tcg_out_insn_3310

static void tcg_out_ldst(TCGContext *s, AArch64Insn insn, TCGReg rd,
                         TCGReg rn, intptr_t offset, int lgsize)
{
    /* If the offset is naturally aligned and in range, then we can
       use the scaled uimm12 encoding */
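    /* E.g. an 8-byte access at offset 0x1008 uses scaled_uimm 0x201.  */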
    if (offset >= 0 && !(offset & ((1 << lgsize) - 1))) {
        uintptr_t scaled_uimm = offset >> lgsize;
        if (scaled_uimm <= 0xfff) {
            tcg_out_insn_3313(s, insn, rd, rn, scaled_uimm);
            return;
        }
    }

    /* Small signed offsets can use the unscaled encoding.  */
    if (offset >= -256 && offset < 256) {
        tcg_out_insn_3312(s, insn, rd, rn, offset);
        return;
    }

    /* Worst-case scenario, move offset to temp register, use reg offset.  */
    tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP0, offset);
    tcg_out_ldst_r(s, insn, rd, rn, TCG_TYPE_I64, TCG_REG_TMP0);
}

static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    if (ret == arg) {
        return true;
    }
    switch (type) {
    case TCG_TYPE_I32:
    case TCG_TYPE_I64:
        if (ret < 32 && arg < 32) {
            tcg_out_movr(s, type, ret, arg);
            break;
        } else if (ret < 32) {
            tcg_out_insn(s, 3605, UMOV, type, ret, arg, 0, 0);
            break;
        } else if (arg < 32) {
            tcg_out_insn(s, 3605, INS, 0, ret, arg, 4 << type, 0);
            break;
        }
        /* FALLTHRU */

    case TCG_TYPE_V64:
        tcg_debug_assert(ret >= 32 && arg >= 32);
        tcg_out_insn(s, 3616, ORR, 0, 0, ret, arg, arg);
        break;
    case TCG_TYPE_V128:
        tcg_debug_assert(ret >= 32 && arg >= 32);
        tcg_out_insn(s, 3616, ORR, 1, 0, ret, arg, arg);
        break;

    default:
        g_assert_not_reached();
    }
    return true;
}

static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
                       TCGReg base, intptr_t ofs)
{
    AArch64Insn insn;
    int lgsz;

    switch (type) {
    case TCG_TYPE_I32:
        insn = (ret < 32 ? I3312_LDRW : I3312_LDRVS);
        lgsz = 2;
        break;
    case TCG_TYPE_I64:
        insn = (ret < 32 ? I3312_LDRX : I3312_LDRVD);
        lgsz = 3;
        break;
    case TCG_TYPE_V64:
        insn = I3312_LDRVD;
        lgsz = 3;
        break;
    case TCG_TYPE_V128:
        insn = I3312_LDRVQ;
        lgsz = 4;
        break;
    default:
        g_assert_not_reached();
    }
    tcg_out_ldst(s, insn, ret, base, ofs, lgsz);
}

static void tcg_out_st(TCGContext *s, TCGType type, TCGReg src,
                       TCGReg base, intptr_t ofs)
{
    AArch64Insn insn;
    int lgsz;

    switch (type) {
    case TCG_TYPE_I32:
        insn = (src < 32 ? I3312_STRW : I3312_STRVS);
        lgsz = 2;
        break;
    case TCG_TYPE_I64:
        insn = (src < 32 ? I3312_STRX : I3312_STRVD);
        lgsz = 3;
        break;
    case TCG_TYPE_V64:
        insn = I3312_STRVD;
        lgsz = 3;
        break;
    case TCG_TYPE_V128:
        insn = I3312_STRVQ;
        lgsz = 4;
        break;
    default:
        g_assert_not_reached();
    }
    tcg_out_ldst(s, insn, src, base, ofs, lgsz);
}

static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                               TCGReg base, intptr_t ofs)
{
    if (type <= TCG_TYPE_I64 && val == 0) {
        tcg_out_st(s, type, TCG_REG_XZR, base, ofs);
        return true;
    }
    return false;
}

static inline void tcg_out_bfm(TCGContext *s, TCGType ext, TCGReg rd,
                               TCGReg rn, unsigned int a, unsigned int b)
{
    tcg_out_insn(s, 3402, BFM, ext, rd, rn, ext, a, b);
}

static inline void tcg_out_ubfm(TCGContext *s, TCGType ext, TCGReg rd,
                                TCGReg rn, unsigned int a, unsigned int b)
{
    tcg_out_insn(s, 3402, UBFM, ext, rd, rn, ext, a, b);
}

static inline void tcg_out_sbfm(TCGContext *s, TCGType ext, TCGReg rd,
                                TCGReg rn, unsigned int a, unsigned int b)
{
    tcg_out_insn(s, 3402, SBFM, ext, rd, rn, ext, a, b);
}

static inline void tcg_out_extr(TCGContext *s, TCGType ext, TCGReg rd,
                                TCGReg rn, TCGReg rm, unsigned int a)
{
    tcg_out_insn(s, 3403, EXTR, ext, rd, rn, rm, a);
}

static void tgen_cmp(TCGContext *s, TCGType ext, TCGCond cond,
                     TCGReg a, TCGReg b)
{
    if (is_tst_cond(cond)) {
        tcg_out_insn(s, 3510, ANDS, ext, TCG_REG_XZR, a, b);
    } else {
        tcg_out_insn(s, 3502, SUBS, ext, TCG_REG_XZR, a, b);
    }
}

static void tgen_cmpi(TCGContext *s, TCGType ext, TCGCond cond,
                      TCGReg a, tcg_target_long b)
{
    if (is_tst_cond(cond)) {
        tcg_out_logicali(s, I3404_ANDSI, ext, TCG_REG_XZR, a, b);
    } else if (b >= 0) {
        tcg_debug_assert(is_aimm(b));
        tcg_out_insn(s, 3401, SUBSI, ext, TCG_REG_XZR, a, b);
    } else {
        tcg_debug_assert(is_aimm(-b));
        tcg_out_insn(s, 3401, ADDSI, ext, TCG_REG_XZR, a, -b);
    }
}

static void tcg_out_cmp(TCGContext *s, TCGType ext, TCGCond cond, TCGReg a,
                        tcg_target_long b, bool const_b)
{
    if (const_b) {
        tgen_cmpi(s, ext, cond, a, b);
    } else {
        tgen_cmp(s, ext, cond, a, b);
    }
}

static void tcg_out_goto(TCGContext *s, const tcg_insn_unit *target)
{
    ptrdiff_t offset = tcg_pcrel_diff(s, target) >> 2;
    tcg_debug_assert(offset == sextract64(offset, 0, 26));
    tcg_out_insn(s, 3206, B, offset);
}

static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *target)
{
    ptrdiff_t offset = tcg_pcrel_diff(s, target) >> 2;
    if (offset == sextract64(offset, 0, 26)) {
        tcg_out_insn(s, 3206, BL, offset);
    } else {
        tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP0, (intptr_t)target);
        tcg_out_insn(s, 3207, BLR, TCG_REG_TMP0);
    }
}

static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target,
                         const TCGHelperInfo *info)
{
    tcg_out_call_int(s, target);
}

static void tcg_out_br(TCGContext *s, TCGLabel *l)
{
    if (!l->has_value) {
        tcg_out_reloc(s, s->code_ptr, R_AARCH64_JUMP26, l, 0);
        tcg_out_insn(s, 3206, B, 0);
    } else {
        tcg_out_goto(s, l->u.value_ptr);
    }
}

static void tgen_brcond(TCGContext *s, TCGType type, TCGCond c,
                        TCGReg a, TCGReg b, TCGLabel *l)
{
    tgen_cmp(s, type, c, a, b);
    tcg_out_reloc(s, s->code_ptr, R_AARCH64_CONDBR19, l, 0);
    tcg_out_insn(s, 3202, B_C, c, 0);
}

static void tgen_brcondi(TCGContext *s, TCGType ext, TCGCond c,
                         TCGReg a, tcg_target_long b, TCGLabel *l)
{
    int tbit = -1;
    bool need_cmp = true;

    switch (c) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* cmp xN,0; b.ne L -> cbnz xN,L */
        if (b == 0) {
            need_cmp = false;
        }
        break;
    case TCG_COND_LT:
    case TCG_COND_GE:
        /* cmp xN,0; b.mi L -> tbnz xN,63,L */
        if (b == 0) {
            c = (c == TCG_COND_LT ? TCG_COND_TSTNE : TCG_COND_TSTEQ);
            tbit = ext ? 63 : 31;
            need_cmp = false;
        }
        break;
    case TCG_COND_TSTEQ:
    case TCG_COND_TSTNE:
        /* tst xN,0xffffffff; b.ne L -> cbnz wN,L */
        if (b == UINT32_MAX) {
            c = tcg_tst_eqne_cond(c);
            ext = TCG_TYPE_I32;
            need_cmp = false;
            break;
        }
        /* tst xN,1<<B; b.ne L -> tbnz xN,B,L */
        if (is_power_of_2(b)) {
            tbit = ctz64(b);
            need_cmp = false;
        }
        break;
    default:
        break;
    }

    if (need_cmp) {
        tgen_cmpi(s, ext, c, a, b);
        tcg_out_reloc(s, s->code_ptr, R_AARCH64_CONDBR19, l, 0);
        tcg_out_insn(s, 3202, B_C, c, 0);
        return;
    }

    if (tbit >= 0) {
        tcg_out_reloc(s, s->code_ptr, R_AARCH64_TSTBR14, l, 0);
        switch (c) {
        case TCG_COND_TSTEQ:
            tcg_out_insn(s, 3205, TBZ, a, tbit, 0);
            break;
        case TCG_COND_TSTNE:
            tcg_out_insn(s, 3205, TBNZ, a, tbit, 0);
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        tcg_out_reloc(s, s->code_ptr, R_AARCH64_CONDBR19, l, 0);
        switch (c) {
        case TCG_COND_EQ:
            tcg_out_insn(s, 3201, CBZ, ext, a, 0);
            break;
        case TCG_COND_NE:
            tcg_out_insn(s, 3201, CBNZ, ext, a, 0);
            break;
        default:
            g_assert_not_reached();
        }
    }
}

static const TCGOutOpBrcond outop_brcond = {
    .base.static_constraint = C_O0_I2(r, rC),
    .out_rr = tgen_brcond,
    .out_ri = tgen_brcondi,
};

static inline void tcg_out_rev(TCGContext *s, int ext, MemOp s_bits,
                               TCGReg rd, TCGReg rn)
{
    /* REV, REV16, REV32 */
    tcg_out_insn_3507(s, I3507_REV | (s_bits << 10), ext, rd, rn);
}

static inline void tcg_out_sxt(TCGContext *s, TCGType ext, MemOp s_bits,
                               TCGReg rd, TCGReg rn)
{
    /* Using ALIASes SXTB, SXTH, SXTW, of SBFM Xd, Xn, #0, #7|15|31 */
    int bits = (8 << s_bits) - 1;
    tcg_out_sbfm(s, ext, rd, rn, 0, bits);
}

static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rn)
{
    tcg_out_sxt(s, type, MO_8, rd, rn);
}

static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rn)
{
    tcg_out_sxt(s, type, MO_16, rd, rn);
}

static void tcg_out_ext32s(TCGContext *s, TCGReg rd, TCGReg rn)
{
    tcg_out_sxt(s, TCG_TYPE_I64, MO_32, rd, rn);
}

static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg rd, TCGReg rn)
{
    tcg_out_ext32s(s, rd, rn);
}

static inline void tcg_out_uxt(TCGContext *s, MemOp s_bits,
                               TCGReg rd, TCGReg rn)
{
    /* Using ALIASes UXTB, UXTH of UBFM Wd, Wn, #0, #7|15 */
    int bits = (8 << s_bits) - 1;
    tcg_out_ubfm(s, 0, rd, rn, 0, bits);
}

static void tcg_out_ext8u(TCGContext *s, TCGReg rd, TCGReg rn)
{
    tcg_out_uxt(s, MO_8, rd, rn);
}

static void tcg_out_ext16u(TCGContext *s, TCGReg rd, TCGReg rn)
{
    tcg_out_uxt(s, MO_16, rd, rn);
}

static void tcg_out_ext32u(TCGContext *s, TCGReg rd, TCGReg rn)
{
    tcg_out_movr(s, TCG_TYPE_I32, rd, rn);
}

static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg rd, TCGReg rn)
{
    tcg_out_ext32u(s, rd, rn);
}

static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rn)
{
    tcg_out_mov(s, TCG_TYPE_I32, rd, rn);
}

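/*
 * DMB ISHLD orders prior loads against both later loads and later
 * stores, which is why the load-store orderings below can use the
 * LD variant of the barrier.
 */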
static void tcg_out_mb(TCGContext *s, unsigned a0)
{
    static const uint32_t sync[] = {
        [0 ... TCG_MO_ALL]            = DMB_ISH | DMB_LD | DMB_ST,
        [TCG_MO_ST_ST]                = DMB_ISH | DMB_ST,
        [TCG_MO_LD_LD]                = DMB_ISH | DMB_LD,
        [TCG_MO_LD_ST]                = DMB_ISH | DMB_LD,
        [TCG_MO_LD_ST | TCG_MO_LD_LD] = DMB_ISH | DMB_LD,
    };
    tcg_out32(s, sync[a0 & TCG_MO_ALL]);
}

typedef struct {
    TCGReg base;
    TCGReg index;
    TCGType index_ext;
    TCGAtomAlign aa;
} HostAddress;

bool tcg_target_has_memory_bswap(MemOp memop)
{
    return false;
}

static const TCGLdstHelperParam ldst_helper_param = {
    .ntmp = 1, .tmp = { TCG_REG_TMP0 }
};

static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    MemOp opc = get_memop(lb->oi);

    if (!reloc_pc19(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    tcg_out_ld_helper_args(s, lb, &ldst_helper_param);
    tcg_out_call_int(s, qemu_ld_helpers[opc & MO_SIZE]);
    tcg_out_ld_helper_ret(s, lb, false, &ldst_helper_param);
    tcg_out_goto(s, lb->raddr);
    return true;
}

static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    MemOp opc = get_memop(lb->oi);

    if (!reloc_pc19(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    tcg_out_st_helper_args(s, lb, &ldst_helper_param);
    tcg_out_call_int(s, qemu_st_helpers[opc & MO_SIZE]);
    tcg_out_goto(s, lb->raddr);
    return true;
}

/* We expect to use a 7-bit scaled negative offset from ENV.  */
#define MIN_TLB_MASK_TABLE_OFS  -512

/*
 * For system-mode, perform the TLB load and compare.
 * For user-mode, perform any required alignment tests.
 * In both cases, return a TCGLabelQemuLdst structure if the slow path
 * is required and fill in @h with the host address for the fast path.
 */
static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
                                           TCGReg addr_reg, MemOpIdx oi,
                                           bool is_ld)
{
    TCGType addr_type = s->addr_type;
    TCGLabelQemuLdst *ldst = NULL;
    MemOp opc = get_memop(oi);
    MemOp s_bits = opc & MO_SIZE;
    unsigned a_mask;

    h->aa = atom_and_align_for_opc(s, opc,
                                   have_lse2 ? MO_ATOM_WITHIN16
                                             : MO_ATOM_IFALIGN,
                                   s_bits == MO_128);
    a_mask = (1 << h->aa.align) - 1;

    if (tcg_use_softmmu) {
        unsigned s_mask = (1u << s_bits) - 1;
        unsigned mem_index = get_mmuidx(oi);
        TCGReg addr_adj;
        uint64_t compare_mask;

        ldst = new_ldst_label(s);
        ldst->is_ld = is_ld;
        ldst->oi = oi;
        ldst->addr_reg = addr_reg;

        /* Load cpu->neg.tlb.f[mmu_idx].{mask,table} into {tmp0,tmp1}. */
        QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, mask) != 0);
        QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, table) != 8);
        tcg_out_insn(s, 3314, LDP, TCG_REG_TMP0, TCG_REG_TMP1, TCG_AREG0,
                     tlb_mask_table_ofs(s, mem_index), 1, 0);

1677        /* Extract the TLB index from the address into TMP0.  */
1678        tcg_out_insn(s, 3502S, AND_LSR, TCG_TYPE_I64,
1679                     TCG_REG_TMP0, TCG_REG_TMP0, addr_reg,
1680                     TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
1681
1682        /* Add the tlb_table pointer, forming the CPUTLBEntry address. */
1683        tcg_out_insn(s, 3502, ADD, 1, TCG_REG_TMP1, TCG_REG_TMP1, TCG_REG_TMP0);
1684
1685        /* Load the tlb comparator into TMP0, and the fast path addend. */
1686        QEMU_BUILD_BUG_ON(HOST_BIG_ENDIAN);
1687        tcg_out_ld(s, addr_type, TCG_REG_TMP0, TCG_REG_TMP1,
1688                   is_ld ? offsetof(CPUTLBEntry, addr_read)
1689                         : offsetof(CPUTLBEntry, addr_write));
1690        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_REG_TMP1,
1691                   offsetof(CPUTLBEntry, addend));
1692
1693        /*
1694         * For aligned accesses, we check the first byte and include
1695         * the alignment bits within the address.  For unaligned access,
1696         * we check that we don't cross pages using the address of the
1697         * last byte of the access.
1698         */
1699        if (a_mask >= s_mask) {
1700            addr_adj = addr_reg;
1701        } else {
1702            addr_adj = TCG_REG_TMP2;
1703            tcg_out_insn(s, 3401, ADDI, addr_type,
1704                         addr_adj, addr_reg, s_mask - a_mask);
1705        }
1706        compare_mask = (uint64_t)TARGET_PAGE_MASK | a_mask;
1707
1708        /* Store the page mask part of the address into TMP2.  */
1709        tcg_out_logicali(s, I3404_ANDI, addr_type, TCG_REG_TMP2,
1710                         addr_adj, compare_mask);
1711
1712        /* Perform the address comparison. */
1713        tcg_out_cmp(s, addr_type, TCG_COND_NE, TCG_REG_TMP0, TCG_REG_TMP2, 0);
1714
1715        /* If not equal, we jump to the slow path. */
1716        ldst->label_ptr[0] = s->code_ptr;
1717        tcg_out_insn(s, 3202, B_C, TCG_COND_NE, 0);
1718
1719        h->base = TCG_REG_TMP1;
1720        h->index = addr_reg;
1721        h->index_ext = addr_type;
1722    } else {
1723        if (a_mask) {
1724            ldst = new_ldst_label(s);
1725
1726            ldst->is_ld = is_ld;
1727            ldst->oi = oi;
1728            ldst->addr_reg = addr_reg;
1729
1730            /* tst addr, #mask */
1731            tcg_out_logicali(s, I3404_ANDSI, 0, TCG_REG_XZR, addr_reg, a_mask);
1732
1733            /* b.ne slow_path */
1734            ldst->label_ptr[0] = s->code_ptr;
1735            tcg_out_insn(s, 3202, B_C, TCG_COND_NE, 0);
1736        }
1737
1738        if (guest_base || addr_type == TCG_TYPE_I32) {
1739            h->base = TCG_REG_GUEST_BASE;
1740            h->index = addr_reg;
1741            h->index_ext = addr_type;
1742        } else {
1743            h->base = addr_reg;
1744            h->index = TCG_REG_XZR;
1745            h->index_ext = TCG_TYPE_I64;
1746        }
1747    }
1748
1749    return ldst;
1750}
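
/*
 * For reference, a sketch of the softmmu fast path emitted above
 * (tmp0/tmp1/tmp2 stand for x16/x17/x30; the addend is applied by the
 * final access through h->base + h->index):
 *
 *   ldp  tmp0, tmp1, [x19, #tlb_mask_table_ofs]    // mask, table
 *   and  tmp0, tmp0, addr, lsr #(TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS)
 *   add  tmp1, tmp1, tmp0                          // CPUTLBEntry
 *   ldr  tmp0, [tmp1, #addr_read|addr_write]       // comparator
 *   ldr  tmp1, [tmp1, #addend]
 *   add  tmp2, addr, #(s_mask - a_mask)            // only if a_mask < s_mask
 *   and  tmp2, {tmp2|addr}, #compare_mask
 *   cmp  tmp0, tmp2
 *   b.ne slow_path                                 // patched via reloc_pc19
 */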
1751
1752static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp memop, TCGType ext,
1753                                   TCGReg data_r, HostAddress h)
1754{
1755    switch (memop & MO_SSIZE) {
1756    case MO_UB:
1757        tcg_out_ldst_r(s, I3312_LDRB, data_r, h.base, h.index_ext, h.index);
1758        break;
1759    case MO_SB:
1760        tcg_out_ldst_r(s, ext ? I3312_LDRSBX : I3312_LDRSBW,
1761                       data_r, h.base, h.index_ext, h.index);
1762        break;
1763    case MO_UW:
1764        tcg_out_ldst_r(s, I3312_LDRH, data_r, h.base, h.index_ext, h.index);
1765        break;
1766    case MO_SW:
1767        tcg_out_ldst_r(s, (ext ? I3312_LDRSHX : I3312_LDRSHW),
1768                       data_r, h.base, h.index_ext, h.index);
1769        break;
1770    case MO_UL:
1771        tcg_out_ldst_r(s, I3312_LDRW, data_r, h.base, h.index_ext, h.index);
1772        break;
1773    case MO_SL:
1774        tcg_out_ldst_r(s, I3312_LDRSWX, data_r, h.base, h.index_ext, h.index);
1775        break;
1776    case MO_UQ:
1777        tcg_out_ldst_r(s, I3312_LDRX, data_r, h.base, h.index_ext, h.index);
1778        break;
1779    default:
1780        g_assert_not_reached();
1781    }
1782}
1783
1784static void tcg_out_qemu_st_direct(TCGContext *s, MemOp memop,
1785                                   TCGReg data_r, HostAddress h)
1786{
1787    switch (memop & MO_SIZE) {
1788    case MO_8:
1789        tcg_out_ldst_r(s, I3312_STRB, data_r, h.base, h.index_ext, h.index);
1790        break;
1791    case MO_16:
1792        tcg_out_ldst_r(s, I3312_STRH, data_r, h.base, h.index_ext, h.index);
1793        break;
1794    case MO_32:
1795        tcg_out_ldst_r(s, I3312_STRW, data_r, h.base, h.index_ext, h.index);
1796        break;
1797    case MO_64:
1798        tcg_out_ldst_r(s, I3312_STRX, data_r, h.base, h.index_ext, h.index);
1799        break;
1800    default:
1801        g_assert_not_reached();
1802    }
1803}
1804
1805static void tgen_qemu_ld(TCGContext *s, TCGType data_type, TCGReg data_reg,
1806                         TCGReg addr_reg, MemOpIdx oi)
1807{
1808    TCGLabelQemuLdst *ldst;
1809    HostAddress h;
1810
1811    ldst = prepare_host_addr(s, &h, addr_reg, oi, true);
1812    tcg_out_qemu_ld_direct(s, get_memop(oi), data_type, data_reg, h);
1813
1814    if (ldst) {
1815        ldst->type = data_type;
1816        ldst->datalo_reg = data_reg;
1817        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
1818    }
1819}
1820
1821static const TCGOutOpQemuLdSt outop_qemu_ld = {
1822    .base.static_constraint = C_O1_I1(r, r),
1823    .out = tgen_qemu_ld,
1824};
1825
1826static void tgen_qemu_st(TCGContext *s, TCGType data_type, TCGReg data_reg,
1827                         TCGReg addr_reg, MemOpIdx oi)
1828{
1829    TCGLabelQemuLdst *ldst;
1830    HostAddress h;
1831
1832    ldst = prepare_host_addr(s, &h, addr_reg, oi, false);
1833    tcg_out_qemu_st_direct(s, get_memop(oi), data_reg, h);
1834
1835    if (ldst) {
1836        ldst->type = data_type;
1837        ldst->datalo_reg = data_reg;
1838        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
1839    }
1840}
1841
1842static const TCGOutOpQemuLdSt outop_qemu_st = {
1843    .base.static_constraint = C_O0_I2(rz, r),
1844    .out = tgen_qemu_st,
1845};
1846
1847static void tcg_out_qemu_ldst_i128(TCGContext *s, TCGReg datalo, TCGReg datahi,
1848                                   TCGReg addr_reg, MemOpIdx oi, bool is_ld)
1849{
1850    TCGLabelQemuLdst *ldst;
1851    HostAddress h;
1852    TCGReg base;
1853    bool use_pair;
1854
1855    ldst = prepare_host_addr(s, &h, addr_reg, oi, is_ld);
1856
1857    /* Compose the final address, as LDP/STP have no indexing. */
1858    if (h.index == TCG_REG_XZR) {
1859        base = h.base;
1860    } else {
1861        base = TCG_REG_TMP2;
1862        if (h.index_ext == TCG_TYPE_I32) {
1863            /* add base, base, index, uxtw */
1864            tcg_out_insn(s, 3501, ADD, TCG_TYPE_I64, base,
1865                         h.base, h.index, MO_32, 0);
1866        } else {
1867            /* add base, base, index */
1868            tcg_out_insn(s, 3502, ADD, 1, base, h.base, h.index);
1869        }
1870    }
1871
1872    use_pair = h.aa.atom < MO_128 || have_lse2;
1873
1874    if (!use_pair) {
1875        tcg_insn_unit *branch = NULL;
1876        TCGReg ll, lh, sl, sh;
1877
1878        /*
1879         * If we have already checked for 16-byte alignment, that's all
1880         * we need. Otherwise we have determined that misaligned atomicity
1881         * may be handled with two 8-byte accesses.
1882         */
1883        if (h.aa.align < MO_128) {
1884            /*
1885             * TODO: align should be MO_64, so we only need test bit 3,
1886             * which means we could use TBNZ instead of ANDS+B_C.
1887             */
1888            tcg_out_logicali(s, I3404_ANDSI, 0, TCG_REG_XZR, addr_reg, 15);
1889            branch = s->code_ptr;
1890            tcg_out_insn(s, 3202, B_C, TCG_COND_NE, 0);
1891            use_pair = true;
1892        }
1893
1894        if (is_ld) {
1895            /*
1896             * 16-byte atomicity without LSE2 requires LDXP+STXP loop:
1897             *    ldxp lo, hi, [base]
1898             *    stxp t0, lo, hi, [base]
1899             *    cbnz t0, .-8
1900             * Require no overlap between data{lo,hi} and base.
1901             */
1902            if (datalo == base || datahi == base) {
1903                tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_TMP2, base);
1904                base = TCG_REG_TMP2;
1905            }
1906            ll = sl = datalo;
1907            lh = sh = datahi;
1908        } else {
1909            /*
1910             * 16-byte atomicity without LSE2 requires LDXP+STXP loop:
1911             * 1: ldxp t0, t1, [base]
1912             *    stxp t0, lo, hi, [base]
1913             *    cbnz t0, 1b
1914             */
1915            tcg_debug_assert(base != TCG_REG_TMP0 && base != TCG_REG_TMP1);
1916            ll = TCG_REG_TMP0;
1917            lh = TCG_REG_TMP1;
1918            sl = datalo;
1919            sh = datahi;
1920        }
1921
1922        tcg_out_insn(s, 3306, LDXP, TCG_REG_XZR, ll, lh, base);
1923        tcg_out_insn(s, 3306, STXP, TCG_REG_TMP0, sl, sh, base);
1924        tcg_out_insn(s, 3201, CBNZ, 0, TCG_REG_TMP0, -2);
1925
1926        if (use_pair) {
1927            /* "b .+8", branching across the one insn of use_pair. */
1928            tcg_out_insn(s, 3206, B, 2);
1929            reloc_pc19(branch, tcg_splitwx_to_rx(s->code_ptr));
1930        }
1931    }
1932
1933    if (use_pair) {
1934        if (is_ld) {
1935            tcg_out_insn(s, 3314, LDP, datalo, datahi, base, 0, 1, 0);
1936        } else {
1937            tcg_out_insn(s, 3314, STP, datalo, datahi, base, 0, 1, 0);
1938        }
1939    }
1940
1941    if (ldst) {
1942        ldst->type = TCG_TYPE_I128;
1943        ldst->datalo_reg = datalo;
1944        ldst->datahi_reg = datahi;
1945        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
1946    }
1947}
1948
1949static void tgen_qemu_ld2(TCGContext *s, TCGType type, TCGReg datalo,
1950                          TCGReg datahi, TCGReg addr_reg, MemOpIdx oi)
1951{
1952    tcg_out_qemu_ldst_i128(s, datalo, datahi, addr_reg, oi, true);
1953}
1954
1955static const TCGOutOpQemuLdSt2 outop_qemu_ld2 = {
1956    .base.static_constraint = C_O2_I1(r, r, r),
1957    .out = tgen_qemu_ld2,
1958};
1959
1960static void tgen_qemu_st2(TCGContext *s, TCGType type, TCGReg datalo,
1961                          TCGReg datahi, TCGReg addr_reg, MemOpIdx oi)
1962{
1963    tcg_out_qemu_ldst_i128(s, datalo, datahi, addr_reg, oi, false);
1964}
1965
1966static const TCGOutOpQemuLdSt2 outop_qemu_st2 = {
1967    .base.static_constraint = C_O0_I3(rz, rz, r),
1968    .out = tgen_qemu_st2,
1969};
1970
1971static const tcg_insn_unit *tb_ret_addr;
1972
1973static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
1974{
1975    const tcg_insn_unit *target;
1976    ptrdiff_t offset;
1977
1978    /* Reuse the zeroing that exists for goto_ptr.  */
1979    if (a0 == 0) {
1980        target = tcg_code_gen_epilogue;
1981    } else {
1982        tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_X0, a0);
1983        target = tb_ret_addr;
1984    }
1985
1986    offset = tcg_pcrel_diff(s, target) >> 2;
1987    if (offset == sextract64(offset, 0, 26)) {
1988        tcg_out_insn(s, 3206, B, offset);
1989    } else {
1990        /*
1991         * Only x16/x17 generate BTI type Jump (2),
1992         * other registers generate BTI type Jump|Call (3).
1993         */
1994        QEMU_BUILD_BUG_ON(TCG_REG_TMP0 != TCG_REG_X16);
1995        tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP0, (intptr_t)target);
1996        tcg_out_insn(s, 3207, BR, TCG_REG_TMP0);
1997    }
1998}
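
/*
 * The range test above is just the B encoding limit: imm26 is a signed
 * word offset, so a direct branch reaches +/- 2^25 insns = +/- 128MiB.
 * Beyond that we build the address and BR through x16, which per the
 * comment above only requires a BTI Jump landing pad at the target.
 */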
1999
2000static void tcg_out_goto_tb(TCGContext *s, int which)
2001{
2002    /*
2003     * Direct branch, or indirect address load, will be patched
2004     * by tb_target_set_jmp_target.  Assert indirect load offset
2005     * in range early, regardless of direct branch distance.
2006     */
2007    intptr_t i_off = tcg_pcrel_diff(s, (void *)get_jmp_target_addr(s, which));
2008    tcg_debug_assert(i_off == sextract64(i_off, 0, 21));
2009
2010    set_jmp_insn_offset(s, which);
2011    tcg_out32(s, I3206_B);
2012    tcg_out_insn(s, 3207, BR, TCG_REG_TMP0);
2013    set_jmp_reset_offset(s, which);
2014    tcg_out_bti(s, BTI_J);
2015}
2016
2017static void tcg_out_goto_ptr(TCGContext *s, TCGReg a0)
2018{
2019    tcg_out_insn(s, 3207, BR, a0);
2020}
2021
2022void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
2023                              uintptr_t jmp_rx, uintptr_t jmp_rw)
2024{
2025    uintptr_t d_addr = tb->jmp_target_addr[n];
2026    ptrdiff_t d_offset = d_addr - jmp_rx;
2027    tcg_insn_unit insn;
2028
2029    /* Either directly branch, or indirect branch load. */
2030    if (d_offset == sextract64(d_offset, 0, 28)) {
2031        insn = deposit32(I3206_B, 0, 26, d_offset >> 2);
2032    } else {
2033        uintptr_t i_addr = (uintptr_t)&tb->jmp_target_addr[n];
2034        ptrdiff_t i_offset = i_addr - jmp_rx;
2035
2036        /* Note that we asserted this in range in tcg_out_goto_tb. */
2037        insn = deposit32(I3305_LDR | TCG_REG_TMP0, 5, 19, i_offset >> 2);
2038    }
2039    qatomic_set((uint32_t *)jmp_rw, insn);
2040    flush_idcache_range(jmp_rx, jmp_rw, 4);
2041}
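
/*
 * Sketch of the patch site laid down by tcg_out_goto_tb: only the
 * first word alternates between the two forms
 *
 *     b    <direct target>            // offset fits in 26 bits
 *     ldr  x16, <jmp_target_addr>     // otherwise, literal load
 *
 * while the following "br x16" is never touched; the qatomic_set of a
 * single aligned word plus the icache flush is what keeps concurrent
 * execution of either the old or new insn well defined.
 */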
2042
2043
2044static void tgen_add(TCGContext *s, TCGType type,
2045                     TCGReg a0, TCGReg a1, TCGReg a2)
2046{
2047    tcg_out_insn(s, 3502, ADD, type, a0, a1, a2);
2048}
2049
2050static void tgen_addi(TCGContext *s, TCGType type,
2051                      TCGReg a0, TCGReg a1, tcg_target_long a2)
2052{
2053    if (a2 >= 0) {
2054        tcg_out_insn(s, 3401, ADDI, type, a0, a1, a2);
2055    } else {
2056        tcg_out_insn(s, 3401, SUBI, type, a0, a1, -a2);
2057    }
2058}
2059
2060static const TCGOutOpBinary outop_add = {
2061    .base.static_constraint = C_O1_I2(r, r, rA),
2062    .out_rrr = tgen_add,
2063    .out_rri = tgen_addi,
2064};
2065
2066static void tgen_addco(TCGContext *s, TCGType type,
2067                       TCGReg a0, TCGReg a1, TCGReg a2)
2068{
2069    tcg_out_insn(s, 3502, ADDS, type, a0, a1, a2);
2070}
2071
2072static void tgen_addco_imm(TCGContext *s, TCGType type,
2073                           TCGReg a0, TCGReg a1, tcg_target_long a2)
2074{
2075    if (a2 >= 0) {
2076        tcg_out_insn(s, 3401, ADDSI, type, a0, a1, a2);
2077    } else {
2078        tcg_out_insn(s, 3401, SUBSI, type, a0, a1, -a2);
2079    }
2080}
2081
2082static const TCGOutOpBinary outop_addco = {
2083    .base.static_constraint = C_O1_I2(r, r, rA),
2084    .out_rrr = tgen_addco,
2085    .out_rri = tgen_addco_imm,
2086};
2087
2088static void tgen_addci_rrr(TCGContext *s, TCGType type,
2089                           TCGReg a0, TCGReg a1, TCGReg a2)
2090{
2091    tcg_out_insn(s, 3503, ADC, type, a0, a1, a2);
2092}
2093
2094static void tgen_addci_rri(TCGContext *s, TCGType type,
2095                           TCGReg a0, TCGReg a1, tcg_target_long a2)
2096{
2097    /*
2098     * Note that the only two constants we support are 0 and -1, and
2099     * that SBC = rn + ~rm + c, so adc -1 is sbc 0, and vice-versa.
2100     */
2101    if (a2) {
2102        tcg_out_insn(s, 3503, SBC, type, a0, a1, TCG_REG_XZR);
2103    } else {
2104        tcg_out_insn(s, 3503, ADC, type, a0, a1, TCG_REG_XZR);
2105    }
2106}
2107
2108static const TCGOutOpAddSubCarry outop_addci = {
2109    .base.static_constraint = C_O1_I2(r, rz, rMZ),
2110    .out_rrr = tgen_addci_rrr,
2111    .out_rri = tgen_addci_rri,
2112};
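
/*
 * The identity behind tgen_addci_rri, spelled out: SBC computes
 * rn + ~rm + C, so with rm = XZR it is rn + ~0 + C = rn - 1 + C,
 * i.e. "add -1 with carry-in"; ADC with XZR is plainly "add 0 with
 * carry-in".  Hence only the constants 0 and -1 are accepted (rMZ).
 */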
2113
2114static void tgen_addcio(TCGContext *s, TCGType type,
2115                        TCGReg a0, TCGReg a1, TCGReg a2)
2116{
2117    tcg_out_insn(s, 3503, ADCS, type, a0, a1, a2);
2118}
2119
2120static void tgen_addcio_imm(TCGContext *s, TCGType type,
2121                            TCGReg a0, TCGReg a1, tcg_target_long a2)
2122{
2123    /* Use SBCS w/0 for ADCS w/-1 -- see above. */
2124    if (a2) {
2125        tcg_out_insn(s, 3503, SBCS, type, a0, a1, TCG_REG_XZR);
2126    } else {
2127        tcg_out_insn(s, 3503, ADCS, type, a0, a1, TCG_REG_XZR);
2128    }
2129}
2130
2131static const TCGOutOpBinary outop_addcio = {
2132    .base.static_constraint = C_O1_I2(r, rz, rMZ),
2133    .out_rrr = tgen_addcio,
2134    .out_rri = tgen_addcio_imm,
2135};
2136
2137static void tcg_out_set_carry(TCGContext *s)
2138{
2139    tcg_out_insn(s, 3502, SUBS, TCG_TYPE_I32,
2140                 TCG_REG_XZR, TCG_REG_XZR, TCG_REG_XZR);
2141}
2142
2143static void tgen_and(TCGContext *s, TCGType type,
2144                     TCGReg a0, TCGReg a1, TCGReg a2)
2145{
2146    tcg_out_insn(s, 3510, AND, type, a0, a1, a2);
2147}
2148
2149static void tgen_andi(TCGContext *s, TCGType type,
2150                      TCGReg a0, TCGReg a1, tcg_target_long a2)
2151{
2152    tcg_out_logicali(s, I3404_ANDI, type, a0, a1, a2);
2153}
2154
2155static const TCGOutOpBinary outop_and = {
2156    .base.static_constraint = C_O1_I2(r, r, rL),
2157    .out_rrr = tgen_and,
2158    .out_rri = tgen_andi,
2159};
2160
2161static void tgen_andc(TCGContext *s, TCGType type,
2162                      TCGReg a0, TCGReg a1, TCGReg a2)
2163{
2164    tcg_out_insn(s, 3510, BIC, type, a0, a1, a2);
2165}
2166
2167static const TCGOutOpBinary outop_andc = {
2168    .base.static_constraint = C_O1_I2(r, r, r),
2169    .out_rrr = tgen_andc,
2170};
2171
2172static void tgen_clz(TCGContext *s, TCGType type,
2173                     TCGReg a0, TCGReg a1, TCGReg a2)
2174{
2175    tcg_out_cmp(s, type, TCG_COND_NE, a1, 0, true);
2176    tcg_out_insn(s, 3507, CLZ, type, TCG_REG_TMP0, a1);
2177    tcg_out_insn(s, 3506, CSEL, type, a0, TCG_REG_TMP0, a2, TCG_COND_NE);
2178}
2179
2180static void tgen_clzi(TCGContext *s, TCGType type,
2181                      TCGReg a0, TCGReg a1, tcg_target_long a2)
2182{
2183    if (a2 == (type == TCG_TYPE_I32 ? 32 : 64)) {
2184        tcg_out_insn(s, 3507, CLZ, type, a0, a1);
2185        return;
2186    }
2187
2188    tcg_out_cmp(s, type, TCG_COND_NE, a1, 0, true);
2189    tcg_out_insn(s, 3507, CLZ, type, a0, a1);
2190
2191    switch (a2) {
2192    case -1:
2193        tcg_out_insn(s, 3506, CSINV, type, a0, a0, TCG_REG_XZR, TCG_COND_NE);
2194        break;
2195    case 0:
2196        tcg_out_insn(s, 3506, CSEL, type, a0, a0, TCG_REG_XZR, TCG_COND_NE);
2197        break;
2198    default:
2199        tcg_out_movi(s, type, TCG_REG_TMP0, a2);
2200        tcg_out_insn(s, 3506, CSEL, type, a0, a0, TCG_REG_TMP0, TCG_COND_NE);
2201        break;
2202    }
2203}
2204
2205static const TCGOutOpBinary outop_clz = {
2206    .base.static_constraint = C_O1_I2(r, r, rAL),
2207    .out_rrr = tgen_clz,
2208    .out_rri = tgen_clzi,
2209};
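
/*
 * Worked example for tgen_clzi: CLZ of zero already yields the operand
 * width, so a zero-input value of 32/64 needs no fixup at all.  For -1
 * we invert XZR with CSINV, for 0 we select XZR with CSEL, and any
 * other constant goes through a temporary; each is conditional on the
 * NE result of the compare against zero.
 */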
2210
2211static const TCGOutOpUnary outop_ctpop = {
2212    .base.static_constraint = C_NotImplemented,
2213};
2214
2215static void tgen_ctz(TCGContext *s, TCGType type,
2216                     TCGReg a0, TCGReg a1, TCGReg a2)
2217{
2218    tcg_out_insn(s, 3507, RBIT, type, TCG_REG_TMP0, a1);
2219    tgen_clz(s, type, a0, TCG_REG_TMP0, a2);
2220}
2221
2222static void tgen_ctzi(TCGContext *s, TCGType type,
2223                      TCGReg a0, TCGReg a1, tcg_target_long a2)
2224{
2225    tcg_out_insn(s, 3507, RBIT, type, TCG_REG_TMP0, a1);
2226    tgen_clzi(s, type, a0, TCG_REG_TMP0, a2);
2227}
2228
2229static const TCGOutOpBinary outop_ctz = {
2230    .base.static_constraint = C_O1_I2(r, r, rAL),
2231    .out_rrr = tgen_ctz,
2232    .out_rri = tgen_ctzi,
2233};
2234
2235static void tgen_divs(TCGContext *s, TCGType type,
2236                      TCGReg a0, TCGReg a1, TCGReg a2)
2237{
2238    tcg_out_insn(s, 3508, SDIV, type, a0, a1, a2);
2239}
2240
2241static const TCGOutOpBinary outop_divs = {
2242    .base.static_constraint = C_O1_I2(r, r, r),
2243    .out_rrr = tgen_divs,
2244};
2245
2246static const TCGOutOpDivRem outop_divs2 = {
2247    .base.static_constraint = C_NotImplemented,
2248};
2249
2250static void tgen_divu(TCGContext *s, TCGType type,
2251                      TCGReg a0, TCGReg a1, TCGReg a2)
2252{
2253    tcg_out_insn(s, 3508, UDIV, type, a0, a1, a2);
2254}
2255
2256static const TCGOutOpBinary outop_divu = {
2257    .base.static_constraint = C_O1_I2(r, r, r),
2258    .out_rrr = tgen_divu,
2259};
2260
2261static const TCGOutOpDivRem outop_divu2 = {
2262    .base.static_constraint = C_NotImplemented,
2263};
2264
2265static void tgen_eqv(TCGContext *s, TCGType type,
2266                     TCGReg a0, TCGReg a1, TCGReg a2)
2267{
2268    tcg_out_insn(s, 3510, EON, type, a0, a1, a2);
2269}
2270
2271static const TCGOutOpBinary outop_eqv = {
2272    .base.static_constraint = C_O1_I2(r, r, r),
2273    .out_rrr = tgen_eqv,
2274};
2275
2276static void tgen_extrh_i64_i32(TCGContext *s, TCGType t, TCGReg a0, TCGReg a1)
2277{
2278    tcg_out_ubfm(s, TCG_TYPE_I64, a0, a1, 32, 63);
2279}
2280
2281static const TCGOutOpUnary outop_extrh_i64_i32 = {
2282    .base.static_constraint = C_O1_I1(r, r),
2283    .out_rr = tgen_extrh_i64_i32,
2284};
2285
2286static void tgen_mul(TCGContext *s, TCGType type,
2287                     TCGReg a0, TCGReg a1, TCGReg a2)
2288{
2289    tcg_out_insn(s, 3509, MADD, type, a0, a1, a2, TCG_REG_XZR);
2290}
2291
2292static const TCGOutOpBinary outop_mul = {
2293    .base.static_constraint = C_O1_I2(r, r, r),
2294    .out_rrr = tgen_mul,
2295};
2296
2297static const TCGOutOpMul2 outop_muls2 = {
2298    .base.static_constraint = C_NotImplemented,
2299};
2300
2301static TCGConstraintSetIndex cset_mulh(TCGType type, unsigned flags)
2302{
2303    return type == TCG_TYPE_I64 ? C_O1_I2(r, r, r) : C_NotImplemented;
2304}
2305
2306static void tgen_mulsh(TCGContext *s, TCGType type,
2307                       TCGReg a0, TCGReg a1, TCGReg a2)
2308{
2309    tcg_out_insn(s, 3508, SMULH, TCG_TYPE_I64, a0, a1, a2);
2310}
2311
2312static const TCGOutOpBinary outop_mulsh = {
2313    .base.static_constraint = C_Dynamic,
2314    .base.dynamic_constraint = cset_mulh,
2315    .out_rrr = tgen_mulsh,
2316};
2317
2318static const TCGOutOpMul2 outop_mulu2 = {
2319    .base.static_constraint = C_NotImplemented,
2320};
2321
2322static void tgen_muluh(TCGContext *s, TCGType type,
2323                       TCGReg a0, TCGReg a1, TCGReg a2)
2324{
2325    tcg_out_insn(s, 3508, UMULH, TCG_TYPE_I64, a0, a1, a2);
2326}
2327
2328static const TCGOutOpBinary outop_muluh = {
2329    .base.static_constraint = C_Dynamic,
2330    .base.dynamic_constraint = cset_mulh,
2331    .out_rrr = tgen_muluh,
2332};
2333
2334static const TCGOutOpBinary outop_nand = {
2335    .base.static_constraint = C_NotImplemented,
2336};
2337
2338static const TCGOutOpBinary outop_nor = {
2339    .base.static_constraint = C_NotImplemented,
2340};
2341
2342static void tgen_or(TCGContext *s, TCGType type,
2343                    TCGReg a0, TCGReg a1, TCGReg a2)
2344{
2345    tcg_out_insn(s, 3510, ORR, type, a0, a1, a2);
2346}
2347
2348static void tgen_ori(TCGContext *s, TCGType type,
2349                     TCGReg a0, TCGReg a1, tcg_target_long a2)
2350{
2351    tcg_out_logicali(s, I3404_ORRI, type, a0, a1, a2);
2352}
2353
2354static const TCGOutOpBinary outop_or = {
2355    .base.static_constraint = C_O1_I2(r, r, rL),
2356    .out_rrr = tgen_or,
2357    .out_rri = tgen_ori,
2358};
2359
2360static void tgen_orc(TCGContext *s, TCGType type,
2361                     TCGReg a0, TCGReg a1, TCGReg a2)
2362{
2363    tcg_out_insn(s, 3510, ORN, type, a0, a1, a2);
2364}
2365
2366static const TCGOutOpBinary outop_orc = {
2367    .base.static_constraint = C_O1_I2(r, r, r),
2368    .out_rrr = tgen_orc,
2369};
2370
2371static void tgen_rems(TCGContext *s, TCGType type,
2372                      TCGReg a0, TCGReg a1, TCGReg a2)
2373{
2374    tcg_out_insn(s, 3508, SDIV, type, TCG_REG_TMP0, a1, a2);
2375    tcg_out_insn(s, 3509, MSUB, type, a0, TCG_REG_TMP0, a2, a1);
2376}
2377
2378static const TCGOutOpBinary outop_rems = {
2379    .base.static_constraint = C_O1_I2(r, r, r),
2380    .out_rrr = tgen_rems,
2381};
2382
2383static void tgen_remu(TCGContext *s, TCGType type,
2384                      TCGReg a0, TCGReg a1, TCGReg a2)
2385{
2386    tcg_out_insn(s, 3508, UDIV, type, TCG_REG_TMP0, a1, a2);
2387    tcg_out_insn(s, 3509, MSUB, type, a0, TCG_REG_TMP0, a2, a1);
2388}
2389
2390static const TCGOutOpBinary outop_remu = {
2391    .base.static_constraint = C_O1_I2(r, r, r),
2392    .out_rrr = tgen_remu,
2393};
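
/*
 * Both remainder helpers use the same identity, AArch64 having no
 * remainder instruction:
 *
 *     rem = a1 - (a1 / a2) * a2
 *
 * i.e. [SU]DIV into tmp0, then MSUB a0, tmp0, a2, a1.
 */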
2394
2395static const TCGOutOpBinary outop_rotl = {
2396    .base.static_constraint = C_NotImplemented,
2397};
2398
2399static void tgen_rotr(TCGContext *s, TCGType type,
2400                      TCGReg a0, TCGReg a1, TCGReg a2)
2401{
2402    tcg_out_insn(s, 3508, RORV, type, a0, a1, a2);
2403}
2404
2405static void tgen_rotri(TCGContext *s, TCGType type,
2406                       TCGReg a0, TCGReg a1, tcg_target_long a2)
2407{
2408    int max = type == TCG_TYPE_I32 ? 31 : 63;
2409    tcg_out_extr(s, type, a0, a1, a1, a2 & max);
2410}
2411
2412static const TCGOutOpBinary outop_rotr = {
2413    .base.static_constraint = C_O1_I2(r, r, ri),
2414    .out_rrr = tgen_rotr,
2415    .out_rri = tgen_rotri,
2416};
2417
2418static void tgen_sar(TCGContext *s, TCGType type,
2419                     TCGReg a0, TCGReg a1, TCGReg a2)
2420{
2421    tcg_out_insn(s, 3508, ASRV, type, a0, a1, a2);
2422}
2423
2424static void tgen_sari(TCGContext *s, TCGType type,
2425                      TCGReg a0, TCGReg a1, tcg_target_long a2)
2426{
2427    int max = type == TCG_TYPE_I32 ? 31 : 63;
2428    tcg_out_sbfm(s, type, a0, a1, a2 & max, max);
2429}
2430
2431static const TCGOutOpBinary outop_sar = {
2432    .base.static_constraint = C_O1_I2(r, r, ri),
2433    .out_rrr = tgen_sar,
2434    .out_rri = tgen_sari,
2435};
2436
2437static void tgen_shl(TCGContext *s, TCGType type,
2438                     TCGReg a0, TCGReg a1, TCGReg a2)
2439{
2440    tcg_out_insn(s, 3508, LSLV, type, a0, a1, a2);
2441}
2442
2443static void tgen_shli(TCGContext *s, TCGType type,
2444                      TCGReg a0, TCGReg a1, tcg_target_long a2)
2445{
2446    int max = type == TCG_TYPE_I32 ? 31 : 63;
2447    tcg_out_ubfm(s, type, a0, a1, -a2 & max, ~a2 & max);
2448}
2449
2450static const TCGOutOpBinary outop_shl = {
2451    .base.static_constraint = C_O1_I2(r, r, ri),
2452    .out_rrr = tgen_shl,
2453    .out_rri = tgen_shli,
2454};
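
/*
 * tgen_shli (and the shri/sari helpers below) are bitfield-move
 * aliases.  A worked 32-bit example for tgen_shli with a2 = 3:
 *
 *     immr = -3 & 31 = 29, imms = ~3 & 31 = 28
 *     ubfm w0, w1, #29, #28    ==    lsl w0, w1, #3
 *
 * matching the architectural alias LSL #n == UBFM #(-n mod 32), #(31 - n).
 */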
2455
2456static void tgen_shr(TCGContext *s, TCGType type,
2457                     TCGReg a0, TCGReg a1, TCGReg a2)
2458{
2459    tcg_out_insn(s, 3508, LSRV, type, a0, a1, a2);
2460}
2461
2462static void tgen_shri(TCGContext *s, TCGType type,
2463                      TCGReg a0, TCGReg a1, tcg_target_long a2)
2464{
2465    int max = type == TCG_TYPE_I32 ? 31 : 63;
2466    tcg_out_ubfm(s, type, a0, a1, a2 & max, max);
2467}
2468
2469static const TCGOutOpBinary outop_shr = {
2470    .base.static_constraint = C_O1_I2(r, r, ri),
2471    .out_rrr = tgen_shr,
2472    .out_rri = tgen_shri,
2473};
2474
2475static void tgen_sub(TCGContext *s, TCGType type,
2476                     TCGReg a0, TCGReg a1, TCGReg a2)
2477{
2478    tcg_out_insn(s, 3502, SUB, type, a0, a1, a2);
2479}
2480
2481static const TCGOutOpSubtract outop_sub = {
2482    .base.static_constraint = C_O1_I2(r, r, r),
2483    .out_rrr = tgen_sub,
2484};
2485
2486static void tgen_subbo_rrr(TCGContext *s, TCGType type,
2487                           TCGReg a0, TCGReg a1, TCGReg a2)
2488{
2489    tcg_out_insn(s, 3502, SUBS, type, a0, a1, a2);
2490}
2491
2492static void tgen_subbo_rri(TCGContext *s, TCGType type,
2493                           TCGReg a0, TCGReg a1, tcg_target_long a2)
2494{
2495    if (a2 >= 0) {
2496        tcg_out_insn(s, 3401, SUBSI, type, a0, a1, a2);
2497    } else {
2498        tcg_out_insn(s, 3401, ADDSI, type, a0, a1, -a2);
2499    }
2500}
2501
2502static void tgen_subbo_rir(TCGContext *s, TCGType type,
2503                           TCGReg a0, tcg_target_long a1, TCGReg a2)
2504{
2505    tgen_subbo_rrr(s, type, a0, TCG_REG_XZR, a2);
2506}
2507
2508static void tgen_subbo_rii(TCGContext *s, TCGType type,
2509                           TCGReg a0, tcg_target_long a1, tcg_target_long a2)
2510{
2511    if (a2 == 0) {
2512        tgen_subbo_rrr(s, type, a0, TCG_REG_XZR, TCG_REG_XZR);
2513        return;
2514    }
2515
2516    /*
2517     * We want to allow a1 to be zero for the benefit of negation via
2518     * subtraction.  However, that leaves open the possibility of
2519     * adding 0 +/- const, and the immediate add/sub instructions
2520     * encode XSP not XZR.  Since we have 0 - non-zero, borrow is
2521     * always set.
2522     */
2523    tcg_out_movi(s, type, a0, -a2);
2524    tcg_out_set_borrow(s);
2525}
2526
2527static const TCGOutOpAddSubCarry outop_subbo = {
2528    .base.static_constraint = C_O1_I2(r, rZ, rA),
2529    .out_rrr = tgen_subbo_rrr,
2530    .out_rri = tgen_subbo_rri,
2531    .out_rir = tgen_subbo_rir,
2532    .out_rii = tgen_subbo_rii,
2533};
2534
2535static void tgen_subbi_rrr(TCGContext *s, TCGType type,
2536                           TCGReg a0, TCGReg a1, TCGReg a2)
2537{
2538    tcg_out_insn(s, 3503, SBC, type, a0, a1, a2);
2539}
2540
2541static void tgen_subbi_rri(TCGContext *s, TCGType type,
2542                           TCGReg a0, TCGReg a1, tcg_target_long a2)
2543{
2544    tgen_addci_rri(s, type, a0, a1, ~a2);
2545}
2546
2547static const TCGOutOpAddSubCarry outop_subbi = {
2548    .base.static_constraint = C_O1_I2(r, rz, rMZ),
2549    .out_rrr = tgen_subbi_rrr,
2550    .out_rri = tgen_subbi_rri,
2551};
2552
2553static void tgen_subbio_rrr(TCGContext *s, TCGType type,
2554                            TCGReg a0, TCGReg a1, TCGReg a2)
2555{
2556    tcg_out_insn(s, 3503, SBCS, type, a0, a1, a2);
2557}
2558
2559static void tgen_subbio_rri(TCGContext *s, TCGType type,
2560                            TCGReg a0, TCGReg a1, tcg_target_long a2)
2561{
2562    tgen_addcio_imm(s, type, a0, a1, ~a2);
2563}
2564
2565static const TCGOutOpAddSubCarry outop_subbio = {
2566    .base.static_constraint = C_O1_I2(r, rz, rMZ),
2567    .out_rrr = tgen_subbio_rrr,
2568    .out_rri = tgen_subbio_rri,
2569};
2570
2571static void tcg_out_set_borrow(TCGContext *s)
2572{
2573    tcg_out_insn(s, 3502, ADDS, TCG_TYPE_I32,
2574                 TCG_REG_XZR, TCG_REG_XZR, TCG_REG_XZR);
2575}
2576
2577static void tgen_xor(TCGContext *s, TCGType type,
2578                     TCGReg a0, TCGReg a1, TCGReg a2)
2579{
2580    tcg_out_insn(s, 3510, EOR, type, a0, a1, a2);
2581}
2582
2583static void tgen_xori(TCGContext *s, TCGType type,
2584                      TCGReg a0, TCGReg a1, tcg_target_long a2)
2585{
2586    tcg_out_logicali(s, I3404_EORI, type, a0, a1, a2);
2587}
2588
2589static const TCGOutOpBinary outop_xor = {
2590    .base.static_constraint = C_O1_I2(r, r, rL),
2591    .out_rrr = tgen_xor,
2592    .out_rri = tgen_xori,
2593};
2594
2595static void tgen_bswap16(TCGContext *s, TCGType type,
2596                         TCGReg a0, TCGReg a1, unsigned flags)
2597{
2598    tcg_out_rev(s, TCG_TYPE_I32, MO_16, a0, a1);
2599    if (flags & TCG_BSWAP_OS) {
2600        /* Output must be sign-extended. */
2601        tcg_out_ext16s(s, type, a0, a0);
2602    } else if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
2603        /* Output must be zero-extended, but input isn't. */
2604        tcg_out_ext16u(s, a0, a0);
2605    }
2606}
2607
2608static const TCGOutOpBswap outop_bswap16 = {
2609    .base.static_constraint = C_O1_I1(r, r),
2610    .out_rr = tgen_bswap16,
2611};
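
/*
 * Flags example: the rev16 emitted above swaps bytes within each
 * 16-bit lane, so unless the input was known zero-extended
 * (TCG_BSWAP_IZ) the high half now holds swapped garbage.  A requested
 * zero-extension (TCG_BSWAP_OZ) therefore needs the explicit uxth,
 * while sign-extension (TCG_BSWAP_OS) always re-extends from bit 15.
 */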
2612
2613static void tgen_bswap32(TCGContext *s, TCGType type,
2614                         TCGReg a0, TCGReg a1, unsigned flags)
2615{
2616    tcg_out_rev(s, TCG_TYPE_I32, MO_32, a0, a1);
2617    if (flags & TCG_BSWAP_OS) {
2618        tcg_out_ext32s(s, a0, a0);
2619    }
2620}
2621
2622static const TCGOutOpBswap outop_bswap32 = {
2623    .base.static_constraint = C_O1_I1(r, r),
2624    .out_rr = tgen_bswap32,
2625};
2626
2627static void tgen_bswap64(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
2628{
2629    tcg_out_rev(s, TCG_TYPE_I64, MO_64, a0, a1);
2630}
2631
2632static const TCGOutOpUnary outop_bswap64 = {
2633    .base.static_constraint = C_O1_I1(r, r),
2634    .out_rr = tgen_bswap64,
2635};
2636
2637static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
2638{
2639    tgen_sub(s, type, a0, TCG_REG_XZR, a1);
2640}
2641
2642static const TCGOutOpUnary outop_neg = {
2643    .base.static_constraint = C_O1_I1(r, r),
2644    .out_rr = tgen_neg,
2645};
2646
2647static void tgen_not(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
2648{
2649    tgen_orc(s, type, a0, TCG_REG_XZR, a1);
2650}
2651
2652static const TCGOutOpUnary outop_not = {
2653    .base.static_constraint = C_O1_I1(r, r),
2654    .out_rr = tgen_not,
2655};
2656
2657static void tgen_cset(TCGContext *s, TCGCond cond, TCGReg ret)
2658{
2659    /* Use CSET alias of CSINC Wd, WZR, WZR, invert(cond).  */
2660    tcg_out_insn(s, 3506, CSINC, TCG_TYPE_I32, ret, TCG_REG_XZR,
2661                 TCG_REG_XZR, tcg_invert_cond(cond));
2662}
2663
2664static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond,
2665                         TCGReg a0, TCGReg a1, TCGReg a2)
2666{
2667    tgen_cmp(s, type, cond, a1, a2);
2668    tgen_cset(s, cond, a0);
2669}
2670
2671static void tgen_setcondi(TCGContext *s, TCGType type, TCGCond cond,
2672                          TCGReg a0, TCGReg a1, tcg_target_long a2)
2673{
2674    tgen_cmpi(s, type, cond, a1, a2);
2675    tgen_cset(s, cond, a0);
2676}
2677
2678static const TCGOutOpSetcond outop_setcond = {
2679    .base.static_constraint = C_O1_I2(r, r, rC),
2680    .out_rrr = tgen_setcond,
2681    .out_rri = tgen_setcondi,
2682};
2683
2684static void tgen_csetm(TCGContext *s, TCGType ext, TCGCond cond, TCGReg ret)
2685{
2686    /* Use CSETM alias of CSINV Wd, WZR, WZR, invert(cond).  */
2687    tcg_out_insn(s, 3506, CSINV, ext, ret, TCG_REG_XZR,
2688                 TCG_REG_XZR, tcg_invert_cond(cond));
2689}
2690
2691static void tgen_negsetcond(TCGContext *s, TCGType type, TCGCond cond,
2692                            TCGReg a0, TCGReg a1, TCGReg a2)
2693{
2694    tgen_cmp(s, type, cond, a1, a2);
2695    tgen_csetm(s, type, cond, a0);
2696}
2697
2698static void tgen_negsetcondi(TCGContext *s, TCGType type, TCGCond cond,
2699                             TCGReg a0, TCGReg a1, tcg_target_long a2)
2700{
2701    tgen_cmpi(s, type, cond, a1, a2);
2702    tgen_csetm(s, type, cond, a0);
2703}
2704
2705static const TCGOutOpSetcond outop_negsetcond = {
2706    .base.static_constraint = C_O1_I2(r, r, rC),
2707    .out_rrr = tgen_negsetcond,
2708    .out_rri = tgen_negsetcondi,
2709};
2710
2711static void tgen_movcond(TCGContext *s, TCGType type, TCGCond cond,
2712                         TCGReg ret, TCGReg c1, TCGArg c2, bool const_c2,
2713                         TCGArg vt, bool const_vt, TCGArg vf, bool const_vf)
2714{
2715    tcg_out_cmp(s, type, cond, c1, c2, const_c2);
2716    tcg_out_insn(s, 3506, CSEL, type, ret, vt, vf, cond);
2717}
2718
2719static const TCGOutOpMovcond outop_movcond = {
2720    .base.static_constraint = C_O1_I4(r, r, rC, rz, rz),
2721    .out = tgen_movcond,
2722};
2723
2724static void tgen_deposit(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
2725                         TCGReg a2, unsigned ofs, unsigned len)
2726{
2727    unsigned mask = type == TCG_TYPE_I32 ? 31 : 63;
2728
2729    /*
2730     * Since we can't support "0Z" as a constraint, we allow a1 in
2731     * any register.  Fix things up as if it were a matching constraint.
2732     */
2733    if (a0 != a1) {
2734        if (a0 == a2) {
2735            tcg_out_mov(s, type, TCG_REG_TMP0, a2);
2736            a2 = TCG_REG_TMP0;
2737        }
2738        tcg_out_mov(s, type, a0, a1);
2739    }
2740    tcg_out_bfm(s, type, a0, a2, -ofs & mask, len - 1);
2741}
2742
2743static void tgen_depositi(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
2744                          tcg_target_long a2, unsigned ofs, unsigned len)
2745{
2746    tgen_andi(s, type, a0, a1, ~MAKE_64BIT_MASK(ofs, len));
2747}
2748
2749static void tgen_depositz(TCGContext *s, TCGType type, TCGReg a0, TCGReg a2,
2750                          unsigned ofs, unsigned len)
2751{
2752    int max = type == TCG_TYPE_I32 ? 31 : 63;
2753    tcg_out_ubfm(s, type, a0, a2, -ofs & max, len - 1);
2754}
2755
2756static const TCGOutOpDeposit outop_deposit = {
2757    .base.static_constraint = C_O1_I2(r, rZ, rZ),
2758    .out_rrr = tgen_deposit,
2759    .out_rri = tgen_depositi,
2760    .out_rzr = tgen_depositz,
2761};
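
/*
 * Deposit, by example: tgen_deposit is the BFI alias of BFM, e.g.
 * ofs = 8, len = 8 at 32 bits emits bfm w0, w2, #24, #7, which is
 * bfi w0, w2, #8, #8.  The rri form only ever sees the constant 0
 * (the rZ constraint), so depositing zero reduces to clearing the
 * field with an AND of the inverted mask.
 */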
2762
2763static void tgen_extract(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
2764                         unsigned ofs, unsigned len)
2765{
2766    if (ofs == 0) {
2767        uint64_t mask = MAKE_64BIT_MASK(0, len);
2768        tcg_out_logicali(s, I3404_ANDI, type, a0, a1, mask);
2769    } else {
2770        tcg_out_ubfm(s, type, a0, a1, ofs, ofs + len - 1);
2771    }
2772}
2773
2774static const TCGOutOpExtract outop_extract = {
2775    .base.static_constraint = C_O1_I1(r, r),
2776    .out_rr = tgen_extract,
2777};
2778
2779static void tgen_sextract(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
2780                          unsigned ofs, unsigned len)
2781{
2782    tcg_out_sbfm(s, type, a0, a1, ofs, ofs + len - 1);
2783}
2784
2785static const TCGOutOpExtract outop_sextract = {
2786    .base.static_constraint = C_O1_I1(r, r),
2787    .out_rr = tgen_sextract,
2788};
2789
2790static void tgen_extract2(TCGContext *s, TCGType type, TCGReg a0,
2791                          TCGReg a1, TCGReg a2, unsigned shr)
2792{
2793    tcg_out_extr(s, type, a0, a2, a1, shr);
2794}
2795
2796static const TCGOutOpExtract2 outop_extract2 = {
2797    .base.static_constraint = C_O1_I2(r, rz, rz),
2798    .out_rrr = tgen_extract2,
2799};
2800
2801static void tgen_ld8u(TCGContext *s, TCGType type, TCGReg dest,
2802                      TCGReg base, ptrdiff_t offset)
2803{
2804    tcg_out_ldst(s, I3312_LDRB, dest, base, offset, 0);
2805}
2806
2807static const TCGOutOpLoad outop_ld8u = {
2808    .base.static_constraint = C_O1_I1(r, r),
2809    .out = tgen_ld8u,
2810};
2811
2812static void tgen_ld8s(TCGContext *s, TCGType type, TCGReg dest,
2813                      TCGReg base, ptrdiff_t offset)
2814{
2815    AArch64Insn insn = type == TCG_TYPE_I32 ? I3312_LDRSBW : I3312_LDRSBX;
2816    tcg_out_ldst(s, insn, dest, base, offset, 0);
2817}
2818
2819static const TCGOutOpLoad outop_ld8s = {
2820    .base.static_constraint = C_O1_I1(r, r),
2821    .out = tgen_ld8s,
2822};
2823
2824static void tgen_ld16u(TCGContext *s, TCGType type, TCGReg dest,
2825                       TCGReg base, ptrdiff_t offset)
2826{
2827    tcg_out_ldst(s, I3312_LDRH, dest, base, offset, 1);
2828}
2829
2830static const TCGOutOpLoad outop_ld16u = {
2831    .base.static_constraint = C_O1_I1(r, r),
2832    .out = tgen_ld16u,
2833};
2834
2835static void tgen_ld16s(TCGContext *s, TCGType type, TCGReg dest,
2836                       TCGReg base, ptrdiff_t offset)
2837{
2838    AArch64Insn insn = type == TCG_TYPE_I32 ? I3312_LDRSHW : I3312_LDRSHX;
2839    tcg_out_ldst(s, insn, dest, base, offset, 1);
2840}
2841
2842static const TCGOutOpLoad outop_ld16s = {
2843    .base.static_constraint = C_O1_I1(r, r),
2844    .out = tgen_ld16s,
2845};
2846
2847static void tgen_ld32u(TCGContext *s, TCGType type, TCGReg dest,
2848                       TCGReg base, ptrdiff_t offset)
2849{
2850    tcg_out_ldst(s, I3312_LDRW, dest, base, offset, 2);
2851}
2852
2853static const TCGOutOpLoad outop_ld32u = {
2854    .base.static_constraint = C_O1_I1(r, r),
2855    .out = tgen_ld32u,
2856};
2857
2858static void tgen_ld32s(TCGContext *s, TCGType type, TCGReg dest,
2859                       TCGReg base, ptrdiff_t offset)
2860{
2861    tcg_out_ldst(s, I3312_LDRSWX, dest, base, offset, 2);
2862}
2863
2864static const TCGOutOpLoad outop_ld32s = {
2865    .base.static_constraint = C_O1_I1(r, r),
2866    .out = tgen_ld32s,
2867};
2868
2869static void tgen_st8_r(TCGContext *s, TCGType type, TCGReg data,
2870                       TCGReg base, ptrdiff_t offset)
2871{
2872    tcg_out_ldst(s, I3312_STRB, data, base, offset, 0);
2873}
2874
2875static const TCGOutOpStore outop_st8 = {
2876    .base.static_constraint = C_O0_I2(rz, r),
2877    .out_r = tgen_st8_r,
2878};
2879
2880static void tgen_st16_r(TCGContext *s, TCGType type, TCGReg data,
2881                        TCGReg base, ptrdiff_t offset)
2882{
2883    tcg_out_ldst(s, I3312_STRH, data, base, offset, 1);
2884}
2885
2886static const TCGOutOpStore outop_st16 = {
2887    .base.static_constraint = C_O0_I2(rz, r),
2888    .out_r = tgen_st16_r,
2889};
2890
2891static const TCGOutOpStore outop_st = {
2892    .base.static_constraint = C_O0_I2(rz, r),
2893    .out_r = tcg_out_st,
2894};
2895
2896static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
2897                           unsigned vecl, unsigned vece,
2898                           const TCGArg args[TCG_MAX_OP_ARGS],
2899                           const int const_args[TCG_MAX_OP_ARGS])
2900{
2901    static const AArch64Insn cmp_vec_insn[16] = {
2902        [TCG_COND_EQ] = I3616_CMEQ,
2903        [TCG_COND_GT] = I3616_CMGT,
2904        [TCG_COND_GE] = I3616_CMGE,
2905        [TCG_COND_GTU] = I3616_CMHI,
2906        [TCG_COND_GEU] = I3616_CMHS,
2907    };
2908    static const AArch64Insn cmp_scalar_insn[16] = {
2909        [TCG_COND_EQ] = I3611_CMEQ,
2910        [TCG_COND_GT] = I3611_CMGT,
2911        [TCG_COND_GE] = I3611_CMGE,
2912        [TCG_COND_GTU] = I3611_CMHI,
2913        [TCG_COND_GEU] = I3611_CMHS,
2914    };
2915    static const AArch64Insn cmp0_vec_insn[16] = {
2916        [TCG_COND_EQ] = I3617_CMEQ0,
2917        [TCG_COND_GT] = I3617_CMGT0,
2918        [TCG_COND_GE] = I3617_CMGE0,
2919        [TCG_COND_LT] = I3617_CMLT0,
2920        [TCG_COND_LE] = I3617_CMLE0,
2921    };
2922    static const AArch64Insn cmp0_scalar_insn[16] = {
2923        [TCG_COND_EQ] = I3612_CMEQ0,
2924        [TCG_COND_GT] = I3612_CMGT0,
2925        [TCG_COND_GE] = I3612_CMGE0,
2926        [TCG_COND_LT] = I3612_CMLT0,
2927        [TCG_COND_LE] = I3612_CMLE0,
2928    };
2929
2930    TCGType type = vecl + TCG_TYPE_V64;
2931    unsigned is_q = vecl;
2932    bool is_scalar = !is_q && vece == MO_64;
2933    TCGArg a0, a1, a2, a3;
2934    int cmode, imm8;
2935
2936    a0 = args[0];
2937    a1 = args[1];
2938    a2 = args[2];
2939
2940    switch (opc) {
2941    case INDEX_op_ld_vec:
2942        tcg_out_ld(s, type, a0, a1, a2);
2943        break;
2944    case INDEX_op_st_vec:
2945        tcg_out_st(s, type, a0, a1, a2);
2946        break;
2947    case INDEX_op_dupm_vec:
2948        tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
2949        break;
2950    case INDEX_op_add_vec:
2951        if (is_scalar) {
2952            tcg_out_insn(s, 3611, ADD, vece, a0, a1, a2);
2953        } else {
2954            tcg_out_insn(s, 3616, ADD, is_q, vece, a0, a1, a2);
2955        }
2956        break;
2957    case INDEX_op_sub_vec:
2958        if (is_scalar) {
2959            tcg_out_insn(s, 3611, SUB, vece, a0, a1, a2);
2960        } else {
2961            tcg_out_insn(s, 3616, SUB, is_q, vece, a0, a1, a2);
2962        }
2963        break;
2964    case INDEX_op_mul_vec:
2965        tcg_out_insn(s, 3616, MUL, is_q, vece, a0, a1, a2);
2966        break;
2967    case INDEX_op_neg_vec:
2968        if (is_scalar) {
2969            tcg_out_insn(s, 3612, NEG, vece, a0, a1);
2970        } else {
2971            tcg_out_insn(s, 3617, NEG, is_q, vece, a0, a1);
2972        }
2973        break;
2974    case INDEX_op_abs_vec:
2975        if (is_scalar) {
2976            tcg_out_insn(s, 3612, ABS, vece, a0, a1);
2977        } else {
2978            tcg_out_insn(s, 3617, ABS, is_q, vece, a0, a1);
2979        }
2980        break;
2981    case INDEX_op_and_vec:
2982        if (const_args[2]) {
2983            is_shimm1632(~a2, &cmode, &imm8);
2984            if (a0 == a1) {
2985                tcg_out_insn(s, 3606, BIC, is_q, a0, 0, cmode, imm8);
2986                return;
2987            }
2988            tcg_out_insn(s, 3606, MVNI, is_q, a0, 0, cmode, imm8);
2989            a2 = a0;
2990        }
2991        tcg_out_insn(s, 3616, AND, is_q, 0, a0, a1, a2);
2992        break;
2993    case INDEX_op_or_vec:
2994        if (const_args[2]) {
2995            is_shimm1632(a2, &cmode, &imm8);
2996            if (a0 == a1) {
2997                tcg_out_insn(s, 3606, ORR, is_q, a0, 0, cmode, imm8);
2998                return;
2999            }
3000            tcg_out_insn(s, 3606, MOVI, is_q, a0, 0, cmode, imm8);
3001            a2 = a0;
3002        }
3003        tcg_out_insn(s, 3616, ORR, is_q, 0, a0, a1, a2);
3004        break;
3005    case INDEX_op_andc_vec:
3006        if (const_args[2]) {
3007            is_shimm1632(a2, &cmode, &imm8);
3008            if (a0 == a1) {
3009                tcg_out_insn(s, 3606, BIC, is_q, a0, 0, cmode, imm8);
3010                return;
3011            }
3012            tcg_out_insn(s, 3606, MOVI, is_q, a0, 0, cmode, imm8);
3013            a2 = a0;
3014        }
3015        tcg_out_insn(s, 3616, BIC, is_q, 0, a0, a1, a2);
3016        break;
3017    case INDEX_op_orc_vec:
3018        if (const_args[2]) {
3019            is_shimm1632(~a2, &cmode, &imm8);
3020            if (a0 == a1) {
3021                tcg_out_insn(s, 3606, ORR, is_q, a0, 0, cmode, imm8);
3022                return;
3023            }
3024            tcg_out_insn(s, 3606, MVNI, is_q, a0, 0, cmode, imm8);
3025            a2 = a0;
3026        }
3027        tcg_out_insn(s, 3616, ORN, is_q, 0, a0, a1, a2);
3028        break;
3029    case INDEX_op_xor_vec:
3030        tcg_out_insn(s, 3616, EOR, is_q, 0, a0, a1, a2);
3031        break;
3032    case INDEX_op_ssadd_vec:
3033        if (is_scalar) {
3034            tcg_out_insn(s, 3611, SQADD, vece, a0, a1, a2);
3035        } else {
3036            tcg_out_insn(s, 3616, SQADD, is_q, vece, a0, a1, a2);
3037        }
3038        break;
3039    case INDEX_op_sssub_vec:
3040        if (is_scalar) {
3041            tcg_out_insn(s, 3611, SQSUB, vece, a0, a1, a2);
3042        } else {
3043            tcg_out_insn(s, 3616, SQSUB, is_q, vece, a0, a1, a2);
3044        }
3045        break;
3046    case INDEX_op_usadd_vec:
3047        if (is_scalar) {
3048            tcg_out_insn(s, 3611, UQADD, vece, a0, a1, a2);
3049        } else {
3050            tcg_out_insn(s, 3616, UQADD, is_q, vece, a0, a1, a2);
3051        }
3052        break;
3053    case INDEX_op_ussub_vec:
3054        if (is_scalar) {
3055            tcg_out_insn(s, 3611, UQSUB, vece, a0, a1, a2);
3056        } else {
3057            tcg_out_insn(s, 3616, UQSUB, is_q, vece, a0, a1, a2);
3058        }
3059        break;
3060    case INDEX_op_smax_vec:
3061        tcg_out_insn(s, 3616, SMAX, is_q, vece, a0, a1, a2);
3062        break;
3063    case INDEX_op_smin_vec:
3064        tcg_out_insn(s, 3616, SMIN, is_q, vece, a0, a1, a2);
3065        break;
3066    case INDEX_op_umax_vec:
3067        tcg_out_insn(s, 3616, UMAX, is_q, vece, a0, a1, a2);
3068        break;
3069    case INDEX_op_umin_vec:
3070        tcg_out_insn(s, 3616, UMIN, is_q, vece, a0, a1, a2);
3071        break;
3072    case INDEX_op_not_vec:
3073        tcg_out_insn(s, 3617, NOT, is_q, 0, a0, a1);
3074        break;
3075    case INDEX_op_shli_vec:
3076        if (is_scalar) {
3077            tcg_out_insn(s, 3609, SHL, a0, a1, a2 + (8 << vece));
3078        } else {
3079            tcg_out_insn(s, 3614, SHL, is_q, a0, a1, a2 + (8 << vece));
3080        }
3081        break;
3082    case INDEX_op_shri_vec:
3083        if (is_scalar) {
3084            tcg_out_insn(s, 3609, USHR, a0, a1, (16 << vece) - a2);
3085        } else {
3086            tcg_out_insn(s, 3614, USHR, is_q, a0, a1, (16 << vece) - a2);
3087        }
3088        break;
3089    case INDEX_op_sari_vec:
3090        if (is_scalar) {
3091            tcg_out_insn(s, 3609, SSHR, a0, a1, (16 << vece) - a2);
3092        } else {
3093            tcg_out_insn(s, 3614, SSHR, is_q, a0, a1, (16 << vece) - a2);
3094        }
3095        break;
3096    case INDEX_op_aa64_sli_vec:
3097        if (is_scalar) {
3098            tcg_out_insn(s, 3609, SLI, a0, a2, args[3] + (8 << vece));
3099        } else {
3100            tcg_out_insn(s, 3614, SLI, is_q, a0, a2, args[3] + (8 << vece));
3101        }
3102        break;
3103    case INDEX_op_shlv_vec:
3104        if (is_scalar) {
3105            tcg_out_insn(s, 3611, USHL, vece, a0, a1, a2);
3106        } else {
3107            tcg_out_insn(s, 3616, USHL, is_q, vece, a0, a1, a2);
3108        }
3109        break;
3110    case INDEX_op_aa64_sshl_vec:
3111        if (is_scalar) {
3112            tcg_out_insn(s, 3611, SSHL, vece, a0, a1, a2);
3113        } else {
3114            tcg_out_insn(s, 3616, SSHL, is_q, vece, a0, a1, a2);
3115        }
3116        break;
3117    case INDEX_op_cmp_vec:
3118        {
3119            TCGCond cond = args[3];
3120            AArch64Insn insn;
3121
3122            switch (cond) {
3123            case TCG_COND_NE:
3124                if (const_args[2]) {
3125                    if (is_scalar) {
3126                        tcg_out_insn(s, 3611, CMTST, vece, a0, a1, a1);
3127                    } else {
3128                        tcg_out_insn(s, 3616, CMTST, is_q, vece, a0, a1, a1);
3129                    }
3130                } else {
3131                    if (is_scalar) {
3132                        tcg_out_insn(s, 3611, CMEQ, vece, a0, a1, a2);
3133                    } else {
3134                        tcg_out_insn(s, 3616, CMEQ, is_q, vece, a0, a1, a2);
3135                    }
3136                    tcg_out_insn(s, 3617, NOT, is_q, 0, a0, a0);
3137                }
3138                break;
3139
3140            case TCG_COND_TSTNE:
3141            case TCG_COND_TSTEQ:
3142                if (const_args[2]) {
3143                    /* (x & 0) == 0 */
3144                    tcg_out_dupi_vec(s, type, MO_8, a0,
3145                                     -(cond == TCG_COND_TSTEQ));
3146                    break;
3147                }
3148                if (is_scalar) {
3149                    tcg_out_insn(s, 3611, CMTST, vece, a0, a1, a2);
3150                } else {
3151                    tcg_out_insn(s, 3616, CMTST, is_q, vece, a0, a1, a2);
3152                }
3153                if (cond == TCG_COND_TSTEQ) {
3154                    tcg_out_insn(s, 3617, NOT, is_q, 0, a0, a0);
3155                }
3156                break;
3157
3158            default:
3159                if (const_args[2]) {
3160                    if (is_scalar) {
3161                        insn = cmp0_scalar_insn[cond];
3162                        if (insn) {
3163                            tcg_out_insn_3612(s, insn, vece, a0, a1);
3164                            break;
3165                        }
3166                    } else {
3167                        insn = cmp0_vec_insn[cond];
3168                        if (insn) {
3169                            tcg_out_insn_3617(s, insn, is_q, vece, a0, a1);
3170                            break;
3171                        }
3172                    }
3173                    tcg_out_dupi_vec(s, type, MO_8, TCG_VEC_TMP0, 0);
3174                    a2 = TCG_VEC_TMP0;
3175                }
3176                if (is_scalar) {
3177                    insn = cmp_scalar_insn[cond];
3178                    if (insn == 0) {
3179                        TCGArg t;
3180                        t = a1, a1 = a2, a2 = t;
3181                        cond = tcg_swap_cond(cond);
3182                        insn = cmp_scalar_insn[cond];
3183                        tcg_debug_assert(insn != 0);
3184                    }
3185                    tcg_out_insn_3611(s, insn, vece, a0, a1, a2);
3186                } else {
3187                    insn = cmp_vec_insn[cond];
3188                    if (insn == 0) {
3189                        TCGArg t;
3190                        t = a1, a1 = a2, a2 = t;
3191                        cond = tcg_swap_cond(cond);
3192                        insn = cmp_vec_insn[cond];
3193                        tcg_debug_assert(insn != 0);
3194                    }
3195                    tcg_out_insn_3616(s, insn, is_q, vece, a0, a1, a2);
3196                }
3197                break;
3198            }
3199        }
3200        break;
3201
3202    case INDEX_op_bitsel_vec:
3203        a3 = args[3];
3204        if (a0 == a3) {
3205            tcg_out_insn(s, 3616, BIT, is_q, 0, a0, a2, a1);
3206        } else if (a0 == a2) {
3207            tcg_out_insn(s, 3616, BIF, is_q, 0, a0, a3, a1);
3208        } else {
3209            if (a0 != a1) {
3210                tcg_out_mov(s, type, a0, a1);
3211            }
3212            tcg_out_insn(s, 3616, BSL, is_q, 0, a0, a2, a3);
3213        }
3214        break;
3215
3216    case INDEX_op_mov_vec:  /* Always emitted via tcg_out_mov.  */
3217    case INDEX_op_dup_vec:  /* Always emitted via tcg_out_dup_vec.  */
3218    default:
3219        g_assert_not_reached();
3220    }
3221}
3222
3223int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
3224{
3225    switch (opc) {
3226    case INDEX_op_add_vec:
3227    case INDEX_op_sub_vec:
3228    case INDEX_op_and_vec:
3229    case INDEX_op_or_vec:
3230    case INDEX_op_xor_vec:
3231    case INDEX_op_andc_vec:
3232    case INDEX_op_orc_vec:
3233    case INDEX_op_neg_vec:
3234    case INDEX_op_abs_vec:
3235    case INDEX_op_not_vec:
3236    case INDEX_op_cmp_vec:
3237    case INDEX_op_shli_vec:
3238    case INDEX_op_shri_vec:
3239    case INDEX_op_sari_vec:
3240    case INDEX_op_ssadd_vec:
3241    case INDEX_op_sssub_vec:
3242    case INDEX_op_usadd_vec:
3243    case INDEX_op_ussub_vec:
3244    case INDEX_op_shlv_vec:
3245    case INDEX_op_bitsel_vec:
3246        return 1;
3247    case INDEX_op_rotli_vec:
3248    case INDEX_op_shrv_vec:
3249    case INDEX_op_sarv_vec:
3250    case INDEX_op_rotlv_vec:
3251    case INDEX_op_rotrv_vec:
3252        return -1;
3253    case INDEX_op_mul_vec:
3254    case INDEX_op_smax_vec:
3255    case INDEX_op_smin_vec:
3256    case INDEX_op_umax_vec:
3257    case INDEX_op_umin_vec:
3258        return vece < MO_64;
3259
3260    default:
3261        return 0;
3262    }
3263}
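
/*
 * Return values follow the usual TCG convention: 1 means the opcode is
 * emitted directly, 0 means unsupported, and -1 means supportable
 * after expansion by tcg_expand_vec_op below (the rotates and variable
 * right shifts here).
 */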
3264
3265void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
3266                       TCGArg a0, ...)
3267{
3268    va_list va;
3269    TCGv_vec v0, v1, v2, t1, t2, c1;
3270    TCGArg a2;
3271
3272    va_start(va, a0);
3273    v0 = temp_tcgv_vec(arg_temp(a0));
3274    v1 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
3275    a2 = va_arg(va, TCGArg);
3276    va_end(va);
3277
3278    switch (opc) {
3279    case INDEX_op_rotli_vec:
3280        t1 = tcg_temp_new_vec(type);
3281        tcg_gen_shri_vec(vece, t1, v1, -a2 & ((8 << vece) - 1));
3282        vec_gen_4(INDEX_op_aa64_sli_vec, type, vece,
3283                  tcgv_vec_arg(v0), tcgv_vec_arg(t1), tcgv_vec_arg(v1), a2);
3284        tcg_temp_free_vec(t1);
3285        break;
3286
3287    case INDEX_op_shrv_vec:
3288    case INDEX_op_sarv_vec:
3289        /* Right shifts are negative left shifts for AArch64.  */
3290        v2 = temp_tcgv_vec(arg_temp(a2));
3291        t1 = tcg_temp_new_vec(type);
3292        tcg_gen_neg_vec(vece, t1, v2);
3293        opc = (opc == INDEX_op_shrv_vec
3294               ? INDEX_op_shlv_vec : INDEX_op_aa64_sshl_vec);
3295        vec_gen_3(opc, type, vece, tcgv_vec_arg(v0),
3296                  tcgv_vec_arg(v1), tcgv_vec_arg(t1));
3297        tcg_temp_free_vec(t1);
3298        break;
3299
3300    case INDEX_op_rotlv_vec:
        v2 = temp_tcgv_vec(arg_temp(a2));
        t1 = tcg_temp_new_vec(type);
        c1 = tcg_constant_vec(type, vece, 8 << vece);
        tcg_gen_sub_vec(vece, t1, v2, c1);
        /* Right shifts are negative left shifts for AArch64.  */
        vec_gen_3(INDEX_op_shlv_vec, type, vece, tcgv_vec_arg(t1),
                  tcgv_vec_arg(v1), tcgv_vec_arg(t1));
        vec_gen_3(INDEX_op_shlv_vec, type, vece, tcgv_vec_arg(v0),
                  tcgv_vec_arg(v1), tcgv_vec_arg(v2));
        tcg_gen_or_vec(vece, v0, v0, t1);
        tcg_temp_free_vec(t1);
        break;

    case INDEX_op_rotrv_vec:
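        /*
         * rotrv(x, c) = (x >> c) | (x << (bits - c)), i.e. x << -c
         * merged with x << (bits - c), again using negative counts
         * for the right-shift half.
         */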
        v2 = temp_tcgv_vec(arg_temp(a2));
        t1 = tcg_temp_new_vec(type);
        t2 = tcg_temp_new_vec(type);
        c1 = tcg_constant_vec(type, vece, 8 << vece);
        tcg_gen_neg_vec(vece, t1, v2);
        tcg_gen_sub_vec(vece, t2, c1, v2);
        /* Right shifts are negative left shifts for AArch64.  */
        vec_gen_3(INDEX_op_shlv_vec, type, vece, tcgv_vec_arg(t1),
                  tcgv_vec_arg(v1), tcgv_vec_arg(t1));
        vec_gen_3(INDEX_op_shlv_vec, type, vece, tcgv_vec_arg(t2),
                  tcgv_vec_arg(v1), tcgv_vec_arg(t2));
        tcg_gen_or_vec(vece, v0, t1, t2);
        tcg_temp_free_vec(t1);
        tcg_temp_free_vec(t2);
        break;

    default:
        g_assert_not_reached();
    }
}

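/*
 * Operand constraints, using the letters defined in tcg-target-con-str.h:
 * 'r' general register, 'w' vector register, '0' alias of the output,
 * 'O' a valid AdvSIMD ORR immediate, 'N' a valid AdvSIMD BIC immediate
 * (the complement of 'O'), 'Z' the constant zero.
 */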
static TCGConstraintSetIndex
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
{
    switch (op) {
    case INDEX_op_add_vec:
    case INDEX_op_sub_vec:
    case INDEX_op_mul_vec:
    case INDEX_op_xor_vec:
    case INDEX_op_ssadd_vec:
    case INDEX_op_sssub_vec:
    case INDEX_op_usadd_vec:
    case INDEX_op_ussub_vec:
    case INDEX_op_smax_vec:
    case INDEX_op_smin_vec:
    case INDEX_op_umax_vec:
    case INDEX_op_umin_vec:
    case INDEX_op_shlv_vec:
    case INDEX_op_shrv_vec:
    case INDEX_op_sarv_vec:
    case INDEX_op_aa64_sshl_vec:
        return C_O1_I2(w, w, w);
    case INDEX_op_not_vec:
    case INDEX_op_neg_vec:
    case INDEX_op_abs_vec:
    case INDEX_op_shli_vec:
    case INDEX_op_shri_vec:
    case INDEX_op_sari_vec:
        return C_O1_I1(w, w);
    case INDEX_op_ld_vec:
    case INDEX_op_dupm_vec:
        return C_O1_I1(w, r);
    case INDEX_op_st_vec:
        return C_O0_I2(w, r);
    case INDEX_op_dup_vec:
        return C_O1_I1(w, wr);
    case INDEX_op_or_vec:
    case INDEX_op_andc_vec:
        return C_O1_I2(w, w, wO);
    case INDEX_op_and_vec:
    case INDEX_op_orc_vec:
        return C_O1_I2(w, w, wN);
    case INDEX_op_cmp_vec:
        return C_O1_I2(w, w, wZ);
    case INDEX_op_bitsel_vec:
        return C_O1_I3(w, w, w, w);
    case INDEX_op_aa64_sli_vec:
        return C_O1_I2(w, 0, w);

    default:
        return C_NotImplemented;
    }
}

static void tcg_target_init(TCGContext *s)
{
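    /* Regset bits 0..31 cover the general registers x0..sp;
       bits 32..63 cover the vector registers v0..v31.  */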
    tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffffu;
    tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffffu;
    tcg_target_available_regs[TCG_TYPE_V64] = 0xffffffff00000000ull;
    tcg_target_available_regs[TCG_TYPE_V128] = 0xffffffff00000000ull;

    tcg_target_call_clobber_regs = -1ull;
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X19);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X20);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X21);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X22);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X23);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X24);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X25);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X26);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X27);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X28);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X29);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V8);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V9);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V10);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V11);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V12);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V13);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V14);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V15);

    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_FP);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_X18); /* platform register */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP0);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP2);
    tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP0);
}

/* Saving pairs: (X19, X20) .. (X27, X28), (X29(fp), X30(lr)).  */
#define PUSH_SIZE  ((30 - 19 + 1) * 8)

#define FRAME_SIZE \
    ((PUSH_SIZE \
      + TCG_STATIC_CALL_ARGS_SIZE \
      + CPU_TEMP_BUF_NLONGS * sizeof(long) \
      + TCG_TARGET_STACK_ALIGN - 1) \
     & ~(TCG_TARGET_STACK_ALIGN - 1))
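/*
 * PUSH_SIZE covers the twelve 8-byte registers x19..x30 (96 bytes);
 * FRAME_SIZE rounds that plus the static call-argument area and the
 * TCG temporary buffer up to TCG_TARGET_STACK_ALIGN.  With the values
 * in tcg.c at the time of writing (TCG_STATIC_CALL_ARGS_SIZE = 128,
 * CPU_TEMP_BUF_NLONGS = 128) this is 96 + 128 + 1024 = 1248 bytes,
 * which is already 16-byte aligned.
 */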

/* We expect a 2-byte uleb128-encoded value.  */
QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));

/* We expect to use a single ADDI insn.  */
QEMU_BUILD_BUG_ON(FRAME_SIZE - PUSH_SIZE > 0xfff);

static void tcg_target_qemu_prologue(TCGContext *s)
{
    TCGReg r;

    tcg_out_bti(s, BTI_C);

    /* Push (FP, LR) and allocate space for all saved registers.  */
    tcg_out_insn(s, 3314, STP, TCG_REG_FP, TCG_REG_LR,
                 TCG_REG_SP, -PUSH_SIZE, 1, 1);

    /* Set up frame pointer for canonical unwinding.  */
    tcg_out_movr_sp(s, TCG_TYPE_I64, TCG_REG_FP, TCG_REG_SP);

    /* Store callee-preserved regs x19..x28.  */
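    /* The (FP, LR) pair occupies offset 0, so x19/x20 start at offset 16.  */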
    for (r = TCG_REG_X19; r <= TCG_REG_X27; r += 2) {
        int ofs = (r - TCG_REG_X19 + 2) * 8;
        tcg_out_insn(s, 3314, STP, r, r + 1, TCG_REG_SP, ofs, 1, 0);
    }

    /* Make stack space for TCG locals.  */
    tcg_out_insn(s, 3401, SUBI, TCG_TYPE_I64, TCG_REG_SP, TCG_REG_SP,
                 FRAME_SIZE - PUSH_SIZE);

    /* Inform TCG about how to find TCG locals with register, offset, size.  */
    tcg_set_frame(s, TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE,
                  CPU_TEMP_BUF_NLONGS * sizeof(long));

    if (!tcg_use_softmmu) {
        /*
         * Note that XZR cannot be encoded in the address base register
         * slot, as that encoding actually denotes SP.  Depending on the
         * guest, we may need to zero-extend the guest address via the
         * address index register slot, so even a zero guest base must
         * be loaded into a register.
         */
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_GUEST_BASE, guest_base);
        tcg_regset_set_reg(s->reserved_regs, TCG_REG_GUEST_BASE);
    }

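    /* The prologue is entered with x0 = CPUArchState pointer and
       x1 = address of the TB to execute: install AREG0, then tail-jump
       into the translated code.  */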
    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
    tcg_out_insn(s, 3207, BR, tcg_target_call_iarg_regs[1]);

    /*
     * Return path for goto_ptr. Set return value to 0, a la exit_tb,
     * and fall through to the rest of the epilogue.
     */
    tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
    tcg_out_bti(s, BTI_J);
    tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_X0, 0);

    /* TB epilogue */
    tb_ret_addr = tcg_splitwx_to_rx(s->code_ptr);
    tcg_out_bti(s, BTI_J);

    /* Remove TCG locals stack space.  */
    tcg_out_insn(s, 3401, ADDI, TCG_TYPE_I64, TCG_REG_SP, TCG_REG_SP,
                 FRAME_SIZE - PUSH_SIZE);

    /* Restore registers x19..x28.  */
    for (r = TCG_REG_X19; r <= TCG_REG_X27; r += 2) {
        int ofs = (r - TCG_REG_X19 + 2) * 8;
        tcg_out_insn(s, 3314, LDP, r, r + 1, TCG_REG_SP, ofs, 1, 0);
    }

    /* Pop (FP, LR), restore SP to previous frame.  */
    tcg_out_insn(s, 3314, LDP, TCG_REG_FP, TCG_REG_LR,
                 TCG_REG_SP, PUSH_SIZE, 0, 1);
    tcg_out_insn(s, 3207, RET, TCG_REG_LR);
}

static void tcg_out_tb_start(TCGContext *s)
{
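    /* Each TB starts with BTI J, so that indirect jumps to the start of
       a TB (e.g. via goto_ptr) land on a valid branch target when BTI
       is enabled.  */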
    tcg_out_bti(s, BTI_J);
}

static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
    int i;
    for (i = 0; i < count; ++i) {
        p[i] = NOP;
    }
}

typedef struct {
    DebugFrameHeader h;
    uint8_t fde_def_cfa[4];
    uint8_t fde_reg_ofs[24];
} DebugFrame;

#define ELF_HOST_MACHINE EM_AARCH64

static const DebugFrame debug_frame = {
    .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
    .h.cie.id = -1,
    .h.cie.version = 1,
    .h.cie.code_align = 1,
    .h.cie.data_align = 0x78,             /* sleb128 -8 */
    .h.cie.return_column = TCG_REG_LR,

    /* Total FDE size does not include the "len" member.  */
    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),

    .fde_def_cfa = {
        12, TCG_REG_SP,                 /* DW_CFA_def_cfa sp, ... */
        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
        (FRAME_SIZE >> 7)
    },
    .fde_reg_ofs = {
        0x80 + 28, 1,                   /* DW_CFA_offset, x28,  -8 */
        0x80 + 27, 2,                   /* DW_CFA_offset, x27, -16 */
        0x80 + 26, 3,                   /* DW_CFA_offset, x26, -24 */
        0x80 + 25, 4,                   /* DW_CFA_offset, x25, -32 */
        0x80 + 24, 5,                   /* DW_CFA_offset, x24, -40 */
        0x80 + 23, 6,                   /* DW_CFA_offset, x23, -48 */
        0x80 + 22, 7,                   /* DW_CFA_offset, x22, -56 */
        0x80 + 21, 8,                   /* DW_CFA_offset, x21, -64 */
        0x80 + 20, 9,                   /* DW_CFA_offset, x20, -72 */
        0x80 + 19, 10,                  /* DW_CFA_offset, x19, -80 */
        0x80 + 30, 11,                  /* DW_CFA_offset,  lr, -88 */
        0x80 + 29, 12,                  /* DW_CFA_offset,  fp, -96 */
    }
};

void tcg_register_jit(const void *buf, size_t buf_size)
{
    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}
