xref: /openbmc/qemu/tcg/arm/tcg-target.c.inc (revision 412a91f6)
/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Andrzej Zaborowski
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "elf.h"
#include "../tcg-ldst.c.inc"
#include "../tcg-pool.c.inc"

int arm_arch = __ARM_ARCH;

#ifndef use_idiv_instructions
bool use_idiv_instructions;
#endif
#ifndef use_neon_instructions
bool use_neon_instructions;
#endif

#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%r0",  "%r1",  "%r2",  "%r3",  "%r4",  "%r5",  "%r6",  "%r7",
    "%r8",  "%r9",  "%r10", "%r11", "%r12", "%sp",  "%r14", "%pc",
    "%q0",  "%q1",  "%q2",  "%q3",  "%q4",  "%q5",  "%q6",  "%q7",
    "%q8",  "%q9",  "%q10", "%q11", "%q12", "%q13", "%q14", "%q15",
};
#endif

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R13,
    TCG_REG_R0,
    TCG_REG_R1,
    TCG_REG_R2,
    TCG_REG_R3,
    TCG_REG_R12,
    TCG_REG_R14,

    TCG_REG_Q0,
    TCG_REG_Q1,
    TCG_REG_Q2,
    TCG_REG_Q3,
    /* Q4 - Q7 are call-saved, and skipped. */
    TCG_REG_Q8,
    TCG_REG_Q9,
    TCG_REG_Q10,
    TCG_REG_Q11,
    TCG_REG_Q12,
    TCG_REG_Q13,
    TCG_REG_Q14,
    TCG_REG_Q15,
};

static const int tcg_target_call_iarg_regs[4] = {
    TCG_REG_R0, TCG_REG_R1, TCG_REG_R2, TCG_REG_R3
};

static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
{
    tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
    tcg_debug_assert(slot >= 0 && slot <= 3);
    return TCG_REG_R0 + slot;
}

#define TCG_REG_TMP  TCG_REG_R12
#define TCG_VEC_TMP  TCG_REG_Q15
#define TCG_REG_GUEST_BASE  TCG_REG_R11

typedef enum {
    COND_EQ = 0x0,
    COND_NE = 0x1,
    COND_CS = 0x2,    /* Unsigned greater or equal */
    COND_CC = 0x3,    /* Unsigned less than */
    COND_MI = 0x4,    /* Negative */
    COND_PL = 0x5,    /* Zero or greater */
    COND_VS = 0x6,    /* Overflow */
    COND_VC = 0x7,    /* No overflow */
    COND_HI = 0x8,    /* Unsigned greater than */
    COND_LS = 0x9,    /* Unsigned less or equal */
    COND_GE = 0xa,
    COND_LT = 0xb,
    COND_GT = 0xc,
    COND_LE = 0xd,
    COND_AL = 0xe,
} ARMCond;

#define TO_CPSR (1 << 20)

#define SHIFT_IMM_LSL(im)    (((im) << 7) | 0x00)
#define SHIFT_IMM_LSR(im)    (((im) << 7) | 0x20)
#define SHIFT_IMM_ASR(im)    (((im) << 7) | 0x40)
#define SHIFT_IMM_ROR(im)    (((im) << 7) | 0x60)
#define SHIFT_REG_LSL(rs)    (((rs) << 8) | 0x10)
#define SHIFT_REG_LSR(rs)    (((rs) << 8) | 0x30)
#define SHIFT_REG_ASR(rs)    (((rs) << 8) | 0x50)
#define SHIFT_REG_ROR(rs)    (((rs) << 8) | 0x70)
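
/*
 * For instance, SHIFT_IMM_ASR(3) yields (3 << 7) | 0x40 = 0x1c0, the
 * operand-2 field for "Rm, ASR #3" once OR'd with the register number,
 * and SHIFT_IMM_LSL(0) is 0, a plain unshifted register operand.
 */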

typedef enum {
    ARITH_AND = 0x0 << 21,
    ARITH_EOR = 0x1 << 21,
    ARITH_SUB = 0x2 << 21,
    ARITH_RSB = 0x3 << 21,
    ARITH_ADD = 0x4 << 21,
    ARITH_ADC = 0x5 << 21,
    ARITH_SBC = 0x6 << 21,
    ARITH_RSC = 0x7 << 21,
    ARITH_TST = 0x8 << 21 | TO_CPSR,
    ARITH_CMP = 0xa << 21 | TO_CPSR,
    ARITH_CMN = 0xb << 21 | TO_CPSR,
    ARITH_ORR = 0xc << 21,
    ARITH_MOV = 0xd << 21,
    ARITH_BIC = 0xe << 21,
    ARITH_MVN = 0xf << 21,

    INSN_B         = 0x0a000000,

    INSN_CLZ       = 0x016f0f10,
    INSN_RBIT      = 0x06ff0f30,

    INSN_LDMIA     = 0x08b00000,
    INSN_STMDB     = 0x09200000,

    INSN_LDR_IMM   = 0x04100000,
    INSN_LDR_REG   = 0x06100000,
    INSN_STR_IMM   = 0x04000000,
    INSN_STR_REG   = 0x06000000,

    INSN_LDRH_IMM  = 0x005000b0,
    INSN_LDRH_REG  = 0x001000b0,
    INSN_LDRSH_IMM = 0x005000f0,
    INSN_LDRSH_REG = 0x001000f0,
    INSN_STRH_IMM  = 0x004000b0,
    INSN_STRH_REG  = 0x000000b0,

    INSN_LDRB_IMM  = 0x04500000,
    INSN_LDRB_REG  = 0x06500000,
    INSN_LDRSB_IMM = 0x005000d0,
    INSN_LDRSB_REG = 0x001000d0,
    INSN_STRB_IMM  = 0x04400000,
    INSN_STRB_REG  = 0x06400000,

    INSN_LDRD_IMM  = 0x004000d0,
    INSN_LDRD_REG  = 0x000000d0,
    INSN_STRD_IMM  = 0x004000f0,
    INSN_STRD_REG  = 0x000000f0,

    INSN_DMB_ISH   = 0xf57ff05b,
    INSN_DMB_MCR   = 0xee070fba,

    /* Architected nop introduced in v6k.  */
    /* ??? This is an MSR (imm) 0,0,0 insn.  Does anyone know if this
       also just so happened to do nothing on pre-v6k, so that we
       don't need to conditionalize it?  */
    INSN_NOP_v6k   = 0xe320f000,
    /* Otherwise the assembler uses mov r0,r0 */
    INSN_NOP_v4    = (COND_AL << 28) | ARITH_MOV,

    INSN_VADD      = 0xf2000800,
    INSN_VAND      = 0xf2000110,
    INSN_VBIC      = 0xf2100110,
    INSN_VEOR      = 0xf3000110,
    INSN_VORN      = 0xf2300110,
    INSN_VORR      = 0xf2200110,
    INSN_VSUB      = 0xf3000800,
    INSN_VMUL      = 0xf2000910,
    INSN_VQADD     = 0xf2000010,
    INSN_VQADD_U   = 0xf3000010,
    INSN_VQSUB     = 0xf2000210,
    INSN_VQSUB_U   = 0xf3000210,
    INSN_VMAX      = 0xf2000600,
    INSN_VMAX_U    = 0xf3000600,
    INSN_VMIN      = 0xf2000610,
    INSN_VMIN_U    = 0xf3000610,

    INSN_VABS      = 0xf3b10300,
    INSN_VMVN      = 0xf3b00580,
    INSN_VNEG      = 0xf3b10380,

    INSN_VCEQ0     = 0xf3b10100,
    INSN_VCGT0     = 0xf3b10000,
    INSN_VCGE0     = 0xf3b10080,
    INSN_VCLE0     = 0xf3b10180,
    INSN_VCLT0     = 0xf3b10200,

    INSN_VCEQ      = 0xf3000810,
    INSN_VCGE      = 0xf2000310,
    INSN_VCGT      = 0xf2000300,
    INSN_VCGE_U    = 0xf3000310,
    INSN_VCGT_U    = 0xf3000300,

    INSN_VSHLI     = 0xf2800510,  /* VSHL (immediate) */
    INSN_VSARI     = 0xf2800010,  /* VSHR.S */
    INSN_VSHRI     = 0xf3800010,  /* VSHR.U */
    INSN_VSLI      = 0xf3800510,
    INSN_VSHL_S    = 0xf2000400,  /* VSHL.S (register) */
    INSN_VSHL_U    = 0xf3000400,  /* VSHL.U (register) */

    INSN_VBSL      = 0xf3100110,
    INSN_VBIT      = 0xf3200110,
    INSN_VBIF      = 0xf3300110,

    INSN_VTST      = 0xf2000810,

    INSN_VDUP_G    = 0xee800b10,  /* VDUP (ARM core register) */
    INSN_VDUP_S    = 0xf3b00c00,  /* VDUP (scalar) */
    INSN_VLDR_D    = 0xed100b00,  /* VLDR.64 */
    INSN_VLD1      = 0xf4200000,  /* VLD1 (multiple single elements) */
    INSN_VLD1R     = 0xf4a00c00,  /* VLD1 (single element to all lanes) */
    INSN_VST1      = 0xf4000000,  /* VST1 (multiple single elements) */
    INSN_VMOVI     = 0xf2800010,  /* VMOV (immediate) */
} ARMInsn;

#define INSN_NOP   (use_armv7_instructions ? INSN_NOP_v6k : INSN_NOP_v4)

static const uint8_t tcg_cond_to_arm_cond[] = {
    [TCG_COND_EQ] = COND_EQ,
    [TCG_COND_NE] = COND_NE,
    [TCG_COND_LT] = COND_LT,
    [TCG_COND_GE] = COND_GE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_GT,
    /* unsigned */
    [TCG_COND_LTU] = COND_CC,
    [TCG_COND_GEU] = COND_CS,
    [TCG_COND_LEU] = COND_LS,
    [TCG_COND_GTU] = COND_HI,
};

static int encode_imm(uint32_t imm);

/* TCG private relocation type: add with pc+imm8 */
#define R_ARM_PC8  11

/* TCG private relocation type: vldr with imm8 << 2 */
#define R_ARM_PC11 12

static bool reloc_pc24(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t offset = (tcg_ptr_byte_diff(target, src_rx) - 8) >> 2;

    if (offset == sextract32(offset, 0, 24)) {
        *src_rw = deposit32(*src_rw, 0, 24, offset);
        return true;
    }
    return false;
}
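
/*
 * The 24-bit field above holds a word offset, so (as an illustration)
 * a B/BL at address 0x1000 can reach targets within roughly +/-32MB:
 * offset = (target - 0x1000 - 8) >> 2 must fit in a signed 24-bit value.
 */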

static bool reloc_pc13(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t offset = tcg_ptr_byte_diff(target, src_rx) - 8;

    if (offset >= -0xfff && offset <= 0xfff) {
        tcg_insn_unit insn = *src_rw;
        bool u = (offset >= 0);
        if (!u) {
            offset = -offset;
        }
        insn = deposit32(insn, 23, 1, u);
        insn = deposit32(insn, 0, 12, offset);
        *src_rw = insn;
        return true;
    }
    return false;
}

static bool reloc_pc11(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t offset = (tcg_ptr_byte_diff(target, src_rx) - 8) / 4;

    if (offset >= -0xff && offset <= 0xff) {
        tcg_insn_unit insn = *src_rw;
        bool u = (offset >= 0);
        if (!u) {
            offset = -offset;
        }
        insn = deposit32(insn, 23, 1, u);
        insn = deposit32(insn, 0, 8, offset);
        *src_rw = insn;
        return true;
    }
    return false;
}

static bool reloc_pc8(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t offset = tcg_ptr_byte_diff(target, src_rx) - 8;
    int imm12 = encode_imm(offset);

    if (imm12 >= 0) {
        *src_rw = deposit32(*src_rw, 0, 12, imm12);
        return true;
    }
    return false;
}

static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    tcg_debug_assert(addend == 0);
    switch (type) {
    case R_ARM_PC24:
        return reloc_pc24(code_ptr, (const tcg_insn_unit *)value);
    case R_ARM_PC13:
        return reloc_pc13(code_ptr, (const tcg_insn_unit *)value);
    case R_ARM_PC11:
        return reloc_pc11(code_ptr, (const tcg_insn_unit *)value);
    case R_ARM_PC8:
        return reloc_pc8(code_ptr, (const tcg_insn_unit *)value);
    default:
        g_assert_not_reached();
    }
}

#define TCG_CT_CONST_ARM  0x100
#define TCG_CT_CONST_INV  0x200
#define TCG_CT_CONST_NEG  0x400
#define TCG_CT_CONST_ZERO 0x800
#define TCG_CT_CONST_ORRI 0x1000
#define TCG_CT_CONST_ANDI 0x2000

#define ALL_GENERAL_REGS  0xffffu
#define ALL_VECTOR_REGS   0xffff0000u

/*
 * r0-r3 will be overwritten when reading the tlb entry (system-mode only);
 * r14 will be overwritten by the BLNE branching to the slow path.
 */
#define ALL_QLDST_REGS \
    (ALL_GENERAL_REGS & ~((tcg_use_softmmu ? 0xf : 0) | (1 << TCG_REG_R14)))
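
/*
 * Concretely: with softmmu this evaluates to 0xffff & ~0x400f = 0xbff0
 * (r0-r3 and r14 removed), and for user-only to 0xffff & ~0x4000 =
 * 0xbfff (only r14 removed).
 */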

/*
 * ARM immediates for ALU instructions are made of an unsigned 8-bit
 * value right-rotated by an even amount between 0 and 30.
 *
 * Return < 0 if @imm cannot be encoded, else the entire imm12 field.
 */
static int encode_imm(uint32_t imm)
{
    uint32_t rot, imm8;

    /* Simple case, no rotation required. */
    if ((imm & ~0xff) == 0) {
        return imm;
    }

    /* Next, try a simple even shift.  */
    rot = ctz32(imm) & ~1;
    imm8 = imm >> rot;
    rot = 32 - rot;
    if ((imm8 & ~0xff) == 0) {
        goto found;
    }

    /*
     * Finally, try harder with rotations.
     * The ctz test above will have taken care of rotates >= 8.
     */
    for (rot = 2; rot < 8; rot += 2) {
        imm8 = rol32(imm, rot);
        if ((imm8 & ~0xff) == 0) {
            goto found;
        }
    }
    /* Fail: imm cannot be encoded. */
    return -1;

 found:
    /* Note that rot is even, and we discard bit 0 by shifting by 7. */
    return rot << 7 | imm8;
}
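
/*
 * A worked example: encode_imm(0x3fc00) finds ctz32 = 10, so rot
 * becomes 10 & ~1 = 10, imm8 = 0xff, and the field rot = 32 - 10 = 22.
 * The returned imm12 is (22 << 7) | 0xff = 0xbff, i.e. "0xff ROR #22",
 * which indeed rotates back to 0xff << 10 = 0x3fc00.
 */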

static int encode_imm_nofail(uint32_t imm)
{
    int ret = encode_imm(imm);
    tcg_debug_assert(ret >= 0);
    return ret;
}

static bool check_fit_imm(uint32_t imm)
{
    return encode_imm(imm) >= 0;
}

/* Return true if v16 is a valid 16-bit shifted immediate.  */
static bool is_shimm16(uint16_t v16, int *cmode, int *imm8)
{
    if (v16 == (v16 & 0xff)) {
        *cmode = 0x8;
        *imm8 = v16 & 0xff;
        return true;
    } else if (v16 == (v16 & 0xff00)) {
        *cmode = 0xa;
        *imm8 = v16 >> 8;
        return true;
    }
    return false;
}

/* Return true if v32 is a valid 32-bit shifted immediate.  */
static bool is_shimm32(uint32_t v32, int *cmode, int *imm8)
{
    if (v32 == (v32 & 0xff)) {
        *cmode = 0x0;
        *imm8 = v32 & 0xff;
        return true;
    } else if (v32 == (v32 & 0xff00)) {
        *cmode = 0x2;
        *imm8 = (v32 >> 8) & 0xff;
        return true;
    } else if (v32 == (v32 & 0xff0000)) {
        *cmode = 0x4;
        *imm8 = (v32 >> 16) & 0xff;
        return true;
    } else if (v32 == (v32 & 0xff000000)) {
        *cmode = 0x6;
        *imm8 = v32 >> 24;
        return true;
    }
    return false;
}
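
/*
 * For instance, v32 = 0x00ab0000 matches the third case and yields
 * cmode = 0x4, imm8 = 0xab: the NEON "imm8 shifted left by 16" form.
 */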

/* Return true if v32 is a valid 32-bit shifting ones immediate.  */
static bool is_soimm32(uint32_t v32, int *cmode, int *imm8)
{
    if ((v32 & 0xffff00ff) == 0xff) {
        *cmode = 0xc;
        *imm8 = (v32 >> 8) & 0xff;
        return true;
    } else if ((v32 & 0xff00ffff) == 0xffff) {
        *cmode = 0xd;
        *imm8 = (v32 >> 16) & 0xff;
        return true;
    }
    return false;
}

/*
 * Return non-zero if v32 can be formed by MOVI+ORR.
 * Place the parameters for MOVI in (cmode, imm8).
 * Return the cmode for ORR; the imm8 can be had via extraction from v32.
 */
static int is_shimm32_pair(uint32_t v32, int *cmode, int *imm8)
{
    int i;

    for (i = 6; i > 0; i -= 2) {
        /* Mask out one byte we can add with ORR.  */
        uint32_t tmp = v32 & ~(0xffu << (i * 4));
        if (is_shimm32(tmp, cmode, imm8) ||
            is_soimm32(tmp, cmode, imm8)) {
            break;
        }
    }
    return i;
}
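
/*
 * As an illustration, v32 = 0x00ff00ab: masking out the byte at bit 16
 * leaves 0x000000ab, a plain shifted immediate (cmode 0x0), so the
 * value can be built as MOVI #0xab followed by an ORR of 0xff into
 * bits 16..23; the function returns i = 4 to identify that byte.
 */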

/* Return true if V is a valid 16-bit or 32-bit shifted immediate.  */
static bool is_shimm1632(uint32_t v32, int *cmode, int *imm8)
{
    if (v32 == deposit32(v32, 16, 16, v32)) {
        return is_shimm16(v32, cmode, imm8);
    } else {
        return is_shimm32(v32, cmode, imm8);
    }
}

/* Test if a constant matches the constraint.
 * TODO: define constraints for:
 *
 * ldr/str offset:   between -0xfff and 0xfff
 * ldrh/strh offset: between -0xff and 0xff
 * mov operand2:     values represented with x << (2 * y), x < 0x100
 * add, sub, eor...: ditto
 */
static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece)
{
    if (ct & TCG_CT_CONST) {
        return true;
    } else if ((ct & TCG_CT_CONST_ARM) && check_fit_imm(val)) {
        return true;
    } else if ((ct & TCG_CT_CONST_INV) && check_fit_imm(~val)) {
        return true;
    } else if ((ct & TCG_CT_CONST_NEG) && check_fit_imm(-val)) {
        return true;
    } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return true;
    }

    switch (ct & (TCG_CT_CONST_ORRI | TCG_CT_CONST_ANDI)) {
    case 0:
        break;
    case TCG_CT_CONST_ANDI:
        val = ~val;
        /* fallthru */
    case TCG_CT_CONST_ORRI:
        if (val == deposit64(val, 32, 32, val)) {
            int cmode, imm8;
            return is_shimm1632(val, &cmode, &imm8);
        }
        break;
    default:
        /* Both bits should not be set for the same insn.  */
        g_assert_not_reached();
    }

    return false;
}

static void tcg_out_b_imm(TCGContext *s, ARMCond cond, int32_t offset)
{
    tcg_out32(s, (cond << 28) | INSN_B |
                    (((offset - 8) >> 2) & 0x00ffffff));
}

static void tcg_out_bl_imm(TCGContext *s, ARMCond cond, int32_t offset)
{
    tcg_out32(s, (cond << 28) | 0x0b000000 |
                    (((offset - 8) >> 2) & 0x00ffffff));
}

static void tcg_out_blx_reg(TCGContext *s, ARMCond cond, TCGReg rn)
{
    tcg_out32(s, (cond << 28) | 0x012fff30 | rn);
}

static void tcg_out_blx_imm(TCGContext *s, int32_t offset)
{
    tcg_out32(s, 0xfa000000 | ((offset & 2) << 23) |
                (((offset - 8) >> 2) & 0x00ffffff));
}

static void tcg_out_dat_reg(TCGContext *s, ARMCond cond, ARMInsn opc,
                            TCGReg rd, TCGReg rn, TCGReg rm, int shift)
{
    tcg_out32(s, (cond << 28) | (0 << 25) | opc |
                    (rn << 16) | (rd << 12) | shift | rm);
}

static void tcg_out_mov_reg(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rm)
{
    /* Simple reg-reg move, optimising out the 'do nothing' case */
    if (rd != rm) {
        tcg_out_dat_reg(s, cond, ARITH_MOV, rd, 0, rm, SHIFT_IMM_LSL(0));
    }
}

static void tcg_out_bx_reg(TCGContext *s, ARMCond cond, TCGReg rn)
{
    tcg_out32(s, (cond << 28) | 0x012fff10 | rn);
}

static void tcg_out_b_reg(TCGContext *s, ARMCond cond, TCGReg rn)
{
    /*
     * Unless the C portion of QEMU is compiled as thumb, we don't need
     * true BX semantics; merely a branch to an address held in a register.
     */
    tcg_out_bx_reg(s, cond, rn);
}

static void tcg_out_dat_imm(TCGContext *s, ARMCond cond, ARMInsn opc,
                            TCGReg rd, TCGReg rn, int im)
{
    tcg_out32(s, (cond << 28) | (1 << 25) | opc |
                    (rn << 16) | (rd << 12) | im);
}

static void tcg_out_ldstm(TCGContext *s, ARMCond cond, ARMInsn opc,
                          TCGReg rn, uint16_t mask)
{
    tcg_out32(s, (cond << 28) | opc | (rn << 16) | mask);
}

/* Note that this routine is used for both LDR and LDRH formats, so we do
   not wish to include an immediate shift at this point.  */
static void tcg_out_memop_r(TCGContext *s, ARMCond cond, ARMInsn opc, TCGReg rt,
                            TCGReg rn, TCGReg rm, bool u, bool p, bool w)
{
    tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24)
              | (w << 21) | (rn << 16) | (rt << 12) | rm);
}

static void tcg_out_memop_8(TCGContext *s, ARMCond cond, ARMInsn opc, TCGReg rt,
                            TCGReg rn, int imm8, bool p, bool w)
{
    bool u = true;
    if (imm8 < 0) {
        imm8 = -imm8;
        u = false;
    }
    tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) | (w << 21) |
              (rn << 16) | (rt << 12) | ((imm8 & 0xf0) << 4) | (imm8 & 0xf));
}

static void tcg_out_memop_12(TCGContext *s, ARMCond cond, ARMInsn opc,
                             TCGReg rt, TCGReg rn, int imm12, bool p, bool w)
{
    bool u = true;
    if (imm12 < 0) {
        imm12 = -imm12;
        u = false;
    }
    tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) | (w << 21) |
              (rn << 16) | (rt << 12) | imm12);
}

static void tcg_out_ld32_12(TCGContext *s, ARMCond cond, TCGReg rt,
                            TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_LDR_IMM, rt, rn, imm12, 1, 0);
}

static void tcg_out_st32_12(TCGContext *s, ARMCond cond, TCGReg rt,
                            TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_STR_IMM, rt, rn, imm12, 1, 0);
}

static void tcg_out_ld32_r(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_st32_r(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_ldrd_8(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRD_IMM, rt, rn, imm8, 1, 0);
}

static void tcg_out_ldrd_r(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRD_REG, rt, rn, rm, 1, 1, 0);
}

static void __attribute__((unused))
tcg_out_ldrd_rwb(TCGContext *s, ARMCond cond, TCGReg rt, TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRD_REG, rt, rn, rm, 1, 1, 1);
}

static void __attribute__((unused))
tcg_out_strd_8(TCGContext *s, ARMCond cond, TCGReg rt, TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_STRD_IMM, rt, rn, imm8, 1, 0);
}

static void tcg_out_strd_r(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STRD_REG, rt, rn, rm, 1, 1, 0);
}

/* Register pre-increment with base writeback.  */
static void tcg_out_ld32_rwb(TCGContext *s, ARMCond cond, TCGReg rt,
                             TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 1);
}

static void tcg_out_st32_rwb(TCGContext *s, ARMCond cond, TCGReg rt,
                             TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 1);
}

static void tcg_out_ld16u_8(TCGContext *s, ARMCond cond, TCGReg rt,
                            TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRH_IMM, rt, rn, imm8, 1, 0);
}

static void tcg_out_st16_8(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_STRH_IMM, rt, rn, imm8, 1, 0);
}

static void tcg_out_ld16u_r(TCGContext *s, ARMCond cond, TCGReg rt,
                            TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRH_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_st16_r(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STRH_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_ld16s_8(TCGContext *s, ARMCond cond, TCGReg rt,
                            TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRSH_IMM, rt, rn, imm8, 1, 0);
}

static void tcg_out_ld16s_r(TCGContext *s, ARMCond cond, TCGReg rt,
                            TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRSH_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_ld8_12(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_LDRB_IMM, rt, rn, imm12, 1, 0);
}

static void tcg_out_st8_12(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_STRB_IMM, rt, rn, imm12, 1, 0);
}

static void tcg_out_ld8_r(TCGContext *s, ARMCond cond, TCGReg rt,
                          TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRB_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_st8_r(TCGContext *s, ARMCond cond, TCGReg rt,
                          TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STRB_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_ld8s_8(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRSB_IMM, rt, rn, imm8, 1, 0);
}

static void tcg_out_ld8s_r(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRSB_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_movi_pool(TCGContext *s, ARMCond cond,
                              TCGReg rd, uint32_t arg)
{
    new_pool_label(s, arg, R_ARM_PC13, s->code_ptr, 0);
    tcg_out_ld32_12(s, cond, rd, TCG_REG_PC, 0);
}

static void tcg_out_movi32(TCGContext *s, ARMCond cond,
                           TCGReg rd, uint32_t arg)
{
    int imm12, diff, opc, sh1, sh2;
    uint32_t tt0, tt1, tt2;

    /* Check a single MOV/MVN before anything else.  */
    imm12 = encode_imm(arg);
    if (imm12 >= 0) {
        tcg_out_dat_imm(s, cond, ARITH_MOV, rd, 0, imm12);
        return;
    }
    imm12 = encode_imm(~arg);
    if (imm12 >= 0) {
        tcg_out_dat_imm(s, cond, ARITH_MVN, rd, 0, imm12);
        return;
    }

    /* Check for a pc-relative address.  This will usually be the TB,
       or within the TB, which is immediately before the code block.  */
    diff = tcg_pcrel_diff(s, (void *)arg) - 8;
    if (diff >= 0) {
        imm12 = encode_imm(diff);
        if (imm12 >= 0) {
            tcg_out_dat_imm(s, cond, ARITH_ADD, rd, TCG_REG_PC, imm12);
            return;
        }
    } else {
        imm12 = encode_imm(-diff);
        if (imm12 >= 0) {
            tcg_out_dat_imm(s, cond, ARITH_SUB, rd, TCG_REG_PC, imm12);
            return;
        }
    }

    /* Use movw + movt.  */
    if (use_armv7_instructions) {
        /* movw */
        tcg_out32(s, (cond << 28) | 0x03000000 | (rd << 12)
                  | ((arg << 4) & 0x000f0000) | (arg & 0xfff));
        if (arg & 0xffff0000) {
            /* movt */
            tcg_out32(s, (cond << 28) | 0x03400000 | (rd << 12)
                      | ((arg >> 12) & 0x000f0000) | ((arg >> 16) & 0xfff));
        }
        return;
    }

    /* Look for sequences of two insns.  If we have lots of 1's, we can
       shorten the sequence by beginning with mvn and then clearing
       higher bits with eor.  */
    tt0 = arg;
    opc = ARITH_MOV;
    if (ctpop32(arg) > 16) {
        tt0 = ~arg;
        opc = ARITH_MVN;
    }
    sh1 = ctz32(tt0) & ~1;
    tt1 = tt0 & ~(0xff << sh1);
    sh2 = ctz32(tt1) & ~1;
    tt2 = tt1 & ~(0xff << sh2);
    if (tt2 == 0) {
        int rot;

        rot = ((32 - sh1) << 7) & 0xf00;
        tcg_out_dat_imm(s, cond, opc, rd,  0, ((tt0 >> sh1) & 0xff) | rot);
        rot = ((32 - sh2) << 7) & 0xf00;
        tcg_out_dat_imm(s, cond, ARITH_EOR, rd, rd,
                        ((tt0 >> sh2) & 0xff) | rot);
        return;
    }

    /* Otherwise, drop it into the constant pool.  */
    tcg_out_movi_pool(s, cond, rd, arg);
}
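
/*
 * For example, on v7, 0x12345678 becomes "movw rd, #0x5678" plus
 * "movt rd, #0x1234".  On older cores a value such as 0x00ff00ff
 * still fits in two insns via the eor path: "mov rd, #0xff" then
 * "eor rd, rd, #0x00ff0000".
 */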

/*
 * Emit either the reg,imm or reg,reg form of a data-processing insn.
 * rhs must satisfy the "rI" constraint.
 */
static void tcg_out_dat_rI(TCGContext *s, ARMCond cond, ARMInsn opc,
                           TCGReg dst, TCGReg lhs, TCGArg rhs, int rhs_is_const)
{
    if (rhs_is_const) {
        tcg_out_dat_imm(s, cond, opc, dst, lhs, encode_imm_nofail(rhs));
    } else {
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
    }
}

/*
 * Emit either the reg,imm or reg,reg form of a data-processing insn.
 * rhs must satisfy the "rIK" constraint.
 */
static void tcg_out_dat_rIK(TCGContext *s, ARMCond cond, ARMInsn opc,
                            ARMInsn opinv, TCGReg dst, TCGReg lhs, TCGArg rhs,
                            bool rhs_is_const)
{
    if (rhs_is_const) {
        int imm12 = encode_imm(rhs);
        if (imm12 < 0) {
            imm12 = encode_imm_nofail(~rhs);
            opc = opinv;
        }
        tcg_out_dat_imm(s, cond, opc, dst, lhs, imm12);
    } else {
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
    }
}

/*
 * Emit either the reg,imm or reg,reg form of a data-processing insn.
 * rhs must satisfy the "rIN" constraint.
 */
static void tcg_out_dat_rIN(TCGContext *s, ARMCond cond, ARMInsn opc,
                            ARMInsn opneg, TCGReg dst, TCGReg lhs, TCGArg rhs,
                            bool rhs_is_const)
{
    if (rhs_is_const) {
        int imm12 = encode_imm(rhs);
        if (imm12 < 0) {
            imm12 = encode_imm_nofail(-rhs);
            opc = opneg;
        }
        tcg_out_dat_imm(s, cond, opc, dst, lhs, imm12);
    } else {
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
    }
}

static void tcg_out_mul32(TCGContext *s, ARMCond cond, TCGReg rd,
                          TCGReg rn, TCGReg rm)
{
    /* mul */
    tcg_out32(s, (cond << 28) | 0x90 | (rd << 16) | (rm << 8) | rn);
}

static void tcg_out_umull32(TCGContext *s, ARMCond cond, TCGReg rd0,
                            TCGReg rd1, TCGReg rn, TCGReg rm)
{
    /* umull */
    tcg_out32(s, (cond << 28) | 0x00800090 |
              (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn);
}

static void tcg_out_smull32(TCGContext *s, ARMCond cond, TCGReg rd0,
                            TCGReg rd1, TCGReg rn, TCGReg rm)
{
    /* smull */
    tcg_out32(s, (cond << 28) | 0x00c00090 |
              (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn);
}

static void tcg_out_sdiv(TCGContext *s, ARMCond cond,
                         TCGReg rd, TCGReg rn, TCGReg rm)
{
    tcg_out32(s, 0x0710f010 | (cond << 28) | (rd << 16) | rn | (rm << 8));
}

static void tcg_out_udiv(TCGContext *s, ARMCond cond,
                         TCGReg rd, TCGReg rn, TCGReg rm)
{
    tcg_out32(s, 0x0730f010 | (cond << 28) | (rd << 16) | rn | (rm << 8));
}

static void tcg_out_ext8s(TCGContext *s, TCGType t, TCGReg rd, TCGReg rn)
{
    /* sxtb */
    tcg_out32(s, 0x06af0070 | (COND_AL << 28) | (rd << 12) | rn);
}

static void tcg_out_ext8u(TCGContext *s, TCGReg rd, TCGReg rn)
{
    tcg_out_dat_imm(s, COND_AL, ARITH_AND, rd, rn, 0xff);
}

static void tcg_out_ext16s(TCGContext *s, TCGType t, TCGReg rd, TCGReg rn)
{
    /* sxth */
    tcg_out32(s, 0x06bf0070 | (COND_AL << 28) | (rd << 12) | rn);
}

static void tcg_out_ext16u(TCGContext *s, TCGReg rd, TCGReg rn)
{
    /* uxth */
    tcg_out32(s, 0x06ff0070 | (COND_AL << 28) | (rd << 12) | rn);
}

static void tcg_out_ext32s(TCGContext *s, TCGReg rd, TCGReg rn)
{
    g_assert_not_reached();
}

static void tcg_out_ext32u(TCGContext *s, TCGReg rd, TCGReg rn)
{
    g_assert_not_reached();
}

static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg rd, TCGReg rn)
{
    g_assert_not_reached();
}

static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg rd, TCGReg rn)
{
    g_assert_not_reached();
}

static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rn)
{
    g_assert_not_reached();
}

static void tcg_out_bswap16(TCGContext *s, ARMCond cond,
                            TCGReg rd, TCGReg rn, int flags)
{
    if (flags & TCG_BSWAP_OS) {
        /* revsh */
        tcg_out32(s, 0x06ff0fb0 | (cond << 28) | (rd << 12) | rn);
        return;
    }

    /* rev16 */
    tcg_out32(s, 0x06bf0fb0 | (cond << 28) | (rd << 12) | rn);
    if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
        /* uxth */
        tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | rd);
    }
}

static void tcg_out_bswap32(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn)
{
    /* rev */
    tcg_out32(s, 0x06bf0f30 | (cond << 28) | (rd << 12) | rn);
}

static void tcg_out_deposit(TCGContext *s, ARMCond cond, TCGReg rd,
                            TCGArg a1, int ofs, int len, bool const_a1)
{
    if (const_a1) {
        /* bfi becomes bfc with rn == 15.  */
        a1 = 15;
    }
    /* bfi/bfc */
    tcg_out32(s, 0x07c00010 | (cond << 28) | (rd << 12) | a1
              | (ofs << 7) | ((ofs + len - 1) << 16));
}

static void tcg_out_extract(TCGContext *s, ARMCond cond, TCGReg rd,
                            TCGReg rn, int ofs, int len)
{
    /* ubfx */
    tcg_out32(s, 0x07e00050 | (cond << 28) | (rd << 12) | rn
              | (ofs << 7) | ((len - 1) << 16));
}

static void tcg_out_sextract(TCGContext *s, ARMCond cond, TCGReg rd,
                             TCGReg rn, int ofs, int len)
{
    /* sbfx */
    tcg_out32(s, 0x07a00050 | (cond << 28) | (rd << 12) | rn
              | (ofs << 7) | ((len - 1) << 16));
}

static void tcg_out_ld32u(TCGContext *s, ARMCond cond,
                          TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld32_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_ld32_12(s, cond, rd, rn, offset);
    }
}

static void tcg_out_st32(TCGContext *s, ARMCond cond,
                         TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_st32_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_st32_12(s, cond, rd, rn, offset);
    }
}

static void tcg_out_ld16u(TCGContext *s, ARMCond cond,
                          TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld16u_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_ld16u_8(s, cond, rd, rn, offset);
    }
}

static void tcg_out_ld16s(TCGContext *s, ARMCond cond,
                          TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld16s_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_ld16s_8(s, cond, rd, rn, offset);
    }
}

static void tcg_out_st16(TCGContext *s, ARMCond cond,
                         TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_st16_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_st16_8(s, cond, rd, rn, offset);
    }
}

static void tcg_out_ld8u(TCGContext *s, ARMCond cond,
                         TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld8_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_ld8_12(s, cond, rd, rn, offset);
    }
}

static void tcg_out_ld8s(TCGContext *s, ARMCond cond,
                         TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld8s_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_ld8s_8(s, cond, rd, rn, offset);
    }
}

static void tcg_out_st8(TCGContext *s, ARMCond cond,
                        TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_st8_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_st8_12(s, cond, rd, rn, offset);
    }
}

/*
 * The _goto case is normally between TBs within the same code buffer, and
 * with the code buffer limited to 16MB we wouldn't need the long case.
 * But we also use it for the tail-call to the qemu_ld/st helpers, which
 * does need it.
 */
static void tcg_out_goto(TCGContext *s, ARMCond cond, const tcg_insn_unit *addr)
{
    intptr_t addri = (intptr_t)addr;
    ptrdiff_t disp = tcg_pcrel_diff(s, addr);
    bool arm_mode = !(addri & 1);

    if (arm_mode && disp - 8 < 0x01fffffd && disp - 8 > -0x01fffffd) {
        tcg_out_b_imm(s, cond, disp);
        return;
    }

    /* LDR is interworking from v5t. */
    tcg_out_movi_pool(s, cond, TCG_REG_PC, addri);
}

/*
 * The call case is mostly used for helpers - so it's not unreasonable
 * for them to be beyond branch range.
 */
static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *addr)
{
    intptr_t addri = (intptr_t)addr;
    ptrdiff_t disp = tcg_pcrel_diff(s, addr);
    bool arm_mode = !(addri & 1);

    if (disp - 8 < 0x02000000 && disp - 8 >= -0x02000000) {
        if (arm_mode) {
            tcg_out_bl_imm(s, COND_AL, disp);
        } else {
            tcg_out_blx_imm(s, disp);
        }
        return;
    }

    tcg_out_movi32(s, COND_AL, TCG_REG_TMP, addri);
    tcg_out_blx_reg(s, COND_AL, TCG_REG_TMP);
}

static void tcg_out_call(TCGContext *s, const tcg_insn_unit *addr,
                         const TCGHelperInfo *info)
{
    tcg_out_call_int(s, addr);
}

static void tcg_out_goto_label(TCGContext *s, ARMCond cond, TCGLabel *l)
{
    if (l->has_value) {
        tcg_out_goto(s, cond, l->u.value_ptr);
    } else {
        tcg_out_reloc(s, s->code_ptr, R_ARM_PC24, l, 0);
        tcg_out_b_imm(s, cond, 0);
    }
}

static void tcg_out_mb(TCGContext *s, TCGArg a0)
{
    if (use_armv7_instructions) {
        tcg_out32(s, INSN_DMB_ISH);
    } else {
        tcg_out32(s, INSN_DMB_MCR);
    }
}

static TCGCond tcg_out_cmp2(TCGContext *s, const TCGArg *args,
                            const int *const_args)
{
    TCGReg al = args[0];
    TCGReg ah = args[1];
    TCGArg bl = args[2];
    TCGArg bh = args[3];
    TCGCond cond = args[4];
    int const_bl = const_args[2];
    int const_bh = const_args[3];

    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
    case TCG_COND_LTU:
    case TCG_COND_LEU:
    case TCG_COND_GTU:
    case TCG_COND_GEU:
        /*
         * We perform a conditional comparison.  If the high half is
         * equal, then overwrite the flags with the comparison of the
         * low half.  The resulting flags cover the whole.
         */
        tcg_out_dat_rI(s, COND_AL, ARITH_CMP, 0, ah, bh, const_bh);
        tcg_out_dat_rI(s, COND_EQ, ARITH_CMP, 0, al, bl, const_bl);
        return cond;

    case TCG_COND_LT:
    case TCG_COND_GE:
        /* We perform a double-word subtraction and examine the result.
           We do not actually need the result of the subtract, so the
           low part "subtract" is a compare.  For the high half we have
           no choice but to compute into a temporary.  */
        tcg_out_dat_rI(s, COND_AL, ARITH_CMP, 0, al, bl, const_bl);
        tcg_out_dat_rI(s, COND_AL, ARITH_SBC | TO_CPSR,
                       TCG_REG_TMP, ah, bh, const_bh);
        return cond;

    case TCG_COND_LE:
    case TCG_COND_GT:
        /* Similar, but with swapped arguments, via reversed subtract.  */
        tcg_out_dat_rI(s, COND_AL, ARITH_RSB | TO_CPSR,
                       TCG_REG_TMP, al, bl, const_bl);
        tcg_out_dat_rI(s, COND_AL, ARITH_RSC | TO_CPSR,
                       TCG_REG_TMP, ah, bh, const_bh);
        return tcg_swap_cond(cond);

    default:
        g_assert_not_reached();
    }
}

/*
 * Note that TCGReg references Q-registers.
 * D-regno = 2 * Q-regno, so shift left by 1 while inserting.
 */
static uint32_t encode_vd(TCGReg rd)
{
    tcg_debug_assert(rd >= TCG_REG_Q0);
    return (extract32(rd, 3, 1) << 22) | (extract32(rd, 0, 3) << 13);
}

static uint32_t encode_vn(TCGReg rn)
{
    tcg_debug_assert(rn >= TCG_REG_Q0);
    return (extract32(rn, 3, 1) << 7) | (extract32(rn, 0, 3) << 17);
}

static uint32_t encode_vm(TCGReg rm)
{
    tcg_debug_assert(rm >= TCG_REG_Q0);
    return (extract32(rm, 3, 1) << 5) | (extract32(rm, 0, 3) << 1);
}
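
/*
 * For example, Q8 maps to D16: bit 3 of the Q number supplies the
 * D/N/M extension bit, and bits 0..2 land one bit to the left of the
 * usual 4-bit register field, doubling the register index.
 */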

static void tcg_out_vreg2(TCGContext *s, ARMInsn insn, int q, int vece,
                          TCGReg d, TCGReg m)
{
    tcg_out32(s, insn | (vece << 18) | (q << 6) |
              encode_vd(d) | encode_vm(m));
}

static void tcg_out_vreg3(TCGContext *s, ARMInsn insn, int q, int vece,
                          TCGReg d, TCGReg n, TCGReg m)
{
    tcg_out32(s, insn | (vece << 20) | (q << 6) |
              encode_vd(d) | encode_vn(n) | encode_vm(m));
}

static void tcg_out_vmovi(TCGContext *s, TCGReg rd,
                          int q, int op, int cmode, uint8_t imm8)
{
    tcg_out32(s, INSN_VMOVI | encode_vd(rd) | (q << 6) | (op << 5)
              | (cmode << 8) | extract32(imm8, 0, 4)
              | (extract32(imm8, 4, 3) << 16)
              | (extract32(imm8, 7, 1) << 24));
}

static void tcg_out_vshifti(TCGContext *s, ARMInsn insn, int q,
                            TCGReg rd, TCGReg rm, int l_imm6)
{
    tcg_out32(s, insn | (q << 6) | encode_vd(rd) | encode_vm(rm) |
              (extract32(l_imm6, 6, 1) << 7) |
              (extract32(l_imm6, 0, 6) << 16));
}

static void tcg_out_vldst(TCGContext *s, ARMInsn insn,
                          TCGReg rd, TCGReg rn, int offset)
{
    if (offset != 0) {
        if (check_fit_imm(offset) || check_fit_imm(-offset)) {
            tcg_out_dat_rIN(s, COND_AL, ARITH_ADD, ARITH_SUB,
                            TCG_REG_TMP, rn, offset, true);
        } else {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, offset);
            tcg_out_dat_reg(s, COND_AL, ARITH_ADD,
                            TCG_REG_TMP, TCG_REG_TMP, rn, 0);
        }
        rn = TCG_REG_TMP;
    }
    tcg_out32(s, insn | (rn << 16) | encode_vd(rd) | 0xf);
}

typedef struct {
    ARMCond cond;
    TCGReg base;
    int index;
    bool index_scratch;
    TCGAtomAlign aa;
} HostAddress;

bool tcg_target_has_memory_bswap(MemOp memop)
{
    return false;
}

static TCGReg ldst_ra_gen(TCGContext *s, const TCGLabelQemuLdst *l, int arg)
{
    /* We arrive at the slow path via "BLNE", so R14 contains l->raddr. */
    return TCG_REG_R14;
}

static const TCGLdstHelperParam ldst_helper_param = {
    .ra_gen = ldst_ra_gen,
    .ntmp = 1,
    .tmp = { TCG_REG_TMP },
};

static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    MemOp opc = get_memop(lb->oi);

    if (!reloc_pc24(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    tcg_out_ld_helper_args(s, lb, &ldst_helper_param);
    tcg_out_call_int(s, qemu_ld_helpers[opc & MO_SIZE]);
    tcg_out_ld_helper_ret(s, lb, false, &ldst_helper_param);

    tcg_out_goto(s, COND_AL, lb->raddr);
    return true;
}

static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    MemOp opc = get_memop(lb->oi);

    if (!reloc_pc24(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    tcg_out_st_helper_args(s, lb, &ldst_helper_param);

    /* Tail-call to the helper, which will return to the fast path.  */
    tcg_out_goto(s, COND_AL, qemu_st_helpers[opc & MO_SIZE]);
    return true;
}

/* We expect to use a 9-bit sign-magnitude negative offset from ENV.  */
#define MIN_TLB_MASK_TABLE_OFS  -256

static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
                                           TCGReg addrlo, TCGReg addrhi,
                                           MemOpIdx oi, bool is_ld)
{
    TCGLabelQemuLdst *ldst = NULL;
    MemOp opc = get_memop(oi);
    unsigned a_mask;

    if (tcg_use_softmmu) {
        *h = (HostAddress){
            .cond = COND_AL,
            .base = addrlo,
            .index = TCG_REG_R1,
            .index_scratch = true,
        };
    } else {
        *h = (HostAddress){
            .cond = COND_AL,
            .base = addrlo,
            .index = guest_base ? TCG_REG_GUEST_BASE : -1,
            .index_scratch = false,
        };
    }

    h->aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, false);
    a_mask = (1 << h->aa.align) - 1;

    if (tcg_use_softmmu) {
        int mem_index = get_mmuidx(oi);
        int cmp_off = is_ld ? offsetof(CPUTLBEntry, addr_read)
                            : offsetof(CPUTLBEntry, addr_write);
        int fast_off = tlb_mask_table_ofs(s, mem_index);
        unsigned s_mask = (1 << (opc & MO_SIZE)) - 1;
        TCGReg t_addr;

        ldst = new_ldst_label(s);
        ldst->is_ld = is_ld;
        ldst->oi = oi;
        ldst->addrlo_reg = addrlo;
        ldst->addrhi_reg = addrhi;

        /* Load cpu->neg.tlb.f[mmu_idx].{mask,table} into {r0,r1}.  */
        QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, mask) != 0);
        QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, table) != 4);
        tcg_out_ldrd_8(s, COND_AL, TCG_REG_R0, TCG_AREG0, fast_off);

        /* Extract the tlb index from the address into R0.  */
        tcg_out_dat_reg(s, COND_AL, ARITH_AND, TCG_REG_R0, TCG_REG_R0, addrlo,
                        SHIFT_IMM_LSR(s->page_bits - CPU_TLB_ENTRY_BITS));

        /*
         * Add the tlb_table pointer, creating the CPUTLBEntry address in R1.
         * Load the tlb comparator into R2/R3 and the fast path addend into R1.
         */
        QEMU_BUILD_BUG_ON(HOST_BIG_ENDIAN);
        if (cmp_off == 0) {
            if (s->addr_type == TCG_TYPE_I32) {
                tcg_out_ld32_rwb(s, COND_AL, TCG_REG_R2,
                                 TCG_REG_R1, TCG_REG_R0);
            } else {
                tcg_out_ldrd_rwb(s, COND_AL, TCG_REG_R2,
                                 TCG_REG_R1, TCG_REG_R0);
            }
        } else {
            tcg_out_dat_reg(s, COND_AL, ARITH_ADD,
                            TCG_REG_R1, TCG_REG_R1, TCG_REG_R0, 0);
            if (s->addr_type == TCG_TYPE_I32) {
                tcg_out_ld32_12(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off);
            } else {
                tcg_out_ldrd_8(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off);
            }
        }

        /* Load the tlb addend.  */
        tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R1,
                        offsetof(CPUTLBEntry, addend));

        /*
         * Check alignment, check comparators.
         * Do this in 2-4 insns.  Use MOVW for v7, if possible,
         * to reduce the number of sequential conditional instructions.
         * Almost all guests have at least 4k pages, which means that we need
         * to clear at least 9 bits even for an 8-byte memory, which means it
         * isn't worth checking for an immediate operand for BIC.
         *
         * For unaligned accesses, test the page of the last unit of alignment.
         * This leaves the least significant alignment bits unchanged, and of
         * course must be zero.
         */
        t_addr = addrlo;
        if (a_mask < s_mask) {
            t_addr = TCG_REG_R0;
            tcg_out_dat_imm(s, COND_AL, ARITH_ADD, t_addr,
                            addrlo, s_mask - a_mask);
        }
        if (use_armv7_instructions && s->page_bits <= 16) {
            tcg_out_movi32(s, COND_AL, TCG_REG_TMP, ~(s->page_mask | a_mask));
            tcg_out_dat_reg(s, COND_AL, ARITH_BIC, TCG_REG_TMP,
                            t_addr, TCG_REG_TMP, 0);
            tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
                            TCG_REG_R2, TCG_REG_TMP, 0);
        } else {
            if (a_mask) {
                tcg_debug_assert(a_mask <= 0xff);
                tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo, a_mask);
            }
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0, t_addr,
                            SHIFT_IMM_LSR(s->page_bits));
            tcg_out_dat_reg(s, (a_mask ? COND_EQ : COND_AL), ARITH_CMP,
                            0, TCG_REG_R2, TCG_REG_TMP,
                            SHIFT_IMM_LSL(s->page_bits));
        }

        if (s->addr_type != TCG_TYPE_I32) {
            tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0, TCG_REG_R3, addrhi, 0);
        }
    } else if (a_mask) {
        ldst = new_ldst_label(s);
        ldst->is_ld = is_ld;
        ldst->oi = oi;
        ldst->addrlo_reg = addrlo;
        ldst->addrhi_reg = addrhi;

        /* We are expecting alignment to max out at 7 */
        tcg_debug_assert(a_mask <= 0xff);
        /* tst addr, #mask */
        tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo, a_mask);
    }

    return ldst;
}

static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg datalo,
                                   TCGReg datahi, HostAddress h)
{
    TCGReg base;

    /* Byte swapping is left to middle-end expansion. */
    tcg_debug_assert((opc & MO_BSWAP) == 0);

    switch (opc & MO_SSIZE) {
    case MO_UB:
        if (h.index < 0) {
            tcg_out_ld8_12(s, h.cond, datalo, h.base, 0);
        } else {
            tcg_out_ld8_r(s, h.cond, datalo, h.base, h.index);
        }
        break;
    case MO_SB:
        if (h.index < 0) {
            tcg_out_ld8s_8(s, h.cond, datalo, h.base, 0);
        } else {
            tcg_out_ld8s_r(s, h.cond, datalo, h.base, h.index);
        }
        break;
    case MO_UW:
        if (h.index < 0) {
            tcg_out_ld16u_8(s, h.cond, datalo, h.base, 0);
        } else {
            tcg_out_ld16u_r(s, h.cond, datalo, h.base, h.index);
        }
        break;
    case MO_SW:
        if (h.index < 0) {
            tcg_out_ld16s_8(s, h.cond, datalo, h.base, 0);
        } else {
            tcg_out_ld16s_r(s, h.cond, datalo, h.base, h.index);
        }
        break;
    case MO_UL:
        if (h.index < 0) {
            tcg_out_ld32_12(s, h.cond, datalo, h.base, 0);
        } else {
            tcg_out_ld32_r(s, h.cond, datalo, h.base, h.index);
        }
        break;
    case MO_UQ:
        /* We used pair allocation for datalo, so it should already be aligned. */
        tcg_debug_assert((datalo & 1) == 0);
        tcg_debug_assert(datahi == datalo + 1);
        /* LDRD requires alignment; double-check that. */
        if (get_alignment_bits(opc) >= MO_64) {
            if (h.index < 0) {
                tcg_out_ldrd_8(s, h.cond, datalo, h.base, 0);
                break;
            }
            /*
             * Rm (the second address op) must not overlap Rt or Rt + 1.
             * Since datalo is aligned, we can simplify the test via alignment.
             * Flip the two address arguments if that works.
             */
            if ((h.index & ~1) != datalo) {
                tcg_out_ldrd_r(s, h.cond, datalo, h.base, h.index);
                break;
            }
            if ((h.base & ~1) != datalo) {
                tcg_out_ldrd_r(s, h.cond, datalo, h.index, h.base);
                break;
            }
        }
        if (h.index < 0) {
            base = h.base;
            if (datalo == h.base) {
                tcg_out_mov_reg(s, h.cond, TCG_REG_TMP, base);
                base = TCG_REG_TMP;
            }
        } else if (h.index_scratch) {
            tcg_out_ld32_rwb(s, h.cond, datalo, h.index, h.base);
            tcg_out_ld32_12(s, h.cond, datahi, h.index, 4);
            break;
        } else {
            tcg_out_dat_reg(s, h.cond, ARITH_ADD, TCG_REG_TMP,
                            h.base, h.index, SHIFT_IMM_LSL(0));
            base = TCG_REG_TMP;
        }
        tcg_out_ld32_12(s, h.cond, datalo, base, 0);
        tcg_out_ld32_12(s, h.cond, datahi, base, 4);
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi,
                            TCGReg addrlo, TCGReg addrhi,
                            MemOpIdx oi, TCGType data_type)
{
    MemOp opc = get_memop(oi);
    TCGLabelQemuLdst *ldst;
    HostAddress h;

    ldst = prepare_host_addr(s, &h, addrlo, addrhi, oi, true);
    if (ldst) {
        ldst->type = data_type;
        ldst->datalo_reg = datalo;
        ldst->datahi_reg = datahi;

        /*
         * This is a conditional BL only to load a pointer within this
         * opcode into LR for the slow path.  We will not be using
         * the value for a tail call.
         */
        ldst->label_ptr[0] = s->code_ptr;
        tcg_out_bl_imm(s, COND_NE, 0);

        tcg_out_qemu_ld_direct(s, opc, datalo, datahi, h);
        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
    } else {
        tcg_out_qemu_ld_direct(s, opc, datalo, datahi, h);
    }
}

static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg datalo,
                                   TCGReg datahi, HostAddress h)
{
    /* Byte swapping is left to middle-end expansion. */
    tcg_debug_assert((opc & MO_BSWAP) == 0);

    switch (opc & MO_SIZE) {
    case MO_8:
        if (h.index < 0) {
            tcg_out_st8_12(s, h.cond, datalo, h.base, 0);
        } else {
            tcg_out_st8_r(s, h.cond, datalo, h.base, h.index);
        }
        break;
    case MO_16:
        if (h.index < 0) {
            tcg_out_st16_8(s, h.cond, datalo, h.base, 0);
        } else {
            tcg_out_st16_r(s, h.cond, datalo, h.base, h.index);
        }
        break;
    case MO_32:
        if (h.index < 0) {
            tcg_out_st32_12(s, h.cond, datalo, h.base, 0);
        } else {
            tcg_out_st32_r(s, h.cond, datalo, h.base, h.index);
        }
        break;
    case MO_64:
        /* We used pair allocation for datalo, so it should already be aligned. */
        tcg_debug_assert((datalo & 1) == 0);
        tcg_debug_assert(datahi == datalo + 1);
        /* STRD requires alignment; double-check that. */
        if (get_alignment_bits(opc) >= MO_64) {
            if (h.index < 0) {
                tcg_out_strd_8(s, h.cond, datalo, h.base, 0);
            } else {
                tcg_out_strd_r(s, h.cond, datalo, h.base, h.index);
            }
        } else if (h.index < 0) {
            tcg_out_st32_12(s, h.cond, datalo, h.base, 0);
            tcg_out_st32_12(s, h.cond, datahi, h.base, 4);
        } else if (h.index_scratch) {
            tcg_out_st32_rwb(s, h.cond, datalo, h.index, h.base);
            tcg_out_st32_12(s, h.cond, datahi, h.index, 4);
        } else {
            tcg_out_dat_reg(s, h.cond, ARITH_ADD, TCG_REG_TMP,
                            h.base, h.index, SHIFT_IMM_LSL(0));
            tcg_out_st32_12(s, h.cond, datalo, TCG_REG_TMP, 0);
            tcg_out_st32_12(s, h.cond, datahi, TCG_REG_TMP, 4);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi,
                            TCGReg addrlo, TCGReg addrhi,
                            MemOpIdx oi, TCGType data_type)
{
    MemOp opc = get_memop(oi);
    TCGLabelQemuLdst *ldst;
    HostAddress h;

    ldst = prepare_host_addr(s, &h, addrlo, addrhi, oi, false);
    if (ldst) {
        ldst->type = data_type;
        ldst->datalo_reg = datalo;
        ldst->datahi_reg = datahi;

        h.cond = COND_EQ;
        tcg_out_qemu_st_direct(s, opc, datalo, datahi, h);

        /* The conditional call is last, as we're going to return here. */
        ldst->label_ptr[0] = s->code_ptr;
        tcg_out_bl_imm(s, COND_NE, 0);
        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
    } else {
        tcg_out_qemu_st_direct(s, opc, datalo, datahi, h);
    }
}

static void tcg_out_epilogue(TCGContext *s);

static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg)
{
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, arg);
    tcg_out_epilogue(s);
}

static void tcg_out_goto_tb(TCGContext *s, int which)
{
    uintptr_t i_addr;
    intptr_t i_disp;

    /* Direct branch will be patched by tb_target_set_jmp_target. */
    set_jmp_insn_offset(s, which);
    tcg_out32(s, INSN_NOP);

    /* When branch is out of range, fall through to indirect. */
    i_addr = get_jmp_target_addr(s, which);
    i_disp = tcg_pcrel_diff(s, (void *)i_addr) - 8;
    tcg_debug_assert(i_disp < 0);
    if (i_disp >= -0xfff) {
        tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, i_disp);
    } else {
        /*
         * The TB is close, but outside the 12 bits addressable by
         * the load.  We can extend this to 20 bits with a sub of a
         * shifted immediate from pc.
         */
        int h = -i_disp;
        int l = h & 0xfff;

        h = encode_imm_nofail(h - l);
        tcg_out_dat_imm(s, COND_AL, ARITH_SUB, TCG_REG_R0, TCG_REG_PC, h);
1743        tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_R0, -l);
1744    }
1745    set_jmp_reset_offset(s, which);
1746}
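
/*
 * Worked example for the sub+load case above (illustrative numbers):
 * with i_disp = -0x12344 we get l = 0x344 and h - l = 0x12000, which
 * encodes as imm8 0x12 rotated into place.  The pair emitted is
 *
 *     sub  r0, pc, #0x12000     @ r0 = target + 0x344
 *     ldr  pc, [r0, #-0x344]    @ jump via load from target
 *
 * where pc reads as the sub's address + 8, the same base from which
 * i_disp was computed.
 */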
1747
1748void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
1749                              uintptr_t jmp_rx, uintptr_t jmp_rw)
1750{
1751    uintptr_t addr = tb->jmp_target_addr[n];
1752    ptrdiff_t offset = addr - (jmp_rx + 8);
1753    tcg_insn_unit insn;
1754
1755    /* Either directly branch, or fall through to indirect branch. */
1756    if (offset == sextract64(offset, 0, 26)) {
1757        /* B <addr> */
1758        insn = deposit32((COND_AL << 28) | INSN_B, 0, 24, offset >> 2);
1759    } else {
1760        insn = INSN_NOP;
1761    }
1762
1763    qatomic_set((uint32_t *)jmp_rw, insn);
1764    flush_idcache_range(jmp_rx, jmp_rw, 4);
1765}
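
/*
 * For example (illustrative): offset = -0x40, i.e. a target 0x40 bytes
 * below the branch's pc value, deposits the 24-bit word offset 0xfffff0
 * into the B opcode.  Targets outside the +/-32MB range of B keep the
 * NOP, so execution falls through to the indirect load emitted by
 * tcg_out_goto_tb above.
 */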
1766
1767static void tcg_out_op(TCGContext *s, TCGOpcode opc,
1768                       const TCGArg args[TCG_MAX_OP_ARGS],
1769                       const int const_args[TCG_MAX_OP_ARGS])
1770{
1771    TCGArg a0, a1, a2, a3, a4, a5;
1772    int c;
1773
1774    switch (opc) {
1775    case INDEX_op_goto_ptr:
1776        tcg_out_b_reg(s, COND_AL, args[0]);
1777        break;
1778    case INDEX_op_br:
1779        tcg_out_goto_label(s, COND_AL, arg_label(args[0]));
1780        break;
1781
1782    case INDEX_op_ld8u_i32:
1783        tcg_out_ld8u(s, COND_AL, args[0], args[1], args[2]);
1784        break;
1785    case INDEX_op_ld8s_i32:
1786        tcg_out_ld8s(s, COND_AL, args[0], args[1], args[2]);
1787        break;
1788    case INDEX_op_ld16u_i32:
1789        tcg_out_ld16u(s, COND_AL, args[0], args[1], args[2]);
1790        break;
1791    case INDEX_op_ld16s_i32:
1792        tcg_out_ld16s(s, COND_AL, args[0], args[1], args[2]);
1793        break;
1794    case INDEX_op_ld_i32:
1795        tcg_out_ld32u(s, COND_AL, args[0], args[1], args[2]);
1796        break;
1797    case INDEX_op_st8_i32:
1798        tcg_out_st8(s, COND_AL, args[0], args[1], args[2]);
1799        break;
1800    case INDEX_op_st16_i32:
1801        tcg_out_st16(s, COND_AL, args[0], args[1], args[2]);
1802        break;
1803    case INDEX_op_st_i32:
1804        tcg_out_st32(s, COND_AL, args[0], args[1], args[2]);
1805        break;
1806
1807    case INDEX_op_movcond_i32:
1808        /* Constraints mean that v2 is always in the same register as dest,
1809         * so we only need to do "if condition passed, move v1 to dest".
1810         */
1811        tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
1812                        args[1], args[2], const_args[2]);
1813        tcg_out_dat_rIK(s, tcg_cond_to_arm_cond[args[5]], ARITH_MOV,
1814                        ARITH_MVN, args[0], 0, args[3], const_args[3]);
1815        break;
1816    case INDEX_op_add_i32:
1817        tcg_out_dat_rIN(s, COND_AL, ARITH_ADD, ARITH_SUB,
1818                        args[0], args[1], args[2], const_args[2]);
1819        break;
1820    case INDEX_op_sub_i32:
1821        if (const_args[1]) {
1822            if (const_args[2]) {
1823                tcg_out_movi32(s, COND_AL, args[0], args[1] - args[2]);
1824            } else {
1825                tcg_out_dat_rI(s, COND_AL, ARITH_RSB,
1826                               args[0], args[2], args[1], 1);
1827            }
1828        } else {
1829            tcg_out_dat_rIN(s, COND_AL, ARITH_SUB, ARITH_ADD,
1830                            args[0], args[1], args[2], const_args[2]);
1831        }
1832        break;
1833    case INDEX_op_and_i32:
1834        tcg_out_dat_rIK(s, COND_AL, ARITH_AND, ARITH_BIC,
1835                        args[0], args[1], args[2], const_args[2]);
1836        break;
1837    case INDEX_op_andc_i32:
1838        tcg_out_dat_rIK(s, COND_AL, ARITH_BIC, ARITH_AND,
1839                        args[0], args[1], args[2], const_args[2]);
1840        break;
1841    case INDEX_op_or_i32:
1842        c = ARITH_ORR;
1843        goto gen_arith;
1844    case INDEX_op_xor_i32:
1845        c = ARITH_EOR;
1846        /* Fall through.  */
1847    gen_arith:
1848        tcg_out_dat_rI(s, COND_AL, c, args[0], args[1], args[2], const_args[2]);
1849        break;
1850    case INDEX_op_add2_i32:
1851        a0 = args[0], a1 = args[1], a2 = args[2];
1852        a3 = args[3], a4 = args[4], a5 = args[5];
1853        if (a0 == a3 || (a0 == a5 && !const_args[5])) {
1854            a0 = TCG_REG_TMP;
1855        }
1856        tcg_out_dat_rIN(s, COND_AL, ARITH_ADD | TO_CPSR, ARITH_SUB | TO_CPSR,
1857                        a0, a2, a4, const_args[4]);
1858        tcg_out_dat_rIK(s, COND_AL, ARITH_ADC, ARITH_SBC,
1859                        a1, a3, a5, const_args[5]);
1860        tcg_out_mov_reg(s, COND_AL, args[0], a0);
1861        break;
1862    case INDEX_op_sub2_i32:
1863        a0 = args[0], a1 = args[1], a2 = args[2];
1864        a3 = args[3], a4 = args[4], a5 = args[5];
1865        if ((a0 == a3 && !const_args[3]) || (a0 == a5 && !const_args[5])) {
1866            a0 = TCG_REG_TMP;
1867        }
1868        if (const_args[2]) {
1869            if (const_args[4]) {
1870                tcg_out_movi32(s, COND_AL, a0, a4);
1871                a4 = a0;
1872            }
1873            tcg_out_dat_rI(s, COND_AL, ARITH_RSB | TO_CPSR, a0, a4, a2, 1);
1874        } else {
1875            tcg_out_dat_rIN(s, COND_AL, ARITH_SUB | TO_CPSR,
1876                            ARITH_ADD | TO_CPSR, a0, a2, a4, const_args[4]);
1877        }
1878        if (const_args[3]) {
1879            if (const_args[5]) {
1880                tcg_out_movi32(s, COND_AL, a1, a5);
1881                a5 = a1;
1882            }
1883            tcg_out_dat_rI(s, COND_AL, ARITH_RSC, a1, a5, a3, 1);
1884        } else {
1885            tcg_out_dat_rIK(s, COND_AL, ARITH_SBC, ARITH_ADC,
1886                            a1, a3, a5, const_args[5]);
1887        }
1888        tcg_out_mov_reg(s, COND_AL, args[0], a0);
1889        break;
1890    case INDEX_op_neg_i32:
1891        tcg_out_dat_imm(s, COND_AL, ARITH_RSB, args[0], args[1], 0);
1892        break;
1893    case INDEX_op_not_i32:
1894        tcg_out_dat_reg(s, COND_AL,
1895                        ARITH_MVN, args[0], 0, args[1], SHIFT_IMM_LSL(0));
1896        break;
1897    case INDEX_op_mul_i32:
1898        tcg_out_mul32(s, COND_AL, args[0], args[1], args[2]);
1899        break;
1900    case INDEX_op_mulu2_i32:
1901        tcg_out_umull32(s, COND_AL, args[0], args[1], args[2], args[3]);
1902        break;
1903    case INDEX_op_muls2_i32:
1904        tcg_out_smull32(s, COND_AL, args[0], args[1], args[2], args[3]);
1905        break;
1906    /* XXX: Perhaps args[2] & 0x1f is wrong */
1907    case INDEX_op_shl_i32:
1908        c = const_args[2] ?
1909                SHIFT_IMM_LSL(args[2] & 0x1f) : SHIFT_REG_LSL(args[2]);
1910        goto gen_shift32;
1911    case INDEX_op_shr_i32:
1912        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_LSR(args[2] & 0x1f) :
1913                SHIFT_IMM_LSL(0) : SHIFT_REG_LSR(args[2]);
1914        goto gen_shift32;
1915    case INDEX_op_sar_i32:
1916        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ASR(args[2] & 0x1f) :
1917                SHIFT_IMM_LSL(0) : SHIFT_REG_ASR(args[2]);
1918        goto gen_shift32;
1919    case INDEX_op_rotr_i32:
1920        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ROR(args[2] & 0x1f) :
1921                SHIFT_IMM_LSL(0) : SHIFT_REG_ROR(args[2]);
1922        /* Fall through.  */
1923    gen_shift32:
1924        tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1], c);
1925        break;
1926
1927    case INDEX_op_rotl_i32:
1928        if (const_args[2]) {
1929            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
1930                            ((0x20 - args[2]) & 0x1f) ?
1931                            SHIFT_IMM_ROR((0x20 - args[2]) & 0x1f) :
1932                            SHIFT_IMM_LSL(0));
1933        } else {
1934            tcg_out_dat_imm(s, COND_AL, ARITH_RSB, TCG_REG_TMP, args[2], 0x20);
1935            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
1936                            SHIFT_REG_ROR(TCG_REG_TMP));
1937        }
1938        break;
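    /*
     * E.g. a constant "rotl d, a, #5" becomes "mov d, a, ror #27"
     * (illustrative), since rotating left by n equals rotating
     * right by 32 - n.
     */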
1939
1940    case INDEX_op_ctz_i32:
1941        tcg_out_dat_reg(s, COND_AL, INSN_RBIT, TCG_REG_TMP, 0, args[1], 0);
1942        a1 = TCG_REG_TMP;
1943        goto do_clz;
1944
1945    case INDEX_op_clz_i32:
1946        a1 = args[1];
1947    do_clz:
1948        a0 = args[0];
1949        a2 = args[2];
1950        c = const_args[2];
1951        if (c && a2 == 32) {
1952            tcg_out_dat_reg(s, COND_AL, INSN_CLZ, a0, 0, a1, 0);
1953            break;
1954        }
1955        tcg_out_dat_imm(s, COND_AL, ARITH_CMP, 0, a1, 0);
1956        tcg_out_dat_reg(s, COND_NE, INSN_CLZ, a0, 0, a1, 0);
1957        if (c || a0 != a2) {
1958            tcg_out_dat_rIK(s, COND_EQ, ARITH_MOV, ARITH_MVN, a0, 0, a2, c);
1959        }
1960        break;
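    /*
     * Illustrative: "ctz d, a, #32" (result 32 for zero input) needs
     * only
     *     rbit  tmp, a
     *     clz   d, tmp
     * because CLZ of the bit-reversed value counts trailing zeros
     * and itself returns 32 for zero.
     */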
1961
1962    case INDEX_op_brcond_i32:
1963        tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
1964                        args[0], args[1], const_args[1]);
1965        tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[2]],
1966                           arg_label(args[3]));
1967        break;
1968    case INDEX_op_setcond_i32:
1969        tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
1970                        args[1], args[2], const_args[2]);
1971        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[args[3]],
1972                        ARITH_MOV, args[0], 0, 1);
1973        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[3])],
1974                        ARITH_MOV, args[0], 0, 0);
1975        break;
1976    case INDEX_op_negsetcond_i32:
1977        tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
1978                        args[1], args[2], const_args[2]);
1979        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[args[3]],
1980                        ARITH_MVN, args[0], 0, 0);
1981        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[3])],
1982                        ARITH_MOV, args[0], 0, 0);
1983        break;
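    /*
     * Illustrative expansions of the two cases above (register names
     * are examples):
     *   setcond_i32    d, a, b, lt  ->  cmp a, b; movlt d, #1; movge d, #0
     *   negsetcond_i32 d, a, b, lt  ->  cmp a, b; mvnlt d, #0; movge d, #0
     * where MVN of 0 produces the -1 result.
     */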
1984
1985    case INDEX_op_brcond2_i32:
1986        c = tcg_out_cmp2(s, args, const_args);
1987        tcg_out_goto_label(s, tcg_cond_to_arm_cond[c], arg_label(args[5]));
1988        break;
1989    case INDEX_op_setcond2_i32:
1990        c = tcg_out_cmp2(s, args + 1, const_args + 1);
1991        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[c], ARITH_MOV, args[0], 0, 1);
1992        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(c)],
1993                        ARITH_MOV, args[0], 0, 0);
1994        break;
1995
1996    case INDEX_op_qemu_ld_a32_i32:
1997        tcg_out_qemu_ld(s, args[0], -1, args[1], -1, args[2], TCG_TYPE_I32);
1998        break;
1999    case INDEX_op_qemu_ld_a64_i32:
2000        tcg_out_qemu_ld(s, args[0], -1, args[1], args[2],
2001                        args[3], TCG_TYPE_I32);
2002        break;
2003    case INDEX_op_qemu_ld_a32_i64:
2004        tcg_out_qemu_ld(s, args[0], args[1], args[2], -1,
2005                        args[3], TCG_TYPE_I64);
2006        break;
2007    case INDEX_op_qemu_ld_a64_i64:
2008        tcg_out_qemu_ld(s, args[0], args[1], args[2], args[3],
2009                        args[4], TCG_TYPE_I64);
2010        break;
2011
2012    case INDEX_op_qemu_st_a32_i32:
2013        tcg_out_qemu_st(s, args[0], -1, args[1], -1, args[2], TCG_TYPE_I32);
2014        break;
2015    case INDEX_op_qemu_st_a64_i32:
2016        tcg_out_qemu_st(s, args[0], -1, args[1], args[2],
2017                        args[3], TCG_TYPE_I32);
2018        break;
2019    case INDEX_op_qemu_st_a32_i64:
2020        tcg_out_qemu_st(s, args[0], args[1], args[2], -1,
2021                        args[3], TCG_TYPE_I64);
2022        break;
2023    case INDEX_op_qemu_st_a64_i64:
2024        tcg_out_qemu_st(s, args[0], args[1], args[2], args[3],
2025                        args[4], TCG_TYPE_I64);
2026        break;
2027
2028    case INDEX_op_bswap16_i32:
2029        tcg_out_bswap16(s, COND_AL, args[0], args[1], args[2]);
2030        break;
2031    case INDEX_op_bswap32_i32:
2032        tcg_out_bswap32(s, COND_AL, args[0], args[1]);
2033        break;
2034
2035    case INDEX_op_deposit_i32:
2036        tcg_out_deposit(s, COND_AL, args[0], args[2],
2037                        args[3], args[4], const_args[2]);
2038        break;
2039    case INDEX_op_extract_i32:
2040        tcg_out_extract(s, COND_AL, args[0], args[1], args[2], args[3]);
2041        break;
2042    case INDEX_op_sextract_i32:
2043        tcg_out_sextract(s, COND_AL, args[0], args[1], args[2], args[3]);
2044        break;
2045    case INDEX_op_extract2_i32:
2046        /* ??? These optimizations vs zero should be generic.  */
2047        /* ??? But we can't substitute 2 for 1 in the opcode stream yet.  */
2048        if (const_args[1]) {
2049            if (const_args[2]) {
2050                tcg_out_movi(s, TCG_TYPE_REG, args[0], 0);
2051            } else {
2052                tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0,
2053                                args[2], SHIFT_IMM_LSL(32 - args[3]));
2054            }
2055        } else if (const_args[2]) {
2056            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0,
2057                            args[1], SHIFT_IMM_LSR(args[3]));
2058        } else {
2059            /* We can do extract2 in 2 insns, vs the 3 required otherwise.  */
2060            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0,
2061                            args[2], SHIFT_IMM_LSL(32 - args[3]));
2062            tcg_out_dat_reg(s, COND_AL, ARITH_ORR, args[0], TCG_REG_TMP,
2063                            args[1], SHIFT_IMM_LSR(args[3]));
2064        }
2065        break;
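    /*
     * Illustrative expansion of the register-register case above, for
     * "extract2 d, lo, hi, #ofs":
     *     mov  tmp, hi, lsl #(32 - ofs)
     *     orr  d, tmp, lo, lsr #ofs
     */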
2066
2067    case INDEX_op_div_i32:
2068        tcg_out_sdiv(s, COND_AL, args[0], args[1], args[2]);
2069        break;
2070    case INDEX_op_divu_i32:
2071        tcg_out_udiv(s, COND_AL, args[0], args[1], args[2]);
2072        break;
2073
2074    case INDEX_op_mb:
2075        tcg_out_mb(s, args[0]);
2076        break;
2077
2078    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
2079    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
2080    case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb.  */
2081    case INDEX_op_goto_tb:  /* Always emitted via tcg_out_goto_tb.  */
2082    case INDEX_op_ext8s_i32:  /* Always emitted via tcg_reg_alloc_op.  */
2083    case INDEX_op_ext8u_i32:
2084    case INDEX_op_ext16s_i32:
2085    case INDEX_op_ext16u_i32:
2086    default:
2087        g_assert_not_reached();
2088    }
2089}
2090
2091static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
2092{
2093    switch (op) {
2094    case INDEX_op_goto_ptr:
2095        return C_O0_I1(r);
2096
2097    case INDEX_op_ld8u_i32:
2098    case INDEX_op_ld8s_i32:
2099    case INDEX_op_ld16u_i32:
2100    case INDEX_op_ld16s_i32:
2101    case INDEX_op_ld_i32:
2102    case INDEX_op_neg_i32:
2103    case INDEX_op_not_i32:
2104    case INDEX_op_bswap16_i32:
2105    case INDEX_op_bswap32_i32:
2106    case INDEX_op_ext8s_i32:
2107    case INDEX_op_ext16s_i32:
2108    case INDEX_op_ext16u_i32:
2109    case INDEX_op_extract_i32:
2110    case INDEX_op_sextract_i32:
2111        return C_O1_I1(r, r);
2112
2113    case INDEX_op_st8_i32:
2114    case INDEX_op_st16_i32:
2115    case INDEX_op_st_i32:
2116        return C_O0_I2(r, r);
2117
2118    case INDEX_op_add_i32:
2119    case INDEX_op_sub_i32:
2120    case INDEX_op_setcond_i32:
2121    case INDEX_op_negsetcond_i32:
2122        return C_O1_I2(r, r, rIN);
2123
2124    case INDEX_op_and_i32:
2125    case INDEX_op_andc_i32:
2126    case INDEX_op_clz_i32:
2127    case INDEX_op_ctz_i32:
2128        return C_O1_I2(r, r, rIK);
2129
2130    case INDEX_op_mul_i32:
2131    case INDEX_op_div_i32:
2132    case INDEX_op_divu_i32:
2133        return C_O1_I2(r, r, r);
2134
2135    case INDEX_op_mulu2_i32:
2136    case INDEX_op_muls2_i32:
2137        return C_O2_I2(r, r, r, r);
2138
2139    case INDEX_op_or_i32:
2140    case INDEX_op_xor_i32:
2141        return C_O1_I2(r, r, rI);
2142
2143    case INDEX_op_shl_i32:
2144    case INDEX_op_shr_i32:
2145    case INDEX_op_sar_i32:
2146    case INDEX_op_rotl_i32:
2147    case INDEX_op_rotr_i32:
2148        return C_O1_I2(r, r, ri);
2149
2150    case INDEX_op_brcond_i32:
2151        return C_O0_I2(r, rIN);
2152    case INDEX_op_deposit_i32:
2153        return C_O1_I2(r, 0, rZ);
2154    case INDEX_op_extract2_i32:
2155        return C_O1_I2(r, rZ, rZ);
2156    case INDEX_op_movcond_i32:
2157        return C_O1_I4(r, r, rIN, rIK, 0);
2158    case INDEX_op_add2_i32:
2159        return C_O2_I4(r, r, r, r, rIN, rIK);
2160    case INDEX_op_sub2_i32:
2161        return C_O2_I4(r, r, rI, rI, rIN, rIK);
2162    case INDEX_op_brcond2_i32:
2163        return C_O0_I4(r, r, rI, rI);
2164    case INDEX_op_setcond2_i32:
2165        return C_O1_I4(r, r, r, rI, rI);
2166
2167    case INDEX_op_qemu_ld_a32_i32:
2168        return C_O1_I1(r, q);
2169    case INDEX_op_qemu_ld_a64_i32:
2170        return C_O1_I2(r, q, q);
2171    case INDEX_op_qemu_ld_a32_i64:
2172        return C_O2_I1(e, p, q);
2173    case INDEX_op_qemu_ld_a64_i64:
2174        return C_O2_I2(e, p, q, q);
2175    case INDEX_op_qemu_st_a32_i32:
2176        return C_O0_I2(q, q);
2177    case INDEX_op_qemu_st_a64_i32:
2178        return C_O0_I3(q, q, q);
2179    case INDEX_op_qemu_st_a32_i64:
2180        return C_O0_I3(Q, p, q);
2181    case INDEX_op_qemu_st_a64_i64:
2182        return C_O0_I4(Q, p, q, q);
2183
2184    case INDEX_op_st_vec:
2185        return C_O0_I2(w, r);
2186    case INDEX_op_ld_vec:
2187    case INDEX_op_dupm_vec:
2188        return C_O1_I1(w, r);
2189    case INDEX_op_dup_vec:
2190        return C_O1_I1(w, wr);
2191    case INDEX_op_abs_vec:
2192    case INDEX_op_neg_vec:
2193    case INDEX_op_not_vec:
2194    case INDEX_op_shli_vec:
2195    case INDEX_op_shri_vec:
2196    case INDEX_op_sari_vec:
2197        return C_O1_I1(w, w);
2198    case INDEX_op_dup2_vec:
2199    case INDEX_op_add_vec:
2200    case INDEX_op_mul_vec:
2201    case INDEX_op_smax_vec:
2202    case INDEX_op_smin_vec:
2203    case INDEX_op_ssadd_vec:
2204    case INDEX_op_sssub_vec:
2205    case INDEX_op_sub_vec:
2206    case INDEX_op_umax_vec:
2207    case INDEX_op_umin_vec:
2208    case INDEX_op_usadd_vec:
2209    case INDEX_op_ussub_vec:
2210    case INDEX_op_xor_vec:
2211    case INDEX_op_arm_sshl_vec:
2212    case INDEX_op_arm_ushl_vec:
2213        return C_O1_I2(w, w, w);
2214    case INDEX_op_arm_sli_vec:
2215        return C_O1_I2(w, 0, w);
2216    case INDEX_op_or_vec:
2217    case INDEX_op_andc_vec:
2218        return C_O1_I2(w, w, wO);
2219    case INDEX_op_and_vec:
2220    case INDEX_op_orc_vec:
2221        return C_O1_I2(w, w, wV);
2222    case INDEX_op_cmp_vec:
2223        return C_O1_I2(w, w, wZ);
2224    case INDEX_op_bitsel_vec:
2225        return C_O1_I3(w, w, w, w);
2226    default:
2227        g_assert_not_reached();
2228    }
2229}
2230
2231static void tcg_target_init(TCGContext *s)
2232{
2233    /*
2234     * Only probe for the platform and capabilities if we haven't already
2235     * determined maximum values at compile time.
2236     */
2237#if !defined(use_idiv_instructions) || !defined(use_neon_instructions)
2238    {
2239        unsigned long hwcap = qemu_getauxval(AT_HWCAP);
2240#ifndef use_idiv_instructions
2241        use_idiv_instructions = (hwcap & HWCAP_ARM_IDIVA) != 0;
2242#endif
2243#ifndef use_neon_instructions
2244        use_neon_instructions = (hwcap & HWCAP_ARM_NEON) != 0;
2245#endif
2246    }
2247#endif
2248
2249    if (__ARM_ARCH < 7) {
2250        const char *pl = (const char *)qemu_getauxval(AT_PLATFORM);
2251        if (pl != NULL && pl[0] == 'v' && pl[1] >= '4' && pl[1] <= '9') {
2252            arm_arch = pl[1] - '0';
2253        }
2254
2255        if (arm_arch < 6) {
2256            error_report("TCG: ARMv%d is unsupported; exiting", arm_arch);
2257            exit(EXIT_FAILURE);
2258        }
2259    }
2260
2261    tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS;
2262
2263    tcg_target_call_clobber_regs = 0;
2264    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0);
2265    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R1);
2266    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R2);
2267    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R3);
2268    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R12);
2269    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R14);
2270
2271    if (use_neon_instructions) {
2272        tcg_target_available_regs[TCG_TYPE_V64]  = ALL_VECTOR_REGS;
2273        tcg_target_available_regs[TCG_TYPE_V128] = ALL_VECTOR_REGS;
2274
2275        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q0);
2276        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q1);
2277        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q2);
2278        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q3);
2279        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q8);
2280        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q9);
2281        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q10);
2282        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q11);
2283        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q12);
2284        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q13);
2285        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q14);
2286        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q15);
2287    }
2288
2289    s->reserved_regs = 0;
2290    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
2291    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP);
2292    tcg_regset_set_reg(s->reserved_regs, TCG_REG_PC);
2293    tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP);
2294}
2295
2296static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
2297                       TCGReg arg1, intptr_t arg2)
2298{
2299    switch (type) {
2300    case TCG_TYPE_I32:
2301        tcg_out_ld32u(s, COND_AL, arg, arg1, arg2);
2302        return;
2303    case TCG_TYPE_V64:
2304        /* regs 1; size 8; align 8 */
2305        tcg_out_vldst(s, INSN_VLD1 | 0x7d0, arg, arg1, arg2);
2306        return;
2307    case TCG_TYPE_V128:
2308        /*
2309         * We have only 8-byte alignment for the stack per the ABI.
2310         * Rather than dynamically re-align the stack, it's easier
2311         * to simply not request alignment beyond that.  So:
2312         * regs 2; size 8; align 8
2313         */
2314        tcg_out_vldst(s, INSN_VLD1 | 0xad0, arg, arg1, arg2);
2315        return;
2316    default:
2317        g_assert_not_reached();
2318    }
2319}
2320
2321static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
2322                       TCGReg arg1, intptr_t arg2)
2323{
2324    switch (type) {
2325    case TCG_TYPE_I32:
2326        tcg_out_st32(s, COND_AL, arg, arg1, arg2);
2327        return;
2328    case TCG_TYPE_V64:
2329        /* regs 1; size 8; align 8 */
2330        tcg_out_vldst(s, INSN_VST1 | 0x7d0, arg, arg1, arg2);
2331        return;
2332    case TCG_TYPE_V128:
2333        /* See tcg_out_ld re alignment: regs 2; size 8; align 8 */
2334        tcg_out_vldst(s, INSN_VST1 | 0xad0, arg, arg1, arg2);
2335        return;
2336    default:
2337        g_assert_not_reached();
2338    }
2339}
2340
2341static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
2342                        TCGReg base, intptr_t ofs)
2343{
2344    return false;
2345}
2346
2347static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
2348{
2349    if (ret == arg) {
2350        return true;
2351    }
2352    switch (type) {
2353    case TCG_TYPE_I32:
2354        if (ret < TCG_REG_Q0 && arg < TCG_REG_Q0) {
2355            tcg_out_mov_reg(s, COND_AL, ret, arg);
2356            return true;
2357        }
2358        return false;
2359
2360    case TCG_TYPE_V64:
2361    case TCG_TYPE_V128:
2362        /* "VMOV D,N" is an alias for "VORR D,N,N". */
2363        tcg_out_vreg3(s, INSN_VORR, type - TCG_TYPE_V64, 0, ret, arg, arg);
2364        return true;
2365
2366    default:
2367        g_assert_not_reached();
2368    }
2369}
2370
2371static void tcg_out_movi(TCGContext *s, TCGType type,
2372                         TCGReg ret, tcg_target_long arg)
2373{
2374    tcg_debug_assert(type == TCG_TYPE_I32);
2375    tcg_debug_assert(ret < TCG_REG_Q0);
2376    tcg_out_movi32(s, COND_AL, ret, arg);
2377}
2378
2379static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
2380{
2381    return false;
2382}
2383
2384static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
2385                             tcg_target_long imm)
2386{
2387    int enc, opc = ARITH_ADD;
2388
2389    /* All of the easiest immediates to encode are positive. */
2390    if (imm < 0) {
2391        imm = -imm;
2392        opc = ARITH_SUB;
2393    }
2394    enc = encode_imm(imm);
2395    if (enc >= 0) {
2396        tcg_out_dat_imm(s, COND_AL, opc, rd, rs, enc);
2397    } else {
2398        tcg_out_movi32(s, COND_AL, TCG_REG_TMP, imm);
2399        tcg_out_dat_reg(s, COND_AL, opc, rd, rs,
2400                        TCG_REG_TMP, SHIFT_IMM_LSL(0));
2401    }
2402}
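
/*
 * For instance (illustrative values): imm = 0x12000 encodes directly
 * as "add rd, rs, #0x12000", while imm = 0x12345 spans more than an
 * 8-bit window, fails encode_imm, and takes the movi32 + add path
 * through TCG_REG_TMP.
 */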
2403
2404/* Type is always V128, with I64 elements.  */
2405static void tcg_out_dup2_vec(TCGContext *s, TCGReg rd, TCGReg rl, TCGReg rh)
2406{
2407    /* Move high element into place first. */
2408    /* VMOV Dd+1, Ds */
2409    tcg_out_vreg3(s, INSN_VORR | (1 << 12), 0, 0, rd, rh, rh);
2410    /* Move low element into place; tcg_out_mov will check for nop. */
2411    tcg_out_mov(s, TCG_TYPE_V64, rd, rl);
2412}
2413
2414static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
2415                            TCGReg rd, TCGReg rs)
2416{
2417    int q = type - TCG_TYPE_V64;
2418
2419    if (vece == MO_64) {
2420        if (type == TCG_TYPE_V128) {
2421            tcg_out_dup2_vec(s, rd, rs, rs);
2422        } else {
2423            tcg_out_mov(s, TCG_TYPE_V64, rd, rs);
2424        }
2425    } else if (rs < TCG_REG_Q0) {
2426        int b = (vece == MO_8);
2427        int e = (vece == MO_16);
2428        tcg_out32(s, INSN_VDUP_G | (b << 22) | (q << 21) | (e << 5) |
2429                  encode_vn(rd) | (rs << 12));
2430    } else {
2431        int imm4 = 1 << vece;
2432        tcg_out32(s, INSN_VDUP_S | (imm4 << 16) | (q << 6) |
2433                  encode_vd(rd) | encode_vm(rs));
2434    }
2435    return true;
2436}
2437
2438static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
2439                             TCGReg rd, TCGReg base, intptr_t offset)
2440{
2441    if (vece == MO_64) {
2442        tcg_out_ld(s, TCG_TYPE_V64, rd, base, offset);
2443        if (type == TCG_TYPE_V128) {
2444            tcg_out_dup2_vec(s, rd, rd, rd);
2445        }
2446    } else {
2447        int q = type - TCG_TYPE_V64;
2448        tcg_out_vldst(s, INSN_VLD1R | (vece << 6) | (q << 5),
2449                      rd, base, offset);
2450    }
2451    return true;
2452}
2453
2454static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
2455                             TCGReg rd, int64_t v64)
2456{
2457    int q = type - TCG_TYPE_V64;
2458    int cmode, imm8, i;
2459
2460    /* Test all bytes equal first.  */
2461    if (vece == MO_8) {
2462        tcg_out_vmovi(s, rd, q, 0, 0xe, v64);
2463        return;
2464    }
2465
2466    /*
2467     * Test all bytes 0x00 or 0xff second.  This can match cases that
2468     * might otherwise take 2 or 3 insns for MO_16 or MO_32 below.
2469     */
2470    for (i = imm8 = 0; i < 8; i++) {
2471        uint8_t byte = v64 >> (i * 8);
2472        if (byte == 0xff) {
2473            imm8 |= 1 << i;
2474        } else if (byte != 0) {
2475            goto fail_bytes;
2476        }
2477    }
2478    tcg_out_vmovi(s, rd, q, 1, 0xe, imm8);
2479    return;
2480 fail_bytes:
2481
2482    /*
2483     * Tests for various replications.  For each element width, if we
2484     * cannot find an expansion, there's no point checking a larger
2485     * width, because we already know by replication it cannot match.
2486     */
2487    if (vece == MO_16) {
2488        uint16_t v16 = v64;
2489
2490        if (is_shimm16(v16, &cmode, &imm8)) {
2491            tcg_out_vmovi(s, rd, q, 0, cmode, imm8);
2492            return;
2493        }
2494        if (is_shimm16(~v16, &cmode, &imm8)) {
2495            tcg_out_vmovi(s, rd, q, 1, cmode, imm8);
2496            return;
2497        }
2498
2499        /*
2500         * Otherwise, all remaining constants can be loaded in two insns:
2501         * rd = v16 & 0xff, rd |= v16 & 0xff00.
2502         */
2503        tcg_out_vmovi(s, rd, q, 0, 0x8, v16 & 0xff);
2504        tcg_out_vmovi(s, rd, q, 0, 0xb, v16 >> 8);   /* VORRI */
2505        return;
2506    }
2507
2508    if (vece == MO_32) {
2509        uint32_t v32 = v64;
2510
2511        if (is_shimm32(v32, &cmode, &imm8) ||
2512            is_soimm32(v32, &cmode, &imm8)) {
2513            tcg_out_vmovi(s, rd, q, 0, cmode, imm8);
2514            return;
2515        }
2516        if (is_shimm32(~v32, &cmode, &imm8) ||
2517            is_soimm32(~v32, &cmode, &imm8)) {
2518            tcg_out_vmovi(s, rd, q, 1, cmode, imm8);
2519            return;
2520        }
2521
2522        /*
2523         * Restrict the set of constants to those we can load with
2524         * two instructions.  Others we load from the pool.
2525         */
2526        i = is_shimm32_pair(v32, &cmode, &imm8);
2527        if (i) {
2528            tcg_out_vmovi(s, rd, q, 0, cmode, imm8);
2529            tcg_out_vmovi(s, rd, q, 0, i | 1, extract32(v32, i * 4, 8));
2530            return;
2531        }
2532        i = is_shimm32_pair(~v32, &cmode, &imm8);
2533        if (i) {
2534            tcg_out_vmovi(s, rd, q, 1, cmode, imm8);
2535            tcg_out_vmovi(s, rd, q, 1, i | 1, extract32(~v32, i * 4, 8));
2536            return;
2537        }
2538    }
2539
2540    /*
2541     * As a last resort, load from the constant pool.
2542     */
2543    if (!q || vece == MO_64) {
2544        new_pool_l2(s, R_ARM_PC11, s->code_ptr, 0, v64, v64 >> 32);
2545        /* VLDR Dd, [pc + offset] */
2546        tcg_out32(s, INSN_VLDR_D | encode_vd(rd) | (0xf << 16));
2547        if (q) {
2548            tcg_out_dup2_vec(s, rd, rd, rd);
2549        }
2550    } else {
2551        new_pool_label(s, (uint32_t)v64, R_ARM_PC8, s->code_ptr, 0);
2552        /* add tmp, pc, offset */
2553        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_TMP, TCG_REG_PC, 0);
2554        tcg_out_dupm_vec(s, type, MO_32, rd, TCG_REG_TMP, 0);
2555    }
2556}
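
/*
 * Worked example (illustrative): v64 = 0xff00ff00ff00ff00 with
 * vece > MO_8 passes the all-bytes-0x00-or-0xff test above with
 * imm8 = 0b10101010, so a single VMOV (op 1, cmode 0xe) suffices.
 */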
2557
2558static const ARMInsn vec_cmp_insn[16] = {
2559    [TCG_COND_EQ] = INSN_VCEQ,
2560    [TCG_COND_GT] = INSN_VCGT,
2561    [TCG_COND_GE] = INSN_VCGE,
2562    [TCG_COND_GTU] = INSN_VCGT_U,
2563    [TCG_COND_GEU] = INSN_VCGE_U,
2564};
2565
2566static const ARMInsn vec_cmp0_insn[16] = {
2567    [TCG_COND_EQ] = INSN_VCEQ0,
2568    [TCG_COND_GT] = INSN_VCGT0,
2569    [TCG_COND_GE] = INSN_VCGE0,
2570    [TCG_COND_LT] = INSN_VCLT0,
2571    [TCG_COND_LE] = INSN_VCLE0,
2572};
2573
2574static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
2575                           unsigned vecl, unsigned vece,
2576                           const TCGArg args[TCG_MAX_OP_ARGS],
2577                           const int const_args[TCG_MAX_OP_ARGS])
2578{
2579    TCGType type = vecl + TCG_TYPE_V64;
2580    unsigned q = vecl;
2581    TCGArg a0, a1, a2, a3;
2582    int cmode, imm8;
2583
2584    a0 = args[0];
2585    a1 = args[1];
2586    a2 = args[2];
2587
2588    switch (opc) {
2589    case INDEX_op_ld_vec:
2590        tcg_out_ld(s, type, a0, a1, a2);
2591        return;
2592    case INDEX_op_st_vec:
2593        tcg_out_st(s, type, a0, a1, a2);
2594        return;
2595    case INDEX_op_dupm_vec:
2596        tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
2597        return;
2598    case INDEX_op_dup2_vec:
2599        tcg_out_dup2_vec(s, a0, a1, a2);
2600        return;
2601    case INDEX_op_abs_vec:
2602        tcg_out_vreg2(s, INSN_VABS, q, vece, a0, a1);
2603        return;
2604    case INDEX_op_neg_vec:
2605        tcg_out_vreg2(s, INSN_VNEG, q, vece, a0, a1);
2606        return;
2607    case INDEX_op_not_vec:
2608        tcg_out_vreg2(s, INSN_VMVN, q, 0, a0, a1);
2609        return;
2610    case INDEX_op_add_vec:
2611        tcg_out_vreg3(s, INSN_VADD, q, vece, a0, a1, a2);
2612        return;
2613    case INDEX_op_mul_vec:
2614        tcg_out_vreg3(s, INSN_VMUL, q, vece, a0, a1, a2);
2615        return;
2616    case INDEX_op_smax_vec:
2617        tcg_out_vreg3(s, INSN_VMAX, q, vece, a0, a1, a2);
2618        return;
2619    case INDEX_op_smin_vec:
2620        tcg_out_vreg3(s, INSN_VMIN, q, vece, a0, a1, a2);
2621        return;
2622    case INDEX_op_sub_vec:
2623        tcg_out_vreg3(s, INSN_VSUB, q, vece, a0, a1, a2);
2624        return;
2625    case INDEX_op_ssadd_vec:
2626        tcg_out_vreg3(s, INSN_VQADD, q, vece, a0, a1, a2);
2627        return;
2628    case INDEX_op_sssub_vec:
2629        tcg_out_vreg3(s, INSN_VQSUB, q, vece, a0, a1, a2);
2630        return;
2631    case INDEX_op_umax_vec:
2632        tcg_out_vreg3(s, INSN_VMAX_U, q, vece, a0, a1, a2);
2633        return;
2634    case INDEX_op_umin_vec:
2635        tcg_out_vreg3(s, INSN_VMIN_U, q, vece, a0, a1, a2);
2636        return;
2637    case INDEX_op_usadd_vec:
2638        tcg_out_vreg3(s, INSN_VQADD_U, q, vece, a0, a1, a2);
2639        return;
2640    case INDEX_op_ussub_vec:
2641        tcg_out_vreg3(s, INSN_VQSUB_U, q, vece, a0, a1, a2);
2642        return;
2643    case INDEX_op_xor_vec:
2644        tcg_out_vreg3(s, INSN_VEOR, q, 0, a0, a1, a2);
2645        return;
2646    case INDEX_op_arm_sshl_vec:
2647        /*
2648         * Note that Vm is the data and Vn is the shift count,
2649         * therefore the arguments appear reversed.
2650         */
2651        tcg_out_vreg3(s, INSN_VSHL_S, q, vece, a0, a2, a1);
2652        return;
2653    case INDEX_op_arm_ushl_vec:
2654        /* See above. */
2655        tcg_out_vreg3(s, INSN_VSHL_U, q, vece, a0, a2, a1);
2656        return;
2657    case INDEX_op_shli_vec:
2658        tcg_out_vshifti(s, INSN_VSHLI, q, a0, a1, a2 + (8 << vece));
2659        return;
2660    case INDEX_op_shri_vec:
2661        tcg_out_vshifti(s, INSN_VSHRI, q, a0, a1, (16 << vece) - a2);
2662        return;
2663    case INDEX_op_sari_vec:
2664        tcg_out_vshifti(s, INSN_VSARI, q, a0, a1, (16 << vece) - a2);
2665        return;
2666    case INDEX_op_arm_sli_vec:
2667        tcg_out_vshifti(s, INSN_VSLI, q, a0, a2, args[3] + (8 << vece));
2668        return;
2669
2670    case INDEX_op_andc_vec:
2671        if (!const_args[2]) {
2672            tcg_out_vreg3(s, INSN_VBIC, q, 0, a0, a1, a2);
2673            return;
2674        }
2675        a2 = ~a2;
2676        /* fall through */
2677    case INDEX_op_and_vec:
2678        if (const_args[2]) {
2679            is_shimm1632(~a2, &cmode, &imm8);
2680            if (a0 == a1) {
2681                tcg_out_vmovi(s, a0, q, 1, cmode | 1, imm8); /* VBICI */
2682                return;
2683            }
2684            tcg_out_vmovi(s, a0, q, 1, cmode, imm8); /* VMVNI */
2685            a2 = a0;
2686        }
2687        tcg_out_vreg3(s, INSN_VAND, q, 0, a0, a1, a2);
2688        return;
2689
2690    case INDEX_op_orc_vec:
2691        if (!const_args[2]) {
2692            tcg_out_vreg3(s, INSN_VORN, q, 0, a0, a1, a2);
2693            return;
2694        }
2695        a2 = ~a2;
2696        /* fall through */
2697    case INDEX_op_or_vec:
2698        if (const_args[2]) {
2699            is_shimm1632(a2, &cmode, &imm8);
2700            if (a0 == a1) {
2701                tcg_out_vmovi(s, a0, q, 0, cmode | 1, imm8); /* VORRI */
2702                return;
2703            }
2704            tcg_out_vmovi(s, a0, q, 0, cmode, imm8); /* VMOVI */
2705            a2 = a0;
2706        }
2707        tcg_out_vreg3(s, INSN_VORR, q, 0, a0, a1, a2);
2708        return;
2709
2710    case INDEX_op_cmp_vec:
2711        {
2712            TCGCond cond = args[3];
2713
2714            if (cond == TCG_COND_NE) {
2715                if (const_args[2]) {
2716                    tcg_out_vreg3(s, INSN_VTST, q, vece, a0, a1, a1);
2717                } else {
2718                    tcg_out_vreg3(s, INSN_VCEQ, q, vece, a0, a1, a2);
2719                    tcg_out_vreg2(s, INSN_VMVN, q, 0, a0, a0);
2720                }
2721            } else {
2722                ARMInsn insn;
2723
2724                if (const_args[2]) {
2725                    insn = vec_cmp0_insn[cond];
2726                    if (insn) {
2727                        tcg_out_vreg2(s, insn, q, vece, a0, a1);
2728                        return;
2729                    }
2730                    tcg_out_dupi_vec(s, type, MO_8, TCG_VEC_TMP, 0);
2731                    a2 = TCG_VEC_TMP;
2732                }
2733                insn = vec_cmp_insn[cond];
2734                if (insn == 0) {
2735                    TCGArg t;
2736                    t = a1, a1 = a2, a2 = t;
2737                    cond = tcg_swap_cond(cond);
2738                    insn = vec_cmp_insn[cond];
2739                    tcg_debug_assert(insn != 0);
2740                }
2741                tcg_out_vreg3(s, insn, q, vece, a0, a1, a2);
2742            }
2743        }
2744        return;
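    /*
     * E.g. "cmp_vec ne, d, a, 0" (illustrative) becomes VTST d, a, a:
     * each lane is set to -1 iff (a & a) != 0, which is exactly a != 0.
     */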
2745
2746    case INDEX_op_bitsel_vec:
2747        a3 = args[3];
2748        if (a0 == a3) {
2749            tcg_out_vreg3(s, INSN_VBIT, q, 0, a0, a2, a1);
2750        } else if (a0 == a2) {
2751            tcg_out_vreg3(s, INSN_VBIF, q, 0, a0, a3, a1);
2752        } else {
2753            tcg_out_mov(s, type, a0, a1);
2754            tcg_out_vreg3(s, INSN_VBSL, q, 0, a0, a2, a3);
2755        }
2756        return;
2757
2758    case INDEX_op_mov_vec:  /* Always emitted via tcg_out_mov.  */
2759    case INDEX_op_dup_vec:  /* Always emitted via tcg_out_dup_vec.  */
2760    default:
2761        g_assert_not_reached();
2762    }
2763}
2764
2765int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
2766{
2767    switch (opc) {
2768    case INDEX_op_add_vec:
2769    case INDEX_op_sub_vec:
2770    case INDEX_op_and_vec:
2771    case INDEX_op_andc_vec:
2772    case INDEX_op_or_vec:
2773    case INDEX_op_orc_vec:
2774    case INDEX_op_xor_vec:
2775    case INDEX_op_not_vec:
2776    case INDEX_op_shli_vec:
2777    case INDEX_op_shri_vec:
2778    case INDEX_op_sari_vec:
2779    case INDEX_op_ssadd_vec:
2780    case INDEX_op_sssub_vec:
2781    case INDEX_op_usadd_vec:
2782    case INDEX_op_ussub_vec:
2783    case INDEX_op_bitsel_vec:
2784        return 1;
2785    case INDEX_op_abs_vec:
2786    case INDEX_op_cmp_vec:
2787    case INDEX_op_mul_vec:
2788    case INDEX_op_neg_vec:
2789    case INDEX_op_smax_vec:
2790    case INDEX_op_smin_vec:
2791    case INDEX_op_umax_vec:
2792    case INDEX_op_umin_vec:
2793        return vece < MO_64;
2794    case INDEX_op_shlv_vec:
2795    case INDEX_op_shrv_vec:
2796    case INDEX_op_sarv_vec:
2797    case INDEX_op_rotli_vec:
2798    case INDEX_op_rotlv_vec:
2799    case INDEX_op_rotrv_vec:
2800        return -1;
2801    default:
2802        return 0;
2803    }
2804}
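
/*
 * A return of -1 above means "supported via expansion": the generic
 * code will call tcg_expand_vec_op below, e.g. rewriting shlv_vec
 * into arm_ushl_vec instead of emitting it directly.
 */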
2805
2806void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
2807                       TCGArg a0, ...)
2808{
2809    va_list va;
2810    TCGv_vec v0, v1, v2, t1, t2, c1;
2811    TCGArg a2;
2812
2813    va_start(va, a0);
2814    v0 = temp_tcgv_vec(arg_temp(a0));
2815    v1 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
2816    a2 = va_arg(va, TCGArg);
2817    va_end(va);
2818
2819    switch (opc) {
2820    case INDEX_op_shlv_vec:
2821        /*
2822         * Merely propagate shlv_vec to arm_ushl_vec.
2823         * In this way we don't set TCG_TARGET_HAS_shv_vec
2824         * because everything is done via expansion.
2825         */
2826        v2 = temp_tcgv_vec(arg_temp(a2));
2827        vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(v0),
2828                  tcgv_vec_arg(v1), tcgv_vec_arg(v2));
2829        break;
2830
2831    case INDEX_op_shrv_vec:
2832    case INDEX_op_sarv_vec:
2833        /* Right shifts are negative left shifts for NEON.  */
2834        v2 = temp_tcgv_vec(arg_temp(a2));
2835        t1 = tcg_temp_new_vec(type);
2836        tcg_gen_neg_vec(vece, t1, v2);
2837        if (opc == INDEX_op_shrv_vec) {
2838            opc = INDEX_op_arm_ushl_vec;
2839        } else {
2840            opc = INDEX_op_arm_sshl_vec;
2841        }
2842        vec_gen_3(opc, type, vece, tcgv_vec_arg(v0),
2843                  tcgv_vec_arg(v1), tcgv_vec_arg(t1));
2844        tcg_temp_free_vec(t1);
2845        break;
2846
2847    case INDEX_op_rotli_vec:
2848        t1 = tcg_temp_new_vec(type);
2849        tcg_gen_shri_vec(vece, t1, v1, -a2 & ((8 << vece) - 1));
2850        vec_gen_4(INDEX_op_arm_sli_vec, type, vece,
2851                  tcgv_vec_arg(v0), tcgv_vec_arg(t1), tcgv_vec_arg(v1), a2);
2852        tcg_temp_free_vec(t1);
2853        break;
2854
2855    case INDEX_op_rotlv_vec:
2856        v2 = temp_tcgv_vec(arg_temp(a2));
2857        t1 = tcg_temp_new_vec(type);
2858        c1 = tcg_constant_vec(type, vece, 8 << vece);
2859        tcg_gen_sub_vec(vece, t1, v2, c1);
2860        /* Right shifts are negative left shifts for NEON.  */
2861        vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(t1),
2862                  tcgv_vec_arg(v1), tcgv_vec_arg(t1));
2863        vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(v0),
2864                  tcgv_vec_arg(v1), tcgv_vec_arg(v2));
2865        tcg_gen_or_vec(vece, v0, v0, t1);
2866        tcg_temp_free_vec(t1);
2867        break;
2868
2869    case INDEX_op_rotrv_vec:
2870        v2 = temp_tcgv_vec(arg_temp(a2));
2871        t1 = tcg_temp_new_vec(type);
2872        t2 = tcg_temp_new_vec(type);
2873        c1 = tcg_constant_vec(type, vece, 8 << vece);
2874        tcg_gen_neg_vec(vece, t1, v2);
2875        tcg_gen_sub_vec(vece, t2, c1, v2);
2876        /* Right shifts are negative left shifts for NEON.  */
2877        vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(t1),
2878                  tcgv_vec_arg(v1), tcgv_vec_arg(t1));
2879        vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(t2),
2880                  tcgv_vec_arg(v1), tcgv_vec_arg(t2));
2881        tcg_gen_or_vec(vece, v0, t1, t2);
2882        tcg_temp_free_vec(t1);
2883        tcg_temp_free_vec(t2);
2884        break;
2885
2886    default:
2887        g_assert_not_reached();
2888    }
2889}
2890
2891static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
2892{
2893    int i;
2894    for (i = 0; i < count; ++i) {
2895        p[i] = INSN_NOP;
2896    }
2897}
2898
2899/* Compute frame size via macros, to share between tcg_target_qemu_prologue
2900   and tcg_register_jit.  */
2901
2902#define PUSH_SIZE  ((11 - 4 + 1 + 1) * sizeof(tcg_target_long))
2903
2904#define FRAME_SIZE \
2905    ((PUSH_SIZE \
2906      + TCG_STATIC_CALL_ARGS_SIZE \
2907      + CPU_TEMP_BUF_NLONGS * sizeof(long) \
2908      + TCG_TARGET_STACK_ALIGN - 1) \
2909     & -TCG_TARGET_STACK_ALIGN)
2910
2911#define STACK_ADDEND  (FRAME_SIZE - PUSH_SIZE)
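
/*
 * As a concrete check: the prologue below pushes 9 registers
 * (r4-r11 plus lr), so PUSH_SIZE is 9 * 4 = 36 bytes; FRAME_SIZE
 * rounds the push, the static call-argument area and the temp buffer
 * up to TCG_TARGET_STACK_ALIGN, and STACK_ADDEND is the remainder
 * the prologue must subtract from sp after the push.
 */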
2912
2913static void tcg_target_qemu_prologue(TCGContext *s)
2914{
2915    /* Calling convention requires us to save r4-r11 and lr.  */
2916    /* stmdb sp!, { r4 - r11, lr } */
2917    tcg_out_ldstm(s, COND_AL, INSN_STMDB, TCG_REG_CALL_STACK,
2918                  (1 << TCG_REG_R4) | (1 << TCG_REG_R5) | (1 << TCG_REG_R6) |
2919                  (1 << TCG_REG_R7) | (1 << TCG_REG_R8) | (1 << TCG_REG_R9) |
2920                  (1 << TCG_REG_R10) | (1 << TCG_REG_R11) | (1 << TCG_REG_R14));
2921
2922    /* Reserve callee argument and tcg temp space.  */
2923    tcg_out_dat_rI(s, COND_AL, ARITH_SUB, TCG_REG_CALL_STACK,
2924                   TCG_REG_CALL_STACK, STACK_ADDEND, 1);
2925    tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE,
2926                  CPU_TEMP_BUF_NLONGS * sizeof(long));
2927
2928    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
2929
2930    if (!tcg_use_softmmu && guest_base) {
2931        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_GUEST_BASE, guest_base);
2932        tcg_regset_set_reg(s->reserved_regs, TCG_REG_GUEST_BASE);
2933    }
2934
2935    tcg_out_b_reg(s, COND_AL, tcg_target_call_iarg_regs[1]);
2936
2937    /*
2938     * Return path for goto_ptr. Set return value to 0, a-la exit_tb,
2939     * and fall through to the rest of the epilogue.
2940     */
2941    tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
2942    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, 0);
2943    tcg_out_epilogue(s);
2944}
2945
2946static void tcg_out_epilogue(TCGContext *s)
2947{
2948    /* Release local stack frame.  */
2949    tcg_out_dat_rI(s, COND_AL, ARITH_ADD, TCG_REG_CALL_STACK,
2950                   TCG_REG_CALL_STACK, STACK_ADDEND, 1);
2951
2952    /* ldmia sp!, { r4 - r11, pc } */
2953    tcg_out_ldstm(s, COND_AL, INSN_LDMIA, TCG_REG_CALL_STACK,
2954                  (1 << TCG_REG_R4) | (1 << TCG_REG_R5) | (1 << TCG_REG_R6) |
2955                  (1 << TCG_REG_R7) | (1 << TCG_REG_R8) | (1 << TCG_REG_R9) |
2956                  (1 << TCG_REG_R10) | (1 << TCG_REG_R11) | (1 << TCG_REG_PC));
2957}
2958
2959static void tcg_out_tb_start(TCGContext *s)
2960{
2961    /* nothing to do */
2962}
2963
2964typedef struct {
2965    DebugFrameHeader h;
2966    uint8_t fde_def_cfa[4];
2967    uint8_t fde_reg_ofs[18];
2968} DebugFrame;
2969
2970#define ELF_HOST_MACHINE EM_ARM
2971
2972/* We're expecting a 2-byte uleb128-encoded value.  */
2973QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));
2974
2975static const DebugFrame debug_frame = {
2976    .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
2977    .h.cie.id = -1,
2978    .h.cie.version = 1,
2979    .h.cie.code_align = 1,
2980    .h.cie.data_align = 0x7c,             /* sleb128 -4 */
2981    .h.cie.return_column = 14,
2982
2983    /* Total FDE size does not include the "len" member.  */
2984    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),
2985
2986    .fde_def_cfa = {
2987        12, 13,                         /* DW_CFA_def_cfa sp, ... */
2988        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
2989        (FRAME_SIZE >> 7)
2990    },
2991    .fde_reg_ofs = {
2992        /* The following must match the stmdb in the prologue.  */
2993        0x8e, 1,                        /* DW_CFA_offset, lr, -4 */
2994        0x8b, 2,                        /* DW_CFA_offset, r11, -8 */
2995        0x8a, 3,                        /* DW_CFA_offset, r10, -12 */
2996        0x89, 4,                        /* DW_CFA_offset, r9, -16 */
2997        0x88, 5,                        /* DW_CFA_offset, r8, -20 */
2998        0x87, 6,                        /* DW_CFA_offset, r7, -24 */
2999        0x86, 7,                        /* DW_CFA_offset, r6, -28 */
3000        0x85, 8,                        /* DW_CFA_offset, r5, -32 */
3001        0x84, 9,                        /* DW_CFA_offset, r4, -36 */
3002    }
3003};
3004
3005void tcg_register_jit(const void *buf, size_t buf_size)
3006{
3007    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
3008}
3009