/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Andrzej Zaborowski
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "elf.h"
#include "../tcg-ldst.c.inc"
#include "../tcg-pool.c.inc"

int arm_arch = __ARM_ARCH;

#ifndef use_idiv_instructions
bool use_idiv_instructions;
#endif
#ifndef use_neon_instructions
bool use_neon_instructions;
#endif

/* Used for function call generation. */
#define TCG_TARGET_STACK_ALIGN          8
#define TCG_TARGET_CALL_STACK_OFFSET    0
#define TCG_TARGET_CALL_ARG_I32         TCG_CALL_ARG_NORMAL
#define TCG_TARGET_CALL_ARG_I64         TCG_CALL_ARG_EVEN
#define TCG_TARGET_CALL_ARG_I128        TCG_CALL_ARG_EVEN
#define TCG_TARGET_CALL_RET_I128        TCG_CALL_RET_BY_REF

#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%r0",  "%r1",  "%r2",  "%r3",  "%r4",  "%r5",  "%r6",  "%r7",
    "%r8",  "%r9",  "%r10", "%r11", "%r12", "%sp",  "%r14", "%pc",
    "%q0",  "%q1",  "%q2",  "%q3",  "%q4",  "%q5",  "%q6",  "%q7",
    "%q8",  "%q9",  "%q10", "%q11", "%q12", "%q13", "%q14", "%q15",
};
#endif

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R13,
    TCG_REG_R0,
    TCG_REG_R1,
    TCG_REG_R2,
    TCG_REG_R3,
    TCG_REG_R12,
    TCG_REG_R14,

    TCG_REG_Q0,
    TCG_REG_Q1,
    TCG_REG_Q2,
    TCG_REG_Q3,
    /* Q4 - Q7 are call-saved, and skipped. */
    TCG_REG_Q8,
    TCG_REG_Q9,
    TCG_REG_Q10,
    TCG_REG_Q11,
    TCG_REG_Q12,
    TCG_REG_Q13,
    TCG_REG_Q14,
    TCG_REG_Q15,
};

static const int tcg_target_call_iarg_regs[4] = {
    TCG_REG_R0, TCG_REG_R1, TCG_REG_R2, TCG_REG_R3
};

static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
{
    tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
    tcg_debug_assert(slot >= 0 && slot <= 3);
    return TCG_REG_R0 + slot;
}

#define TCG_REG_TMP  TCG_REG_R12
#define TCG_VEC_TMP  TCG_REG_Q15
#define TCG_REG_GUEST_BASE  TCG_REG_R11

typedef enum {
    COND_EQ = 0x0,
    COND_NE = 0x1,
    COND_CS = 0x2,  /* Unsigned greater or equal */
    COND_CC = 0x3,  /* Unsigned less than */
    COND_MI = 0x4,  /* Negative */
    COND_PL = 0x5,  /* Zero or greater */
    COND_VS = 0x6,  /* Overflow */
    COND_VC = 0x7,  /* No overflow */
    COND_HI = 0x8,  /* Unsigned greater than */
    COND_LS = 0x9,  /* Unsigned less or equal */
    COND_GE = 0xa,
    COND_LT = 0xb,
    COND_GT = 0xc,
    COND_LE = 0xd,
    COND_AL = 0xe,
} ARMCond;

#define TO_CPSR (1 << 20)

#define SHIFT_IMM_LSL(im)   (((im) << 7) | 0x00)
#define SHIFT_IMM_LSR(im)   (((im) << 7) | 0x20)
#define SHIFT_IMM_ASR(im)   (((im) << 7) | 0x40)
#define SHIFT_IMM_ROR(im)   (((im) << 7) | 0x60)
#define SHIFT_REG_LSL(rs)   (((rs) << 8) | 0x10)
#define SHIFT_REG_LSR(rs)   (((rs) << 8) | 0x30)
#define SHIFT_REG_ASR(rs)   (((rs) << 8) | 0x50)
#define SHIFT_REG_ROR(rs)   (((rs) << 8) | 0x70)

typedef enum {
    ARITH_AND = 0x0 << 21,
    ARITH_EOR = 0x1 << 21,
    ARITH_SUB = 0x2 << 21,
    ARITH_RSB = 0x3 << 21,
    ARITH_ADD = 0x4 << 21,
    ARITH_ADC = 0x5 << 21,
    ARITH_SBC = 0x6 << 21,
    ARITH_RSC = 0x7 << 21,
    ARITH_TST = 0x8 << 21 | TO_CPSR,
    ARITH_CMP = 0xa << 21 | TO_CPSR,
    ARITH_CMN = 0xb << 21 | TO_CPSR,
    ARITH_ORR = 0xc << 21,
    ARITH_MOV = 0xd << 21,
    ARITH_BIC = 0xe << 21,
    ARITH_MVN = 0xf << 21,

    INSN_B         = 0x0a000000,

    INSN_CLZ       = 0x016f0f10,
    INSN_RBIT      = 0x06ff0f30,

    INSN_LDMIA     = 0x08b00000,
    INSN_STMDB     = 0x09200000,

    INSN_LDR_IMM   = 0x04100000,
    INSN_LDR_REG   = 0x06100000,
    INSN_STR_IMM   = 0x04000000,
    INSN_STR_REG   = 0x06000000,

    INSN_LDRH_IMM  = 0x005000b0,
    INSN_LDRH_REG  = 0x001000b0,
    INSN_LDRSH_IMM = 0x005000f0,
    INSN_LDRSH_REG = 0x001000f0,
    INSN_STRH_IMM  = 0x004000b0,
    INSN_STRH_REG  = 0x000000b0,

    INSN_LDRB_IMM  = 0x04500000,
    INSN_LDRB_REG  = 0x06500000,
    INSN_LDRSB_IMM = 0x005000d0,
    INSN_LDRSB_REG = 0x001000d0,
    INSN_STRB_IMM  = 0x04400000,
    INSN_STRB_REG  = 0x06400000,

    INSN_LDRD_IMM  = 0x004000d0,
    INSN_LDRD_REG  = 0x000000d0,
    INSN_STRD_IMM  = 0x004000f0,
    INSN_STRD_REG  = 0x000000f0,

    INSN_DMB_ISH   = 0xf57ff05b,
    INSN_DMB_MCR   = 0xee070fba,

    /* Architected nop introduced in v6k.  */
    /* ??? This is an MSR (imm) 0,0,0 insn.  Anyone know if this
       also Just So Happened to do nothing on pre-v6k so that we
       don't need to conditionalize it?  */
    INSN_NOP_v6k   = 0xe320f000,
    /* Otherwise the assembler uses mov r0,r0 */
    INSN_NOP_v4    = (COND_AL << 28) | ARITH_MOV,

    INSN_VADD      = 0xf2000800,
    INSN_VAND      = 0xf2000110,
    INSN_VBIC      = 0xf2100110,
    INSN_VEOR      = 0xf3000110,
    INSN_VORN      = 0xf2300110,
    INSN_VORR      = 0xf2200110,
    INSN_VSUB      = 0xf3000800,
    INSN_VMUL      = 0xf2000910,
    INSN_VQADD     = 0xf2000010,
    INSN_VQADD_U   = 0xf3000010,
    INSN_VQSUB     = 0xf2000210,
    INSN_VQSUB_U   = 0xf3000210,
    INSN_VMAX      = 0xf2000600,
    INSN_VMAX_U    = 0xf3000600,
    INSN_VMIN      = 0xf2000610,
    INSN_VMIN_U    = 0xf3000610,

    INSN_VABS      = 0xf3b10300,
    INSN_VMVN      = 0xf3b00580,
    INSN_VNEG      = 0xf3b10380,

    INSN_VCEQ0     = 0xf3b10100,
    INSN_VCGT0     = 0xf3b10000,
    INSN_VCGE0     = 0xf3b10080,
    INSN_VCLE0     = 0xf3b10180,
    INSN_VCLT0     = 0xf3b10200,

    INSN_VCEQ      = 0xf3000810,
    INSN_VCGE      = 0xf2000310,
    INSN_VCGT      = 0xf2000300,
    INSN_VCGE_U    = 0xf3000310,
    INSN_VCGT_U    = 0xf3000300,

    INSN_VSHLI     = 0xf2800510,  /* VSHL (immediate) */
    INSN_VSARI     = 0xf2800010,  /* VSHR.S */
    INSN_VSHRI     = 0xf3800010,  /* VSHR.U */
    INSN_VSLI      = 0xf3800510,
    INSN_VSHL_S    = 0xf2000400,  /* VSHL.S (register) */
    INSN_VSHL_U    = 0xf3000400,  /* VSHL.U (register) */

    INSN_VBSL      = 0xf3100110,
    INSN_VBIT      = 0xf3200110,
    INSN_VBIF      = 0xf3300110,

    INSN_VTST      = 0xf2000810,

    INSN_VDUP_G    = 0xee800b10,  /* VDUP (ARM core register) */
    INSN_VDUP_S    = 0xf3b00c00,  /* VDUP (scalar) */
    INSN_VLDR_D    = 0xed100b00,  /* VLDR.64 */
    INSN_VLD1      = 0xf4200000,  /* VLD1 (multiple single elements) */
    INSN_VLD1R     = 0xf4a00c00,  /* VLD1 (single element to all lanes) */
    INSN_VST1      = 0xf4000000,  /* VST1 (multiple single elements) */
    INSN_VMOVI     = 0xf2800010,  /* VMOV (immediate) */
} ARMInsn;

#define INSN_NOP   (use_armv7_instructions ? INSN_NOP_v6k : INSN_NOP_v4)

static const uint8_t tcg_cond_to_arm_cond[] = {
    [TCG_COND_EQ] = COND_EQ,
    [TCG_COND_NE] = COND_NE,
    [TCG_COND_LT] = COND_LT,
    [TCG_COND_GE] = COND_GE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_GT,
    /* unsigned */
    [TCG_COND_LTU] = COND_CC,
    [TCG_COND_GEU] = COND_CS,
    [TCG_COND_LEU] = COND_LS,
    [TCG_COND_GTU] = COND_HI,
};

static int encode_imm(uint32_t imm);

/* TCG private relocation type: add with pc+imm8 */
#define R_ARM_PC8  11

/* TCG private relocation type: vldr with imm8 << 2 */
#define R_ARM_PC11 12

static bool reloc_pc24(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t offset = (tcg_ptr_byte_diff(target, src_rx) - 8) >> 2;

    if (offset == sextract32(offset, 0, 24)) {
        *src_rw = deposit32(*src_rw, 0, 24, offset);
        return true;
    }
    return false;
}
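
/*
 * Worked example (illustrative): for a B insn whose target lies 0x100
 * bytes ahead of the insn itself, the encoded word offset is
 * (0x100 - 8) >> 2 = 0x3e, deposited into bits [23:0].  The -8 accounts
 * for the ARM pipeline: the PC reads as the insn address plus 8.
 */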

static bool reloc_pc13(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t offset = tcg_ptr_byte_diff(target, src_rx) - 8;

    if (offset >= -0xfff && offset <= 0xfff) {
        tcg_insn_unit insn = *src_rw;
        bool u = (offset >= 0);
        if (!u) {
            offset = -offset;
        }
        insn = deposit32(insn, 23, 1, u);
        insn = deposit32(insn, 0, 12, offset);
        *src_rw = insn;
        return true;
    }
    return false;
}

static bool reloc_pc11(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t offset = (tcg_ptr_byte_diff(target, src_rx) - 8) / 4;

    if (offset >= -0xff && offset <= 0xff) {
        tcg_insn_unit insn = *src_rw;
        bool u = (offset >= 0);
        if (!u) {
            offset = -offset;
        }
        insn = deposit32(insn, 23, 1, u);
        insn = deposit32(insn, 0, 8, offset);
        *src_rw = insn;
        return true;
    }
    return false;
}

static bool reloc_pc8(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t offset = tcg_ptr_byte_diff(target, src_rx) - 8;
    int imm12 = encode_imm(offset);

    if (imm12 >= 0) {
        *src_rw = deposit32(*src_rw, 0, 12, imm12);
        return true;
    }
    return false;
}

static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    tcg_debug_assert(addend == 0);
    switch (type) {
    case R_ARM_PC24:
        return reloc_pc24(code_ptr, (const tcg_insn_unit *)value);
    case R_ARM_PC13:
        return reloc_pc13(code_ptr, (const tcg_insn_unit *)value);
    case R_ARM_PC11:
        return reloc_pc11(code_ptr, (const tcg_insn_unit *)value);
    case R_ARM_PC8:
        return reloc_pc8(code_ptr, (const tcg_insn_unit *)value);
    default:
        g_assert_not_reached();
    }
}

#define TCG_CT_CONST_ARM  0x100
#define TCG_CT_CONST_INV  0x200
#define TCG_CT_CONST_NEG  0x400
#define TCG_CT_CONST_ZERO 0x800
#define TCG_CT_CONST_ORRI 0x1000
#define TCG_CT_CONST_ANDI 0x2000

#define ALL_GENERAL_REGS  0xffffu
#define ALL_VECTOR_REGS   0xffff0000u

/*
 * r0-r3 will be overwritten when reading the tlb entry (system-mode only);
 * r14 will be overwritten by the BLNE branching to the slow path.
 */
#define ALL_QLDST_REGS \
    (ALL_GENERAL_REGS & ~((tcg_use_softmmu ? 0xf : 0) | (1 << TCG_REG_R14)))

/*
 * ARM immediates for ALU instructions are made of an unsigned 8-bit
 * value right-rotated by an even amount between 0 and 30.
 *
 * Return < 0 if @imm cannot be encoded, else the entire imm12 field.
 */
static int encode_imm(uint32_t imm)
{
    uint32_t rot, imm8;

    /* Simple case, no rotation required. */
    if ((imm & ~0xff) == 0) {
        return imm;
    }

    /* Next, try a simple even shift.  */
    rot = ctz32(imm) & ~1;
    imm8 = imm >> rot;
    rot = 32 - rot;
    if ((imm8 & ~0xff) == 0) {
        goto found;
    }

    /*
     * Finally, try harder with rotations.
     * The ctz test above will have taken care of rotates >= 8.
     */
    for (rot = 2; rot < 8; rot += 2) {
        imm8 = rol32(imm, rot);
        if ((imm8 & ~0xff) == 0) {
            goto found;
        }
    }
    /* Fail: imm cannot be encoded. */
    return -1;

 found:
    /* Note that rot is even, and we discard bit 0 by shifting by 7. */
    return rot << 7 | imm8;
}
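
/*
 * Worked examples (illustrative): 0xff000000 encodes as rotation 8 of
 * 0xff, i.e. imm12 = (8 >> 1) << 8 | 0xff = 0x4ff; 0x104 encodes as
 * rotation 30 of 0x41, i.e. imm12 = 0xf41; 0x101 spans more than 8 bits
 * at every even rotation, so encode_imm() fails with -1.
 */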

static int encode_imm_nofail(uint32_t imm)
{
    int ret = encode_imm(imm);
    tcg_debug_assert(ret >= 0);
    return ret;
}

static bool check_fit_imm(uint32_t imm)
{
    return encode_imm(imm) >= 0;
}

/* Return true if v16 is a valid 16-bit shifted immediate.  */
static bool is_shimm16(uint16_t v16, int *cmode, int *imm8)
{
    if (v16 == (v16 & 0xff)) {
        *cmode = 0x8;
        *imm8 = v16 & 0xff;
        return true;
    } else if (v16 == (v16 & 0xff00)) {
        *cmode = 0xa;
        *imm8 = v16 >> 8;
        return true;
    }
    return false;
}

/* Return true if v32 is a valid 32-bit shifted immediate.  */
static bool is_shimm32(uint32_t v32, int *cmode, int *imm8)
{
    if (v32 == (v32 & 0xff)) {
        *cmode = 0x0;
        *imm8 = v32 & 0xff;
        return true;
    } else if (v32 == (v32 & 0xff00)) {
        *cmode = 0x2;
        *imm8 = (v32 >> 8) & 0xff;
        return true;
    } else if (v32 == (v32 & 0xff0000)) {
        *cmode = 0x4;
        *imm8 = (v32 >> 16) & 0xff;
        return true;
    } else if (v32 == (v32 & 0xff000000)) {
        *cmode = 0x6;
        *imm8 = v32 >> 24;
        return true;
    }
    return false;
}

/* Return true if v32 is a valid 32-bit shifting ones immediate.  */
static bool is_soimm32(uint32_t v32, int *cmode, int *imm8)
{
    if ((v32 & 0xffff00ff) == 0xff) {
        *cmode = 0xc;
        *imm8 = (v32 >> 8) & 0xff;
        return true;
    } else if ((v32 & 0xff00ffff) == 0xffff) {
        *cmode = 0xd;
        *imm8 = (v32 >> 16) & 0xff;
        return true;
    }
    return false;
}

/*
 * Return non-zero if v32 can be formed by MOVI+ORR.
 * Place the parameters for MOVI in (cmode, imm8).
 * Return the cmode for ORR; the imm8 can be had via extraction from v32.
 */
static int is_shimm32_pair(uint32_t v32, int *cmode, int *imm8)
{
    int i;

    for (i = 6; i > 0; i -= 2) {
        /* Mask out one byte we can add with ORR.  */
        uint32_t tmp = v32 & ~(0xffu << (i * 4));
        if (is_shimm32(tmp, cmode, imm8) ||
            is_soimm32(tmp, cmode, imm8)) {
            break;
        }
    }
    return i;
}
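
/*
 * Worked example (illustrative): for v32 = 0x00ff00ff the loop finds
 * that masking out the byte at bits [23:16] leaves 0x000000ff, a valid
 * shifted immediate (cmode 0x0, imm8 0xff) for the MOVI; the return
 * value 4 names the byte position the caller must then set with ORR.
 */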

/* Return true if V is a valid 16-bit or 32-bit shifted immediate.  */
static bool is_shimm1632(uint32_t v32, int *cmode, int *imm8)
{
    if (v32 == deposit32(v32, 16, 16, v32)) {
        return is_shimm16(v32, cmode, imm8);
    } else {
        return is_shimm32(v32, cmode, imm8);
    }
}

/* Test if a constant matches the constraint.
 * TODO: define constraints for:
 *
 * ldr/str offset:   between -0xfff and 0xfff
 * ldrh/strh offset: between -0xff and 0xff
 * mov operand2:     values represented with x << (2 * y), x < 0x100
 * add, sub, eor...: ditto
 */
static bool tcg_target_const_match(int64_t val, int ct,
                                   TCGType type, TCGCond cond, int vece)
{
    if (ct & TCG_CT_CONST) {
        return 1;
    } else if ((ct & TCG_CT_CONST_ARM) && check_fit_imm(val)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_INV) && check_fit_imm(~val)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_NEG) && check_fit_imm(-val)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return 1;
    }

    switch (ct & (TCG_CT_CONST_ORRI | TCG_CT_CONST_ANDI)) {
    case 0:
        break;
    case TCG_CT_CONST_ANDI:
        val = ~val;
        /* fallthru */
    case TCG_CT_CONST_ORRI:
        if (val == deposit64(val, 32, 32, val)) {
            int cmode, imm8;
            return is_shimm1632(val, &cmode, &imm8);
        }
        break;
    default:
        /* Both bits should not be set for the same insn.  */
        g_assert_not_reached();
    }

    return 0;
}

static void tcg_out_b_imm(TCGContext *s, ARMCond cond, int32_t offset)
{
    tcg_out32(s, (cond << 28) | INSN_B |
                    (((offset - 8) >> 2) & 0x00ffffff));
}

static void tcg_out_bl_imm(TCGContext *s, ARMCond cond, int32_t offset)
{
    tcg_out32(s, (cond << 28) | 0x0b000000 |
                    (((offset - 8) >> 2) & 0x00ffffff));
}

static void tcg_out_blx_reg(TCGContext *s, ARMCond cond, TCGReg rn)
{
    tcg_out32(s, (cond << 28) | 0x012fff30 | rn);
}

static void tcg_out_blx_imm(TCGContext *s, int32_t offset)
{
    tcg_out32(s, 0xfa000000 | ((offset & 2) << 23) |
                (((offset - 8) >> 2) & 0x00ffffff));
}

static void tcg_out_dat_reg(TCGContext *s, ARMCond cond, ARMInsn opc,
                            TCGReg rd, TCGReg rn, TCGReg rm, int shift)
{
    tcg_out32(s, (cond << 28) | (0 << 25) | opc |
                    (rn << 16) | (rd << 12) | shift | rm);
}

static void tcg_out_mov_reg(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rm)
{
    /* Simple reg-reg move, optimising out the 'do nothing' case */
    if (rd != rm) {
        tcg_out_dat_reg(s, cond, ARITH_MOV, rd, 0, rm, SHIFT_IMM_LSL(0));
    }
}

static void tcg_out_bx_reg(TCGContext *s, ARMCond cond, TCGReg rn)
{
    tcg_out32(s, (cond << 28) | 0x012fff10 | rn);
}

static void tcg_out_b_reg(TCGContext *s, ARMCond cond, TCGReg rn)
{
    /*
     * Unless the C portion of QEMU is compiled as thumb, we don't need
     * true BX semantics; merely a branch to an address held in a register.
     */
    tcg_out_bx_reg(s, cond, rn);
}

static void tcg_out_dat_imm(TCGContext *s, ARMCond cond, ARMInsn opc,
                            TCGReg rd, TCGReg rn, int im)
{
    tcg_out32(s, (cond << 28) | (1 << 25) | opc |
                    (rn << 16) | (rd << 12) | im);
}

static void tcg_out_ldstm(TCGContext *s, ARMCond cond, ARMInsn opc,
                          TCGReg rn, uint16_t mask)
{
    tcg_out32(s, (cond << 28) | opc | (rn << 16) | mask);
}

/* Note that this routine is used for both LDR and LDRH formats, so we do
   not wish to include an immediate shift at this point.  */
static void tcg_out_memop_r(TCGContext *s, ARMCond cond, ARMInsn opc, TCGReg rt,
                            TCGReg rn, TCGReg rm, bool u, bool p, bool w)
{
    tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24)
              | (w << 21) | (rn << 16) | (rt << 12) | rm);
}

static void tcg_out_memop_8(TCGContext *s, ARMCond cond, ARMInsn opc, TCGReg rt,
                            TCGReg rn, int imm8, bool p, bool w)
{
    bool u = 1;
    if (imm8 < 0) {
        imm8 = -imm8;
        u = 0;
    }
    tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) | (w << 21) |
              (rn << 16) | (rt << 12) | ((imm8 & 0xf0) << 4) | (imm8 & 0xf));
}

static void tcg_out_memop_12(TCGContext *s, ARMCond cond, ARMInsn opc,
                             TCGReg rt, TCGReg rn, int imm12, bool p, bool w)
{
    bool u = 1;
    if (imm12 < 0) {
        imm12 = -imm12;
        u = 0;
    }
    tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) | (w << 21) |
              (rn << 16) | (rt << 12) | imm12);
}

static void tcg_out_ld32_12(TCGContext *s, ARMCond cond, TCGReg rt,
                            TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_LDR_IMM, rt, rn, imm12, 1, 0);
}

static void tcg_out_st32_12(TCGContext *s, ARMCond cond, TCGReg rt,
                            TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_STR_IMM, rt, rn, imm12, 1, 0);
}

static void tcg_out_ld32_r(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_st32_r(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_ldrd_8(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRD_IMM, rt, rn, imm8, 1, 0);
}

static void tcg_out_ldrd_r(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRD_REG, rt, rn, rm, 1, 1, 0);
}

static void __attribute__((unused))
tcg_out_ldrd_rwb(TCGContext *s, ARMCond cond, TCGReg rt, TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRD_REG, rt, rn, rm, 1, 1, 1);
}

static void __attribute__((unused))
tcg_out_strd_8(TCGContext *s, ARMCond cond, TCGReg rt, TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_STRD_IMM, rt, rn, imm8, 1, 0);
}

static void tcg_out_strd_r(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STRD_REG, rt, rn, rm, 1, 1, 0);
}

/* Register pre-increment with base writeback.  */
static void tcg_out_ld32_rwb(TCGContext *s, ARMCond cond, TCGReg rt,
                             TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 1);
}

static void tcg_out_st32_rwb(TCGContext *s, ARMCond cond, TCGReg rt,
                             TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 1);
}

static void tcg_out_ld16u_8(TCGContext *s, ARMCond cond, TCGReg rt,
                            TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRH_IMM, rt, rn, imm8, 1, 0);
}

static void tcg_out_st16_8(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_STRH_IMM, rt, rn, imm8, 1, 0);
}

static void tcg_out_ld16u_r(TCGContext *s, ARMCond cond, TCGReg rt,
                            TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRH_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_st16_r(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STRH_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_ld16s_8(TCGContext *s, ARMCond cond, TCGReg rt,
                            TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRSH_IMM, rt, rn, imm8, 1, 0);
}

static void tcg_out_ld16s_r(TCGContext *s, ARMCond cond, TCGReg rt,
                            TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRSH_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_ld8_12(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_LDRB_IMM, rt, rn, imm12, 1, 0);
}

static void tcg_out_st8_12(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_STRB_IMM, rt, rn, imm12, 1, 0);
}

static void tcg_out_ld8_r(TCGContext *s, ARMCond cond, TCGReg rt,
                          TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRB_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_st8_r(TCGContext *s, ARMCond cond, TCGReg rt,
                          TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STRB_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_ld8s_8(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRSB_IMM, rt, rn, imm8, 1, 0);
}

static void tcg_out_ld8s_r(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRSB_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_movi_pool(TCGContext *s, ARMCond cond,
                              TCGReg rd, uint32_t arg)
{
    new_pool_label(s, arg, R_ARM_PC13, s->code_ptr, 0);
    tcg_out_ld32_12(s, cond, rd, TCG_REG_PC, 0);
}

static void tcg_out_movi32(TCGContext *s, ARMCond cond,
                           TCGReg rd, uint32_t arg)
{
    int imm12, diff, opc, sh1, sh2;
    uint32_t tt0, tt1, tt2;

    /* Check a single MOV/MVN before anything else.  */
    imm12 = encode_imm(arg);
    if (imm12 >= 0) {
        tcg_out_dat_imm(s, cond, ARITH_MOV, rd, 0, imm12);
        return;
    }
    imm12 = encode_imm(~arg);
    if (imm12 >= 0) {
        tcg_out_dat_imm(s, cond, ARITH_MVN, rd, 0, imm12);
        return;
    }

    /* Check for a pc-relative address.  This will usually be the TB,
       or within the TB, which is immediately before the code block.  */
    diff = tcg_pcrel_diff(s, (void *)arg) - 8;
    if (diff >= 0) {
        imm12 = encode_imm(diff);
        if (imm12 >= 0) {
            tcg_out_dat_imm(s, cond, ARITH_ADD, rd, TCG_REG_PC, imm12);
            return;
        }
    } else {
        imm12 = encode_imm(-diff);
        if (imm12 >= 0) {
            tcg_out_dat_imm(s, cond, ARITH_SUB, rd, TCG_REG_PC, imm12);
            return;
        }
    }

    /* Use movw + movt.  */
    if (use_armv7_instructions) {
        /* movw */
        tcg_out32(s, (cond << 28) | 0x03000000 | (rd << 12)
                  | ((arg << 4) & 0x000f0000) | (arg & 0xfff));
        if (arg & 0xffff0000) {
            /* movt */
            tcg_out32(s, (cond << 28) | 0x03400000 | (rd << 12)
                      | ((arg >> 12) & 0x000f0000) | ((arg >> 16) & 0xfff));
        }
        return;
    }

    /* Look for sequences of two insns.  If we have lots of 1's, we can
       shorten the sequence by beginning with mvn and then clearing
       higher bits with eor.  */
    tt0 = arg;
    opc = ARITH_MOV;
    if (ctpop32(arg) > 16) {
        tt0 = ~arg;
        opc = ARITH_MVN;
    }
    sh1 = ctz32(tt0) & ~1;
    tt1 = tt0 & ~(0xff << sh1);
    sh2 = ctz32(tt1) & ~1;
    tt2 = tt1 & ~(0xff << sh2);
    if (tt2 == 0) {
        int rot;

        rot = ((32 - sh1) << 7) & 0xf00;
        tcg_out_dat_imm(s, cond, opc, rd,  0, ((tt0 >> sh1) & 0xff) | rot);
        rot = ((32 - sh2) << 7) & 0xf00;
        tcg_out_dat_imm(s, cond, ARITH_EOR, rd, rd,
                        ((tt0 >> sh2) & 0xff) | rot);
        return;
    }

    /* Otherwise, drop it into the constant pool.  */
    tcg_out_movi_pool(s, cond, rd, arg);
}
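
/*
 * Worked example (illustrative): on a pre-v7 host, 0x00ff00ff has no
 * single MOV/MVN encoding, but the two-insn path above emits
 *     mov rd, #0xff
 *     eor rd, rd, #0x00ff0000
 * since both bytes sit at even rotations.  (This assumes the value is
 * not reachable pc-relative, in which case ADD/SUB from PC wins.)
 */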

/*
 * Emit either the reg,imm or reg,reg form of a data-processing insn.
 * rhs must satisfy the "rI" constraint.
 */
static void tcg_out_dat_rI(TCGContext *s, ARMCond cond, ARMInsn opc,
                           TCGReg dst, TCGReg lhs, TCGArg rhs, int rhs_is_const)
{
    if (rhs_is_const) {
        tcg_out_dat_imm(s, cond, opc, dst, lhs, encode_imm_nofail(rhs));
    } else {
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
    }
}

/*
 * Emit either the reg,imm or reg,reg form of a data-processing insn.
 * rhs must satisfy the "rIK" constraint.
 */
static void tcg_out_dat_rIK(TCGContext *s, ARMCond cond, ARMInsn opc,
                            ARMInsn opinv, TCGReg dst, TCGReg lhs, TCGArg rhs,
                            bool rhs_is_const)
{
    if (rhs_is_const) {
        int imm12 = encode_imm(rhs);
        if (imm12 < 0) {
            imm12 = encode_imm_nofail(~rhs);
            opc = opinv;
        }
        tcg_out_dat_imm(s, cond, opc, dst, lhs, imm12);
    } else {
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
    }
}

/*
 * Emit either the reg,imm or reg,reg form of a data-processing insn.
 * rhs must satisfy the "rIN" constraint.
 */
static void tcg_out_dat_rIN(TCGContext *s, ARMCond cond, ARMInsn opc,
                            ARMInsn opneg, TCGReg dst, TCGReg lhs, TCGArg rhs,
                            bool rhs_is_const)
{
    if (rhs_is_const) {
        int imm12 = encode_imm(rhs);
        if (imm12 < 0) {
            imm12 = encode_imm_nofail(-rhs);
            opc = opneg;
        }
        tcg_out_dat_imm(s, cond, opc, dst, lhs, imm12);
    } else {
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
    }
}

static void tcg_out_mul32(TCGContext *s, ARMCond cond, TCGReg rd,
                          TCGReg rn, TCGReg rm)
{
    /* mul */
    tcg_out32(s, (cond << 28) | 0x90 | (rd << 16) | (rm << 8) | rn);
}

static void tcg_out_umull32(TCGContext *s, ARMCond cond, TCGReg rd0,
                            TCGReg rd1, TCGReg rn, TCGReg rm)
{
    /* umull */
    tcg_out32(s, (cond << 28) | 0x00800090 |
              (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn);
}

static void tcg_out_smull32(TCGContext *s, ARMCond cond, TCGReg rd0,
                            TCGReg rd1, TCGReg rn, TCGReg rm)
{
    /* smull */
    tcg_out32(s, (cond << 28) | 0x00c00090 |
              (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn);
}

static void tcg_out_sdiv(TCGContext *s, ARMCond cond,
                         TCGReg rd, TCGReg rn, TCGReg rm)
{
    tcg_out32(s, 0x0710f010 | (cond << 28) | (rd << 16) | rn | (rm << 8));
}

static void tcg_out_udiv(TCGContext *s, ARMCond cond,
                         TCGReg rd, TCGReg rn, TCGReg rm)
{
    tcg_out32(s, 0x0730f010 | (cond << 28) | (rd << 16) | rn | (rm << 8));
}

static void tcg_out_ext8s(TCGContext *s, TCGType t, TCGReg rd, TCGReg rn)
{
    /* sxtb */
    tcg_out32(s, 0x06af0070 | (COND_AL << 28) | (rd << 12) | rn);
}

static void tcg_out_ext8u(TCGContext *s, TCGReg rd, TCGReg rn)
{
    tcg_out_dat_imm(s, COND_AL, ARITH_AND, rd, rn, 0xff);
}

static void tcg_out_ext16s(TCGContext *s, TCGType t, TCGReg rd, TCGReg rn)
{
    /* sxth */
    tcg_out32(s, 0x06bf0070 | (COND_AL << 28) | (rd << 12) | rn);
}

static void tcg_out_ext16u(TCGContext *s, TCGReg rd, TCGReg rn)
{
    /* uxth */
    tcg_out32(s, 0x06ff0070 | (COND_AL << 28) | (rd << 12) | rn);
}

static void tcg_out_ext32s(TCGContext *s, TCGReg rd, TCGReg rn)
{
    g_assert_not_reached();
}

static void tcg_out_ext32u(TCGContext *s, TCGReg rd, TCGReg rn)
{
    g_assert_not_reached();
}

static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg rd, TCGReg rn)
{
    g_assert_not_reached();
}

static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg rd, TCGReg rn)
{
    g_assert_not_reached();
}

static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rn)
{
    g_assert_not_reached();
}

static void tcg_out_bswap16(TCGContext *s, ARMCond cond,
                            TCGReg rd, TCGReg rn, int flags)
{
    if (flags & TCG_BSWAP_OS) {
        /* revsh */
        tcg_out32(s, 0x06ff0fb0 | (cond << 28) | (rd << 12) | rn);
        return;
    }

    /* rev16 */
    tcg_out32(s, 0x06bf0fb0 | (cond << 28) | (rd << 12) | rn);
    if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
        /* uxth */
        tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | rd);
    }
}

static void tcg_out_bswap32(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn)
{
    /* rev */
    tcg_out32(s, 0x06bf0f30 | (cond << 28) | (rd << 12) | rn);
}

static void tcg_out_deposit(TCGContext *s, ARMCond cond, TCGReg rd,
                            TCGArg a1, int ofs, int len, bool const_a1)
{
    if (const_a1) {
        /* bfi becomes bfc with rn == 15.  */
        a1 = 15;
    }
    /* bfi/bfc */
    tcg_out32(s, 0x07c00010 | (cond << 28) | (rd << 12) | a1
              | (ofs << 7) | ((ofs + len - 1) << 16));
}

static void tcg_out_extract(TCGContext *s, ARMCond cond, TCGReg rd,
                            TCGReg rn, int ofs, int len)
{
    /* ubfx */
    tcg_out32(s, 0x07e00050 | (cond << 28) | (rd << 12) | rn
              | (ofs << 7) | ((len - 1) << 16));
}

static void tcg_out_sextract(TCGContext *s, ARMCond cond, TCGReg rd,
                             TCGReg rn, int ofs, int len)
{
    /* sbfx */
    tcg_out32(s, 0x07a00050 | (cond << 28) | (rd << 12) | rn
              | (ofs << 7) | ((len - 1) << 16));
}

static void tcg_out_ld32u(TCGContext *s, ARMCond cond,
                          TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld32_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_ld32_12(s, cond, rd, rn, offset);
    }
}

static void tcg_out_st32(TCGContext *s, ARMCond cond,
                         TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_st32_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_st32_12(s, cond, rd, rn, offset);
    }
}

static void tcg_out_ld16u(TCGContext *s, ARMCond cond,
                          TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld16u_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_ld16u_8(s, cond, rd, rn, offset);
    }
}

static void tcg_out_ld16s(TCGContext *s, ARMCond cond,
                          TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld16s_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_ld16s_8(s, cond, rd, rn, offset);
    }
}

static void tcg_out_st16(TCGContext *s, ARMCond cond,
                         TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_st16_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_st16_8(s, cond, rd, rn, offset);
    }
}

static void tcg_out_ld8u(TCGContext *s, ARMCond cond,
                         TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld8_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_ld8_12(s, cond, rd, rn, offset);
    }
}

static void tcg_out_ld8s(TCGContext *s, ARMCond cond,
                         TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld8s_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_ld8s_8(s, cond, rd, rn, offset);
    }
}

static void tcg_out_st8(TCGContext *s, ARMCond cond,
                        TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_st8_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_st8_12(s, cond, rd, rn, offset);
    }
}

/*
 * The _goto case is normally between TBs within the same code buffer, and
 * with the code buffer limited to 16MB we wouldn't need the long case.
 * But we also use it for the tail-call to the qemu_ld/st helpers, which does.
 */
static void tcg_out_goto(TCGContext *s, ARMCond cond, const tcg_insn_unit *addr)
{
    intptr_t addri = (intptr_t)addr;
    ptrdiff_t disp = tcg_pcrel_diff(s, addr);
    bool arm_mode = !(addri & 1);

    if (arm_mode && disp - 8 < 0x01fffffd && disp - 8 > -0x01fffffd) {
        tcg_out_b_imm(s, cond, disp);
        return;
    }

    /* LDR is interworking from v5t. */
    tcg_out_movi_pool(s, cond, TCG_REG_PC, addri);
}

/*
 * The call case is mostly used for helpers - so it's not unreasonable
 * for them to be beyond branch range.
 */
static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *addr)
{
    intptr_t addri = (intptr_t)addr;
    ptrdiff_t disp = tcg_pcrel_diff(s, addr);
    bool arm_mode = !(addri & 1);

    if (disp - 8 < 0x02000000 && disp - 8 >= -0x02000000) {
        if (arm_mode) {
            tcg_out_bl_imm(s, COND_AL, disp);
        } else {
            tcg_out_blx_imm(s, disp);
        }
        return;
    }

    tcg_out_movi32(s, COND_AL, TCG_REG_TMP, addri);
    tcg_out_blx_reg(s, COND_AL, TCG_REG_TMP);
}

static void tcg_out_call(TCGContext *s, const tcg_insn_unit *addr,
                         const TCGHelperInfo *info)
{
    tcg_out_call_int(s, addr);
}

static void tcg_out_goto_label(TCGContext *s, ARMCond cond, TCGLabel *l)
{
    if (l->has_value) {
        tcg_out_goto(s, cond, l->u.value_ptr);
    } else {
        tcg_out_reloc(s, s->code_ptr, R_ARM_PC24, l, 0);
        tcg_out_b_imm(s, cond, 0);
    }
}

static void tcg_out_mb(TCGContext *s, TCGArg a0)
{
    if (use_armv7_instructions) {
        tcg_out32(s, INSN_DMB_ISH);
    } else {
        tcg_out32(s, INSN_DMB_MCR);
    }
}

static TCGCond tcg_out_cmp(TCGContext *s, TCGCond cond, TCGReg a,
                           TCGArg b, int b_const)
{
    if (!is_tst_cond(cond)) {
        tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0, a, b, b_const);
        return cond;
    }

    cond = tcg_tst_eqne_cond(cond);
    if (b_const) {
        int imm12 = encode_imm(b);

        /*
         * The compare constraints allow rIN, but TST does not support N.
         * Be prepared to load the constant into a scratch register.
         */
        if (imm12 >= 0) {
            tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, a, imm12);
            return cond;
        }
        tcg_out_movi32(s, COND_AL, TCG_REG_TMP, b);
        b = TCG_REG_TMP;
    }
    tcg_out_dat_reg(s, COND_AL, ARITH_TST, 0, a, b, SHIFT_IMM_LSL(0));
    return cond;
}

static TCGCond tcg_out_cmp2(TCGContext *s, const TCGArg *args,
                            const int *const_args)
{
    TCGReg al = args[0];
    TCGReg ah = args[1];
    TCGArg bl = args[2];
    TCGArg bh = args[3];
    TCGCond cond = args[4];
    int const_bl = const_args[2];
    int const_bh = const_args[3];

    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
    case TCG_COND_LTU:
    case TCG_COND_LEU:
    case TCG_COND_GTU:
    case TCG_COND_GEU:
        /*
         * We perform a conditional comparison.  If the high half is
         * equal, then overwrite the flags with the comparison of the
         * low half.  The resulting flags cover the whole.
         */
        tcg_out_dat_rI(s, COND_AL, ARITH_CMP, 0, ah, bh, const_bh);
        tcg_out_dat_rI(s, COND_EQ, ARITH_CMP, 0, al, bl, const_bl);
        return cond;

    case TCG_COND_TSTEQ:
    case TCG_COND_TSTNE:
        /* Similar, but with TST instead of CMP. */
        tcg_out_dat_rI(s, COND_AL, ARITH_TST, 0, ah, bh, const_bh);
        tcg_out_dat_rI(s, COND_EQ, ARITH_TST, 0, al, bl, const_bl);
        return tcg_tst_eqne_cond(cond);

    case TCG_COND_LT:
    case TCG_COND_GE:
        /* We perform a double-word subtraction and examine the result.
           We do not actually need the result of the subtract, so the
           low part "subtract" is a compare.  For the high half we have
           no choice but to compute into a temporary.  */
        tcg_out_dat_rI(s, COND_AL, ARITH_CMP, 0, al, bl, const_bl);
        tcg_out_dat_rI(s, COND_AL, ARITH_SBC | TO_CPSR,
                       TCG_REG_TMP, ah, bh, const_bh);
        return cond;

    case TCG_COND_LE:
    case TCG_COND_GT:
        /* Similar, but with swapped arguments, via reversed subtract.  */
        tcg_out_dat_rI(s, COND_AL, ARITH_RSB | TO_CPSR,
                       TCG_REG_TMP, al, bl, const_bl);
        tcg_out_dat_rI(s, COND_AL, ARITH_RSC | TO_CPSR,
                       TCG_REG_TMP, ah, bh, const_bh);
        return tcg_swap_cond(cond);

    default:
        g_assert_not_reached();
    }
}
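
/*
 * Illustrative expansion: for TCG_COND_LT with register operands the
 * sequence above is
 *     cmp  al, bl           @ low-half subtract, result discarded
 *     sbcs tmp, ah, bh      @ high half with borrow, sets N and V
 * after which the GE/LT condition codes test the sign of the full
 * 64-bit difference.
 */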

/*
 * Note that TCGReg references Q-registers.
 * D-regno = 2 * Q-regno, so shift left by 1 while inserting.
 */
static uint32_t encode_vd(TCGReg rd)
{
    tcg_debug_assert(rd >= TCG_REG_Q0);
    return (extract32(rd, 3, 1) << 22) | (extract32(rd, 0, 3) << 13);
}

static uint32_t encode_vn(TCGReg rn)
{
    tcg_debug_assert(rn >= TCG_REG_Q0);
    return (extract32(rn, 3, 1) << 7) | (extract32(rn, 0, 3) << 17);
}

static uint32_t encode_vm(TCGReg rm)
{
    tcg_debug_assert(rm >= TCG_REG_Q0);
    return (extract32(rm, 3, 1) << 5) | (extract32(rm, 0, 3) << 1);
}
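
/*
 * Worked example (illustrative): TCG_REG_Q1 has Q-regno 1, so encode_vd()
 * deposits it one bit high, at bit 13 rather than bit 12, yielding
 * Vd = 0b0010 = D2, the low D-register half of Q1.
 */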

static void tcg_out_vreg2(TCGContext *s, ARMInsn insn, int q, int vece,
                          TCGReg d, TCGReg m)
{
    tcg_out32(s, insn | (vece << 18) | (q << 6) |
              encode_vd(d) | encode_vm(m));
}

static void tcg_out_vreg3(TCGContext *s, ARMInsn insn, int q, int vece,
                          TCGReg d, TCGReg n, TCGReg m)
{
    tcg_out32(s, insn | (vece << 20) | (q << 6) |
              encode_vd(d) | encode_vn(n) | encode_vm(m));
}

static void tcg_out_vmovi(TCGContext *s, TCGReg rd,
                          int q, int op, int cmode, uint8_t imm8)
{
    tcg_out32(s, INSN_VMOVI | encode_vd(rd) | (q << 6) | (op << 5)
              | (cmode << 8) | extract32(imm8, 0, 4)
              | (extract32(imm8, 4, 3) << 16)
              | (extract32(imm8, 7, 1) << 24));
}

static void tcg_out_vshifti(TCGContext *s, ARMInsn insn, int q,
                            TCGReg rd, TCGReg rm, int l_imm6)
{
    tcg_out32(s, insn | (q << 6) | encode_vd(rd) | encode_vm(rm) |
              (extract32(l_imm6, 6, 1) << 7) |
              (extract32(l_imm6, 0, 6) << 16));
}

static void tcg_out_vldst(TCGContext *s, ARMInsn insn,
                          TCGReg rd, TCGReg rn, int offset)
{
    if (offset != 0) {
        if (check_fit_imm(offset) || check_fit_imm(-offset)) {
            tcg_out_dat_rIN(s, COND_AL, ARITH_ADD, ARITH_SUB,
                            TCG_REG_TMP, rn, offset, true);
        } else {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, offset);
            tcg_out_dat_reg(s, COND_AL, ARITH_ADD,
                            TCG_REG_TMP, TCG_REG_TMP, rn, 0);
        }
        rn = TCG_REG_TMP;
    }
    tcg_out32(s, insn | (rn << 16) | encode_vd(rd) | 0xf);
}

typedef struct {
    ARMCond cond;
    TCGReg base;
    int index;
    bool index_scratch;
    TCGAtomAlign aa;
} HostAddress;

bool tcg_target_has_memory_bswap(MemOp memop)
{
    return false;
}

static TCGReg ldst_ra_gen(TCGContext *s, const TCGLabelQemuLdst *l, int arg)
{
    /* We arrive at the slow path via "BLNE", so R14 contains l->raddr. */
    return TCG_REG_R14;
}

static const TCGLdstHelperParam ldst_helper_param = {
    .ra_gen = ldst_ra_gen,
    .ntmp = 1,
    .tmp = { TCG_REG_TMP },
};

static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    MemOp opc = get_memop(lb->oi);

    if (!reloc_pc24(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    tcg_out_ld_helper_args(s, lb, &ldst_helper_param);
    tcg_out_call_int(s, qemu_ld_helpers[opc & MO_SIZE]);
    tcg_out_ld_helper_ret(s, lb, false, &ldst_helper_param);

    tcg_out_goto(s, COND_AL, lb->raddr);
    return true;
}

static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    MemOp opc = get_memop(lb->oi);

    if (!reloc_pc24(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    tcg_out_st_helper_args(s, lb, &ldst_helper_param);

    /* Tail-call to the helper, which will return to the fast path.  */
    tcg_out_goto(s, COND_AL, qemu_st_helpers[opc & MO_SIZE]);
    return true;
}

/* We expect to use a 9-bit sign-magnitude negative offset from ENV.  */
#define MIN_TLB_MASK_TABLE_OFS  -256

static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
                                           TCGReg addrlo, TCGReg addrhi,
                                           MemOpIdx oi, bool is_ld)
{
    TCGLabelQemuLdst *ldst = NULL;
    MemOp opc = get_memop(oi);
    unsigned a_mask;

    if (tcg_use_softmmu) {
        *h = (HostAddress){
            .cond = COND_AL,
            .base = addrlo,
            .index = TCG_REG_R1,
            .index_scratch = true,
        };
    } else {
        *h = (HostAddress){
            .cond = COND_AL,
            .base = addrlo,
            .index = guest_base ? TCG_REG_GUEST_BASE : -1,
            .index_scratch = false,
        };
    }

    h->aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, false);
    a_mask = (1 << h->aa.align) - 1;

    if (tcg_use_softmmu) {
        int mem_index = get_mmuidx(oi);
        int cmp_off = is_ld ? offsetof(CPUTLBEntry, addr_read)
                            : offsetof(CPUTLBEntry, addr_write);
        int fast_off = tlb_mask_table_ofs(s, mem_index);
        unsigned s_mask = (1 << (opc & MO_SIZE)) - 1;
        TCGReg t_addr;

        ldst = new_ldst_label(s);
        ldst->is_ld = is_ld;
        ldst->oi = oi;
        ldst->addrlo_reg = addrlo;
        ldst->addrhi_reg = addrhi;

        /* Load cpu->neg.tlb.f[mmu_idx].{mask,table} into {r0,r1}.  */
        QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, mask) != 0);
        QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, table) != 4);
        tcg_out_ldrd_8(s, COND_AL, TCG_REG_R0, TCG_AREG0, fast_off);

        /* Extract the tlb index from the address into R0.  */
        tcg_out_dat_reg(s, COND_AL, ARITH_AND, TCG_REG_R0, TCG_REG_R0, addrlo,
                        SHIFT_IMM_LSR(s->page_bits - CPU_TLB_ENTRY_BITS));

        /*
         * Add the tlb_table pointer, creating the CPUTLBEntry address in R1.
         * Load the tlb comparator into R2/R3 and the fast path addend into R1.
         */
        QEMU_BUILD_BUG_ON(HOST_BIG_ENDIAN);
        if (cmp_off == 0) {
            if (s->addr_type == TCG_TYPE_I32) {
                tcg_out_ld32_rwb(s, COND_AL, TCG_REG_R2,
                                 TCG_REG_R1, TCG_REG_R0);
            } else {
                tcg_out_ldrd_rwb(s, COND_AL, TCG_REG_R2,
                                 TCG_REG_R1, TCG_REG_R0);
            }
        } else {
            tcg_out_dat_reg(s, COND_AL, ARITH_ADD,
                            TCG_REG_R1, TCG_REG_R1, TCG_REG_R0, 0);
            if (s->addr_type == TCG_TYPE_I32) {
                tcg_out_ld32_12(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off);
            } else {
                tcg_out_ldrd_8(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off);
            }
        }

        /* Load the tlb addend.  */
        tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R1,
                        offsetof(CPUTLBEntry, addend));

        /*
         * Check alignment, check comparators.
         * Do this in 2-4 insns.  Use MOVW for v7, if possible,
         * to reduce the number of sequential conditional instructions.
         * Almost all guests have at least 4k pages, which means that we need
         * to clear at least 9 bits even for an 8-byte memory operation,
         * which means it isn't worth checking for an immediate operand
         * for BIC.
         *
         * For unaligned accesses, test the page of the last unit of alignment.
         * This leaves the least significant alignment bits unchanged, and of
         * course must be zero.
         */
        t_addr = addrlo;
        if (a_mask < s_mask) {
            t_addr = TCG_REG_R0;
            tcg_out_dat_imm(s, COND_AL, ARITH_ADD, t_addr,
                            addrlo, s_mask - a_mask);
        }
        if (use_armv7_instructions && s->page_bits <= 16) {
            tcg_out_movi32(s, COND_AL, TCG_REG_TMP, ~(s->page_mask | a_mask));
            tcg_out_dat_reg(s, COND_AL, ARITH_BIC, TCG_REG_TMP,
                            t_addr, TCG_REG_TMP, 0);
            tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
                            TCG_REG_R2, TCG_REG_TMP, 0);
        } else {
            if (a_mask) {
                tcg_debug_assert(a_mask <= 0xff);
                tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo, a_mask);
            }
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0, t_addr,
                            SHIFT_IMM_LSR(s->page_bits));
            tcg_out_dat_reg(s, (a_mask ? COND_EQ : COND_AL), ARITH_CMP,
                            0, TCG_REG_R2, TCG_REG_TMP,
                            SHIFT_IMM_LSL(s->page_bits));
        }

        if (s->addr_type != TCG_TYPE_I32) {
            tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0, TCG_REG_R3, addrhi, 0);
        }
    } else if (a_mask) {
        ldst = new_ldst_label(s);
        ldst->is_ld = is_ld;
        ldst->oi = oi;
        ldst->addrlo_reg = addrlo;
        ldst->addrhi_reg = addrhi;

        /* We are expecting alignment to max out at 7 */
        tcg_debug_assert(a_mask <= 0xff);
        /* tst addr, #mask */
        tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo, a_mask);
    }

    return ldst;
}
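
/*
 * For reference, the softmmu fast path above expands to roughly the
 * following (armv7, 32-bit guest addresses, aligned access; this is an
 * illustrative sketch only, and addend_off stands for
 * offsetof(CPUTLBEntry, addend)):
 *     ldrd  r0, r1, [env + fast_off]      @ {mask, table}
 *     and   r0, r0, addrlo, lsr #(page_bits - CPU_TLB_ENTRY_BITS)
 *     ldr   r2, [r1, r0]!                 @ comparator; r1 = &entry
 *     ldr   r1, [r1, #addend_off]         @ host-address addend
 *     movw/movt tmp, #~(page_mask | a_mask)
 *     bic   tmp, addrlo, tmp
 *     cmp   r2, tmp
 * with the BLNE to the slow path emitted by the caller on mismatch.
 */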

static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg datalo,
                                   TCGReg datahi, HostAddress h)
{
    TCGReg base;

    /* Byte swapping is left to middle-end expansion. */
    tcg_debug_assert((opc & MO_BSWAP) == 0);

    switch (opc & MO_SSIZE) {
    case MO_UB:
        if (h.index < 0) {
            tcg_out_ld8_12(s, h.cond, datalo, h.base, 0);
        } else {
            tcg_out_ld8_r(s, h.cond, datalo, h.base, h.index);
        }
        break;
    case MO_SB:
        if (h.index < 0) {
            tcg_out_ld8s_8(s, h.cond, datalo, h.base, 0);
        } else {
            tcg_out_ld8s_r(s, h.cond, datalo, h.base, h.index);
        }
        break;
    case MO_UW:
        if (h.index < 0) {
            tcg_out_ld16u_8(s, h.cond, datalo, h.base, 0);
        } else {
            tcg_out_ld16u_r(s, h.cond, datalo, h.base, h.index);
        }
        break;
    case MO_SW:
        if (h.index < 0) {
            tcg_out_ld16s_8(s, h.cond, datalo, h.base, 0);
        } else {
            tcg_out_ld16s_r(s, h.cond, datalo, h.base, h.index);
        }
        break;
    case MO_UL:
        if (h.index < 0) {
            tcg_out_ld32_12(s, h.cond, datalo, h.base, 0);
        } else {
            tcg_out_ld32_r(s, h.cond, datalo, h.base, h.index);
        }
        break;
    case MO_UQ:
        /* We used pair allocation for datalo, so already should be aligned. */
        tcg_debug_assert((datalo & 1) == 0);
        tcg_debug_assert(datahi == datalo + 1);
        /* LDRD requires alignment; double-check that. */
        if (memop_alignment_bits(opc) >= MO_64) {
            if (h.index < 0) {
                tcg_out_ldrd_8(s, h.cond, datalo, h.base, 0);
                break;
            }
            /*
             * Rm (the second address op) must not overlap Rt or Rt + 1.
             * Since datalo is aligned, we can simplify the test via alignment.
             * Flip the two address arguments if that works.
             */
            if ((h.index & ~1) != datalo) {
                tcg_out_ldrd_r(s, h.cond, datalo, h.base, h.index);
                break;
            }
            if ((h.base & ~1) != datalo) {
                tcg_out_ldrd_r(s, h.cond, datalo, h.index, h.base);
                break;
            }
        }
        if (h.index < 0) {
            base = h.base;
            if (datalo == h.base) {
                tcg_out_mov_reg(s, h.cond, TCG_REG_TMP, base);
                base = TCG_REG_TMP;
            }
        } else if (h.index_scratch) {
            tcg_out_ld32_rwb(s, h.cond, datalo, h.index, h.base);
            tcg_out_ld32_12(s, h.cond, datahi, h.index, 4);
            break;
        } else {
            tcg_out_dat_reg(s, h.cond, ARITH_ADD, TCG_REG_TMP,
                            h.base, h.index, SHIFT_IMM_LSL(0));
            base = TCG_REG_TMP;
        }
        tcg_out_ld32_12(s, h.cond, datalo, base, 0);
        tcg_out_ld32_12(s, h.cond, datahi, base, 4);
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi,
                            TCGReg addrlo, TCGReg addrhi,
                            MemOpIdx oi, TCGType data_type)
{
    MemOp opc = get_memop(oi);
    TCGLabelQemuLdst *ldst;
    HostAddress h;

    ldst = prepare_host_addr(s, &h, addrlo, addrhi, oi, true);
    if (ldst) {
        ldst->type = data_type;
        ldst->datalo_reg = datalo;
        ldst->datahi_reg = datahi;

        /*
         * This is a conditional BL only to load a pointer within this
         * opcode into LR for the slow path.  We will not be using
         * the value for a tail call.
         */
1659        ldst->label_ptr[0] = s->code_ptr;
1660        tcg_out_bl_imm(s, COND_NE, 0);
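        /*
         * The zero displacement above is a placeholder: label_ptr records
         * this insn so the slow-path emitter can patch the BL target in
         * later.  BL also deposits the address of the next insn in LR,
         * which is the pointer the slow path consumes.
         */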
1661
1662        tcg_out_qemu_ld_direct(s, opc, datalo, datahi, h);
1663        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
1664    } else {
1665        tcg_out_qemu_ld_direct(s, opc, datalo, datahi, h);
1666    }
1667}
1668
1669static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg datalo,
1670                                   TCGReg datahi, HostAddress h)
1671{
1672    /* Byte swapping is left to middle-end expansion. */
1673    tcg_debug_assert((opc & MO_BSWAP) == 0);
1674
1675    switch (opc & MO_SIZE) {
1676    case MO_8:
1677        if (h.index < 0) {
1678            tcg_out_st8_12(s, h.cond, datalo, h.base, 0);
1679        } else {
1680            tcg_out_st8_r(s, h.cond, datalo, h.base, h.index);
1681        }
1682        break;
1683    case MO_16:
1684        if (h.index < 0) {
1685            tcg_out_st16_8(s, h.cond, datalo, h.base, 0);
1686        } else {
1687            tcg_out_st16_r(s, h.cond, datalo, h.base, h.index);
1688        }
1689        break;
1690    case MO_32:
1691        if (h.index < 0) {
1692            tcg_out_st32_12(s, h.cond, datalo, h.base, 0);
1693        } else {
1694            tcg_out_st32_r(s, h.cond, datalo, h.base, h.index);
1695        }
1696        break;
1697    case MO_64:
1698        /* We used pair allocation for datalo, so it should already be aligned. */
1699        tcg_debug_assert((datalo & 1) == 0);
1700        tcg_debug_assert(datahi == datalo + 1);
1701        /* STRD requires an aligned address, which we verify from the MemOp. */
1702        if (memop_alignment_bits(opc) >= MO_64) {
1703            if (h.index < 0) {
1704                tcg_out_strd_8(s, h.cond, datalo, h.base, 0);
1705            } else {
1706                tcg_out_strd_r(s, h.cond, datalo, h.base, h.index);
1707            }
1708        } else if (h.index < 0) {
1709            tcg_out_st32_12(s, h.cond, datalo, h.base, 0);
1710            tcg_out_st32_12(s, h.cond, datahi, h.base, 4);
1711        } else if (h.index_scratch) {
1712            tcg_out_st32_rwb(s, h.cond, datalo, h.index, h.base);
1713            tcg_out_st32_12(s, h.cond, datahi, h.index, 4);
1714        } else {
1715            tcg_out_dat_reg(s, h.cond, ARITH_ADD, TCG_REG_TMP,
1716                            h.base, h.index, SHIFT_IMM_LSL(0));
1717            tcg_out_st32_12(s, h.cond, datalo, TCG_REG_TMP, 0);
1718            tcg_out_st32_12(s, h.cond, datahi, TCG_REG_TMP, 4);
1719        }
1720        break;
1721    default:
1722        g_assert_not_reached();
1723    }
1724}
1725
1726static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi,
1727                            TCGReg addrlo, TCGReg addrhi,
1728                            MemOpIdx oi, TCGType data_type)
1729{
1730    MemOp opc = get_memop(oi);
1731    TCGLabelQemuLdst *ldst;
1732    HostAddress h;
1733
1734    ldst = prepare_host_addr(s, &h, addrlo, addrhi, oi, false);
1735    if (ldst) {
1736        ldst->type = data_type;
1737        ldst->datalo_reg = datalo;
1738        ldst->datahi_reg = datahi;
1739
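        /*
         * The comparison emitted by prepare_host_addr leaves Z set on the
         * fast path, so predicate the store on EQ; otherwise the store is
         * skipped and the conditional BL below enters the slow path.
         */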
1740        h.cond = COND_EQ;
1741        tcg_out_qemu_st_direct(s, opc, datalo, datahi, h);
1742
1743        /* The conditional call is last, as we're going to return here. */
1744        ldst->label_ptr[0] = s->code_ptr;
1745        tcg_out_bl_imm(s, COND_NE, 0);
1746        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
1747    } else {
1748        tcg_out_qemu_st_direct(s, opc, datalo, datahi, h);
1749    }
1750}
1751
1752static void tcg_out_epilogue(TCGContext *s);
1753
1754static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg)
1755{
1756    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, arg);
1757    tcg_out_epilogue(s);
1758}
1759
1760static void tcg_out_goto_tb(TCGContext *s, int which)
1761{
1762    uintptr_t i_addr;
1763    intptr_t i_disp;
1764
1765    /* Direct branch will be patched by tb_target_set_jmp_target. */
1766    set_jmp_insn_offset(s, which);
1767    tcg_out32(s, INSN_NOP);
1768
1769    /* When the branch is out of range, fall through to the indirect branch. */
1770    i_addr = get_jmp_target_addr(s, which);
1771    i_disp = tcg_pcrel_diff(s, (void *)i_addr) - 8;
1772    tcg_debug_assert(i_disp < 0);
1773    if (i_disp >= -0xfff) {
1774        tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, i_disp);
1775    } else {
1776        /*
1777         * The TB is close, but outside the 12 bits addressable by
1778         * the load.  We can extend this to 20 bits with a sub of a
1779         * shifted immediate from pc.
1780         */
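        /*
         * Worked example: for i_disp == -0x12345 we get h == 0x12345 and
         * l == -0x345, so h + l == 0x12000, a valid rotated immediate.
         * We then emit "sub r0, pc, #0x12000; ldr pc, [r0, #-0x345]",
         * loading from pc - 0x12345 as required.
         */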
1781        int h = -i_disp;
1782        int l = -(h & 0xfff);
1783
1784        h = encode_imm_nofail(h + l);
1785        tcg_out_dat_imm(s, COND_AL, ARITH_SUB, TCG_REG_R0, TCG_REG_PC, h);
1786        tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_R0, l);
1787    }
1788    set_jmp_reset_offset(s, which);
1789}
1790
1791void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
1792                              uintptr_t jmp_rx, uintptr_t jmp_rw)
1793{
1794    uintptr_t addr = tb->jmp_target_addr[n];
1795    ptrdiff_t offset = addr - (jmp_rx + 8);
1796    tcg_insn_unit insn;
1797
1798    /* Either directly branch, or fall through to indirect branch. */
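    /*
     * The B encoding holds a 24-bit signed word offset, i.e. 26 bits of
     * byte offset, giving a reach of roughly +/- 32MB from this insn.
     */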
1799    if (offset == sextract64(offset, 0, 26)) {
1800        /* B <addr> */
1801        insn = deposit32((COND_AL << 28) | INSN_B, 0, 24, offset >> 2);
1802    } else {
1803        insn = INSN_NOP;
1804    }
1805
1806    qatomic_set((uint32_t *)jmp_rw, insn);
1807    flush_idcache_range(jmp_rx, jmp_rw, 4);
1808}
1809
1810static void tcg_out_op(TCGContext *s, TCGOpcode opc,
1811                       const TCGArg args[TCG_MAX_OP_ARGS],
1812                       const int const_args[TCG_MAX_OP_ARGS])
1813{
1814    TCGArg a0, a1, a2, a3, a4, a5;
1815    int c;
1816
1817    switch (opc) {
1818    case INDEX_op_goto_ptr:
1819        tcg_out_b_reg(s, COND_AL, args[0]);
1820        break;
1821    case INDEX_op_br:
1822        tcg_out_goto_label(s, COND_AL, arg_label(args[0]));
1823        break;
1824
1825    case INDEX_op_ld8u_i32:
1826        tcg_out_ld8u(s, COND_AL, args[0], args[1], args[2]);
1827        break;
1828    case INDEX_op_ld8s_i32:
1829        tcg_out_ld8s(s, COND_AL, args[0], args[1], args[2]);
1830        break;
1831    case INDEX_op_ld16u_i32:
1832        tcg_out_ld16u(s, COND_AL, args[0], args[1], args[2]);
1833        break;
1834    case INDEX_op_ld16s_i32:
1835        tcg_out_ld16s(s, COND_AL, args[0], args[1], args[2]);
1836        break;
1837    case INDEX_op_ld_i32:
1838        tcg_out_ld32u(s, COND_AL, args[0], args[1], args[2]);
1839        break;
1840    case INDEX_op_st8_i32:
1841        tcg_out_st8(s, COND_AL, args[0], args[1], args[2]);
1842        break;
1843    case INDEX_op_st16_i32:
1844        tcg_out_st16(s, COND_AL, args[0], args[1], args[2]);
1845        break;
1846    case INDEX_op_st_i32:
1847        tcg_out_st32(s, COND_AL, args[0], args[1], args[2]);
1848        break;
1849
1850    case INDEX_op_movcond_i32:
1851        /* Constraints mean that v2 is always in the same register as dest,
1852         * so we only need to do "if condition passed, move v1 to dest".
1853         */
1854        c = tcg_out_cmp(s, args[5], args[1], args[2], const_args[2]);
1855        tcg_out_dat_rIK(s, tcg_cond_to_arm_cond[c], ARITH_MOV,
1856                        ARITH_MVN, args[0], 0, args[3], const_args[3]);
1857        break;
1858    case INDEX_op_add_i32:
1859        tcg_out_dat_rIN(s, COND_AL, ARITH_ADD, ARITH_SUB,
1860                        args[0], args[1], args[2], const_args[2]);
1861        break;
1862    case INDEX_op_sub_i32:
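        /*
         * RSB (reverse subtract) computes op2 - Rn, so a constant first
         * operand can be handled without first loading it into a register.
         */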
1863        if (const_args[1]) {
1864            if (const_args[2]) {
1865                tcg_out_movi32(s, COND_AL, args[0], args[1] - args[2]);
1866            } else {
1867                tcg_out_dat_rI(s, COND_AL, ARITH_RSB,
1868                               args[0], args[2], args[1], 1);
1869            }
1870        } else {
1871            tcg_out_dat_rIN(s, COND_AL, ARITH_SUB, ARITH_ADD,
1872                            args[0], args[1], args[2], const_args[2]);
1873        }
1874        break;
1875    case INDEX_op_and_i32:
1876        tcg_out_dat_rIK(s, COND_AL, ARITH_AND, ARITH_BIC,
1877                        args[0], args[1], args[2], const_args[2]);
1878        break;
1879    case INDEX_op_andc_i32:
1880        tcg_out_dat_rIK(s, COND_AL, ARITH_BIC, ARITH_AND,
1881                        args[0], args[1], args[2], const_args[2]);
1882        break;
1883    case INDEX_op_or_i32:
1884        c = ARITH_ORR;
1885        goto gen_arith;
1886    case INDEX_op_xor_i32:
1887        c = ARITH_EOR;
1888        /* Fall through.  */
1889    gen_arith:
1890        tcg_out_dat_rI(s, COND_AL, c, args[0], args[1], args[2], const_args[2]);
1891        break;
1892    case INDEX_op_add2_i32:
1893        a0 = args[0], a1 = args[1], a2 = args[2];
1894        a3 = args[3], a4 = args[4], a5 = args[5];
1895        if (a0 == a3 || (a0 == a5 && !const_args[5])) {
1896            a0 = TCG_REG_TMP;
1897        }
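        /*
         * When the low output would clobber a high-part input, build the
         * low word in TMP and copy it into place once the high word has
         * been computed.
         */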
1898        tcg_out_dat_rIN(s, COND_AL, ARITH_ADD | TO_CPSR, ARITH_SUB | TO_CPSR,
1899                        a0, a2, a4, const_args[4]);
1900        tcg_out_dat_rIK(s, COND_AL, ARITH_ADC, ARITH_SBC,
1901                        a1, a3, a5, const_args[5]);
1902        tcg_out_mov_reg(s, COND_AL, args[0], a0);
1903        break;
1904    case INDEX_op_sub2_i32:
1905        a0 = args[0], a1 = args[1], a2 = args[2];
1906        a3 = args[3], a4 = args[4], a5 = args[5];
1907        if ((a0 == a3 && !const_args[3]) || (a0 == a5 && !const_args[5])) {
1908            a0 = TCG_REG_TMP;
1909        }
1910        if (const_args[2]) {
1911            if (const_args[4]) {
1912                tcg_out_movi32(s, COND_AL, a0, a4);
1913                a4 = a0;
1914            }
1915            tcg_out_dat_rI(s, COND_AL, ARITH_RSB | TO_CPSR, a0, a4, a2, 1);
1916        } else {
1917            tcg_out_dat_rIN(s, COND_AL, ARITH_SUB | TO_CPSR,
1918                            ARITH_ADD | TO_CPSR, a0, a2, a4, const_args[4]);
1919        }
1920        if (const_args[3]) {
1921            if (const_args[5]) {
1922                tcg_out_movi32(s, COND_AL, a1, a5);
1923                a5 = a1;
1924            }
1925            tcg_out_dat_rI(s, COND_AL, ARITH_RSC, a1, a5, a3, 1);
1926        } else {
1927            tcg_out_dat_rIK(s, COND_AL, ARITH_SBC, ARITH_ADC,
1928                            a1, a3, a5, const_args[5]);
1929        }
1930        tcg_out_mov_reg(s, COND_AL, args[0], a0);
1931        break;
1932    case INDEX_op_neg_i32:
1933        tcg_out_dat_imm(s, COND_AL, ARITH_RSB, args[0], args[1], 0);
1934        break;
1935    case INDEX_op_not_i32:
1936        tcg_out_dat_reg(s, COND_AL,
1937                        ARITH_MVN, args[0], 0, args[1], SHIFT_IMM_LSL(0));
1938        break;
1939    case INDEX_op_mul_i32:
1940        tcg_out_mul32(s, COND_AL, args[0], args[1], args[2]);
1941        break;
1942    case INDEX_op_mulu2_i32:
1943        tcg_out_umull32(s, COND_AL, args[0], args[1], args[2], args[3]);
1944        break;
1945    case INDEX_op_muls2_i32:
1946        tcg_out_smull32(s, COND_AL, args[0], args[1], args[2], args[3]);
1947        break;
1948    /* XXX: Perhaps args[2] & 0x1f is wrong */
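    /*
     * Note: in the A32 shift-immediate encoding a count of 0 means 32
     * for LSR/ASR (and RRX for ROR), so a masked count of zero must be
     * emitted as LSL #0, i.e. a plain move.
     */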
1949    case INDEX_op_shl_i32:
1950        c = const_args[2] ?
1951                SHIFT_IMM_LSL(args[2] & 0x1f) : SHIFT_REG_LSL(args[2]);
1952        goto gen_shift32;
1953    case INDEX_op_shr_i32:
1954        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_LSR(args[2] & 0x1f) :
1955                SHIFT_IMM_LSL(0) : SHIFT_REG_LSR(args[2]);
1956        goto gen_shift32;
1957    case INDEX_op_sar_i32:
1958        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ASR(args[2] & 0x1f) :
1959                SHIFT_IMM_LSL(0) : SHIFT_REG_ASR(args[2]);
1960        goto gen_shift32;
1961    case INDEX_op_rotr_i32:
1962        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ROR(args[2] & 0x1f) :
1963                SHIFT_IMM_LSL(0) : SHIFT_REG_ROR(args[2]);
1964        /* Fall through.  */
1965    gen_shift32:
1966        tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1], c);
1967        break;
1968
1969    case INDEX_op_rotl_i32:
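        /* ARM has no rotate-left; rotl(x, n) == rotr(x, (32 - n) & 31). */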
1970        if (const_args[2]) {
1971            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
1972                            ((0x20 - args[2]) & 0x1f) ?
1973                            SHIFT_IMM_ROR((0x20 - args[2]) & 0x1f) :
1974                            SHIFT_IMM_LSL(0));
1975        } else {
1976            tcg_out_dat_imm(s, COND_AL, ARITH_RSB, TCG_REG_TMP, args[2], 0x20);
1977            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
1978                            SHIFT_REG_ROR(TCG_REG_TMP));
1979        }
1980        break;
1981
1982    case INDEX_op_ctz_i32:
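        /* ctz(x) == clz(rbit(x)): reverse the bits and share the clz path. */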
1983        tcg_out_dat_reg(s, COND_AL, INSN_RBIT, TCG_REG_TMP, 0, args[1], 0);
1984        a1 = TCG_REG_TMP;
1985        goto do_clz;
1986
1987    case INDEX_op_clz_i32:
1988        a1 = args[1];
1989    do_clz:
1990        a0 = args[0];
1991        a2 = args[2];
1992        c = const_args[2];
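        /*
         * CLZ itself returns 32 for a zero input, so when the fallback
         * value is the constant 32 no compare is required.
         */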
1993        if (c && a2 == 32) {
1994            tcg_out_dat_reg(s, COND_AL, INSN_CLZ, a0, 0, a1, 0);
1995            break;
1996        }
1997        tcg_out_dat_imm(s, COND_AL, ARITH_CMP, 0, a1, 0);
1998        tcg_out_dat_reg(s, COND_NE, INSN_CLZ, a0, 0, a1, 0);
1999        if (c || a0 != a2) {
2000            tcg_out_dat_rIK(s, COND_EQ, ARITH_MOV, ARITH_MVN, a0, 0, a2, c);
2001        }
2002        break;
2003
2004    case INDEX_op_brcond_i32:
2005        c = tcg_out_cmp(s, args[2], args[0], args[1], const_args[1]);
2006        tcg_out_goto_label(s, tcg_cond_to_arm_cond[c], arg_label(args[3]));
2007        break;
2008    case INDEX_op_setcond_i32:
2009        c = tcg_out_cmp(s, args[3], args[1], args[2], const_args[2]);
2010        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[c],
2011                        ARITH_MOV, args[0], 0, 1);
2012        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(c)],
2013                        ARITH_MOV, args[0], 0, 0);
2014        break;
2015    case INDEX_op_negsetcond_i32:
2016        c = tcg_out_cmp(s, args[3], args[1], args[2], const_args[2]);
2017        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[c],
2018                        ARITH_MVN, args[0], 0, 0);
2019        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(c)],
2020                        ARITH_MOV, args[0], 0, 0);
2021        break;
2022
2023    case INDEX_op_brcond2_i32:
2024        c = tcg_out_cmp2(s, args, const_args);
2025        tcg_out_goto_label(s, tcg_cond_to_arm_cond[c], arg_label(args[5]));
2026        break;
2027    case INDEX_op_setcond2_i32:
2028        c = tcg_out_cmp2(s, args + 1, const_args + 1);
2029        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[c], ARITH_MOV, args[0], 0, 1);
2030        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(c)],
2031                        ARITH_MOV, args[0], 0, 0);
2032        break;
2033
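    /* In the following, a -1 register argument marks an absent high half. */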
2034    case INDEX_op_qemu_ld_a32_i32:
2035        tcg_out_qemu_ld(s, args[0], -1, args[1], -1, args[2], TCG_TYPE_I32);
2036        break;
2037    case INDEX_op_qemu_ld_a64_i32:
2038        tcg_out_qemu_ld(s, args[0], -1, args[1], args[2],
2039                        args[3], TCG_TYPE_I32);
2040        break;
2041    case INDEX_op_qemu_ld_a32_i64:
2042        tcg_out_qemu_ld(s, args[0], args[1], args[2], -1,
2043                        args[3], TCG_TYPE_I64);
2044        break;
2045    case INDEX_op_qemu_ld_a64_i64:
2046        tcg_out_qemu_ld(s, args[0], args[1], args[2], args[3],
2047                        args[4], TCG_TYPE_I64);
2048        break;
2049
2050    case INDEX_op_qemu_st_a32_i32:
2051        tcg_out_qemu_st(s, args[0], -1, args[1], -1, args[2], TCG_TYPE_I32);
2052        break;
2053    case INDEX_op_qemu_st_a64_i32:
2054        tcg_out_qemu_st(s, args[0], -1, args[1], args[2],
2055                        args[3], TCG_TYPE_I32);
2056        break;
2057    case INDEX_op_qemu_st_a32_i64:
2058        tcg_out_qemu_st(s, args[0], args[1], args[2], -1,
2059                        args[3], TCG_TYPE_I64);
2060        break;
2061    case INDEX_op_qemu_st_a64_i64:
2062        tcg_out_qemu_st(s, args[0], args[1], args[2], args[3],
2063                        args[4], TCG_TYPE_I64);
2064        break;
2065
2066    case INDEX_op_bswap16_i32:
2067        tcg_out_bswap16(s, COND_AL, args[0], args[1], args[2]);
2068        break;
2069    case INDEX_op_bswap32_i32:
2070        tcg_out_bswap32(s, COND_AL, args[0], args[1]);
2071        break;
2072
2073    case INDEX_op_deposit_i32:
2074        tcg_out_deposit(s, COND_AL, args[0], args[2],
2075                        args[3], args[4], const_args[2]);
2076        break;
2077    case INDEX_op_extract_i32:
2078        tcg_out_extract(s, COND_AL, args[0], args[1], args[2], args[3]);
2079        break;
2080    case INDEX_op_sextract_i32:
2081        tcg_out_sextract(s, COND_AL, args[0], args[1], args[2], args[3]);
2082        break;
2083    case INDEX_op_extract2_i32:
2084        /* ??? These optimizations versus zero should be generic.  */
2085        /* ??? But we can't substitute 2 for 1 in the opcode stream yet.  */
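        /*
         * extract2 computes
         *   args[0] = (args[1] >> args[3]) | (args[2] << (32 - args[3])).
         */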
2086        if (const_args[1]) {
2087            if (const_args[2]) {
2088                tcg_out_movi(s, TCG_TYPE_REG, args[0], 0);
2089            } else {
2090                tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0,
2091                                args[2], SHIFT_IMM_LSL(32 - args[3]));
2092            }
2093        } else if (const_args[2]) {
2094            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0,
2095                            args[1], SHIFT_IMM_LSR(args[3]));
2096        } else {
2097            /* We can do extract2 in 2 insns, vs the 3 required otherwise.  */
2098            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0,
2099                            args[2], SHIFT_IMM_LSL(32 - args[3]));
2100            tcg_out_dat_reg(s, COND_AL, ARITH_ORR, args[0], TCG_REG_TMP,
2101                            args[1], SHIFT_IMM_LSR(args[3]));
2102        }
2103        break;
2104
2105    case INDEX_op_div_i32:
2106        tcg_out_sdiv(s, COND_AL, args[0], args[1], args[2]);
2107        break;
2108    case INDEX_op_divu_i32:
2109        tcg_out_udiv(s, COND_AL, args[0], args[1], args[2]);
2110        break;
2111
2112    case INDEX_op_mb:
2113        tcg_out_mb(s, args[0]);
2114        break;
2115
2116    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
2117    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
2118    case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb.  */
2119    case INDEX_op_goto_tb:  /* Always emitted via tcg_out_goto_tb.  */
2120    case INDEX_op_ext8s_i32:  /* Always emitted via tcg_reg_alloc_op.  */
2121    case INDEX_op_ext8u_i32:
2122    case INDEX_op_ext16s_i32:
2123    case INDEX_op_ext16u_i32:
2124    default:
2125        g_assert_not_reached();
2126    }
2127}
2128
2129static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
2130{
2131    switch (op) {
2132    case INDEX_op_goto_ptr:
2133        return C_O0_I1(r);
2134
2135    case INDEX_op_ld8u_i32:
2136    case INDEX_op_ld8s_i32:
2137    case INDEX_op_ld16u_i32:
2138    case INDEX_op_ld16s_i32:
2139    case INDEX_op_ld_i32:
2140    case INDEX_op_neg_i32:
2141    case INDEX_op_not_i32:
2142    case INDEX_op_bswap16_i32:
2143    case INDEX_op_bswap32_i32:
2144    case INDEX_op_ext8s_i32:
2145    case INDEX_op_ext16s_i32:
2146    case INDEX_op_ext16u_i32:
2147    case INDEX_op_extract_i32:
2148    case INDEX_op_sextract_i32:
2149        return C_O1_I1(r, r);
2150
2151    case INDEX_op_st8_i32:
2152    case INDEX_op_st16_i32:
2153    case INDEX_op_st_i32:
2154        return C_O0_I2(r, r);
2155
2156    case INDEX_op_add_i32:
2157    case INDEX_op_sub_i32:
2158    case INDEX_op_setcond_i32:
2159    case INDEX_op_negsetcond_i32:
2160        return C_O1_I2(r, r, rIN);
2161
2162    case INDEX_op_and_i32:
2163    case INDEX_op_andc_i32:
2164    case INDEX_op_clz_i32:
2165    case INDEX_op_ctz_i32:
2166        return C_O1_I2(r, r, rIK);
2167
2168    case INDEX_op_mul_i32:
2169    case INDEX_op_div_i32:
2170    case INDEX_op_divu_i32:
2171        return C_O1_I2(r, r, r);
2172
2173    case INDEX_op_mulu2_i32:
2174    case INDEX_op_muls2_i32:
2175        return C_O2_I2(r, r, r, r);
2176
2177    case INDEX_op_or_i32:
2178    case INDEX_op_xor_i32:
2179        return C_O1_I2(r, r, rI);
2180
2181    case INDEX_op_shl_i32:
2182    case INDEX_op_shr_i32:
2183    case INDEX_op_sar_i32:
2184    case INDEX_op_rotl_i32:
2185    case INDEX_op_rotr_i32:
2186        return C_O1_I2(r, r, ri);
2187
2188    case INDEX_op_brcond_i32:
2189        return C_O0_I2(r, rIN);
2190    case INDEX_op_deposit_i32:
2191        return C_O1_I2(r, 0, rZ);
2192    case INDEX_op_extract2_i32:
2193        return C_O1_I2(r, rZ, rZ);
2194    case INDEX_op_movcond_i32:
2195        return C_O1_I4(r, r, rIN, rIK, 0);
2196    case INDEX_op_add2_i32:
2197        return C_O2_I4(r, r, r, r, rIN, rIK);
2198    case INDEX_op_sub2_i32:
2199        return C_O2_I4(r, r, rI, rI, rIN, rIK);
2200    case INDEX_op_brcond2_i32:
2201        return C_O0_I4(r, r, rI, rI);
2202    case INDEX_op_setcond2_i32:
2203        return C_O1_I4(r, r, r, rI, rI);
2204
2205    case INDEX_op_qemu_ld_a32_i32:
2206        return C_O1_I1(r, q);
2207    case INDEX_op_qemu_ld_a64_i32:
2208        return C_O1_I2(r, q, q);
2209    case INDEX_op_qemu_ld_a32_i64:
2210        return C_O2_I1(e, p, q);
2211    case INDEX_op_qemu_ld_a64_i64:
2212        return C_O2_I2(e, p, q, q);
2213    case INDEX_op_qemu_st_a32_i32:
2214        return C_O0_I2(q, q);
2215    case INDEX_op_qemu_st_a64_i32:
2216        return C_O0_I3(q, q, q);
2217    case INDEX_op_qemu_st_a32_i64:
2218        return C_O0_I3(Q, p, q);
2219    case INDEX_op_qemu_st_a64_i64:
2220        return C_O0_I4(Q, p, q, q);
2221
2222    case INDEX_op_st_vec:
2223        return C_O0_I2(w, r);
2224    case INDEX_op_ld_vec:
2225    case INDEX_op_dupm_vec:
2226        return C_O1_I1(w, r);
2227    case INDEX_op_dup_vec:
2228        return C_O1_I1(w, wr);
2229    case INDEX_op_abs_vec:
2230    case INDEX_op_neg_vec:
2231    case INDEX_op_not_vec:
2232    case INDEX_op_shli_vec:
2233    case INDEX_op_shri_vec:
2234    case INDEX_op_sari_vec:
2235        return C_O1_I1(w, w);
2236    case INDEX_op_dup2_vec:
2237    case INDEX_op_add_vec:
2238    case INDEX_op_mul_vec:
2239    case INDEX_op_smax_vec:
2240    case INDEX_op_smin_vec:
2241    case INDEX_op_ssadd_vec:
2242    case INDEX_op_sssub_vec:
2243    case INDEX_op_sub_vec:
2244    case INDEX_op_umax_vec:
2245    case INDEX_op_umin_vec:
2246    case INDEX_op_usadd_vec:
2247    case INDEX_op_ussub_vec:
2248    case INDEX_op_xor_vec:
2249    case INDEX_op_arm_sshl_vec:
2250    case INDEX_op_arm_ushl_vec:
2251        return C_O1_I2(w, w, w);
2252    case INDEX_op_arm_sli_vec:
2253        return C_O1_I2(w, 0, w);
2254    case INDEX_op_or_vec:
2255    case INDEX_op_andc_vec:
2256        return C_O1_I2(w, w, wO);
2257    case INDEX_op_and_vec:
2258    case INDEX_op_orc_vec:
2259        return C_O1_I2(w, w, wV);
2260    case INDEX_op_cmp_vec:
2261        return C_O1_I2(w, w, wZ);
2262    case INDEX_op_bitsel_vec:
2263        return C_O1_I3(w, w, w, w);
2264    default:
2265        g_assert_not_reached();
2266    }
2267}
2268
2269static void tcg_target_init(TCGContext *s)
2270{
2271    /*
2272     * Only probe for the platform and capabilities if we haven't already
2273     * determined maximum values at compile time.
2274     */
2275#if !defined(use_idiv_instructions) || !defined(use_neon_instructions)
2276    {
2277        unsigned long hwcap = qemu_getauxval(AT_HWCAP);
2278#ifndef use_idiv_instructions
2279        use_idiv_instructions = (hwcap & HWCAP_ARM_IDIVA) != 0;
2280#endif
2281#ifndef use_neon_instructions
2282        use_neon_instructions = (hwcap & HWCAP_ARM_NEON) != 0;
2283#endif
2284    }
2285#endif
2286
2287    if (__ARM_ARCH < 7) {
2288        const char *pl = (const char *)qemu_getauxval(AT_PLATFORM);
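        /* AT_PLATFORM is a string such as "v5l", "v6l" or "v7l" on Linux. */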
2289        if (pl != NULL && pl[0] == 'v' && pl[1] >= '4' && pl[1] <= '9') {
2290            arm_arch = pl[1] - '0';
2291        }
2292
2293        if (arm_arch < 6) {
2294            error_report("TCG: ARMv%d is unsupported; exiting", arm_arch);
2295            exit(EXIT_FAILURE);
2296        }
2297    }
2298
2299    tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS;
2300
2301    tcg_target_call_clobber_regs = 0;
2302    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0);
2303    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R1);
2304    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R2);
2305    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R3);
2306    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R12);
2307    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R14);
2308
2309    if (use_neon_instructions) {
2310        tcg_target_available_regs[TCG_TYPE_V64]  = ALL_VECTOR_REGS;
2311        tcg_target_available_regs[TCG_TYPE_V128] = ALL_VECTOR_REGS;
2312
2313        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q0);
2314        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q1);
2315        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q2);
2316        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q3);
2317        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q8);
2318        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q9);
2319        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q10);
2320        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q11);
2321        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q12);
2322        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q13);
2323        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q14);
2324        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q15);
2325    }
2326
2327    s->reserved_regs = 0;
2328    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
2329    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP);
2330    tcg_regset_set_reg(s->reserved_regs, TCG_REG_PC);
2331    tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP);
2332}
2333
2334static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
2335                       TCGReg arg1, intptr_t arg2)
2336{
2337    switch (type) {
2338    case TCG_TYPE_I32:
2339        tcg_out_ld32u(s, COND_AL, arg, arg1, arg2);
2340        return;
2341    case TCG_TYPE_V64:
2342        /* regs 1; size 8; align 8 */
2343        tcg_out_vldst(s, INSN_VLD1 | 0x7d0, arg, arg1, arg2);
2344        return;
2345    case TCG_TYPE_V128:
2346        /*
2347         * We have only 8-byte alignment for the stack per the ABI.
2348         * Rather than dynamically re-align the stack, it's easier
2349         * to simply not request alignment beyond that.  So:
2350         * regs 2; size 8; align 8
2351         */
2352        tcg_out_vldst(s, INSN_VLD1 | 0xad0, arg, arg1, arg2);
2353        return;
2354    default:
2355        g_assert_not_reached();
2356    }
2357}
2358
2359static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
2360                       TCGReg arg1, intptr_t arg2)
2361{
2362    switch (type) {
2363    case TCG_TYPE_I32:
2364        tcg_out_st32(s, COND_AL, arg, arg1, arg2);
2365        return;
2366    case TCG_TYPE_V64:
2367        /* regs 1; size 8; align 8 */
2368        tcg_out_vldst(s, INSN_VST1 | 0x7d0, arg, arg1, arg2);
2369        return;
2370    case TCG_TYPE_V128:
2371        /* See tcg_out_ld re alignment: regs 2; size 8; align 8 */
2372        tcg_out_vldst(s, INSN_VST1 | 0xad0, arg, arg1, arg2);
2373        return;
2374    default:
2375        g_assert_not_reached();
2376    }
2377}
2378
2379static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
2380                        TCGReg base, intptr_t ofs)
2381{
2382    return false;
2383}
2384
2385static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
2386{
2387    if (ret == arg) {
2388        return true;
2389    }
2390    switch (type) {
2391    case TCG_TYPE_I32:
2392        if (ret < TCG_REG_Q0 && arg < TCG_REG_Q0) {
2393            tcg_out_mov_reg(s, COND_AL, ret, arg);
2394            return true;
2395        }
2396        return false;
2397
2398    case TCG_TYPE_V64:
2399    case TCG_TYPE_V128:
2400        /* "VMOV D,N" is an alias for "VORR D,N,N". */
2401        tcg_out_vreg3(s, INSN_VORR, type - TCG_TYPE_V64, 0, ret, arg, arg);
2402        return true;
2403
2404    default:
2405        g_assert_not_reached();
2406    }
2407}
2408
2409static void tcg_out_movi(TCGContext *s, TCGType type,
2410                         TCGReg ret, tcg_target_long arg)
2411{
2412    tcg_debug_assert(type == TCG_TYPE_I32);
2413    tcg_debug_assert(ret < TCG_REG_Q0);
2414    tcg_out_movi32(s, COND_AL, ret, arg);
2415}
2416
2417static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
2418{
2419    return false;
2420}
2421
2422static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
2423                             tcg_target_long imm)
2424{
2425    int enc, opc = ARITH_ADD;
2426
2427    /* All of the easiest immediates to encode are positive. */
2428    if (imm < 0) {
2429        imm = -imm;
2430        opc = ARITH_SUB;
2431    }
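    /* encode_imm returns a negative value if imm has no 8-bit-rotated form. */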
2432    enc = encode_imm(imm);
2433    if (enc >= 0) {
2434        tcg_out_dat_imm(s, COND_AL, opc, rd, rs, enc);
2435    } else {
2436        tcg_out_movi32(s, COND_AL, TCG_REG_TMP, imm);
2437        tcg_out_dat_reg(s, COND_AL, opc, rd, rs,
2438                        TCG_REG_TMP, SHIFT_IMM_LSL(0));
2439    }
2440}
2441
2442/* Type is always V128, with I64 elements.  */
2443static void tcg_out_dup2_vec(TCGContext *s, TCGReg rd, TCGReg rl, TCGReg rh)
2444{
2445    /* Move high element into place first. */
2446    /* VMOV Dd+1, Ds */
2447    tcg_out_vreg3(s, INSN_VORR | (1 << 12), 0, 0, rd, rh, rh);
2448    /* Move low element into place; tcg_out_mov will check for nop. */
2449    tcg_out_mov(s, TCG_TYPE_V64, rd, rl);
2450}
2451
2452static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
2453                            TCGReg rd, TCGReg rs)
2454{
2455    int q = type - TCG_TYPE_V64;
2456
2457    if (vece == MO_64) {
2458        if (type == TCG_TYPE_V128) {
2459            tcg_out_dup2_vec(s, rd, rs, rs);
2460        } else {
2461            tcg_out_mov(s, TCG_TYPE_V64, rd, rs);
2462        }
2463    } else if (rs < TCG_REG_Q0) {
2464        int b = (vece == MO_8);
2465        int e = (vece == MO_16);
2466        tcg_out32(s, INSN_VDUP_G | (b << 22) | (q << 21) | (e << 5) |
2467                  encode_vn(rd) | (rs << 12));
2468    } else {
2469        int imm4 = 1 << vece;
2470        tcg_out32(s, INSN_VDUP_S | (imm4 << 16) | (q << 6) |
2471                  encode_vd(rd) | encode_vm(rs));
2472    }
2473    return true;
2474}
2475
2476static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
2477                             TCGReg rd, TCGReg base, intptr_t offset)
2478{
2479    if (vece == MO_64) {
2480        tcg_out_ld(s, TCG_TYPE_V64, rd, base, offset);
2481        if (type == TCG_TYPE_V128) {
2482            tcg_out_dup2_vec(s, rd, rd, rd);
2483        }
2484    } else {
2485        int q = type - TCG_TYPE_V64;
2486        tcg_out_vldst(s, INSN_VLD1R | (vece << 6) | (q << 5),
2487                      rd, base, offset);
2488    }
2489    return true;
2490}
2491
2492static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
2493                             TCGReg rd, int64_t v64)
2494{
2495    int q = type - TCG_TYPE_V64;
2496    int cmode, imm8, i;
2497
2498    /* Test all bytes equal first.  */
2499    if (vece == MO_8) {
2500        tcg_out_vmovi(s, rd, q, 0, 0xe, v64);
2501        return;
2502    }
2503
2504    /*
2505     * Test all bytes 0x00 or 0xff second.  This can match cases that
2506     * might otherwise take 2 or 3 insns for MO_16 or MO_32 below.
2507     */
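    /*
     * For example, v64 == 0x00ff0000ff0000ff sets imm8 bits 0, 3 and 6,
     * i.e. imm8 == 0x49; the insn expands each bit back into a 0x00 or
     * 0xff byte.
     */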
2508    for (i = imm8 = 0; i < 8; i++) {
2509        uint8_t byte = v64 >> (i * 8);
2510        if (byte == 0xff) {
2511            imm8 |= 1 << i;
2512        } else if (byte != 0) {
2513            goto fail_bytes;
2514        }
2515    }
2516    tcg_out_vmovi(s, rd, q, 1, 0xe, imm8);
2517    return;
2518 fail_bytes:
2519
2520    /*
2521     * Tests for various replications.  For each element width, if we
2522     * cannot find an expansion there's no point checking a larger
2523     * width because we already know by replication it cannot match.
2524     */
2525    if (vece == MO_16) {
2526        uint16_t v16 = v64;
2527
2528        if (is_shimm16(v16, &cmode, &imm8)) {
2529            tcg_out_vmovi(s, rd, q, 0, cmode, imm8);
2530            return;
2531        }
2532        if (is_shimm16(~v16, &cmode, &imm8)) {
2533            tcg_out_vmovi(s, rd, q, 1, cmode, imm8);
2534            return;
2535        }
2536
2537        /*
2538         * Otherwise, all remaining constants can be loaded in two insns:
2539         * rd = v16 & 0xff, rd |= v16 & 0xff00.
2540         */
2541        tcg_out_vmovi(s, rd, q, 0, 0x8, v16 & 0xff);
2542        tcg_out_vmovi(s, rd, q, 0, 0xb, v16 >> 8);   /* VORRI */
2543        return;
2544    }
2545
2546    if (vece == MO_32) {
2547        uint32_t v32 = v64;
2548
2549        if (is_shimm32(v32, &cmode, &imm8) ||
2550            is_soimm32(v32, &cmode, &imm8)) {
2551            tcg_out_vmovi(s, rd, q, 0, cmode, imm8);
2552            return;
2553        }
2554        if (is_shimm32(~v32, &cmode, &imm8) ||
2555            is_soimm32(~v32, &cmode, &imm8)) {
2556            tcg_out_vmovi(s, rd, q, 1, cmode, imm8);
2557            return;
2558        }
2559
2560        /*
2561         * Restrict the set of constants to those we can load with
2562         * two instructions.  Others we load from the pool.
2563         */
2564        i = is_shimm32_pair(v32, &cmode, &imm8);
2565        if (i) {
2566            tcg_out_vmovi(s, rd, q, 0, cmode, imm8);
2567            tcg_out_vmovi(s, rd, q, 0, i | 1, extract32(v32, i * 4, 8));
2568            return;
2569        }
2570        i = is_shimm32_pair(~v32, &cmode, &imm8);
2571        if (i) {
2572            tcg_out_vmovi(s, rd, q, 1, cmode, imm8);
2573            tcg_out_vmovi(s, rd, q, 1, i | 1, extract32(~v32, i * 4, 8));
2574            return;
2575        }
2576    }
2577
2578    /*
2579     * As a last resort, load from the constant pool.
2580     */
2581    if (!q || vece == MO_64) {
2582        new_pool_l2(s, R_ARM_PC11, s->code_ptr, 0, v64, v64 >> 32);
2583        /* VLDR Dd, [pc + offset] */
2584        tcg_out32(s, INSN_VLDR_D | encode_vd(rd) | (0xf << 16));
2585        if (q) {
2586            tcg_out_dup2_vec(s, rd, rd, rd);
2587        }
2588    } else {
2589        new_pool_label(s, (uint32_t)v64, R_ARM_PC8, s->code_ptr, 0);
2590        /* add tmp, pc, offset */
2591        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_TMP, TCG_REG_PC, 0);
2592        tcg_out_dupm_vec(s, type, MO_32, rd, TCG_REG_TMP, 0);
2593    }
2594}
2595
2596static const ARMInsn vec_cmp_insn[16] = {
2597    [TCG_COND_EQ] = INSN_VCEQ,
2598    [TCG_COND_GT] = INSN_VCGT,
2599    [TCG_COND_GE] = INSN_VCGE,
2600    [TCG_COND_GTU] = INSN_VCGT_U,
2601    [TCG_COND_GEU] = INSN_VCGE_U,
2602};
2603
2604static const ARMInsn vec_cmp0_insn[16] = {
2605    [TCG_COND_EQ] = INSN_VCEQ0,
2606    [TCG_COND_GT] = INSN_VCGT0,
2607    [TCG_COND_GE] = INSN_VCGE0,
2608    [TCG_COND_LT] = INSN_VCLT0,
2609    [TCG_COND_LE] = INSN_VCLE0,
2610};
2611
2612static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
2613                           unsigned vecl, unsigned vece,
2614                           const TCGArg args[TCG_MAX_OP_ARGS],
2615                           const int const_args[TCG_MAX_OP_ARGS])
2616{
2617    TCGType type = vecl + TCG_TYPE_V64;
2618    unsigned q = vecl;
2619    TCGArg a0, a1, a2, a3;
2620    int cmode, imm8;
2621
2622    a0 = args[0];
2623    a1 = args[1];
2624    a2 = args[2];
2625
2626    switch (opc) {
2627    case INDEX_op_ld_vec:
2628        tcg_out_ld(s, type, a0, a1, a2);
2629        return;
2630    case INDEX_op_st_vec:
2631        tcg_out_st(s, type, a0, a1, a2);
2632        return;
2633    case INDEX_op_dupm_vec:
2634        tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
2635        return;
2636    case INDEX_op_dup2_vec:
2637        tcg_out_dup2_vec(s, a0, a1, a2);
2638        return;
2639    case INDEX_op_abs_vec:
2640        tcg_out_vreg2(s, INSN_VABS, q, vece, a0, a1);
2641        return;
2642    case INDEX_op_neg_vec:
2643        tcg_out_vreg2(s, INSN_VNEG, q, vece, a0, a1);
2644        return;
2645    case INDEX_op_not_vec:
2646        tcg_out_vreg2(s, INSN_VMVN, q, 0, a0, a1);
2647        return;
2648    case INDEX_op_add_vec:
2649        tcg_out_vreg3(s, INSN_VADD, q, vece, a0, a1, a2);
2650        return;
2651    case INDEX_op_mul_vec:
2652        tcg_out_vreg3(s, INSN_VMUL, q, vece, a0, a1, a2);
2653        return;
2654    case INDEX_op_smax_vec:
2655        tcg_out_vreg3(s, INSN_VMAX, q, vece, a0, a1, a2);
2656        return;
2657    case INDEX_op_smin_vec:
2658        tcg_out_vreg3(s, INSN_VMIN, q, vece, a0, a1, a2);
2659        return;
2660    case INDEX_op_sub_vec:
2661        tcg_out_vreg3(s, INSN_VSUB, q, vece, a0, a1, a2);
2662        return;
2663    case INDEX_op_ssadd_vec:
2664        tcg_out_vreg3(s, INSN_VQADD, q, vece, a0, a1, a2);
2665        return;
2666    case INDEX_op_sssub_vec:
2667        tcg_out_vreg3(s, INSN_VQSUB, q, vece, a0, a1, a2);
2668        return;
2669    case INDEX_op_umax_vec:
2670        tcg_out_vreg3(s, INSN_VMAX_U, q, vece, a0, a1, a2);
2671        return;
2672    case INDEX_op_umin_vec:
2673        tcg_out_vreg3(s, INSN_VMIN_U, q, vece, a0, a1, a2);
2674        return;
2675    case INDEX_op_usadd_vec:
2676        tcg_out_vreg3(s, INSN_VQADD_U, q, vece, a0, a1, a2);
2677        return;
2678    case INDEX_op_ussub_vec:
2679        tcg_out_vreg3(s, INSN_VQSUB_U, q, vece, a0, a1, a2);
2680        return;
2681    case INDEX_op_xor_vec:
2682        tcg_out_vreg3(s, INSN_VEOR, q, 0, a0, a1, a2);
2683        return;
2684    case INDEX_op_arm_sshl_vec:
2685        /*
2686         * Note that Vm is the data and Vn is the shift count,
2687         * therefore the arguments appear reversed.
2688         */
2689        tcg_out_vreg3(s, INSN_VSHL_S, q, vece, a0, a2, a1);
2690        return;
2691    case INDEX_op_arm_ushl_vec:
2692        /* See above. */
2693        tcg_out_vreg3(s, INSN_VSHL_U, q, vece, a0, a2, a1);
2694        return;
2695    case INDEX_op_shli_vec:
2696        tcg_out_vshifti(s, INSN_VSHLI, q, a0, a1, a2 + (8 << vece));
2697        return;
2698    case INDEX_op_shri_vec:
2699        tcg_out_vshifti(s, INSN_VSHRI, q, a0, a1, (16 << vece) - a2);
2700        return;
2701    case INDEX_op_sari_vec:
2702        tcg_out_vshifti(s, INSN_VSARI, q, a0, a1, (16 << vece) - a2);
2703        return;
2704    case INDEX_op_arm_sli_vec:
2705        tcg_out_vshifti(s, INSN_VSLI, q, a0, a2, args[3] + (8 << vece));
2706        return;
2707
2708    case INDEX_op_andc_vec:
2709        if (!const_args[2]) {
2710            tcg_out_vreg3(s, INSN_VBIC, q, 0, a0, a1, a2);
2711            return;
2712        }
2713        a2 = ~a2;
2714        /* fall through */
2715    case INDEX_op_and_vec:
2716        if (const_args[2]) {
2717            is_shimm1632(~a2, &cmode, &imm8);
2718            if (a0 == a1) {
2719                tcg_out_vmovi(s, a0, q, 1, cmode | 1, imm8); /* VBICI */
2720                return;
2721            }
2722            tcg_out_vmovi(s, a0, q, 1, cmode, imm8); /* VMVNI */
2723            a2 = a0;
2724        }
2725        tcg_out_vreg3(s, INSN_VAND, q, 0, a0, a1, a2);
2726        return;
2727
2728    case INDEX_op_orc_vec:
2729        if (!const_args[2]) {
2730            tcg_out_vreg3(s, INSN_VORN, q, 0, a0, a1, a2);
2731            return;
2732        }
2733        a2 = ~a2;
2734        /* fall through */
2735    case INDEX_op_or_vec:
2736        if (const_args[2]) {
2737            is_shimm1632(a2, &cmode, &imm8);
2738            if (a0 == a1) {
2739                tcg_out_vmovi(s, a0, q, 0, cmode | 1, imm8); /* VORRI */
2740                return;
2741            }
2742            tcg_out_vmovi(s, a0, q, 0, cmode, imm8); /* VMOVI */
2743            a2 = a0;
2744        }
2745        tcg_out_vreg3(s, INSN_VORR, q, 0, a0, a1, a2);
2746        return;
2747
2748    case INDEX_op_cmp_vec:
2749        {
2750            TCGCond cond = args[3];
2751            ARMInsn insn;
2752
2753            switch (cond) {
2754            case TCG_COND_NE:
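                /*
                 * The constant operand is constrained to zero, so x != 0
                 * is VTST x, x: a lane becomes all-ones iff any bit is set.
                 */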
2755                if (const_args[2]) {
2756                    tcg_out_vreg3(s, INSN_VTST, q, vece, a0, a1, a1);
2757                } else {
2758                    tcg_out_vreg3(s, INSN_VCEQ, q, vece, a0, a1, a2);
2759                    tcg_out_vreg2(s, INSN_VMVN, q, 0, a0, a0);
2760                }
2761                break;
2762
2763            case TCG_COND_TSTNE:
2764            case TCG_COND_TSTEQ:
2765                if (const_args[2]) {
2766                    /* (x & 0) == 0 */
2767                    tcg_out_dupi_vec(s, type, MO_8, a0,
2768                                     -(cond == TCG_COND_TSTEQ));
2769                    break;
2770                }
2771                tcg_out_vreg3(s, INSN_VTST, q, vece, a0, a1, a2);
2772                if (cond == TCG_COND_TSTEQ) {
2773                    tcg_out_vreg2(s, INSN_VMVN, q, 0, a0, a0);
2774                }
2775                break;
2776
2777            default:
2778                if (const_args[2]) {
2779                    insn = vec_cmp0_insn[cond];
2780                    if (insn) {
2781                        tcg_out_vreg2(s, insn, q, vece, a0, a1);
2782                        return;
2783                    }
2784                    tcg_out_dupi_vec(s, type, MO_8, TCG_VEC_TMP, 0);
2785                    a2 = TCG_VEC_TMP;
2786                }
2787                insn = vec_cmp_insn[cond];
2788                if (insn == 0) {
2789                    TCGArg t;
2790                    t = a1, a1 = a2, a2 = t;
2791                    cond = tcg_swap_cond(cond);
2792                    insn = vec_cmp_insn[cond];
2793                    tcg_debug_assert(insn != 0);
2794                }
2795                tcg_out_vreg3(s, insn, q, vece, a0, a1, a2);
2796                break;
2797            }
2798        }
2799        return;
2800
2801    case INDEX_op_bitsel_vec:
2802        a3 = args[3];
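        /*
         * Operands are (selector, true-value, false-value).  VBSL wants
         * the selector in the destination; VBIT and VBIF cover the cases
         * where the destination already aliases the false or true value.
         */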
2803        if (a0 == a3) {
2804            tcg_out_vreg3(s, INSN_VBIT, q, 0, a0, a2, a1);
2805        } else if (a0 == a2) {
2806            tcg_out_vreg3(s, INSN_VBIF, q, 0, a0, a3, a1);
2807        } else {
2808            tcg_out_mov(s, type, a0, a1);
2809            tcg_out_vreg3(s, INSN_VBSL, q, 0, a0, a2, a3);
2810        }
2811        return;
2812
2813    case INDEX_op_mov_vec:  /* Always emitted via tcg_out_mov.  */
2814    case INDEX_op_dup_vec:  /* Always emitted via tcg_out_dup_vec.  */
2815    default:
2816        g_assert_not_reached();
2817    }
2818}
2819
2820int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
2821{
2822    switch (opc) {
2823    case INDEX_op_add_vec:
2824    case INDEX_op_sub_vec:
2825    case INDEX_op_and_vec:
2826    case INDEX_op_andc_vec:
2827    case INDEX_op_or_vec:
2828    case INDEX_op_orc_vec:
2829    case INDEX_op_xor_vec:
2830    case INDEX_op_not_vec:
2831    case INDEX_op_shli_vec:
2832    case INDEX_op_shri_vec:
2833    case INDEX_op_sari_vec:
2834    case INDEX_op_ssadd_vec:
2835    case INDEX_op_sssub_vec:
2836    case INDEX_op_usadd_vec:
2837    case INDEX_op_ussub_vec:
2838    case INDEX_op_bitsel_vec:
2839        return 1;
2840    case INDEX_op_abs_vec:
2841    case INDEX_op_cmp_vec:
2842    case INDEX_op_mul_vec:
2843    case INDEX_op_neg_vec:
2844    case INDEX_op_smax_vec:
2845    case INDEX_op_smin_vec:
2846    case INDEX_op_umax_vec:
2847    case INDEX_op_umin_vec:
2848        return vece < MO_64;
2849    case INDEX_op_shlv_vec:
2850    case INDEX_op_shrv_vec:
2851    case INDEX_op_sarv_vec:
2852    case INDEX_op_rotli_vec:
2853    case INDEX_op_rotlv_vec:
2854    case INDEX_op_rotrv_vec:
2855        return -1;
2856    default:
2857        return 0;
2858    }
2859}
2860
2861void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
2862                       TCGArg a0, ...)
2863{
2864    va_list va;
2865    TCGv_vec v0, v1, v2, t1, t2, c1;
2866    TCGArg a2;
2867
2868    va_start(va, a0);
2869    v0 = temp_tcgv_vec(arg_temp(a0));
2870    v1 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
2871    a2 = va_arg(va, TCGArg);
2872    va_end(va);
2873
2874    switch (opc) {
2875    case INDEX_op_shlv_vec:
2876        /*
2877         * Merely propagate shlv_vec to arm_ushl_vec.  This way we avoid
2878         * setting TCG_TARGET_HAS_shv_vec, since everything is handled
2879         * by this expansion instead.
2880         */
2881        v2 = temp_tcgv_vec(arg_temp(a2));
2882        vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(v0),
2883                  tcgv_vec_arg(v1), tcgv_vec_arg(v2));
2884        break;
2885
2886    case INDEX_op_shrv_vec:
2887    case INDEX_op_sarv_vec:
2888        /* Right shifts are negative left shifts for NEON.  */
2889        v2 = temp_tcgv_vec(arg_temp(a2));
2890        t1 = tcg_temp_new_vec(type);
2891        tcg_gen_neg_vec(vece, t1, v2);
2892        if (opc == INDEX_op_shrv_vec) {
2893            opc = INDEX_op_arm_ushl_vec;
2894        } else {
2895            opc = INDEX_op_arm_sshl_vec;
2896        }
2897        vec_gen_3(opc, type, vece, tcgv_vec_arg(v0),
2898                  tcgv_vec_arg(v1), tcgv_vec_arg(t1));
2899        tcg_temp_free_vec(t1);
2900        break;
2901
2902    case INDEX_op_rotli_vec:
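        /*
         * rotli(x, n) == (x >> (w - n)) | (x << n); do the right shift
         * into a temp, then let VSLI insert the left-shifted bits.
         */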
2903        t1 = tcg_temp_new_vec(type);
2904        tcg_gen_shri_vec(vece, t1, v1, -a2 & ((8 << vece) - 1));
2905        vec_gen_4(INDEX_op_arm_sli_vec, type, vece,
2906                  tcgv_vec_arg(v0), tcgv_vec_arg(t1), tcgv_vec_arg(v1), a2);
2907        tcg_temp_free_vec(t1);
2908        break;
2909
2910    case INDEX_op_rotlv_vec:
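        /*
         * rotlv: x << n provides the high bits and x >> (w - n) the low
         * ones; the latter is VSHL by (n - w), a negative count.
         */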
2911        v2 = temp_tcgv_vec(arg_temp(a2));
2912        t1 = tcg_temp_new_vec(type);
2913        c1 = tcg_constant_vec(type, vece, 8 << vece);
2914        tcg_gen_sub_vec(vece, t1, v2, c1);
2915        /* Right shifts are negative left shifts for NEON.  */
2916        vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(t1),
2917                  tcgv_vec_arg(v1), tcgv_vec_arg(t1));
2918        vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(v0),
2919                  tcgv_vec_arg(v1), tcgv_vec_arg(v2));
2920        tcg_gen_or_vec(vece, v0, v0, t1);
2921        tcg_temp_free_vec(t1);
2922        break;
2923
2924    case INDEX_op_rotrv_vec:
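        /*
         * rotrv: (x >> n) | (x << (w - n)), the right shift again done
         * as VSHL with a negated count.
         */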
2925        v2 = temp_tcgv_vec(arg_temp(a2));
2926        t1 = tcg_temp_new_vec(type);
2927        t2 = tcg_temp_new_vec(type);
2928        c1 = tcg_constant_vec(type, vece, 8 << vece);
2929        tcg_gen_neg_vec(vece, t1, v2);
2930        tcg_gen_sub_vec(vece, t2, c1, v2);
2931        /* Right shifts are negative left shifts for NEON.  */
2932        vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(t1),
2933                  tcgv_vec_arg(v1), tcgv_vec_arg(t1));
2934        vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(t2),
2935                  tcgv_vec_arg(v1), tcgv_vec_arg(t2));
2936        tcg_gen_or_vec(vece, v0, t1, t2);
2937        tcg_temp_free_vec(t1);
2938        tcg_temp_free_vec(t2);
2939        break;
2940
2941    default:
2942        g_assert_not_reached();
2943    }
2944}
2945
2946static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
2947{
2948    int i;
2949    for (i = 0; i < count; ++i) {
2950        p[i] = INSN_NOP;
2951    }
2952}
2953
2954/* Compute the frame size via macros, to share between
2955   tcg_target_qemu_prologue and tcg_register_jit.  */
2956
2957#define PUSH_SIZE  ((11 - 4 + 1 + 1) * sizeof(tcg_target_long))
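/* That is r4-r11 (8 registers) plus lr: 9 words, or 36 bytes on ARM. */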
2958
2959#define FRAME_SIZE \
2960    ((PUSH_SIZE \
2961      + TCG_STATIC_CALL_ARGS_SIZE \
2962      + CPU_TEMP_BUF_NLONGS * sizeof(long) \
2963      + TCG_TARGET_STACK_ALIGN - 1) \
2964     & -TCG_TARGET_STACK_ALIGN)
2965
2966#define STACK_ADDEND  (FRAME_SIZE - PUSH_SIZE)
2967
2968static void tcg_target_qemu_prologue(TCGContext *s)
2969{
2970    /* Calling convention requires us to save r4-r11 and lr.  */
2971    /* stmdb sp!, { r4 - r11, lr } */
2972    tcg_out_ldstm(s, COND_AL, INSN_STMDB, TCG_REG_CALL_STACK,
2973                  (1 << TCG_REG_R4) | (1 << TCG_REG_R5) | (1 << TCG_REG_R6) |
2974                  (1 << TCG_REG_R7) | (1 << TCG_REG_R8) | (1 << TCG_REG_R9) |
2975                  (1 << TCG_REG_R10) | (1 << TCG_REG_R11) | (1 << TCG_REG_R14));
2976
2977    /* Reserve callee argument and tcg temp space.  */
2978    tcg_out_dat_rI(s, COND_AL, ARITH_SUB, TCG_REG_CALL_STACK,
2979                   TCG_REG_CALL_STACK, STACK_ADDEND, 1);
2980    tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE,
2981                  CPU_TEMP_BUF_NLONGS * sizeof(long));
2982
2983    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
2984
2985    if (!tcg_use_softmmu && guest_base) {
2986        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_GUEST_BASE, guest_base);
2987        tcg_regset_set_reg(s->reserved_regs, TCG_REG_GUEST_BASE);
2988    }
2989
2990    tcg_out_b_reg(s, COND_AL, tcg_target_call_iarg_regs[1]);
2991
2992    /*
2993     * Return path for goto_ptr.  Set the return value to 0, as with exit_tb,
2994     * and fall through to the rest of the epilogue.
2995     */
2996    tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
2997    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, 0);
2998    tcg_out_epilogue(s);
2999}
3000
3001static void tcg_out_epilogue(TCGContext *s)
3002{
3003    /* Release local stack frame.  */
3004    tcg_out_dat_rI(s, COND_AL, ARITH_ADD, TCG_REG_CALL_STACK,
3005                   TCG_REG_CALL_STACK, STACK_ADDEND, 1);
3006
3007    /* ldmia sp!, { r4 - r11, pc } */
3008    tcg_out_ldstm(s, COND_AL, INSN_LDMIA, TCG_REG_CALL_STACK,
3009                  (1 << TCG_REG_R4) | (1 << TCG_REG_R5) | (1 << TCG_REG_R6) |
3010                  (1 << TCG_REG_R7) | (1 << TCG_REG_R8) | (1 << TCG_REG_R9) |
3011                  (1 << TCG_REG_R10) | (1 << TCG_REG_R11) | (1 << TCG_REG_PC));
3012}
3013
3014static void tcg_out_tb_start(TCGContext *s)
3015{
3016    /* nothing to do */
3017}
3018
3019typedef struct {
3020    DebugFrameHeader h;
3021    uint8_t fde_def_cfa[4];
3022    uint8_t fde_reg_ofs[18];
3023} DebugFrame;
3024
3025#define ELF_HOST_MACHINE EM_ARM
3026
3027/* We're expecting a 2-byte uleb128-encoded value.  */
3028QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));
3029
3030static const DebugFrame debug_frame = {
3031    .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
3032    .h.cie.id = -1,
3033    .h.cie.version = 1,
3034    .h.cie.code_align = 1,
3035    .h.cie.data_align = 0x7c,             /* sleb128 -4 */
3036    .h.cie.return_column = 14,
3037
3038    /* Total FDE size does not include the "len" member.  */
3039    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),
3040
3041    .fde_def_cfa = {
3042        12, 13,                         /* DW_CFA_def_cfa sp, ... */
3043        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
3044        (FRAME_SIZE >> 7)
3045    },
3046    .fde_reg_ofs = {
3047        /* The following must match the stmdb in the prologue.  */
3048        0x8e, 1,                        /* DW_CFA_offset, lr, -4 */
3049        0x8b, 2,                        /* DW_CFA_offset, r11, -8 */
3050        0x8a, 3,                        /* DW_CFA_offset, r10, -12 */
3051        0x89, 4,                        /* DW_CFA_offset, r9, -16 */
3052        0x88, 5,                        /* DW_CFA_offset, r8, -20 */
3053        0x87, 6,                        /* DW_CFA_offset, r7, -24 */
3054        0x86, 7,                        /* DW_CFA_offset, r6, -28 */
3055        0x85, 8,                        /* DW_CFA_offset, r5, -32 */
3056        0x84, 9,                        /* DW_CFA_offset, r4, -36 */
3057    }
3058};
3059
3060void tcg_register_jit(const void *buf, size_t buf_size)
3061{
3062    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
3063}
3064