/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Andrzej Zaborowski
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "elf.h"

int arm_arch = __ARM_ARCH;

#ifndef use_idiv_instructions
bool use_idiv_instructions;
#endif
#ifndef use_neon_instructions
bool use_neon_instructions;
#endif

/* Used for function call generation. */
#define TCG_TARGET_STACK_ALIGN          8
#define TCG_TARGET_CALL_STACK_OFFSET    0
#define TCG_TARGET_CALL_ARG_I32         TCG_CALL_ARG_NORMAL
#define TCG_TARGET_CALL_ARG_I64         TCG_CALL_ARG_EVEN
#define TCG_TARGET_CALL_ARG_I128        TCG_CALL_ARG_EVEN
#define TCG_TARGET_CALL_RET_I128        TCG_CALL_RET_BY_REF

#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%r0",  "%r1",  "%r2",  "%r3",  "%r4",  "%r5",  "%r6",  "%r7",
    "%r8",  "%r9",  "%r10", "%r11", "%r12", "%sp",  "%r14", "%pc",
    "%q0",  "%q1",  "%q2",  "%q3",  "%q4",  "%q5",  "%q6",  "%q7",
    "%q8",  "%q9",  "%q10", "%q11", "%q12", "%q13", "%q14", "%q15",
};
#endif

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R13,
    TCG_REG_R0,
    TCG_REG_R1,
    TCG_REG_R2,
    TCG_REG_R3,
    TCG_REG_R12,
    TCG_REG_R14,

    TCG_REG_Q0,
    TCG_REG_Q1,
    TCG_REG_Q2,
    TCG_REG_Q3,
    /* Q4 - Q7 are call-saved, and skipped. */
    TCG_REG_Q8,
    TCG_REG_Q9,
    TCG_REG_Q10,
    TCG_REG_Q11,
    TCG_REG_Q12,
    TCG_REG_Q13,
    TCG_REG_Q14,
    TCG_REG_Q15,
};

static const int tcg_target_call_iarg_regs[4] = {
    TCG_REG_R0, TCG_REG_R1, TCG_REG_R2, TCG_REG_R3
};

static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
{
    tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
    tcg_debug_assert(slot >= 0 && slot <= 3);
    return TCG_REG_R0 + slot;
}

#define TCG_REG_TMP  TCG_REG_R12
#define TCG_VEC_TMP  TCG_REG_Q15
#define TCG_REG_GUEST_BASE  TCG_REG_R11

typedef enum {
    COND_EQ = 0x0,
    COND_NE = 0x1,
    COND_CS = 0x2,  /* Unsigned greater or equal */
    COND_CC = 0x3,  /* Unsigned less than */
    COND_MI = 0x4,  /* Negative */
    COND_PL = 0x5,  /* Zero or greater */
    COND_VS = 0x6,  /* Overflow */
    COND_VC = 0x7,  /* No overflow */
    COND_HI = 0x8,  /* Unsigned greater than */
    COND_LS = 0x9,  /* Unsigned less or equal */
    COND_GE = 0xa,
    COND_LT = 0xb,
    COND_GT = 0xc,
    COND_LE = 0xd,
    COND_AL = 0xe,
} ARMCond;

#define TO_CPSR (1 << 20)

#define SHIFT_IMM_LSL(im)   (((im) << 7) | 0x00)
#define SHIFT_IMM_LSR(im)   (((im) << 7) | 0x20)
#define SHIFT_IMM_ASR(im)   (((im) << 7) | 0x40)
#define SHIFT_IMM_ROR(im)   (((im) << 7) | 0x60)
#define SHIFT_REG_LSL(rs)   (((rs) << 8) | 0x10)
#define SHIFT_REG_LSR(rs)   (((rs) << 8) | 0x30)
#define SHIFT_REG_ASR(rs)   (((rs) << 8) | 0x50)
#define SHIFT_REG_ROR(rs)   (((rs) << 8) | 0x70)
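
/*
 * Illustrative note (not in the original source): these macros build the
 * shifter-operand field of a data-processing insn.  For example,
 * SHIFT_IMM_LSR(3) is (3 << 7) | 0x20 = 0x1a0, which OR'ed with an rm
 * register number encodes the operand "rm, lsr #3".  SHIFT_IMM_LSL(0)
 * is 0, the plain register operand used by tcg_out_mov_reg() below.
 */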

typedef enum {
    ARITH_AND = 0x0 << 21,
    ARITH_EOR = 0x1 << 21,
    ARITH_SUB = 0x2 << 21,
    ARITH_RSB = 0x3 << 21,
    ARITH_ADD = 0x4 << 21,
    ARITH_ADC = 0x5 << 21,
    ARITH_SBC = 0x6 << 21,
    ARITH_RSC = 0x7 << 21,
    ARITH_TST = 0x8 << 21 | TO_CPSR,
    ARITH_CMP = 0xa << 21 | TO_CPSR,
    ARITH_CMN = 0xb << 21 | TO_CPSR,
    ARITH_ORR = 0xc << 21,
    ARITH_MOV = 0xd << 21,
    ARITH_BIC = 0xe << 21,
    ARITH_MVN = 0xf << 21,

    INSN_B         = 0x0a000000,

    INSN_CLZ       = 0x016f0f10,
    INSN_RBIT      = 0x06ff0f30,

    INSN_LDMIA     = 0x08b00000,
    INSN_STMDB     = 0x09200000,

    INSN_LDR_IMM   = 0x04100000,
    INSN_LDR_REG   = 0x06100000,
    INSN_STR_IMM   = 0x04000000,
    INSN_STR_REG   = 0x06000000,

    INSN_LDRH_IMM  = 0x005000b0,
    INSN_LDRH_REG  = 0x001000b0,
    INSN_LDRSH_IMM = 0x005000f0,
    INSN_LDRSH_REG = 0x001000f0,
    INSN_STRH_IMM  = 0x004000b0,
    INSN_STRH_REG  = 0x000000b0,

    INSN_LDRB_IMM  = 0x04500000,
    INSN_LDRB_REG  = 0x06500000,
    INSN_LDRSB_IMM = 0x005000d0,
    INSN_LDRSB_REG = 0x001000d0,
    INSN_STRB_IMM  = 0x04400000,
    INSN_STRB_REG  = 0x06400000,

    INSN_LDRD_IMM  = 0x004000d0,
    INSN_LDRD_REG  = 0x000000d0,
    INSN_STRD_IMM  = 0x004000f0,
    INSN_STRD_REG  = 0x000000f0,

    INSN_DMB_ISH   = 0xf57ff05b,
    INSN_DMB_MCR   = 0xee070fba,

    /* Architected nop introduced in v6k.  */
    /* ??? This is an MSR (imm) 0,0,0 insn.  Anyone know if this
       also Just So Happened to do nothing on pre-v6k so that we
       don't need to conditionalize it?  */
    INSN_NOP_v6k   = 0xe320f000,
    /* Otherwise the assembler uses mov r0,r0 */
    INSN_NOP_v4    = (COND_AL << 28) | ARITH_MOV,

    INSN_VADD      = 0xf2000800,
    INSN_VAND      = 0xf2000110,
    INSN_VBIC      = 0xf2100110,
    INSN_VEOR      = 0xf3000110,
    INSN_VORN      = 0xf2300110,
    INSN_VORR      = 0xf2200110,
    INSN_VSUB      = 0xf3000800,
    INSN_VMUL      = 0xf2000910,
    INSN_VQADD     = 0xf2000010,
    INSN_VQADD_U   = 0xf3000010,
    INSN_VQSUB     = 0xf2000210,
    INSN_VQSUB_U   = 0xf3000210,
    INSN_VMAX      = 0xf2000600,
    INSN_VMAX_U    = 0xf3000600,
    INSN_VMIN      = 0xf2000610,
    INSN_VMIN_U    = 0xf3000610,

    INSN_VABS      = 0xf3b10300,
    INSN_VMVN      = 0xf3b00580,
    INSN_VNEG      = 0xf3b10380,

    INSN_VCEQ0     = 0xf3b10100,
    INSN_VCGT0     = 0xf3b10000,
    INSN_VCGE0     = 0xf3b10080,
    INSN_VCLE0     = 0xf3b10180,
    INSN_VCLT0     = 0xf3b10200,

    INSN_VCEQ      = 0xf3000810,
    INSN_VCGE      = 0xf2000310,
    INSN_VCGT      = 0xf2000300,
    INSN_VCGE_U    = 0xf3000310,
    INSN_VCGT_U    = 0xf3000300,

    INSN_VSHLI     = 0xf2800510,  /* VSHL (immediate) */
    INSN_VSARI     = 0xf2800010,  /* VSHR.S */
    INSN_VSHRI     = 0xf3800010,  /* VSHR.U */
    INSN_VSLI      = 0xf3800510,
    INSN_VSHL_S    = 0xf2000400,  /* VSHL.S (register) */
    INSN_VSHL_U    = 0xf3000400,  /* VSHL.U (register) */

    INSN_VBSL      = 0xf3100110,
    INSN_VBIT      = 0xf3200110,
    INSN_VBIF      = 0xf3300110,

    INSN_VTST      = 0xf2000810,

    INSN_VDUP_G    = 0xee800b10,  /* VDUP (ARM core register) */
    INSN_VDUP_S    = 0xf3b00c00,  /* VDUP (scalar) */
    INSN_VLDR_D    = 0xed100b00,  /* VLDR.64 */
    INSN_VLD1      = 0xf4200000,  /* VLD1 (multiple single elements) */
    INSN_VLD1R     = 0xf4a00c00,  /* VLD1 (single element to all lanes) */
    INSN_VST1      = 0xf4000000,  /* VST1 (multiple single elements) */
    INSN_VMOVI     = 0xf2800010,  /* VMOV (immediate) */
} ARMInsn;

#define INSN_NOP   (use_armv7_instructions ? INSN_NOP_v6k : INSN_NOP_v4)

static const uint8_t tcg_cond_to_arm_cond[] = {
    [TCG_COND_EQ] = COND_EQ,
    [TCG_COND_NE] = COND_NE,
    [TCG_COND_LT] = COND_LT,
    [TCG_COND_GE] = COND_GE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_GT,
    /* unsigned */
    [TCG_COND_LTU] = COND_CC,
    [TCG_COND_GEU] = COND_CS,
    [TCG_COND_LEU] = COND_LS,
    [TCG_COND_GTU] = COND_HI,
};

static int encode_imm(uint32_t imm);

/* TCG private relocation type: add with pc+imm8 */
#define R_ARM_PC8  11

/* TCG private relocation type: vldr with imm8 << 2 */
#define R_ARM_PC11 12

static bool reloc_pc24(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t offset = (tcg_ptr_byte_diff(target, src_rx) - 8) >> 2;

    if (offset == sextract32(offset, 0, 24)) {
        *src_rw = deposit32(*src_rw, 0, 24, offset);
        return true;
    }
    return false;
}
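
/*
 * Illustrative note (assumption, not in the original source): the -8 above
 * accounts for ARM's pipelined PC, which reads as the insn address plus 8.
 * A B insn at address X targeting X + 0x100 therefore stores
 * (0x100 - 8) >> 2 = 0x3e in its 24-bit field; the check merely verifies
 * that the word offset fits in that signed 24-bit field.
 */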

static bool reloc_pc13(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t offset = tcg_ptr_byte_diff(target, src_rx) - 8;

    if (offset >= -0xfff && offset <= 0xfff) {
        tcg_insn_unit insn = *src_rw;
        bool u = (offset >= 0);
        if (!u) {
            offset = -offset;
        }
        insn = deposit32(insn, 23, 1, u);
        insn = deposit32(insn, 0, 12, offset);
        *src_rw = insn;
        return true;
    }
    return false;
}

static bool reloc_pc11(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t offset = (tcg_ptr_byte_diff(target, src_rx) - 8) / 4;

    if (offset >= -0xff && offset <= 0xff) {
        tcg_insn_unit insn = *src_rw;
        bool u = (offset >= 0);
        if (!u) {
            offset = -offset;
        }
        insn = deposit32(insn, 23, 1, u);
        insn = deposit32(insn, 0, 8, offset);
        *src_rw = insn;
        return true;
    }
    return false;
}

static bool reloc_pc8(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t offset = tcg_ptr_byte_diff(target, src_rx) - 8;
    int imm12 = encode_imm(offset);

    if (imm12 >= 0) {
        *src_rw = deposit32(*src_rw, 0, 12, imm12);
        return true;
    }
    return false;
}

static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    tcg_debug_assert(addend == 0);
    switch (type) {
    case R_ARM_PC24:
        return reloc_pc24(code_ptr, (const tcg_insn_unit *)value);
    case R_ARM_PC13:
        return reloc_pc13(code_ptr, (const tcg_insn_unit *)value);
    case R_ARM_PC11:
        return reloc_pc11(code_ptr, (const tcg_insn_unit *)value);
    case R_ARM_PC8:
        return reloc_pc8(code_ptr, (const tcg_insn_unit *)value);
    default:
        g_assert_not_reached();
    }
}

#define TCG_CT_CONST_ARM  0x100
#define TCG_CT_CONST_INV  0x200
#define TCG_CT_CONST_NEG  0x400
#define TCG_CT_CONST_ZERO 0x800
#define TCG_CT_CONST_ORRI 0x1000
#define TCG_CT_CONST_ANDI 0x2000

#define ALL_GENERAL_REGS  0xffffu
#define ALL_VECTOR_REGS   0xffff0000u

/*
 * r0-r3 will be overwritten when reading the tlb entry (system-mode only);
 * r14 will be overwritten by the BLNE branching to the slow path.
 */
#define ALL_QLDST_REGS \
    (ALL_GENERAL_REGS & ~((tcg_use_softmmu ? 0xf : 0) | (1 << TCG_REG_R14)))

/*
 * ARM immediates for ALU instructions are made of an unsigned 8-bit
 * value right-rotated by an even amount between 0 and 30.
 *
 * Return < 0 if @imm cannot be encoded, else the entire imm12 field.
 */
static int encode_imm(uint32_t imm)
{
    uint32_t rot, imm8;

    /* Simple case, no rotation required. */
    if ((imm & ~0xff) == 0) {
        return imm;
    }

    /* Next, try a simple even shift.  */
    rot = ctz32(imm) & ~1;
    imm8 = imm >> rot;
    rot = 32 - rot;
    if ((imm8 & ~0xff) == 0) {
        goto found;
    }

    /*
     * Finally, try harder with rotations.
     * The ctz test above will have taken care of rotates >= 8.
     */
    for (rot = 2; rot < 8; rot += 2) {
        imm8 = rol32(imm, rot);
        if ((imm8 & ~0xff) == 0) {
            goto found;
        }
    }
    /* Fail: imm cannot be encoded. */
    return -1;

 found:
    /* rot is even, so rot << 7 is (rot >> 1) << 8: the 4-bit rotate field. */
    return rot << 7 | imm8;
}
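
/*
 * Worked examples (illustrative, not in the original source):
 *   encode_imm(0x000000ab) = 0x0ab  -- no rotation required.
 *   encode_imm(0x00ff0000) = 0x8ff  -- imm8 = 0xff with rotate field 8,
 *       i.e. "0xff ROR #16".
 *   encode_imm(0x00000101) = -1     -- the set bits span nine bits,
 *       which no rotated 8-bit value can cover.
 */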

static int encode_imm_nofail(uint32_t imm)
{
    int ret = encode_imm(imm);
    tcg_debug_assert(ret >= 0);
    return ret;
}

static bool check_fit_imm(uint32_t imm)
{
    return encode_imm(imm) >= 0;
}

/* Return true if v16 is a valid 16-bit shifted immediate.  */
static bool is_shimm16(uint16_t v16, int *cmode, int *imm8)
{
    if (v16 == (v16 & 0xff)) {
        *cmode = 0x8;
        *imm8 = v16 & 0xff;
        return true;
    } else if (v16 == (v16 & 0xff00)) {
        *cmode = 0xa;
        *imm8 = v16 >> 8;
        return true;
    }
    return false;
}

/* Return true if v32 is a valid 32-bit shifted immediate.  */
static bool is_shimm32(uint32_t v32, int *cmode, int *imm8)
{
    if (v32 == (v32 & 0xff)) {
        *cmode = 0x0;
        *imm8 = v32 & 0xff;
        return true;
    } else if (v32 == (v32 & 0xff00)) {
        *cmode = 0x2;
        *imm8 = (v32 >> 8) & 0xff;
        return true;
    } else if (v32 == (v32 & 0xff0000)) {
        *cmode = 0x4;
        *imm8 = (v32 >> 16) & 0xff;
        return true;
    } else if (v32 == (v32 & 0xff000000)) {
        *cmode = 0x6;
        *imm8 = v32 >> 24;
        return true;
    }
    return false;
}

/* Return true if v32 is a valid 32-bit shifting ones immediate.  */
static bool is_soimm32(uint32_t v32, int *cmode, int *imm8)
{
    if ((v32 & 0xffff00ff) == 0xff) {
        *cmode = 0xc;
        *imm8 = (v32 >> 8) & 0xff;
        return true;
    } else if ((v32 & 0xff00ffff) == 0xffff) {
        *cmode = 0xd;
        *imm8 = (v32 >> 16) & 0xff;
        return true;
    }
    return false;
}

/*
 * Return non-zero if v32 can be formed by MOVI+ORR.
 * Place the parameters for MOVI in (cmode, imm8).
 * Return the cmode for ORR; the imm8 can be had via extraction from v32.
 */
static int is_shimm32_pair(uint32_t v32, int *cmode, int *imm8)
{
    int i;

    for (i = 6; i > 0; i -= 2) {
        /* Mask out one byte we can add with ORR.  */
        uint32_t tmp = v32 & ~(0xffu << (i * 4));
        if (is_shimm32(tmp, cmode, imm8) ||
            is_soimm32(tmp, cmode, imm8)) {
            break;
        }
    }
    return i;
}
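
/*
 * Illustrative trace (assumption, not in the original source): for
 * v32 = 0x00ff00ab, masking out the byte at bits 23:16 leaves 0x000000ab,
 * which is_shimm32 accepts with cmode 0x0 and imm8 0xab; the function
 * returns i = 4, identifying the byte (here 0xff) that the following ORR
 * must supply.
 */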

/* Return true if V is a valid 16-bit or 32-bit shifted immediate.  */
static bool is_shimm1632(uint32_t v32, int *cmode, int *imm8)
{
    if (v32 == deposit32(v32, 16, 16, v32)) {
        return is_shimm16(v32, cmode, imm8);
    } else {
        return is_shimm32(v32, cmode, imm8);
    }
}

/* Test if a constant matches the constraint.
 * TODO: define constraints for:
 *
 * ldr/str offset:   between -0xfff and 0xfff
 * ldrh/strh offset: between -0xff and 0xff
 * mov operand2:     values represented with x << (2 * y), x < 0x100
 * add, sub, eor...: ditto
 */
static bool tcg_target_const_match(int64_t val, int ct,
                                   TCGType type, TCGCond cond, int vece)
{
    if (ct & TCG_CT_CONST) {
        return 1;
    } else if ((ct & TCG_CT_CONST_ARM) && check_fit_imm(val)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_INV) && check_fit_imm(~val)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_NEG) && check_fit_imm(-val)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return 1;
    }

    switch (ct & (TCG_CT_CONST_ORRI | TCG_CT_CONST_ANDI)) {
    case 0:
        break;
    case TCG_CT_CONST_ANDI:
        val = ~val;
        /* fallthru */
    case TCG_CT_CONST_ORRI:
        if (val == deposit64(val, 32, 32, val)) {
            int cmode, imm8;
            return is_shimm1632(val, &cmode, &imm8);
        }
        break;
    default:
        /* Both bits should not be set for the same insn.  */
        g_assert_not_reached();
    }

    return 0;
}
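
/*
 * Example (illustrative assumption, not in the original source): a vector
 * AND whose 64-bit constant replicates 0xffffff00 in each 32-bit lane
 * satisfies TCG_CT_CONST_ANDI, because the complement 0x000000ff is a
 * valid shifted immediate (cmode 0x0, imm8 0xff); the mask can then be
 * applied as a bit-clear with an immediate rather than being loaded
 * into a register first.
 */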

static void tcg_out_b_imm(TCGContext *s, ARMCond cond, int32_t offset)
{
    tcg_out32(s, (cond << 28) | INSN_B |
                    (((offset - 8) >> 2) & 0x00ffffff));
}

static void tcg_out_bl_imm(TCGContext *s, ARMCond cond, int32_t offset)
{
    tcg_out32(s, (cond << 28) | 0x0b000000 |
                    (((offset - 8) >> 2) & 0x00ffffff));
}

static void tcg_out_blx_reg(TCGContext *s, ARMCond cond, TCGReg rn)
{
    tcg_out32(s, (cond << 28) | 0x012fff30 | rn);
}

static void tcg_out_blx_imm(TCGContext *s, int32_t offset)
{
    tcg_out32(s, 0xfa000000 | ((offset & 2) << 23) |
                (((offset - 8) >> 2) & 0x00ffffff));
}

static void tcg_out_dat_reg(TCGContext *s, ARMCond cond, ARMInsn opc,
                            TCGReg rd, TCGReg rn, TCGReg rm, int shift)
{
    tcg_out32(s, (cond << 28) | (0 << 25) | opc |
                    (rn << 16) | (rd << 12) | shift | rm);
}

static void tcg_out_mov_reg(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rm)
{
    /* Simple reg-reg move, optimising out the 'do nothing' case */
    if (rd != rm) {
        tcg_out_dat_reg(s, cond, ARITH_MOV, rd, 0, rm, SHIFT_IMM_LSL(0));
    }
}

static void tcg_out_bx_reg(TCGContext *s, ARMCond cond, TCGReg rn)
{
    tcg_out32(s, (cond << 28) | 0x012fff10 | rn);
}

static void tcg_out_b_reg(TCGContext *s, ARMCond cond, TCGReg rn)
{
    /*
     * Unless the C portion of QEMU is compiled as thumb, we don't need
     * true BX semantics; merely a branch to an address held in a register.
     */
    tcg_out_bx_reg(s, cond, rn);
}

static void tcg_out_dat_imm(TCGContext *s, ARMCond cond, ARMInsn opc,
                            TCGReg rd, TCGReg rn, int im)
{
    tcg_out32(s, (cond << 28) | (1 << 25) | opc |
                    (rn << 16) | (rd << 12) | im);
}

static void tcg_out_ldstm(TCGContext *s, ARMCond cond, ARMInsn opc,
                          TCGReg rn, uint16_t mask)
{
    tcg_out32(s, (cond << 28) | opc | (rn << 16) | mask);
}

/* Note that this routine is used for both LDR and LDRH formats, so we do
   not wish to include an immediate shift at this point.  */
static void tcg_out_memop_r(TCGContext *s, ARMCond cond, ARMInsn opc, TCGReg rt,
                            TCGReg rn, TCGReg rm, bool u, bool p, bool w)
{
    tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24)
              | (w << 21) | (rn << 16) | (rt << 12) | rm);
}

static void tcg_out_memop_8(TCGContext *s, ARMCond cond, ARMInsn opc, TCGReg rt,
                            TCGReg rn, int imm8, bool p, bool w)
{
    bool u = 1;
    if (imm8 < 0) {
        imm8 = -imm8;
        u = 0;
    }
    tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) | (w << 21) |
              (rn << 16) | (rt << 12) | ((imm8 & 0xf0) << 4) | (imm8 & 0xf));
}

static void tcg_out_memop_12(TCGContext *s, ARMCond cond, ARMInsn opc,
                             TCGReg rt, TCGReg rn, int imm12, bool p, bool w)
{
    bool u = 1;
    if (imm12 < 0) {
        imm12 = -imm12;
        u = 0;
    }
    tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) | (w << 21) |
              (rn << 16) | (rt << 12) | imm12);
}

static void tcg_out_ld32_12(TCGContext *s, ARMCond cond, TCGReg rt,
                            TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_LDR_IMM, rt, rn, imm12, 1, 0);
}

static void tcg_out_st32_12(TCGContext *s, ARMCond cond, TCGReg rt,
                            TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_STR_IMM, rt, rn, imm12, 1, 0);
}

static void tcg_out_ld32_r(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_st32_r(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_ldrd_8(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRD_IMM, rt, rn, imm8, 1, 0);
}

static void tcg_out_ldrd_r(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRD_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_strd_8(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_STRD_IMM, rt, rn, imm8, 1, 0);
}

static void tcg_out_strd_r(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STRD_REG, rt, rn, rm, 1, 1, 0);
}

/* Register pre-increment with base writeback.  */
static void tcg_out_ld32_rwb(TCGContext *s, ARMCond cond, TCGReg rt,
                             TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 1);
}

static void tcg_out_st32_rwb(TCGContext *s, ARMCond cond, TCGReg rt,
                             TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 1);
}

static void tcg_out_ld16u_8(TCGContext *s, ARMCond cond, TCGReg rt,
                            TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRH_IMM, rt, rn, imm8, 1, 0);
}

static void tcg_out_st16_8(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_STRH_IMM, rt, rn, imm8, 1, 0);
}

static void tcg_out_ld16u_r(TCGContext *s, ARMCond cond, TCGReg rt,
                            TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRH_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_st16_r(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STRH_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_ld16s_8(TCGContext *s, ARMCond cond, TCGReg rt,
                            TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRSH_IMM, rt, rn, imm8, 1, 0);
}

static void tcg_out_ld16s_r(TCGContext *s, ARMCond cond, TCGReg rt,
                            TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRSH_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_ld8_12(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_LDRB_IMM, rt, rn, imm12, 1, 0);
}

static void tcg_out_st8_12(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_STRB_IMM, rt, rn, imm12, 1, 0);
}

static void tcg_out_ld8_r(TCGContext *s, ARMCond cond, TCGReg rt,
                          TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRB_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_st8_r(TCGContext *s, ARMCond cond, TCGReg rt,
                          TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STRB_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_ld8s_8(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRSB_IMM, rt, rn, imm8, 1, 0);
}

static void tcg_out_ld8s_r(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRSB_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_movi_pool(TCGContext *s, ARMCond cond,
                              TCGReg rd, uint32_t arg)
{
    new_pool_label(s, arg, R_ARM_PC13, s->code_ptr, 0);
    tcg_out_ld32_12(s, cond, rd, TCG_REG_PC, 0);
}

static void tcg_out_movi32(TCGContext *s, ARMCond cond,
                           TCGReg rd, uint32_t arg)
{
    int imm12, diff, opc, sh1, sh2;
    uint32_t tt0, tt1, tt2;

    /* Check a single MOV/MVN before anything else.  */
    imm12 = encode_imm(arg);
    if (imm12 >= 0) {
        tcg_out_dat_imm(s, cond, ARITH_MOV, rd, 0, imm12);
        return;
    }
    imm12 = encode_imm(~arg);
    if (imm12 >= 0) {
        tcg_out_dat_imm(s, cond, ARITH_MVN, rd, 0, imm12);
        return;
    }

    /* Check for a pc-relative address.  This will usually be the TB,
       or within the TB, which is immediately before the code block.  */
    diff = tcg_pcrel_diff(s, (void *)arg) - 8;
    if (diff >= 0) {
        imm12 = encode_imm(diff);
        if (imm12 >= 0) {
            tcg_out_dat_imm(s, cond, ARITH_ADD, rd, TCG_REG_PC, imm12);
            return;
        }
    } else {
        imm12 = encode_imm(-diff);
        if (imm12 >= 0) {
            tcg_out_dat_imm(s, cond, ARITH_SUB, rd, TCG_REG_PC, imm12);
            return;
        }
    }

    /* Use movw + movt.  */
    if (use_armv7_instructions) {
        /* movw */
        tcg_out32(s, (cond << 28) | 0x03000000 | (rd << 12)
                  | ((arg << 4) & 0x000f0000) | (arg & 0xfff));
        if (arg & 0xffff0000) {
            /* movt */
            tcg_out32(s, (cond << 28) | 0x03400000 | (rd << 12)
                      | ((arg >> 12) & 0x000f0000) | ((arg >> 16) & 0xfff));
        }
        return;
    }

    /* Look for sequences of two insns.  If we have lots of 1's, we can
       shorten the sequence by beginning with mvn and then clearing
       higher bits with eor.  */
    tt0 = arg;
    opc = ARITH_MOV;
    if (ctpop32(arg) > 16) {
        tt0 = ~arg;
        opc = ARITH_MVN;
    }
    sh1 = ctz32(tt0) & ~1;
    tt1 = tt0 & ~(0xff << sh1);
    sh2 = ctz32(tt1) & ~1;
    tt2 = tt1 & ~(0xff << sh2);
    if (tt2 == 0) {
        int rot;

        rot = ((32 - sh1) << 7) & 0xf00;
        tcg_out_dat_imm(s, cond, opc, rd,  0, ((tt0 >> sh1) & 0xff) | rot);
        rot = ((32 - sh2) << 7) & 0xf00;
        tcg_out_dat_imm(s, cond, ARITH_EOR, rd, rd,
                        ((tt0 >> sh2) & 0xff) | rot);
        return;
    }

    /* Otherwise, drop it into the constant pool.  */
    tcg_out_movi_pool(s, cond, rd, arg);
}
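
/*
 * Worked examples (illustrative, not in the original source):
 *   ARMv7, arg = 0x12345678:  movw rd, #0x5678 ; movt rd, #0x1234.
 *   Pre-v7, arg = 0x00ff00ff: ctpop32 is exactly 16, so MOV is kept;
 *       sh1 = 0 and sh2 = 16 leave tt2 == 0, giving
 *       "mov rd, #0xff" then "eor rd, rd, #0x00ff0000".
 */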

/*
 * Emit either the reg,imm or reg,reg form of a data-processing insn.
 * rhs must satisfy the "rI" constraint.
 */
static void tcg_out_dat_rI(TCGContext *s, ARMCond cond, ARMInsn opc,
                           TCGReg dst, TCGReg lhs, TCGArg rhs, int rhs_is_const)
{
    if (rhs_is_const) {
        tcg_out_dat_imm(s, cond, opc, dst, lhs, encode_imm_nofail(rhs));
    } else {
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
    }
}

/*
 * Emit either the reg,imm or reg,reg form of a data-processing insn.
 * rhs must satisfy the "rIK" constraint.
 */
static void tcg_out_dat_IK(TCGContext *s, ARMCond cond, ARMInsn opc,
                            ARMInsn opinv, TCGReg dst, TCGReg lhs, TCGArg rhs)
{
    int imm12 = encode_imm(rhs);
    if (imm12 < 0) {
        imm12 = encode_imm_nofail(~rhs);
        opc = opinv;
    }
    tcg_out_dat_imm(s, cond, opc, dst, lhs, imm12);
}

static void tcg_out_dat_rIK(TCGContext *s, ARMCond cond, ARMInsn opc,
                            ARMInsn opinv, TCGReg dst, TCGReg lhs, TCGArg rhs,
                            bool rhs_is_const)
{
    if (rhs_is_const) {
        tcg_out_dat_IK(s, cond, opc, opinv, dst, lhs, rhs);
    } else {
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
    }
}

static void tcg_out_dat_IN(TCGContext *s, ARMCond cond, ARMInsn opc,
                           ARMInsn opneg, TCGReg dst, TCGReg lhs, TCGArg rhs)
{
    int imm12 = encode_imm(rhs);
    if (imm12 < 0) {
        imm12 = encode_imm_nofail(-rhs);
        opc = opneg;
    }
    tcg_out_dat_imm(s, cond, opc, dst, lhs, imm12);
}

static void tcg_out_dat_rIN(TCGContext *s, ARMCond cond, ARMInsn opc,
                            ARMInsn opneg, TCGReg dst, TCGReg lhs, TCGArg rhs,
                            bool rhs_is_const)
{
    /* Emit either the reg,imm or reg,reg form of a data-processing insn.
     * rhs must satisfy the "rIN" constraint.
     */
    if (rhs_is_const) {
        tcg_out_dat_IN(s, cond, opc, opneg, dst, lhs, rhs);
    } else {
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
    }
}

static void tcg_out_umull32(TCGContext *s, ARMCond cond, TCGReg rd0,
                            TCGReg rd1, TCGReg rn, TCGReg rm)
{
    /* umull */
    tcg_out32(s, (cond << 28) | 0x00800090 |
              (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn);
}

static void tcg_out_ext8s(TCGContext *s, TCGType t, TCGReg rd, TCGReg rn)
{
    /* sxtb */
    tcg_out32(s, 0x06af0070 | (COND_AL << 28) | (rd << 12) | rn);
}

static void tcg_out_ext8u(TCGContext *s, TCGReg rd, TCGReg rn)
{
    tcg_out_dat_imm(s, COND_AL, ARITH_AND, rd, rn, 0xff);
}

static void tcg_out_ext16s(TCGContext *s, TCGType t, TCGReg rd, TCGReg rn)
{
    /* sxth */
    tcg_out32(s, 0x06bf0070 | (COND_AL << 28) | (rd << 12) | rn);
}

static void tcg_out_ext16u(TCGContext *s, TCGReg rd, TCGReg rn)
{
    /* uxth */
    tcg_out32(s, 0x06ff0070 | (COND_AL << 28) | (rd << 12) | rn);
}

static void tcg_out_ext32s(TCGContext *s, TCGReg rd, TCGReg rn)
{
    g_assert_not_reached();
}

static void tcg_out_ext32u(TCGContext *s, TCGReg rd, TCGReg rn)
{
    g_assert_not_reached();
}

static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg rd, TCGReg rn)
{
    g_assert_not_reached();
}

static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg rd, TCGReg rn)
{
    g_assert_not_reached();
}

static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rn)
{
    g_assert_not_reached();
}

static void tcg_out_bswap16(TCGContext *s, ARMCond cond,
                            TCGReg rd, TCGReg rn, int flags)
{
    if (flags & TCG_BSWAP_OS) {
        /* revsh */
        tcg_out32(s, 0x06ff0fb0 | (cond << 28) | (rd << 12) | rn);
        return;
    }

    /* rev16 */
    tcg_out32(s, 0x06bf0fb0 | (cond << 28) | (rd << 12) | rn);
    if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
        /* uxth */
        tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | rd);
    }
}

static void tcg_out_bswap32(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn)
{
    /* rev */
    tcg_out32(s, 0x06bf0f30 | (cond << 28) | (rd << 12) | rn);
}

static void tcg_out_deposit(TCGContext *s, ARMCond cond, TCGReg rd,
                            TCGArg a1, int ofs, int len, bool const_a1)
{
    if (const_a1) {
        /* bfi becomes bfc with rn == 15.  */
        a1 = 15;
    }
    /* bfi/bfc */
    tcg_out32(s, 0x07c00010 | (cond << 28) | (rd << 12) | a1
              | (ofs << 7) | ((ofs + len - 1) << 16));
}

static void tcg_out_extract(TCGContext *s, ARMCond cond, TCGReg rd,
                            TCGReg rn, int ofs, int len)
{
    /* According to gcc, AND can be faster. */
    if (ofs == 0 && len <= 8) {
        tcg_out_dat_imm(s, cond, ARITH_AND, rd, rn,
                        encode_imm_nofail((1 << len) - 1));
        return;
    }

    if (use_armv7_instructions) {
        /* ubfx */
        tcg_out32(s, 0x07e00050 | (cond << 28) | (rd << 12) | rn
                  | (ofs << 7) | ((len - 1) << 16));
        return;
    }

    assert(ofs % 8 == 0);
    switch (len) {
    case 8:
        /* uxtb */
        tcg_out32(s, 0x06ef0070 | (cond << 28) | (rd << 12) | (ofs << 7) | rn);
        break;
    case 16:
        /* uxth */
        tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | (ofs << 7) | rn);
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_sextract(TCGContext *s, ARMCond cond, TCGReg rd,
                             TCGReg rn, int ofs, int len)
{
    if (use_armv7_instructions) {
        /* sbfx */
        tcg_out32(s, 0x07a00050 | (cond << 28) | (rd << 12) | rn
                  | (ofs << 7) | ((len - 1) << 16));
        return;
    }

    assert(ofs % 8 == 0);
    switch (len) {
    case 8:
        /* sxtb */
        tcg_out32(s, 0x06af0070 | (cond << 28) | (rd << 12) | (ofs << 7) | rn);
        break;
    case 16:
        /* sxth */
        tcg_out32(s, 0x06bf0070 | (cond << 28) | (rd << 12) | (ofs << 7) | rn);
        break;
    default:
        g_assert_not_reached();
    }
}
static void tcg_out_ld32u(TCGContext *s, ARMCond cond,
                          TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld32_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_ld32_12(s, cond, rd, rn, offset);
    }
}

static void tcg_out_st32(TCGContext *s, ARMCond cond,
                         TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_st32_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_st32_12(s, cond, rd, rn, offset);
    }
}

static void tcg_out_ld16u(TCGContext *s, ARMCond cond,
                          TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld16u_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_ld16u_8(s, cond, rd, rn, offset);
    }
}

static void tcg_out_ld16s(TCGContext *s, ARMCond cond,
                          TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld16s_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_ld16s_8(s, cond, rd, rn, offset);
    }
}

static void tcg_out_st16(TCGContext *s, ARMCond cond,
                         TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_st16_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_st16_8(s, cond, rd, rn, offset);
    }
}

static void tcg_out_ld8u(TCGContext *s, ARMCond cond,
                         TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld8_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_ld8_12(s, cond, rd, rn, offset);
    }
}

static void tcg_out_ld8s(TCGContext *s, ARMCond cond,
                         TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld8s_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_ld8s_8(s, cond, rd, rn, offset);
    }
}

static void tcg_out_st8(TCGContext *s, ARMCond cond,
                        TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_st8_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_st8_12(s, cond, rd, rn, offset);
    }
}

/*
 * The _goto case is normally between TBs within the same code buffer, and
 * with the code buffer limited to 16MB we wouldn't need the long case.
 * But we also use it for the tail-call to the qemu_ld/st helpers, which
 * can be out of range.
 */
static void tcg_out_goto(TCGContext *s, ARMCond cond, const tcg_insn_unit *addr)
{
    intptr_t addri = (intptr_t)addr;
    ptrdiff_t disp = tcg_pcrel_diff(s, addr);
    bool arm_mode = !(addri & 1);

    if (arm_mode && disp - 8 < 0x01fffffd && disp - 8 > -0x01fffffd) {
        tcg_out_b_imm(s, cond, disp);
        return;
    }

    /* LDR is interworking from v5t. */
    tcg_out_movi_pool(s, cond, TCG_REG_PC, addri);
}

/*
 * The call case is mostly used for helpers - so it's not unreasonable
 * for them to be beyond branch range.
 */
static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *addr)
{
    intptr_t addri = (intptr_t)addr;
    ptrdiff_t disp = tcg_pcrel_diff(s, addr);
    bool arm_mode = !(addri & 1);

    if (disp - 8 < 0x02000000 && disp - 8 >= -0x02000000) {
        if (arm_mode) {
            tcg_out_bl_imm(s, COND_AL, disp);
        } else {
            tcg_out_blx_imm(s, disp);
        }
        return;
    }

    tcg_out_movi32(s, COND_AL, TCG_REG_TMP, addri);
    tcg_out_blx_reg(s, COND_AL, TCG_REG_TMP);
}

static void tcg_out_call(TCGContext *s, const tcg_insn_unit *addr,
                         const TCGHelperInfo *info)
{
    tcg_out_call_int(s, addr);
}

static void tcg_out_goto_label(TCGContext *s, ARMCond cond, TCGLabel *l)
{
    if (l->has_value) {
        tcg_out_goto(s, cond, l->u.value_ptr);
    } else {
        tcg_out_reloc(s, s->code_ptr, R_ARM_PC24, l, 0);
        tcg_out_b_imm(s, cond, 0);
    }
}

static void tcg_out_mb(TCGContext *s, TCGArg a0)
{
    if (use_armv7_instructions) {
        tcg_out32(s, INSN_DMB_ISH);
    } else {
        tcg_out32(s, INSN_DMB_MCR);
    }
}

static TCGCond tcg_out_cmp(TCGContext *s, TCGCond cond, TCGReg a,
                           TCGArg b, int b_const)
{
    if (!is_tst_cond(cond)) {
        tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0, a, b, b_const);
        return cond;
    }

    cond = tcg_tst_eqne_cond(cond);
    if (b_const) {
        int imm12 = encode_imm(b);

        /*
         * The compare constraints allow rIN, but TST does not support N.
         * Be prepared to load the constant into a scratch register.
         */
        if (imm12 >= 0) {
            tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, a, imm12);
            return cond;
        }
        tcg_out_movi32(s, COND_AL, TCG_REG_TMP, b);
        b = TCG_REG_TMP;
    }
    tcg_out_dat_reg(s, COND_AL, ARITH_TST, 0, a, b, SHIFT_IMM_LSL(0));
    return cond;
}

static TCGCond tcg_out_cmp2(TCGContext *s, const TCGArg *args,
                            const int *const_args)
{
    TCGReg al = args[0];
    TCGReg ah = args[1];
    TCGArg bl = args[2];
    TCGArg bh = args[3];
    TCGCond cond = args[4];
    int const_bl = const_args[2];
    int const_bh = const_args[3];

    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
    case TCG_COND_LTU:
    case TCG_COND_LEU:
    case TCG_COND_GTU:
    case TCG_COND_GEU:
        /*
         * We perform a conditional comparison.  If the high half is
         * equal, then overwrite the flags with the comparison of the
         * low half.  The resulting flags cover the whole.
         */
        tcg_out_dat_rI(s, COND_AL, ARITH_CMP, 0, ah, bh, const_bh);
        tcg_out_dat_rI(s, COND_EQ, ARITH_CMP, 0, al, bl, const_bl);
        return cond;

    case TCG_COND_TSTEQ:
    case TCG_COND_TSTNE:
        /* Similar, but with TST instead of CMP. */
        tcg_out_dat_rI(s, COND_AL, ARITH_TST, 0, ah, bh, const_bh);
        tcg_out_dat_rI(s, COND_EQ, ARITH_TST, 0, al, bl, const_bl);
        return tcg_tst_eqne_cond(cond);

    case TCG_COND_LT:
    case TCG_COND_GE:
        /* We perform a double-word subtraction and examine the result.
           We do not actually need the result of the subtract, so the
           low part "subtract" is a compare.  For the high half we have
           no choice but to compute into a temporary.  */
        tcg_out_dat_rI(s, COND_AL, ARITH_CMP, 0, al, bl, const_bl);
        tcg_out_dat_rI(s, COND_AL, ARITH_SBC | TO_CPSR,
                       TCG_REG_TMP, ah, bh, const_bh);
        return cond;

    case TCG_COND_LE:
    case TCG_COND_GT:
        /* Similar, but with swapped arguments, via reversed subtract.  */
        tcg_out_dat_rI(s, COND_AL, ARITH_RSB | TO_CPSR,
                       TCG_REG_TMP, al, bl, const_bl);
        tcg_out_dat_rI(s, COND_AL, ARITH_RSC | TO_CPSR,
                       TCG_REG_TMP, ah, bh, const_bh);
        return tcg_swap_cond(cond);

    default:
        g_assert_not_reached();
    }
}

/*
 * Note that TCGReg references Q-registers.
 * D-regno = 2 * Q-regno, so shift left by 1 while inserting.
 */
static uint32_t encode_vd(TCGReg rd)
{
    tcg_debug_assert(rd >= TCG_REG_Q0);
    return (extract32(rd, 3, 1) << 22) | (extract32(rd, 0, 3) << 13);
}

static uint32_t encode_vn(TCGReg rn)
{
    tcg_debug_assert(rn >= TCG_REG_Q0);
    return (extract32(rn, 3, 1) << 7) | (extract32(rn, 0, 3) << 17);
}

static uint32_t encode_vm(TCGReg rm)
{
    tcg_debug_assert(rm >= TCG_REG_Q0);
    return (extract32(rm, 3, 1) << 5) | (extract32(rm, 0, 3) << 1);
}
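
/*
 * Illustrative check (assumption, not in the original source): Q5 maps to
 * D10, so encode_vd(TCG_REG_Q5) places 0b1010 in the Vd field at bits
 * 15:12 (0xa000) with the D bit (bit 22) clear -- exactly
 * extract32(rd, 0, 3) << 13 for rd = TCG_REG_Q0 + 5.
 */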

static void tcg_out_vreg2(TCGContext *s, ARMInsn insn, int q, int vece,
                          TCGReg d, TCGReg m)
{
    tcg_out32(s, insn | (vece << 18) | (q << 6) |
              encode_vd(d) | encode_vm(m));
}

static void tcg_out_vreg3(TCGContext *s, ARMInsn insn, int q, int vece,
                          TCGReg d, TCGReg n, TCGReg m)
{
    tcg_out32(s, insn | (vece << 20) | (q << 6) |
              encode_vd(d) | encode_vn(n) | encode_vm(m));
}

static void tcg_out_vmovi(TCGContext *s, TCGReg rd,
                          int q, int op, int cmode, uint8_t imm8)
{
    tcg_out32(s, INSN_VMOVI | encode_vd(rd) | (q << 6) | (op << 5)
              | (cmode << 8) | extract32(imm8, 0, 4)
              | (extract32(imm8, 4, 3) << 16)
              | (extract32(imm8, 7, 1) << 24));
}

static void tcg_out_vshifti(TCGContext *s, ARMInsn insn, int q,
                            TCGReg rd, TCGReg rm, int l_imm6)
{
    tcg_out32(s, insn | (q << 6) | encode_vd(rd) | encode_vm(rm) |
              (extract32(l_imm6, 6, 1) << 7) |
              (extract32(l_imm6, 0, 6) << 16));
}

static void tcg_out_vldst(TCGContext *s, ARMInsn insn,
                          TCGReg rd, TCGReg rn, int offset)
{
    if (offset != 0) {
        if (check_fit_imm(offset) || check_fit_imm(-offset)) {
            tcg_out_dat_rIN(s, COND_AL, ARITH_ADD, ARITH_SUB,
                            TCG_REG_TMP, rn, offset, true);
        } else {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, offset);
            tcg_out_dat_reg(s, COND_AL, ARITH_ADD,
                            TCG_REG_TMP, TCG_REG_TMP, rn, 0);
        }
        rn = TCG_REG_TMP;
    }
    tcg_out32(s, insn | (rn << 16) | encode_vd(rd) | 0xf);
}

typedef struct {
    ARMCond cond;
    TCGReg base;
    int index;
    bool index_scratch;
    TCGAtomAlign aa;
} HostAddress;

bool tcg_target_has_memory_bswap(MemOp memop)
{
    return false;
}

static TCGReg ldst_ra_gen(TCGContext *s, const TCGLabelQemuLdst *l, int arg)
{
    /* We arrive at the slow path via "BLNE", so R14 contains l->raddr. */
    return TCG_REG_R14;
}

static const TCGLdstHelperParam ldst_helper_param = {
    .ra_gen = ldst_ra_gen,
    .ntmp = 1,
    .tmp = { TCG_REG_TMP },
};

static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    MemOp opc = get_memop(lb->oi);

    if (!reloc_pc24(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    tcg_out_ld_helper_args(s, lb, &ldst_helper_param);
    tcg_out_call_int(s, qemu_ld_helpers[opc & MO_SIZE]);
    tcg_out_ld_helper_ret(s, lb, false, &ldst_helper_param);

    tcg_out_goto(s, COND_AL, lb->raddr);
    return true;
}

static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    MemOp opc = get_memop(lb->oi);

    if (!reloc_pc24(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    tcg_out_st_helper_args(s, lb, &ldst_helper_param);

    /* Tail-call to the helper, which will return to the fast path.  */
    tcg_out_goto(s, COND_AL, qemu_st_helpers[opc & MO_SIZE]);
    return true;
}

/* We expect to use a 9-bit sign-magnitude negative offset from ENV.  */
#define MIN_TLB_MASK_TABLE_OFS  -256

static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
                                           TCGReg addr, MemOpIdx oi, bool is_ld)
{
    TCGLabelQemuLdst *ldst = NULL;
    MemOp opc = get_memop(oi);
    unsigned a_mask;

    if (tcg_use_softmmu) {
        *h = (HostAddress){
            .cond = COND_AL,
            .base = addr,
            .index = TCG_REG_R1,
            .index_scratch = true,
        };
    } else {
        *h = (HostAddress){
            .cond = COND_AL,
            .base = addr,
            .index = guest_base ? TCG_REG_GUEST_BASE : -1,
            .index_scratch = false,
        };
    }

    h->aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, false);
    a_mask = (1 << h->aa.align) - 1;

    if (tcg_use_softmmu) {
        int mem_index = get_mmuidx(oi);
        int cmp_off = is_ld ? offsetof(CPUTLBEntry, addr_read)
                            : offsetof(CPUTLBEntry, addr_write);
        int fast_off = tlb_mask_table_ofs(s, mem_index);
        unsigned s_mask = (1 << (opc & MO_SIZE)) - 1;
        TCGReg t_addr;

        ldst = new_ldst_label(s);
        ldst->is_ld = is_ld;
        ldst->oi = oi;
        ldst->addr_reg = addr;

        /* Load cpu->neg.tlb.f[mmu_idx].{mask,table} into {r0,r1}.  */
        QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, mask) != 0);
        QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, table) != 4);
        tcg_out_ldrd_8(s, COND_AL, TCG_REG_R0, TCG_AREG0, fast_off);

        /* Extract the tlb index from the address into R0.  */
        tcg_out_dat_reg(s, COND_AL, ARITH_AND, TCG_REG_R0, TCG_REG_R0, addr,
                        SHIFT_IMM_LSR(s->page_bits - CPU_TLB_ENTRY_BITS));

        /*
         * Add the tlb_table pointer, creating the CPUTLBEntry address in R1.
         * Load the tlb comparator into R2 and the fast path addend into R1.
         */
        if (cmp_off == 0) {
            tcg_out_ld32_rwb(s, COND_AL, TCG_REG_R2, TCG_REG_R1, TCG_REG_R0);
        } else {
            tcg_out_dat_reg(s, COND_AL, ARITH_ADD,
                            TCG_REG_R1, TCG_REG_R1, TCG_REG_R0, 0);
            tcg_out_ld32_12(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off);
        }

        /* Load the tlb addend.  */
        tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R1,
                        offsetof(CPUTLBEntry, addend));

        /*
         * Check alignment, check comparators.
         * Do this in 2-4 insns.  Use MOVW for v7, if possible,
         * to reduce the number of sequential conditional instructions.
         * Almost all guests have at least 4k pages, which means that we need
         * to clear at least 9 bits even for an 8-byte memory access, which
         * means it isn't worth checking for an immediate operand for BIC.
         *
         * For unaligned accesses, test the page of the last unit of alignment.
         * This leaves the least significant alignment bits unchanged, and of
         * course must be zero.
         */
        t_addr = addr;
        if (a_mask < s_mask) {
            t_addr = TCG_REG_R0;
            tcg_out_dat_imm(s, COND_AL, ARITH_ADD, t_addr,
                            addr, s_mask - a_mask);
        }
        if (use_armv7_instructions && s->page_bits <= 16) {
            tcg_out_movi32(s, COND_AL, TCG_REG_TMP, ~(s->page_mask | a_mask));
            tcg_out_dat_reg(s, COND_AL, ARITH_BIC, TCG_REG_TMP,
                            t_addr, TCG_REG_TMP, 0);
            tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
                            TCG_REG_R2, TCG_REG_TMP, 0);
        } else {
            if (a_mask) {
                tcg_debug_assert(a_mask <= 0xff);
                tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addr, a_mask);
            }
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0, t_addr,
                            SHIFT_IMM_LSR(s->page_bits));
            tcg_out_dat_reg(s, (a_mask ? COND_EQ : COND_AL), ARITH_CMP,
                            0, TCG_REG_R2, TCG_REG_TMP,
                            SHIFT_IMM_LSL(s->page_bits));
        }
    } else if (a_mask) {
        ldst = new_ldst_label(s);
        ldst->is_ld = is_ld;
        ldst->oi = oi;
        ldst->addr_reg = addr;

        /* We are expecting alignment to max out at 7 */
        tcg_debug_assert(a_mask <= 0xff);
        /* tst addr, #mask */
        tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addr, a_mask);
    }

    return ldst;
}

static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg datalo,
                                   TCGReg datahi, HostAddress h)
{
    TCGReg base;

    /* Byte swapping is left to middle-end expansion. */
    tcg_debug_assert((opc & MO_BSWAP) == 0);

    switch (opc & MO_SSIZE) {
    case MO_UB:
        if (h.index < 0) {
            tcg_out_ld8_12(s, h.cond, datalo, h.base, 0);
        } else {
            tcg_out_ld8_r(s, h.cond, datalo, h.base, h.index);
        }
        break;
    case MO_SB:
        if (h.index < 0) {
            tcg_out_ld8s_8(s, h.cond, datalo, h.base, 0);
        } else {
            tcg_out_ld8s_r(s, h.cond, datalo, h.base, h.index);
        }
        break;
    case MO_UW:
        if (h.index < 0) {
            tcg_out_ld16u_8(s, h.cond, datalo, h.base, 0);
        } else {
            tcg_out_ld16u_r(s, h.cond, datalo, h.base, h.index);
        }
        break;
    case MO_SW:
        if (h.index < 0) {
            tcg_out_ld16s_8(s, h.cond, datalo, h.base, 0);
        } else {
            tcg_out_ld16s_r(s, h.cond, datalo, h.base, h.index);
        }
        break;
    case MO_UL:
        if (h.index < 0) {
            tcg_out_ld32_12(s, h.cond, datalo, h.base, 0);
        } else {
            tcg_out_ld32_r(s, h.cond, datalo, h.base, h.index);
        }
        break;
    case MO_UQ:
        /* We used pair allocation for datalo, so it should be aligned. */
        tcg_debug_assert((datalo & 1) == 0);
        tcg_debug_assert(datahi == datalo + 1);
        /* LDRD requires alignment; double-check that. */
        if (memop_alignment_bits(opc) >= MO_64) {
            if (h.index < 0) {
                tcg_out_ldrd_8(s, h.cond, datalo, h.base, 0);
                break;
            }
            /*
             * Rm (the second address op) must not overlap Rt or Rt + 1.
             * Since datalo is aligned, we can simplify the test via alignment.
             * Flip the two address arguments if that works.
             */
            if ((h.index & ~1) != datalo) {
                tcg_out_ldrd_r(s, h.cond, datalo, h.base, h.index);
                break;
            }
            if ((h.base & ~1) != datalo) {
                tcg_out_ldrd_r(s, h.cond, datalo, h.index, h.base);
                break;
            }
        }
        if (h.index < 0) {
            base = h.base;
            if (datalo == h.base) {
                tcg_out_mov_reg(s, h.cond, TCG_REG_TMP, base);
                base = TCG_REG_TMP;
            }
        } else if (h.index_scratch) {
            tcg_out_ld32_rwb(s, h.cond, datalo, h.index, h.base);
            tcg_out_ld32_12(s, h.cond, datahi, h.index, 4);
            break;
        } else {
            tcg_out_dat_reg(s, h.cond, ARITH_ADD, TCG_REG_TMP,
                            h.base, h.index, SHIFT_IMM_LSL(0));
            base = TCG_REG_TMP;
        }
        tcg_out_ld32_12(s, h.cond, datalo, base, 0);
        tcg_out_ld32_12(s, h.cond, datahi, base, 4);
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi,
                            TCGReg addr, MemOpIdx oi, TCGType data_type)
{
    MemOp opc = get_memop(oi);
    TCGLabelQemuLdst *ldst;
    HostAddress h;

    ldst = prepare_host_addr(s, &h, addr, oi, true);
    if (ldst) {
        ldst->type = data_type;
        ldst->datalo_reg = datalo;
        ldst->datahi_reg = datahi;

        /*
         * This is a conditional BL, only to load a pointer within this
         * opcode into LR for the slow path.  We will not be using
         * the value for a tail call.
         */
        ldst->label_ptr[0] = s->code_ptr;
        tcg_out_bl_imm(s, COND_NE, 0);

        tcg_out_qemu_ld_direct(s, opc, datalo, datahi, h);
        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
    } else {
        tcg_out_qemu_ld_direct(s, opc, datalo, datahi, h);
    }
}

static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg datalo,
                                   TCGReg datahi, HostAddress h)
{
    /* Byte swapping is left to middle-end expansion. */
    tcg_debug_assert((opc & MO_BSWAP) == 0);

    switch (opc & MO_SIZE) {
    case MO_8:
        if (h.index < 0) {
            tcg_out_st8_12(s, h.cond, datalo, h.base, 0);
        } else {
            tcg_out_st8_r(s, h.cond, datalo, h.base, h.index);
        }
        break;
    case MO_16:
        if (h.index < 0) {
            tcg_out_st16_8(s, h.cond, datalo, h.base, 0);
        } else {
            tcg_out_st16_r(s, h.cond, datalo, h.base, h.index);
        }
        break;
    case MO_32:
        if (h.index < 0) {
            tcg_out_st32_12(s, h.cond, datalo, h.base, 0);
        } else {
            tcg_out_st32_r(s, h.cond, datalo, h.base, h.index);
        }
        break;
    case MO_64:
1698        /* We used pair allocation for datalo, so it should already be aligned. */
1699        tcg_debug_assert((datalo & 1) == 0);
1700        tcg_debug_assert(datahi == datalo + 1);
1701        /* STRD requires alignment; double-check that. */
1702        if (memop_alignment_bits(opc) >= MO_64) {
1703            if (h.index < 0) {
1704                tcg_out_strd_8(s, h.cond, datalo, h.base, 0);
1705            } else {
1706                tcg_out_strd_r(s, h.cond, datalo, h.base, h.index);
1707            }
1708        } else if (h.index < 0) {
1709            tcg_out_st32_12(s, h.cond, datalo, h.base, 0);
1710            tcg_out_st32_12(s, h.cond, datahi, h.base, 4);
1711        } else if (h.index_scratch) {
1712            tcg_out_st32_rwb(s, h.cond, datalo, h.index, h.base);
1713            tcg_out_st32_12(s, h.cond, datahi, h.index, 4);
1714        } else {
1715            tcg_out_dat_reg(s, h.cond, ARITH_ADD, TCG_REG_TMP,
1716                            h.base, h.index, SHIFT_IMM_LSL(0));
1717            tcg_out_st32_12(s, h.cond, datalo, TCG_REG_TMP, 0);
1718            tcg_out_st32_12(s, h.cond, datahi, TCG_REG_TMP, 4);
1719        }
1720        break;
1721    default:
1722        g_assert_not_reached();
1723    }
1724}
1725
1726static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi,
1727                            TCGReg addr, MemOpIdx oi, TCGType data_type)
1728{
1729    MemOp opc = get_memop(oi);
1730    TCGLabelQemuLdst *ldst;
1731    HostAddress h;
1732
1733    ldst = prepare_host_addr(s, &h, addr, oi, false);
1734    if (ldst) {
1735        ldst->type = data_type;
1736        ldst->datalo_reg = datalo;
1737        ldst->datahi_reg = datahi;
1738
1739        h.cond = COND_EQ;
1740        tcg_out_qemu_st_direct(s, opc, datalo, datahi, h);
1741
1742        /* The conditional call is last, as we're going to return here. */
1743        ldst->label_ptr[0] = s->code_ptr;
1744        tcg_out_bl_imm(s, COND_NE, 0);
1745        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
1746    } else {
1747        tcg_out_qemu_st_direct(s, opc, datalo, datahi, h);
1748    }
1749}
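/*
 * For stores the ordering above is reversed from the load case:
 * forcing h.cond to COND_EQ predicates the store itself on the TLB
 * compare, so it can precede the conditional call and is simply
 * skipped on a miss.  Sketch, again illustrative only:
 *
 *     cmp   ...                    @ TLB compare, sets Z on a hit
 *     streq datalo, [base, ...]    @ performed only on a hit
 *     blne  <slow-path stub>       @ miss: take the slow path
 *   raddr:                         @ slow path resumes here
 */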
1750
1751static void tcg_out_epilogue(TCGContext *s);
1752
1753static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg)
1754{
1755    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, arg);
1756    tcg_out_epilogue(s);
1757}
1758
1759static void tcg_out_goto_tb(TCGContext *s, int which)
1760{
1761    uintptr_t i_addr;
1762    intptr_t i_disp;
1763
1764    /* Direct branch will be patched by tb_target_set_jmp_target. */
1765    set_jmp_insn_offset(s, which);
1766    tcg_out32(s, INSN_NOP);
1767
1768    /* When the branch is out of range, fall through to the indirect branch. */
1769    i_addr = get_jmp_target_addr(s, which);
1770    i_disp = tcg_pcrel_diff(s, (void *)i_addr) - 8;
1771    tcg_debug_assert(i_disp < 0);
1772    if (i_disp >= -0xfff) {
1773        tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, i_disp);
1774    } else {
1775        /*
1776         * The TB is close, but outside the 12 bits addressable by
1777         * the load.  We can extend this to 20 bits with a sub of a
1778         * shifted immediate from pc.
1779         */
1780        int h = -i_disp;
1781        int l = -(h & 0xfff);
1782
1783        h = encode_imm_nofail(h + l);
1784        tcg_out_dat_imm(s, COND_AL, ARITH_SUB, TCG_REG_R0, TCG_REG_PC, h);
1785        tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_R0, l);
1786    }
1787    set_jmp_reset_offset(s, which);
1788}
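/*
 * Worked example for the 20-bit extension above, taking
 * i_disp == -0x12345: h = 0x12345, l = -0x345, h + l = 0x12000,
 * which encodes as the rotated immediate 0x12 << 12.  We emit
 *
 *     sub  r0, pc, #0x12000
 *     ldr  pc, [r0, #-0x345]
 *
 * and the loaded address is (pc_of_sub + 8) - 0x12345, i.e. i_addr.
 */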
1789
1790void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
1791                              uintptr_t jmp_rx, uintptr_t jmp_rw)
1792{
1793    uintptr_t addr = tb->jmp_target_addr[n];
1794    ptrdiff_t offset = addr - (jmp_rx + 8);
1795    tcg_insn_unit insn;
1796
1797    /* Either directly branch, or fall through to indirect branch. */
1798    if (offset == sextract64(offset, 0, 26)) {
1799        /* B <addr> */
1800        insn = deposit32((COND_AL << 28) | INSN_B, 0, 24, offset >> 2);
1801    } else {
1802        insn = INSN_NOP;
1803    }
1804
1805    qatomic_set((uint32_t *)jmp_rw, insn);
1806    flush_idcache_range(jmp_rx, jmp_rw, 4);
1807}
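/*
 * Example of the direct-branch patch above: B reaches +/-32MB via a
 * signed 26-bit byte offset stored as imm24 (offset >> 2).  For
 * offset == 0x1000 the patched word is 0xea000400: COND_AL (0xe),
 * the B opcode, and imm24 == 0x400.
 */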
1808
1809
1810static void tgen_add(TCGContext *s, TCGType type,
1811                     TCGReg a0, TCGReg a1, TCGReg a2)
1812{
1813    tcg_out_dat_reg(s, COND_AL, ARITH_ADD, a0, a1, a2, SHIFT_IMM_LSL(0));
1814}
1815
1816static void tgen_addi(TCGContext *s, TCGType type,
1817                      TCGReg a0, TCGReg a1, tcg_target_long a2)
1818{
1819    tcg_out_dat_IN(s, COND_AL, ARITH_ADD, ARITH_SUB, a0, a1, a2);
1820}
1821
1822static const TCGOutOpBinary outop_add = {
1823    .base.static_constraint = C_O1_I2(r, r, rIN),
1824    .out_rrr = tgen_add,
1825    .out_rri = tgen_addi,
1826};
1827
1828static void tgen_and(TCGContext *s, TCGType type,
1829                     TCGReg a0, TCGReg a1, TCGReg a2)
1830{
1831    tcg_out_dat_reg(s, COND_AL, ARITH_AND, a0, a1, a2, SHIFT_IMM_LSL(0));
1832}
1833
1834static void tgen_andi(TCGContext *s, TCGType type,
1835                      TCGReg a0, TCGReg a1, tcg_target_long a2)
1836{
1837    tcg_out_dat_IK(s, COND_AL, ARITH_AND, ARITH_BIC, a0, a1, a2);
1838}
1839
1840static const TCGOutOpBinary outop_and = {
1841    .base.static_constraint = C_O1_I2(r, r, rIK),
1842    .out_rrr = tgen_and,
1843    .out_rri = tgen_andi,
1844};
1845
1846static void tgen_andc(TCGContext *s, TCGType type,
1847                      TCGReg a0, TCGReg a1, TCGReg a2)
1848{
1849    tcg_out_dat_reg(s, COND_AL, ARITH_BIC, a0, a1, a2, SHIFT_IMM_LSL(0));
1850}
1851
1852static const TCGOutOpBinary outop_andc = {
1853    .base.static_constraint = C_O1_I2(r, r, r),
1854    .out_rrr = tgen_andc,
1855};
1856
1857static void tgen_clz(TCGContext *s, TCGType type,
1858                     TCGReg a0, TCGReg a1, TCGReg a2)
1859{
1860    tcg_out_dat_imm(s, COND_AL, ARITH_CMP, 0, a1, 0);
1861    tcg_out_dat_reg(s, COND_NE, INSN_CLZ, a0, 0, a1, 0);
1862    tcg_out_mov_reg(s, COND_EQ, a0, a2);
1863}
1864
1865static void tgen_clzi(TCGContext *s, TCGType type,
1866                      TCGReg a0, TCGReg a1, tcg_target_long a2)
1867{
1868    if (a2 == 32) {
1869        tcg_out_dat_reg(s, COND_AL, INSN_CLZ, a0, 0, a1, 0);
1870    } else {
1871        tcg_out_dat_imm(s, COND_AL, ARITH_CMP, 0, a1, 0);
1872        tcg_out_dat_reg(s, COND_NE, INSN_CLZ, a0, 0, a1, 0);
1873        tcg_out_movi32(s, COND_EQ, a0, a2);
1874    }
1875}
1876
1877static const TCGOutOpBinary outop_clz = {
1878    .base.static_constraint = C_O1_I2(r, r, rIK),
1879    .out_rrr = tgen_clz,
1880    .out_rri = tgen_clzi,
1881};
1882
1883static const TCGOutOpUnary outop_ctpop = {
1884    .base.static_constraint = C_NotImplemented,
1885};
1886
1887static void tgen_ctz(TCGContext *s, TCGType type,
1888                     TCGReg a0, TCGReg a1, TCGReg a2)
1889{
1890    tcg_out_dat_reg(s, COND_AL, INSN_RBIT, TCG_REG_TMP, 0, a1, 0);
1891    tgen_clz(s, TCG_TYPE_I32, a0, TCG_REG_TMP, a2);
1892}
1893
1894static void tgen_ctzi(TCGContext *s, TCGType type,
1895                      TCGReg a0, TCGReg a1, tcg_target_long a2)
1896{
1897    tcg_out_dat_reg(s, COND_AL, INSN_RBIT, TCG_REG_TMP, 0, a1, 0);
1898    tgen_clzi(s, TCG_TYPE_I32, a0, TCG_REG_TMP, a2);
1899}
1900
1901static TCGConstraintSetIndex cset_ctz(TCGType type, unsigned flags)
1902{
1903    return use_armv7_instructions ? C_O1_I2(r, r, rIK) : C_NotImplemented;
1904}
1905
1906static const TCGOutOpBinary outop_ctz = {
1907    .base.static_constraint = C_Dynamic,
1908    .base.dynamic_constraint = cset_ctz,
1909    .out_rrr = tgen_ctz,
1910    .out_rri = tgen_ctzi,
1911};
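/*
 * CTZ has no direct ARM encoding; the RBIT + CLZ pair above relies on
 * ctz(x) == clz(bit_reverse(x)).  For example, x = 0x8 reverses to
 * 0x10000000, whose clz is 3, which is indeed ctz(0x8).
 */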
1912
1913static TCGConstraintSetIndex cset_idiv(TCGType type, unsigned flags)
1914{
1915    return use_idiv_instructions ? C_O1_I2(r, r, r) : C_NotImplemented;
1916}
1917
1918static void tgen_divs(TCGContext *s, TCGType type,
1919                      TCGReg a0, TCGReg a1, TCGReg a2)
1920{
1921    /* sdiv */
1922    tcg_out32(s, 0x0710f010 | (COND_AL << 28) | (a0 << 16) | a1 | (a2 << 8));
1923}
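/*
 * Field layout of the SDIV encoding built above: bits [27:20] are 0x71
 * for sdiv (0x73 selects udiv below), Rd sits at [19:16], the unused
 * Ra field [15:12] must be 0xf, Rm is at [11:8] and Rn at [3:0].
 * Thus "sdiv a0, a1, a2" is 0x0710f010 with a0 << 16, a2 << 8 and a1
 * or'ed in, computing a0 = a1 / a2.
 */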
1924
1925static const TCGOutOpBinary outop_divs = {
1926    .base.static_constraint = C_Dynamic,
1927    .base.dynamic_constraint = cset_idiv,
1928    .out_rrr = tgen_divs,
1929};
1930
1931static const TCGOutOpDivRem outop_divs2 = {
1932    .base.static_constraint = C_NotImplemented,
1933};
1934
1935static void tgen_divu(TCGContext *s, TCGType type,
1936                      TCGReg a0, TCGReg a1, TCGReg a2)
1937{
1938    /* udiv */
1939    tcg_out32(s, 0x0730f010 | (COND_AL << 28) | (a0 << 16) | a1 | (a2 << 8));
1940}
1941
1942static const TCGOutOpBinary outop_divu = {
1943    .base.static_constraint = C_Dynamic,
1944    .base.dynamic_constraint = cset_idiv,
1945    .out_rrr = tgen_divu,
1946};
1947
1948static const TCGOutOpDivRem outop_divu2 = {
1949    .base.static_constraint = C_NotImplemented,
1950};
1951
1952static const TCGOutOpBinary outop_eqv = {
1953    .base.static_constraint = C_NotImplemented,
1954};
1955
1956static void tgen_mul(TCGContext *s, TCGType type,
1957                     TCGReg a0, TCGReg a1, TCGReg a2)
1958{
1959    /* mul */
1960    tcg_out32(s, (COND_AL << 28) | 0x90 | (a0 << 16) | (a1 << 8) | a2);
1961}
1962
1963static const TCGOutOpBinary outop_mul = {
1964    .base.static_constraint = C_O1_I2(r, r, r),
1965    .out_rrr = tgen_mul,
1966};
1967
1968static void tgen_muls2(TCGContext *s, TCGType type,
1969                       TCGReg rd0, TCGReg rd1, TCGReg rn, TCGReg rm)
1970{
1971    /* smull */
1972    tcg_out32(s, (COND_AL << 28) | 0x00c00090 |
1973              (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn);
1974}
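/*
 * SMULL encoding used above: 0x00c00090 selects the signed long
 * multiply, with RdHi at [19:16], RdLo at [15:12], Rm at [11:8] and
 * Rn at [3:0], so rd0 receives the low 32 bits of rn * rm and rd1
 * the high 32 bits.
 */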
1975
1976static const TCGOutOpMul2 outop_muls2 = {
1977    .base.static_constraint = C_O2_I2(r, r, r, r),
1978    .out_rrrr = tgen_muls2,
1979};
1980
1981static const TCGOutOpBinary outop_mulsh = {
1982    .base.static_constraint = C_NotImplemented,
1983};
1984
1985static const TCGOutOpBinary outop_muluh = {
1986    .base.static_constraint = C_NotImplemented,
1987};
1988
1989static const TCGOutOpBinary outop_nand = {
1990    .base.static_constraint = C_NotImplemented,
1991};
1992
1993static const TCGOutOpBinary outop_nor = {
1994    .base.static_constraint = C_NotImplemented,
1995};
1996
1997static void tgen_or(TCGContext *s, TCGType type,
1998                     TCGReg a0, TCGReg a1, TCGReg a2)
1999{
2000    tcg_out_dat_reg(s, COND_AL, ARITH_ORR, a0, a1, a2, SHIFT_IMM_LSL(0));
2001}
2002
2003static void tgen_ori(TCGContext *s, TCGType type,
2004                     TCGReg a0, TCGReg a1, tcg_target_long a2)
2005{
2006    tcg_out_dat_imm(s, COND_AL, ARITH_ORR, a0, a1, encode_imm_nofail(a2));
2007}
2008
2009static const TCGOutOpBinary outop_or = {
2010    .base.static_constraint = C_O1_I2(r, r, rI),
2011    .out_rrr = tgen_or,
2012    .out_rri = tgen_ori,
2013};
2014
2015static const TCGOutOpBinary outop_orc = {
2016    .base.static_constraint = C_NotImplemented,
2017};
2018
2019static const TCGOutOpBinary outop_rems = {
2020    .base.static_constraint = C_NotImplemented,
2021};
2022
2023static const TCGOutOpBinary outop_remu = {
2024    .base.static_constraint = C_NotImplemented,
2025};
2026
2027static const TCGOutOpBinary outop_rotl = {
2028    .base.static_constraint = C_NotImplemented,
2029};
2030
2031static void tgen_rotr(TCGContext *s, TCGType type,
2032                      TCGReg a0, TCGReg a1, TCGReg a2)
2033{
2034    tcg_out_dat_reg(s, COND_AL, ARITH_MOV, a0, 0, a1, SHIFT_REG_ROR(a2));
2035}
2036
2037static void tgen_rotri(TCGContext *s, TCGType type,
2038                       TCGReg a0, TCGReg a1, tcg_target_long a2)
2039{
2040    tcg_out_dat_reg(s, COND_AL, ARITH_MOV, a0, 0, a1, SHIFT_IMM_ROR(a2 & 0x1f));
2041}
2042
2043static const TCGOutOpBinary outop_rotr = {
2044    .base.static_constraint = C_O1_I2(r, r, ri),
2045    .out_rrr = tgen_rotr,
2046    .out_rri = tgen_rotri,
2047};
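/*
 * Only rotate-right exists in the ARM shifter; outop_rotl is left
 * C_NotImplemented above on the expectation that the middle-end
 * rewrites left rotates as right rotates, e.g. for 32-bit values
 * rotl(x, 3) == rotr(x, 29).
 */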
2048
2049static void tgen_sar(TCGContext *s, TCGType type,
2050                     TCGReg a0, TCGReg a1, TCGReg a2)
2051{
2052    tcg_out_dat_reg(s, COND_AL, ARITH_MOV, a0, 0, a1, SHIFT_REG_ASR(a2));
2053}
2054
2055static void tgen_sari(TCGContext *s, TCGType type,
2056                      TCGReg a0, TCGReg a1, tcg_target_long a2)
2057{
2058    tcg_out_dat_reg(s, COND_AL, ARITH_MOV, a0, 0, a1,
2059                    SHIFT_IMM_ASR(a2 & 0x1f));
2060}
2061
2062static const TCGOutOpBinary outop_sar = {
2063    .base.static_constraint = C_O1_I2(r, r, ri),
2064    .out_rrr = tgen_sar,
2065    .out_rri = tgen_sari,
2066};
2067
2068static void tgen_shl(TCGContext *s, TCGType type,
2069                     TCGReg a0, TCGReg a1, TCGReg a2)
2070{
2071    tcg_out_dat_reg(s, COND_AL, ARITH_MOV, a0, 0, a1, SHIFT_REG_LSL(a2));
2072}
2073
2074static void tgen_shli(TCGContext *s, TCGType type,
2075                      TCGReg a0, TCGReg a1, tcg_target_long a2)
2076{
2077    tcg_out_dat_reg(s, COND_AL, ARITH_MOV, a0, 0, a1,
2078                    SHIFT_IMM_LSL(a2 & 0x1f));
2079}
2080
2081static const TCGOutOpBinary outop_shl = {
2082    .base.static_constraint = C_O1_I2(r, r, ri),
2083    .out_rrr = tgen_shl,
2084    .out_rri = tgen_shli,
2085};
2086
2087static void tgen_shr(TCGContext *s, TCGType type,
2088                     TCGReg a0, TCGReg a1, TCGReg a2)
2089{
2090    tcg_out_dat_reg(s, COND_AL, ARITH_MOV, a0, 0, a1, SHIFT_REG_LSR(a2));
2091}
2092
2093static void tgen_shri(TCGContext *s, TCGType type,
2094                      TCGReg a0, TCGReg a1, tcg_target_long a2)
2095{
2096    tcg_out_dat_reg(s, COND_AL, ARITH_MOV, a0, 0, a1,
2097                    SHIFT_IMM_LSR(a2 & 0x1f));
2098}
2099
2100static const TCGOutOpBinary outop_shr = {
2101    .base.static_constraint = C_O1_I2(r, r, ri),
2102    .out_rrr = tgen_shr,
2103    .out_rri = tgen_shri,
2104};
2105
2106static void tgen_sub(TCGContext *s, TCGType type,
2107                     TCGReg a0, TCGReg a1, TCGReg a2)
2108{
2109    tcg_out_dat_reg(s, COND_AL, ARITH_SUB, a0, a1, a2, SHIFT_IMM_LSL(0));
2110}
2111
2112static void tgen_subfi(TCGContext *s, TCGType type,
2113                       TCGReg a0, tcg_target_long a1, TCGReg a2)
2114{
2115    tcg_out_dat_imm(s, COND_AL, ARITH_RSB, a0, a2, encode_imm_nofail(a1));
2116}
2117
2118static const TCGOutOpSubtract outop_sub = {
2119    .base.static_constraint = C_O1_I2(r, rI, r),
2120    .out_rrr = tgen_sub,
2121    .out_rir = tgen_subfi,
2122};
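/*
 * The rI constraint on a1 above works because RSB reverses the
 * operand order: "rsb rd, rn, #imm" computes imm - rn, so a
 * constant-minus-register subtract takes a single insn.  tgen_neg
 * below reuses this with imm == 0 to negate a register.
 */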
2123
2124static void tgen_xor(TCGContext *s, TCGType type,
2125                     TCGReg a0, TCGReg a1, TCGReg a2)
2126{
2127    tcg_out_dat_reg(s, COND_AL, ARITH_EOR, a0, a1, a2, SHIFT_IMM_LSL(0));
2128}
2129
2130static void tgen_xori(TCGContext *s, TCGType type,
2131                      TCGReg a0, TCGReg a1, tcg_target_long a2)
2132{
2133    tcg_out_dat_imm(s, COND_AL, ARITH_EOR, a0, a1, encode_imm_nofail(a2));
2134}
2135
2136static const TCGOutOpBinary outop_xor = {
2137    .base.static_constraint = C_O1_I2(r, r, rI),
2138    .out_rrr = tgen_xor,
2139    .out_rri = tgen_xori,
2140};
2141
2142static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
2143{
2144    tgen_subfi(s, type, a0, 0, a1);
2145}
2146
2147static const TCGOutOpUnary outop_neg = {
2148    .base.static_constraint = C_O1_I1(r, r),
2149    .out_rr = tgen_neg,
2150};
2151
2152static void tgen_not(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
2153{
2154    tcg_out_dat_reg(s, COND_AL, ARITH_MVN, a0, 0, a1, SHIFT_IMM_LSL(0));
2155}
2156
2157static const TCGOutOpUnary outop_not = {
2158    .base.static_constraint = C_O1_I1(r, r),
2159    .out_rr = tgen_not,
2160};
2161
2162
2163static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
2164                       const TCGArg args[TCG_MAX_OP_ARGS],
2165                       const int const_args[TCG_MAX_OP_ARGS])
2166{
2167    TCGArg a0, a1, a2, a3, a4, a5;
2168    int c;
2169
2170    switch (opc) {
2171    case INDEX_op_goto_ptr:
2172        tcg_out_b_reg(s, COND_AL, args[0]);
2173        break;
2174    case INDEX_op_br:
2175        tcg_out_goto_label(s, COND_AL, arg_label(args[0]));
2176        break;
2177
2178    case INDEX_op_ld8u_i32:
2179        tcg_out_ld8u(s, COND_AL, args[0], args[1], args[2]);
2180        break;
2181    case INDEX_op_ld8s_i32:
2182        tcg_out_ld8s(s, COND_AL, args[0], args[1], args[2]);
2183        break;
2184    case INDEX_op_ld16u_i32:
2185        tcg_out_ld16u(s, COND_AL, args[0], args[1], args[2]);
2186        break;
2187    case INDEX_op_ld16s_i32:
2188        tcg_out_ld16s(s, COND_AL, args[0], args[1], args[2]);
2189        break;
2190    case INDEX_op_ld_i32:
2191        tcg_out_ld32u(s, COND_AL, args[0], args[1], args[2]);
2192        break;
2193    case INDEX_op_st8_i32:
2194        tcg_out_st8(s, COND_AL, args[0], args[1], args[2]);
2195        break;
2196    case INDEX_op_st16_i32:
2197        tcg_out_st16(s, COND_AL, args[0], args[1], args[2]);
2198        break;
2199    case INDEX_op_st_i32:
2200        tcg_out_st32(s, COND_AL, args[0], args[1], args[2]);
2201        break;
2202
2203    case INDEX_op_movcond_i32:
2204        /* Constraints mean that v2 is always in the same register as dest,
2205         * so we only need to do "if condition passed, move v1 to dest".
2206         */
2207        c = tcg_out_cmp(s, args[5], args[1], args[2], const_args[2]);
2208        tcg_out_dat_rIK(s, tcg_cond_to_arm_cond[c], ARITH_MOV,
2209                        ARITH_MVN, args[0], 0, args[3], const_args[3]);
2210        break;
2211    case INDEX_op_add2_i32:
2212        a0 = args[0], a1 = args[1], a2 = args[2];
2213        a3 = args[3], a4 = args[4], a5 = args[5];
2214        if (a0 == a3 || (a0 == a5 && !const_args[5])) {
2215            a0 = TCG_REG_TMP;
2216        }
2217        tcg_out_dat_rIN(s, COND_AL, ARITH_ADD | TO_CPSR, ARITH_SUB | TO_CPSR,
2218                        a0, a2, a4, const_args[4]);
2219        tcg_out_dat_rIK(s, COND_AL, ARITH_ADC, ARITH_SBC,
2220                        a1, a3, a5, const_args[5]);
2221        tcg_out_mov_reg(s, COND_AL, args[0], a0);
2222        break;
2223    case INDEX_op_sub2_i32:
2224        a0 = args[0], a1 = args[1], a2 = args[2];
2225        a3 = args[3], a4 = args[4], a5 = args[5];
2226        if ((a0 == a3 && !const_args[3]) || (a0 == a5 && !const_args[5])) {
2227            a0 = TCG_REG_TMP;
2228        }
2229        if (const_args[2]) {
2230            if (const_args[4]) {
2231                tcg_out_movi32(s, COND_AL, a0, a4);
2232                a4 = a0;
2233            }
2234            tcg_out_dat_rI(s, COND_AL, ARITH_RSB | TO_CPSR, a0, a4, a2, 1);
2235        } else {
2236            tcg_out_dat_rIN(s, COND_AL, ARITH_SUB | TO_CPSR,
2237                            ARITH_ADD | TO_CPSR, a0, a2, a4, const_args[4]);
2238        }
2239        if (const_args[3]) {
2240            if (const_args[5]) {
2241                tcg_out_movi32(s, COND_AL, a1, a5);
2242                a5 = a1;
2243            }
2244            tcg_out_dat_rI(s, COND_AL, ARITH_RSC, a1, a5, a3, 1);
2245        } else {
2246            tcg_out_dat_rIK(s, COND_AL, ARITH_SBC, ARITH_ADC,
2247                            a1, a3, a5, const_args[5]);
2248        }
2249        tcg_out_mov_reg(s, COND_AL, args[0], a0);
2250        break;
2251    case INDEX_op_mulu2_i32:
2252        tcg_out_umull32(s, COND_AL, args[0], args[1], args[2], args[3]);
2253        break;
2254
2255    case INDEX_op_brcond_i32:
2256        c = tcg_out_cmp(s, args[2], args[0], args[1], const_args[1]);
2257        tcg_out_goto_label(s, tcg_cond_to_arm_cond[c], arg_label(args[3]));
2258        break;
2259    case INDEX_op_setcond_i32:
2260        c = tcg_out_cmp(s, args[3], args[1], args[2], const_args[2]);
2261        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[c],
2262                        ARITH_MOV, args[0], 0, 1);
2263        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(c)],
2264                        ARITH_MOV, args[0], 0, 0);
2265        break;
2266    case INDEX_op_negsetcond_i32:
2267        c = tcg_out_cmp(s, args[3], args[1], args[2], const_args[2]);
2268        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[c],
2269                        ARITH_MVN, args[0], 0, 0);
2270        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(c)],
2271                        ARITH_MOV, args[0], 0, 0);
2272        break;
2273
2274    case INDEX_op_brcond2_i32:
2275        c = tcg_out_cmp2(s, args, const_args);
2276        tcg_out_goto_label(s, tcg_cond_to_arm_cond[c], arg_label(args[5]));
2277        break;
2278    case INDEX_op_setcond2_i32:
2279        c = tcg_out_cmp2(s, args + 1, const_args + 1);
2280        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[c], ARITH_MOV, args[0], 0, 1);
2281        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(c)],
2282                        ARITH_MOV, args[0], 0, 0);
2283        break;
2284
2285    case INDEX_op_qemu_ld_i32:
2286        tcg_out_qemu_ld(s, args[0], -1, args[1], args[2], TCG_TYPE_I32);
2287        break;
2288    case INDEX_op_qemu_ld_i64:
2289        tcg_out_qemu_ld(s, args[0], args[1], args[2], args[3], TCG_TYPE_I64);
2290        break;
2291
2292    case INDEX_op_qemu_st_i32:
2293        tcg_out_qemu_st(s, args[0], -1, args[1], args[2], TCG_TYPE_I32);
2294        break;
2295    case INDEX_op_qemu_st_i64:
2296        tcg_out_qemu_st(s, args[0], args[1], args[2], args[3], TCG_TYPE_I64);
2297        break;
2298
2299    case INDEX_op_bswap16_i32:
2300        tcg_out_bswap16(s, COND_AL, args[0], args[1], args[2]);
2301        break;
2302    case INDEX_op_bswap32_i32:
2303        tcg_out_bswap32(s, COND_AL, args[0], args[1]);
2304        break;
2305
2306    case INDEX_op_deposit_i32:
2307        tcg_out_deposit(s, COND_AL, args[0], args[2],
2308                        args[3], args[4], const_args[2]);
2309        break;
2310    case INDEX_op_extract_i32:
2311        tcg_out_extract(s, COND_AL, args[0], args[1], args[2], args[3]);
2312        break;
2313    case INDEX_op_sextract_i32:
2314        tcg_out_sextract(s, COND_AL, args[0], args[1], args[2], args[3]);
2315        break;
2316    case INDEX_op_extract2_i32:
2317        /* ??? These optimizations vs zero should be generic.  */
2318        /* ??? But we can't substitute 2 for 1 in the opcode stream yet.  */
2319        if (const_args[1]) {
2320            if (const_args[2]) {
2321                tcg_out_movi(s, TCG_TYPE_REG, args[0], 0);
2322            } else {
2323                tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0,
2324                                args[2], SHIFT_IMM_LSL(32 - args[3]));
2325            }
2326        } else if (const_args[2]) {
2327            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0,
2328                            args[1], SHIFT_IMM_LSR(args[3]));
2329        } else {
2330            /* We can do extract2 in 2 insns, vs the 3 required otherwise.  */
2331            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0,
2332                            args[2], SHIFT_IMM_LSL(32 - args[3]));
2333            tcg_out_dat_reg(s, COND_AL, ARITH_ORR, args[0], TCG_REG_TMP,
2334                            args[1], SHIFT_IMM_LSR(args[3]));
2335        }
2336        break;
2337
2338    case INDEX_op_mb:
2339        tcg_out_mb(s, args[0]);
2340        break;
2341
2342    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
2343    case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb.  */
2344    case INDEX_op_goto_tb:  /* Always emitted via tcg_out_goto_tb.  */
2345    default:
2346        g_assert_not_reached();
2347    }
2348}
2349
2350static TCGConstraintSetIndex
2351tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
2352{
2353    switch (op) {
2354    case INDEX_op_goto_ptr:
2355        return C_O0_I1(r);
2356
2357    case INDEX_op_ld8u_i32:
2358    case INDEX_op_ld8s_i32:
2359    case INDEX_op_ld16u_i32:
2360    case INDEX_op_ld16s_i32:
2361    case INDEX_op_ld_i32:
2362    case INDEX_op_bswap16_i32:
2363    case INDEX_op_bswap32_i32:
2364    case INDEX_op_extract_i32:
2365    case INDEX_op_sextract_i32:
2366        return C_O1_I1(r, r);
2367
2368    case INDEX_op_st8_i32:
2369    case INDEX_op_st16_i32:
2370    case INDEX_op_st_i32:
2371        return C_O0_I2(r, r);
2372
2373    case INDEX_op_setcond_i32:
2374    case INDEX_op_negsetcond_i32:
2375        return C_O1_I2(r, r, rIN);
2376
2377    case INDEX_op_mulu2_i32:
2378        return C_O2_I2(r, r, r, r);
2379
2380    case INDEX_op_brcond_i32:
2381        return C_O0_I2(r, rIN);
2382    case INDEX_op_deposit_i32:
2383        return C_O1_I2(r, 0, rZ);
2384    case INDEX_op_extract2_i32:
2385        return C_O1_I2(r, rZ, rZ);
2386    case INDEX_op_movcond_i32:
2387        return C_O1_I4(r, r, rIN, rIK, 0);
2388    case INDEX_op_add2_i32:
2389        return C_O2_I4(r, r, r, r, rIN, rIK);
2390    case INDEX_op_sub2_i32:
2391        return C_O2_I4(r, r, rI, rI, rIN, rIK);
2392    case INDEX_op_brcond2_i32:
2393        return C_O0_I4(r, r, rI, rI);
2394    case INDEX_op_setcond2_i32:
2395        return C_O1_I4(r, r, r, rI, rI);
2396
2397    case INDEX_op_qemu_ld_i32:
2398        return C_O1_I1(r, q);
2399    case INDEX_op_qemu_ld_i64:
2400        return C_O2_I1(e, p, q);
2401    case INDEX_op_qemu_st_i32:
2402        return C_O0_I2(q, q);
2403    case INDEX_op_qemu_st_i64:
2404        return C_O0_I3(Q, p, q);
2405
2406    case INDEX_op_st_vec:
2407        return C_O0_I2(w, r);
2408    case INDEX_op_ld_vec:
2409    case INDEX_op_dupm_vec:
2410        return C_O1_I1(w, r);
2411    case INDEX_op_dup_vec:
2412        return C_O1_I1(w, wr);
2413    case INDEX_op_abs_vec:
2414    case INDEX_op_neg_vec:
2415    case INDEX_op_not_vec:
2416    case INDEX_op_shli_vec:
2417    case INDEX_op_shri_vec:
2418    case INDEX_op_sari_vec:
2419        return C_O1_I1(w, w);
2420    case INDEX_op_dup2_vec:
2421    case INDEX_op_add_vec:
2422    case INDEX_op_mul_vec:
2423    case INDEX_op_smax_vec:
2424    case INDEX_op_smin_vec:
2425    case INDEX_op_ssadd_vec:
2426    case INDEX_op_sssub_vec:
2427    case INDEX_op_sub_vec:
2428    case INDEX_op_umax_vec:
2429    case INDEX_op_umin_vec:
2430    case INDEX_op_usadd_vec:
2431    case INDEX_op_ussub_vec:
2432    case INDEX_op_xor_vec:
2433    case INDEX_op_arm_sshl_vec:
2434    case INDEX_op_arm_ushl_vec:
2435        return C_O1_I2(w, w, w);
2436    case INDEX_op_arm_sli_vec:
2437        return C_O1_I2(w, 0, w);
2438    case INDEX_op_or_vec:
2439    case INDEX_op_andc_vec:
2440        return C_O1_I2(w, w, wO);
2441    case INDEX_op_and_vec:
2442    case INDEX_op_orc_vec:
2443        return C_O1_I2(w, w, wV);
2444    case INDEX_op_cmp_vec:
2445        return C_O1_I2(w, w, wZ);
2446    case INDEX_op_bitsel_vec:
2447        return C_O1_I3(w, w, w, w);
2448    default:
2449        return C_NotImplemented;
2450    }
2451}
2452
2453static void tcg_target_init(TCGContext *s)
2454{
2455    /*
2456     * Only probe for the platform and capabilities if we haven't already
2457     * determined maximum values at compile time.
2458     */
2459#if !defined(use_idiv_instructions) || !defined(use_neon_instructions)
2460    {
2461        unsigned long hwcap = qemu_getauxval(AT_HWCAP);
2462#ifndef use_idiv_instructions
2463        use_idiv_instructions = (hwcap & HWCAP_ARM_IDIVA) != 0;
2464#endif
2465#ifndef use_neon_instructions
2466        use_neon_instructions = (hwcap & HWCAP_ARM_NEON) != 0;
2467#endif
2468    }
2469#endif
2470
2471    if (__ARM_ARCH < 7) {
2472        const char *pl = (const char *)qemu_getauxval(AT_PLATFORM);
2473        if (pl != NULL && pl[0] == 'v' && pl[1] >= '4' && pl[1] <= '9') {
2474            arm_arch = pl[1] - '0';
2475        }
2476
2477        if (arm_arch < 6) {
2478            error_report("TCG: ARMv%d is unsupported; exiting", arm_arch);
2479            exit(EXIT_FAILURE);
2480        }
2481    }
2482
2483    tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS;
2484
2485    tcg_target_call_clobber_regs = 0;
2486    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0);
2487    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R1);
2488    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R2);
2489    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R3);
2490    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R12);
2491    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R14);
2492
2493    if (use_neon_instructions) {
2494        tcg_target_available_regs[TCG_TYPE_V64]  = ALL_VECTOR_REGS;
2495        tcg_target_available_regs[TCG_TYPE_V128] = ALL_VECTOR_REGS;
2496
2497        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q0);
2498        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q1);
2499        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q2);
2500        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q3);
2501        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q8);
2502        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q9);
2503        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q10);
2504        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q11);
2505        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q12);
2506        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q13);
2507        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q14);
2508        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q15);
2509    }
2510
2511    s->reserved_regs = 0;
2512    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
2513    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP);
2514    tcg_regset_set_reg(s->reserved_regs, TCG_REG_PC);
2515    tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP);
2516}
2517
2518static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
2519                       TCGReg arg1, intptr_t arg2)
2520{
2521    switch (type) {
2522    case TCG_TYPE_I32:
2523        tcg_out_ld32u(s, COND_AL, arg, arg1, arg2);
2524        return;
2525    case TCG_TYPE_V64:
2526        /* regs 1; size 8; align 8 */
2527        tcg_out_vldst(s, INSN_VLD1 | 0x7d0, arg, arg1, arg2);
2528        return;
2529    case TCG_TYPE_V128:
2530        /*
2531         * We have only 8-byte alignment for the stack per the ABI.
2532         * Rather than dynamically re-align the stack, it's easier
2533         * to simply not request alignment beyond that.  So:
2534         * regs 2; size 8; align 8
2535         */
2536        tcg_out_vldst(s, INSN_VLD1 | 0xad0, arg, arg1, arg2);
2537        return;
2538    default:
2539        g_assert_not_reached();
2540    }
2541}
2542
2543static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
2544                       TCGReg arg1, intptr_t arg2)
2545{
2546    switch (type) {
2547    case TCG_TYPE_I32:
2548        tcg_out_st32(s, COND_AL, arg, arg1, arg2);
2549        return;
2550    case TCG_TYPE_V64:
2551        /* regs 1; size 8; align 8 */
2552        tcg_out_vldst(s, INSN_VST1 | 0x7d0, arg, arg1, arg2);
2553        return;
2554    case TCG_TYPE_V128:
2555        /* See tcg_out_ld re alignment: regs 2; size 8; align 8 */
2556        tcg_out_vldst(s, INSN_VST1 | 0xad0, arg, arg1, arg2);
2557        return;
2558    default:
2559        g_assert_not_reached();
2560    }
2561}
2562
2563static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
2564                        TCGReg base, intptr_t ofs)
2565{
2566    return false;
2567}
2568
2569static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
2570{
2571    if (ret == arg) {
2572        return true;
2573    }
2574    switch (type) {
2575    case TCG_TYPE_I32:
2576        if (ret < TCG_REG_Q0 && arg < TCG_REG_Q0) {
2577            tcg_out_mov_reg(s, COND_AL, ret, arg);
2578            return true;
2579        }
2580        return false;
2581
2582    case TCG_TYPE_V64:
2583    case TCG_TYPE_V128:
2584        /* "VMOV D,N" is an alias for "VORR D,N,N". */
2585        tcg_out_vreg3(s, INSN_VORR, type - TCG_TYPE_V64, 0, ret, arg, arg);
2586        return true;
2587
2588    default:
2589        g_assert_not_reached();
2590    }
2591}
2592
2593static void tcg_out_movi(TCGContext *s, TCGType type,
2594                         TCGReg ret, tcg_target_long arg)
2595{
2596    tcg_debug_assert(type == TCG_TYPE_I32);
2597    tcg_debug_assert(ret < TCG_REG_Q0);
2598    tcg_out_movi32(s, COND_AL, ret, arg);
2599}
2600
2601static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
2602{
2603    return false;
2604}
2605
2606static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
2607                             tcg_target_long imm)
2608{
2609    int enc, opc = ARITH_ADD;
2610
2611    /* All of the easiest immediates to encode are positive. */
2612    if (imm < 0) {
2613        imm = -imm;
2614        opc = ARITH_SUB;
2615    }
2616    enc = encode_imm(imm);
2617    if (enc >= 0) {
2618        tcg_out_dat_imm(s, COND_AL, opc, rd, rs, enc);
2619    } else {
2620        tcg_out_movi32(s, COND_AL, TCG_REG_TMP, imm);
2621        tcg_out_dat_reg(s, COND_AL, opc, rd, rs,
2622                        TCG_REG_TMP, SHIFT_IMM_LSL(0));
2623    }
2624}
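/*
 * Example for encode_imm above: an ARM data-processing immediate is an
 * 8-bit value rotated right by an even amount, so 0x12000 (0x12 << 12)
 * encodes directly, while 0x12345 has too many significant bits and
 * takes the movi32-into-TCG_REG_TMP fallback.
 */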
2625
2626/* Type is always V128, with I64 elements.  */
2627static void tcg_out_dup2_vec(TCGContext *s, TCGReg rd, TCGReg rl, TCGReg rh)
2628{
2629    /* Move high element into place first. */
2630    /* VMOV Dd+1, Ds */
2631    tcg_out_vreg3(s, INSN_VORR | (1 << 12), 0, 0, rd, rh, rh);
2632    /* Move low element into place; tcg_out_mov will check for nop. */
2633    tcg_out_mov(s, TCG_TYPE_V64, rd, rl);
2634}
2635
2636static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
2637                            TCGReg rd, TCGReg rs)
2638{
2639    int q = type - TCG_TYPE_V64;
2640
2641    if (vece == MO_64) {
2642        if (type == TCG_TYPE_V128) {
2643            tcg_out_dup2_vec(s, rd, rs, rs);
2644        } else {
2645            tcg_out_mov(s, TCG_TYPE_V64, rd, rs);
2646        }
2647    } else if (rs < TCG_REG_Q0) {
2648        int b = (vece == MO_8);
2649        int e = (vece == MO_16);
2650        tcg_out32(s, INSN_VDUP_G | (b << 22) | (q << 21) | (e << 5) |
2651                  encode_vn(rd) | (rs << 12));
2652    } else {
2653        int imm4 = 1 << vece;
2654        tcg_out32(s, INSN_VDUP_S | (imm4 << 16) | (q << 6) |
2655                  encode_vd(rd) | encode_vm(rs));
2656    }
2657    return true;
2658}
2659
2660static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
2661                             TCGReg rd, TCGReg base, intptr_t offset)
2662{
2663    if (vece == MO_64) {
2664        tcg_out_ld(s, TCG_TYPE_V64, rd, base, offset);
2665        if (type == TCG_TYPE_V128) {
2666            tcg_out_dup2_vec(s, rd, rd, rd);
2667        }
2668    } else {
2669        int q = type - TCG_TYPE_V64;
2670        tcg_out_vldst(s, INSN_VLD1R | (vece << 6) | (q << 5),
2671                      rd, base, offset);
2672    }
2673    return true;
2674}
2675
2676static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
2677                             TCGReg rd, int64_t v64)
2678{
2679    int q = type - TCG_TYPE_V64;
2680    int cmode, imm8, i;
2681
2682    /* Test all bytes equal first.  */
2683    if (vece == MO_8) {
2684        tcg_out_vmovi(s, rd, q, 0, 0xe, v64);
2685        return;
2686    }
2687
2688    /*
2689     * Test all bytes 0x00 or 0xff second.  This can match cases that
2690     * might otherwise take 2 or 3 insns for MO_16 or MO_32 below.
2691     */
2692    for (i = imm8 = 0; i < 8; i++) {
2693        uint8_t byte = v64 >> (i * 8);
2694        if (byte == 0xff) {
2695            imm8 |= 1 << i;
2696        } else if (byte != 0) {
2697            goto fail_bytes;
2698        }
2699    }
2700    tcg_out_vmovi(s, rd, q, 1, 0xe, imm8);
2701    return;
2702 fail_bytes:
2703
2704    /*
2705     * Tests for various replications.  For each element width, if we
2706     * cannot find an expansion there's no point checking a larger
2707     * width because we already know by replication it cannot match.
2708     */
2709    if (vece == MO_16) {
2710        uint16_t v16 = v64;
2711
2712        if (is_shimm16(v16, &cmode, &imm8)) {
2713            tcg_out_vmovi(s, rd, q, 0, cmode, imm8);
2714            return;
2715        }
2716        if (is_shimm16(~v16, &cmode, &imm8)) {
2717            tcg_out_vmovi(s, rd, q, 1, cmode, imm8);
2718            return;
2719        }
2720
2721        /*
2722         * Otherwise, all remaining constants can be loaded in two insns:
2723         * rd = v16 & 0xff, rd |= v16 & 0xff00.
2724         */
2725        tcg_out_vmovi(s, rd, q, 0, 0x8, v16 & 0xff);
2726        tcg_out_vmovi(s, rd, q, 0, 0xb, v16 >> 8);   /* VORRI */
2727        return;
2728    }
2729
2730    if (vece == MO_32) {
2731        uint32_t v32 = v64;
2732
2733        if (is_shimm32(v32, &cmode, &imm8) ||
2734            is_soimm32(v32, &cmode, &imm8)) {
2735            tcg_out_vmovi(s, rd, q, 0, cmode, imm8);
2736            return;
2737        }
2738        if (is_shimm32(~v32, &cmode, &imm8) ||
2739            is_soimm32(~v32, &cmode, &imm8)) {
2740            tcg_out_vmovi(s, rd, q, 1, cmode, imm8);
2741            return;
2742        }
2743
2744        /*
2745         * Restrict the set of constants to those we can load with
2746         * two instructions.  Others we load from the pool.
2747         */
2748        i = is_shimm32_pair(v32, &cmode, &imm8);
2749        if (i) {
2750            tcg_out_vmovi(s, rd, q, 0, cmode, imm8);
2751            tcg_out_vmovi(s, rd, q, 0, i | 1, extract32(v32, i * 4, 8));
2752            return;
2753        }
2754        i = is_shimm32_pair(~v32, &cmode, &imm8);
2755        if (i) {
2756            tcg_out_vmovi(s, rd, q, 1, cmode, imm8);
2757            tcg_out_vmovi(s, rd, q, 1, i | 1, extract32(~v32, i * 4, 8));
2758            return;
2759        }
2760    }
2761
2762    /*
2763     * As a last resort, load from the constant pool.
2764     */
2765    if (!q || vece == MO_64) {
2766        new_pool_l2(s, R_ARM_PC11, s->code_ptr, 0, v64, v64 >> 32);
2767        /* VLDR Dd, [pc + offset] */
2768        tcg_out32(s, INSN_VLDR_D | encode_vd(rd) | (0xf << 16));
2769        if (q) {
2770            tcg_out_dup2_vec(s, rd, rd, rd);
2771        }
2772    } else {
2773        new_pool_label(s, (uint32_t)v64, R_ARM_PC8, s->code_ptr, 0);
2774        /* add tmp, pc, offset */
2775        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_TMP, TCG_REG_PC, 0);
2776        tcg_out_dupm_vec(s, type, MO_32, rd, TCG_REG_TMP, 0);
2777    }
2778}
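/*
 * Two illustrative constants for the expansion above:
 *   - 0x00ff00ff00ff00ff hits the byte-mask case: bytes 0, 2, 4 and 6
 *     are 0xff, giving imm8 = 0x55 with cmode 0xe.
 *   - for vece == MO_16, v16 == 0x1234 fails the shifted-immediate
 *     tests and is built in two insns: vmov.i16 #0x34, vorr.i16 #0x1200.
 */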
2779
2780static const ARMInsn vec_cmp_insn[16] = {
2781    [TCG_COND_EQ] = INSN_VCEQ,
2782    [TCG_COND_GT] = INSN_VCGT,
2783    [TCG_COND_GE] = INSN_VCGE,
2784    [TCG_COND_GTU] = INSN_VCGT_U,
2785    [TCG_COND_GEU] = INSN_VCGE_U,
2786};
2787
2788static const ARMInsn vec_cmp0_insn[16] = {
2789    [TCG_COND_EQ] = INSN_VCEQ0,
2790    [TCG_COND_GT] = INSN_VCGT0,
2791    [TCG_COND_GE] = INSN_VCGE0,
2792    [TCG_COND_LT] = INSN_VCLT0,
2793    [TCG_COND_LE] = INSN_VCLE0,
2794};
2795
2796static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
2797                           unsigned vecl, unsigned vece,
2798                           const TCGArg args[TCG_MAX_OP_ARGS],
2799                           const int const_args[TCG_MAX_OP_ARGS])
2800{
2801    TCGType type = vecl + TCG_TYPE_V64;
2802    unsigned q = vecl;
2803    TCGArg a0, a1, a2, a3;
2804    int cmode, imm8;
2805
2806    a0 = args[0];
2807    a1 = args[1];
2808    a2 = args[2];
2809
2810    switch (opc) {
2811    case INDEX_op_ld_vec:
2812        tcg_out_ld(s, type, a0, a1, a2);
2813        return;
2814    case INDEX_op_st_vec:
2815        tcg_out_st(s, type, a0, a1, a2);
2816        return;
2817    case INDEX_op_dupm_vec:
2818        tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
2819        return;
2820    case INDEX_op_dup2_vec:
2821        tcg_out_dup2_vec(s, a0, a1, a2);
2822        return;
2823    case INDEX_op_abs_vec:
2824        tcg_out_vreg2(s, INSN_VABS, q, vece, a0, a1);
2825        return;
2826    case INDEX_op_neg_vec:
2827        tcg_out_vreg2(s, INSN_VNEG, q, vece, a0, a1);
2828        return;
2829    case INDEX_op_not_vec:
2830        tcg_out_vreg2(s, INSN_VMVN, q, 0, a0, a1);
2831        return;
2832    case INDEX_op_add_vec:
2833        tcg_out_vreg3(s, INSN_VADD, q, vece, a0, a1, a2);
2834        return;
2835    case INDEX_op_mul_vec:
2836        tcg_out_vreg3(s, INSN_VMUL, q, vece, a0, a1, a2);
2837        return;
2838    case INDEX_op_smax_vec:
2839        tcg_out_vreg3(s, INSN_VMAX, q, vece, a0, a1, a2);
2840        return;
2841    case INDEX_op_smin_vec:
2842        tcg_out_vreg3(s, INSN_VMIN, q, vece, a0, a1, a2);
2843        return;
2844    case INDEX_op_sub_vec:
2845        tcg_out_vreg3(s, INSN_VSUB, q, vece, a0, a1, a2);
2846        return;
2847    case INDEX_op_ssadd_vec:
2848        tcg_out_vreg3(s, INSN_VQADD, q, vece, a0, a1, a2);
2849        return;
2850    case INDEX_op_sssub_vec:
2851        tcg_out_vreg3(s, INSN_VQSUB, q, vece, a0, a1, a2);
2852        return;
2853    case INDEX_op_umax_vec:
2854        tcg_out_vreg3(s, INSN_VMAX_U, q, vece, a0, a1, a2);
2855        return;
2856    case INDEX_op_umin_vec:
2857        tcg_out_vreg3(s, INSN_VMIN_U, q, vece, a0, a1, a2);
2858        return;
2859    case INDEX_op_usadd_vec:
2860        tcg_out_vreg3(s, INSN_VQADD_U, q, vece, a0, a1, a2);
2861        return;
2862    case INDEX_op_ussub_vec:
2863        tcg_out_vreg3(s, INSN_VQSUB_U, q, vece, a0, a1, a2);
2864        return;
2865    case INDEX_op_xor_vec:
2866        tcg_out_vreg3(s, INSN_VEOR, q, 0, a0, a1, a2);
2867        return;
2868    case INDEX_op_arm_sshl_vec:
2869        /*
2870         * Note that Vm is the data and Vn is the shift count,
2871         * therefore the arguments appear reversed.
2872         */
2873        tcg_out_vreg3(s, INSN_VSHL_S, q, vece, a0, a2, a1);
2874        return;
2875    case INDEX_op_arm_ushl_vec:
2876        /* See above. */
2877        tcg_out_vreg3(s, INSN_VSHL_U, q, vece, a0, a2, a1);
2878        return;
2879    case INDEX_op_shli_vec:
2880        tcg_out_vshifti(s, INSN_VSHLI, q, a0, a1, a2 + (8 << vece));
2881        return;
2882    case INDEX_op_shri_vec:
2883        tcg_out_vshifti(s, INSN_VSHRI, q, a0, a1, (16 << vece) - a2);
2884        return;
2885    case INDEX_op_sari_vec:
2886        tcg_out_vshifti(s, INSN_VSARI, q, a0, a1, (16 << vece) - a2);
2887        return;
2888    case INDEX_op_arm_sli_vec:
2889        tcg_out_vshifti(s, INSN_VSLI, q, a0, a2, args[3] + (8 << vece));
2890        return;
2891
2892    case INDEX_op_andc_vec:
2893        if (!const_args[2]) {
2894            tcg_out_vreg3(s, INSN_VBIC, q, 0, a0, a1, a2);
2895            return;
2896        }
2897        a2 = ~a2;
2898        /* fall through */
2899    case INDEX_op_and_vec:
2900        if (const_args[2]) {
2901            is_shimm1632(~a2, &cmode, &imm8);
2902            if (a0 == a1) {
2903                tcg_out_vmovi(s, a0, q, 1, cmode | 1, imm8); /* VBICI */
2904                return;
2905            }
2906            tcg_out_vmovi(s, a0, q, 1, cmode, imm8); /* VMVNI */
2907            a2 = a0;
2908        }
2909        tcg_out_vreg3(s, INSN_VAND, q, 0, a0, a1, a2);
2910        return;
2911
2912    case INDEX_op_orc_vec:
2913        if (!const_args[2]) {
2914            tcg_out_vreg3(s, INSN_VORN, q, 0, a0, a1, a2);
2915            return;
2916        }
2917        a2 = ~a2;
2918        /* fall through */
2919    case INDEX_op_or_vec:
2920        if (const_args[2]) {
2921            is_shimm1632(a2, &cmode, &imm8);
2922            if (a0 == a1) {
2923                tcg_out_vmovi(s, a0, q, 0, cmode | 1, imm8); /* VORRI */
2924                return;
2925            }
2926            tcg_out_vmovi(s, a0, q, 0, cmode, imm8); /* VMOVI */
2927            a2 = a0;
2928        }
2929        tcg_out_vreg3(s, INSN_VORR, q, 0, a0, a1, a2);
2930        return;
2931
2932    case INDEX_op_cmp_vec:
2933        {
2934            TCGCond cond = args[3];
2935            ARMInsn insn;
2936
2937            switch (cond) {
2938            case TCG_COND_NE:
2939                if (const_args[2]) {
2940                    tcg_out_vreg3(s, INSN_VTST, q, vece, a0, a1, a1);
2941                } else {
2942                    tcg_out_vreg3(s, INSN_VCEQ, q, vece, a0, a1, a2);
2943                    tcg_out_vreg2(s, INSN_VMVN, q, 0, a0, a0);
2944                }
2945                break;
2946
2947            case TCG_COND_TSTNE:
2948            case TCG_COND_TSTEQ:
2949                if (const_args[2]) {
2950                    /* (x & 0) == 0 */
2951                    tcg_out_dupi_vec(s, type, MO_8, a0,
2952                                     -(cond == TCG_COND_TSTEQ));
2953                    break;
2954                }
2955                tcg_out_vreg3(s, INSN_VTST, q, vece, a0, a1, a2);
2956                if (cond == TCG_COND_TSTEQ) {
2957                    tcg_out_vreg2(s, INSN_VMVN, q, 0, a0, a0);
2958                }
2959                break;
2960
2961            default:
2962                if (const_args[2]) {
2963                    insn = vec_cmp0_insn[cond];
2964                    if (insn) {
2965                        tcg_out_vreg2(s, insn, q, vece, a0, a1);
2966                        return;
2967                    }
2968                    tcg_out_dupi_vec(s, type, MO_8, TCG_VEC_TMP, 0);
2969                    a2 = TCG_VEC_TMP;
2970                }
2971                insn = vec_cmp_insn[cond];
2972                if (insn == 0) {
2973                    TCGArg t;
2974                    t = a1, a1 = a2, a2 = t;
2975                    cond = tcg_swap_cond(cond);
2976                    insn = vec_cmp_insn[cond];
2977                    tcg_debug_assert(insn != 0);
2978                }
2979                tcg_out_vreg3(s, insn, q, vece, a0, a1, a2);
2980                break;
2981            }
2982        }
2983        return;
2984
2985    case INDEX_op_bitsel_vec:
2986        a3 = args[3];
2987        if (a0 == a3) {
2988            tcg_out_vreg3(s, INSN_VBIT, q, 0, a0, a2, a1);
2989        } else if (a0 == a2) {
2990            tcg_out_vreg3(s, INSN_VBIF, q, 0, a0, a3, a1);
2991        } else {
2992            tcg_out_mov(s, type, a0, a1);
2993            tcg_out_vreg3(s, INSN_VBSL, q, 0, a0, a2, a3);
2994        }
2995        return;
2996
2997    case INDEX_op_mov_vec:  /* Always emitted via tcg_out_mov.  */
2998    case INDEX_op_dup_vec:  /* Always emitted via tcg_out_dup_vec.  */
2999    default:
3000        g_assert_not_reached();
3001    }
3002}
3003
3004int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
3005{
3006    switch (opc) {
3007    case INDEX_op_add_vec:
3008    case INDEX_op_sub_vec:
3009    case INDEX_op_and_vec:
3010    case INDEX_op_andc_vec:
3011    case INDEX_op_or_vec:
3012    case INDEX_op_orc_vec:
3013    case INDEX_op_xor_vec:
3014    case INDEX_op_not_vec:
3015    case INDEX_op_shli_vec:
3016    case INDEX_op_shri_vec:
3017    case INDEX_op_sari_vec:
3018    case INDEX_op_ssadd_vec:
3019    case INDEX_op_sssub_vec:
3020    case INDEX_op_usadd_vec:
3021    case INDEX_op_ussub_vec:
3022    case INDEX_op_bitsel_vec:
3023        return 1;
3024    case INDEX_op_abs_vec:
3025    case INDEX_op_cmp_vec:
3026    case INDEX_op_mul_vec:
3027    case INDEX_op_neg_vec:
3028    case INDEX_op_smax_vec:
3029    case INDEX_op_smin_vec:
3030    case INDEX_op_umax_vec:
3031    case INDEX_op_umin_vec:
3032        return vece < MO_64;
3033    case INDEX_op_shlv_vec:
3034    case INDEX_op_shrv_vec:
3035    case INDEX_op_sarv_vec:
3036    case INDEX_op_rotli_vec:
3037    case INDEX_op_rotlv_vec:
3038    case INDEX_op_rotrv_vec:
3039        return -1;
3040    default:
3041        return 0;
3042    }
3043}
3044
3045void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
3046                       TCGArg a0, ...)
3047{
3048    va_list va;
3049    TCGv_vec v0, v1, v2, t1, t2, c1;
3050    TCGArg a2;
3051
3052    va_start(va, a0);
3053    v0 = temp_tcgv_vec(arg_temp(a0));
3054    v1 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
3055    a2 = va_arg(va, TCGArg);
3056    va_end(va);
3057
3058    switch (opc) {
3059    case INDEX_op_shlv_vec:
3060        /*
3061         * Merely propagate shlv_vec to arm_ushl_vec.
3062         * In this way we don't set TCG_TARGET_HAS_shv_vec
3063         * because everything is done via expansion.
3064         */
3065        v2 = temp_tcgv_vec(arg_temp(a2));
3066        vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(v0),
3067                  tcgv_vec_arg(v1), tcgv_vec_arg(v2));
3068        break;
3069
3070    case INDEX_op_shrv_vec:
3071    case INDEX_op_sarv_vec:
3072        /* Right shifts are negative left shifts for NEON.  */
3073        v2 = temp_tcgv_vec(arg_temp(a2));
3074        t1 = tcg_temp_new_vec(type);
3075        tcg_gen_neg_vec(vece, t1, v2);
3076        if (opc == INDEX_op_shrv_vec) {
3077            opc = INDEX_op_arm_ushl_vec;
3078        } else {
3079            opc = INDEX_op_arm_sshl_vec;
3080        }
3081        vec_gen_3(opc, type, vece, tcgv_vec_arg(v0),
3082                  tcgv_vec_arg(v1), tcgv_vec_arg(t1));
3083        tcg_temp_free_vec(t1);
3084        break;
3085
3086    case INDEX_op_rotli_vec:
3087        t1 = tcg_temp_new_vec(type);
3088        tcg_gen_shri_vec(vece, t1, v1, -a2 & ((8 << vece) - 1));
3089        vec_gen_4(INDEX_op_arm_sli_vec, type, vece,
3090                  tcgv_vec_arg(v0), tcgv_vec_arg(t1), tcgv_vec_arg(v1), a2);
3091        tcg_temp_free_vec(t1);
3092        break;
3093
3094    case INDEX_op_rotlv_vec:
3095        v2 = temp_tcgv_vec(arg_temp(a2));
3096        t1 = tcg_temp_new_vec(type);
3097        c1 = tcg_constant_vec(type, vece, 8 << vece);
3098        tcg_gen_sub_vec(vece, t1, v2, c1);
3099        /* Right shifts are negative left shifts for NEON.  */
3100        vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(t1),
3101                  tcgv_vec_arg(v1), tcgv_vec_arg(t1));
3102        vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(v0),
3103                  tcgv_vec_arg(v1), tcgv_vec_arg(v2));
3104        tcg_gen_or_vec(vece, v0, v0, t1);
3105        tcg_temp_free_vec(t1);
3106        break;
3107
3108    case INDEX_op_rotrv_vec:
3109        v2 = temp_tcgv_vec(arg_temp(a2));
3110        t1 = tcg_temp_new_vec(type);
3111        t2 = tcg_temp_new_vec(type);
3112        c1 = tcg_constant_vec(type, vece, 8 << vece);
3113        tcg_gen_neg_vec(vece, t1, v2);
3114        tcg_gen_sub_vec(vece, t2, c1, v2);
3115        /* Right shifts are negative left shifts for NEON.  */
3116        vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(t1),
3117                  tcgv_vec_arg(v1), tcgv_vec_arg(t1));
3118        vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(t2),
3119                  tcgv_vec_arg(v1), tcgv_vec_arg(t2));
3120        tcg_gen_or_vec(vece, v0, t1, t2);
3121        tcg_temp_free_vec(t1);
3122        tcg_temp_free_vec(t2);
3123        break;
3124
3125    default:
3126        g_assert_not_reached();
3127    }
3128}
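/*
 * The rotlv expansion above computes, per element of width w = 8 << vece,
 * v0 = (v1 << v2) | (v1 >> (w - v2)), using the NEON convention that a
 * negative ushl count shifts right.  E.g. for MO_8 and v2 == 3:
 * t1 = v1 ushl (3 - 8) = v1 >> 5, and v0 = (v1 << 3) | t1.
 */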
3129
3130static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
3131{
3132    int i;
3133    for (i = 0; i < count; ++i) {
3134        p[i] = INSN_NOP;
3135    }
3136}
3137
3138/* Compute frame size via macros, to share between tcg_target_qemu_prologue
3139   and tcg_register_jit.  */
3140
3141#define PUSH_SIZE  ((11 - 4 + 1 + 1) * sizeof(tcg_target_long))
3142
3143#define FRAME_SIZE \
3144    ((PUSH_SIZE \
3145      + TCG_STATIC_CALL_ARGS_SIZE \
3146      + CPU_TEMP_BUF_NLONGS * sizeof(long) \
3147      + TCG_TARGET_STACK_ALIGN - 1) \
3148     & -TCG_TARGET_STACK_ALIGN)
3149
3150#define STACK_ADDEND  (FRAME_SIZE - PUSH_SIZE)
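/*
 * Worked numbers, assuming the current values of 128 for both
 * TCG_STATIC_CALL_ARGS_SIZE and CPU_TEMP_BUF_NLONGS: PUSH_SIZE is
 * 9 * 4 = 36 bytes (r4-r11 plus lr), so FRAME_SIZE is
 * (36 + 128 + 512 + 7) & -8 = 680, and STACK_ADDEND is 644.
 */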
3151
3152static void tcg_target_qemu_prologue(TCGContext *s)
3153{
3154    /* Calling convention requires us to save r4-r11 and lr.  */
3155    /* stmdb sp!, { r4 - r11, lr } */
3156    tcg_out_ldstm(s, COND_AL, INSN_STMDB, TCG_REG_CALL_STACK,
3157                  (1 << TCG_REG_R4) | (1 << TCG_REG_R5) | (1 << TCG_REG_R6) |
3158                  (1 << TCG_REG_R7) | (1 << TCG_REG_R8) | (1 << TCG_REG_R9) |
3159                  (1 << TCG_REG_R10) | (1 << TCG_REG_R11) | (1 << TCG_REG_R14));
3160
3161    /* Reserve callee argument and tcg temp space.  */
3162    tcg_out_dat_rI(s, COND_AL, ARITH_SUB, TCG_REG_CALL_STACK,
3163                   TCG_REG_CALL_STACK, STACK_ADDEND, 1);
3164    tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE,
3165                  CPU_TEMP_BUF_NLONGS * sizeof(long));
3166
3167    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
3168
3169    if (!tcg_use_softmmu && guest_base) {
3170        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_GUEST_BASE, guest_base);
3171        tcg_regset_set_reg(s->reserved_regs, TCG_REG_GUEST_BASE);
3172    }
3173
3174    tcg_out_b_reg(s, COND_AL, tcg_target_call_iarg_regs[1]);
3175
3176    /*
3177     * Return path for goto_ptr. Set return value to 0, a-la exit_tb,
3178     * and fall through to the rest of the epilogue.
3179     */
3180    tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
3181    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, 0);
3182    tcg_out_epilogue(s);
3183}
3184
3185static void tcg_out_epilogue(TCGContext *s)
3186{
3187    /* Release local stack frame.  */
3188    tcg_out_dat_rI(s, COND_AL, ARITH_ADD, TCG_REG_CALL_STACK,
3189                   TCG_REG_CALL_STACK, STACK_ADDEND, 1);
3190
3191    /* ldmia sp!, { r4 - r11, pc } */
3192    tcg_out_ldstm(s, COND_AL, INSN_LDMIA, TCG_REG_CALL_STACK,
3193                  (1 << TCG_REG_R4) | (1 << TCG_REG_R5) | (1 << TCG_REG_R6) |
3194                  (1 << TCG_REG_R7) | (1 << TCG_REG_R8) | (1 << TCG_REG_R9) |
3195                  (1 << TCG_REG_R10) | (1 << TCG_REG_R11) | (1 << TCG_REG_PC));
3196}
3197
3198static void tcg_out_tb_start(TCGContext *s)
3199{
3200    /* nothing to do */
3201}
3202
3203typedef struct {
3204    DebugFrameHeader h;
3205    uint8_t fde_def_cfa[4];
3206    uint8_t fde_reg_ofs[18];
3207} DebugFrame;
3208
3209#define ELF_HOST_MACHINE EM_ARM
3210
3211/* We're expecting a 2-byte uleb128 encoded value.  */
3212QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));
3213
3214static const DebugFrame debug_frame = {
3215    .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
3216    .h.cie.id = -1,
3217    .h.cie.version = 1,
3218    .h.cie.code_align = 1,
3219    .h.cie.data_align = 0x7c,             /* sleb128 -4 */
3220    .h.cie.return_column = 14,
3221
3222    /* Total FDE size does not include the "len" member.  */
3223    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),
3224
3225    .fde_def_cfa = {
3226        12, 13,                         /* DW_CFA_def_cfa sp, ... */
3227        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
3228        (FRAME_SIZE >> 7)
3229    },
3230    .fde_reg_ofs = {
3231        /* The following must match the stmdb in the prologue.  */
3232        0x8e, 1,                        /* DW_CFA_offset, lr, -4 */
3233        0x8b, 2,                        /* DW_CFA_offset, r11, -8 */
3234        0x8a, 3,                        /* DW_CFA_offset, r10, -12 */
3235        0x89, 4,                        /* DW_CFA_offset, r9, -16 */
3236        0x88, 5,                        /* DW_CFA_offset, r8, -20 */
3237        0x87, 6,                        /* DW_CFA_offset, r7, -24 */
3238        0x86, 7,                        /* DW_CFA_offset, r6, -28 */
3239        0x85, 8,                        /* DW_CFA_offset, r5, -32 */
3240        0x84, 9,                        /* DW_CFA_offset, r4, -36 */
3241    }
3242};
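/*
 * With the FRAME_SIZE of 680 computed under the assumptions above, the
 * uleb128 pair in fde_def_cfa is (680 & 0x7f) | 0x80 = 0xa8 followed
 * by 680 >> 7 = 0x05; the build assert earlier guarantees the value
 * always fits in two uleb128 bytes.
 */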
3243
3244void tcg_register_jit(const void *buf, size_t buf_size)
3245{
3246    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
3247}
3248