/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Andrzej Zaborowski
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "elf.h"
#include "../tcg-ldst.c.inc"
#include "../tcg-pool.c.inc"

int arm_arch = __ARM_ARCH;

#ifndef use_idiv_instructions
bool use_idiv_instructions;
#endif
#ifndef use_neon_instructions
bool use_neon_instructions;
#endif

#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%r0",  "%r1",  "%r2",  "%r3",  "%r4",  "%r5",  "%r6",  "%r7",
    "%r8",  "%r9",  "%r10", "%r11", "%r12", "%sp",  "%r14", "%pc",
    "%q0",  "%q1",  "%q2",  "%q3",  "%q4",  "%q5",  "%q6",  "%q7",
    "%q8",  "%q9",  "%q10", "%q11", "%q12", "%q13", "%q14", "%q15",
};
#endif

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R13,
    TCG_REG_R0,
    TCG_REG_R1,
    TCG_REG_R2,
    TCG_REG_R3,
    TCG_REG_R12,
    TCG_REG_R14,

    TCG_REG_Q0,
    TCG_REG_Q1,
    TCG_REG_Q2,
    TCG_REG_Q3,
    /* Q4 - Q7 are call-saved, and skipped. */
    TCG_REG_Q8,
    TCG_REG_Q9,
    TCG_REG_Q10,
    TCG_REG_Q11,
    TCG_REG_Q12,
    TCG_REG_Q13,
    TCG_REG_Q14,
    TCG_REG_Q15,
};

static const int tcg_target_call_iarg_regs[4] = {
    TCG_REG_R0, TCG_REG_R1, TCG_REG_R2, TCG_REG_R3
};

static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
{
    tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
    tcg_debug_assert(slot >= 0 && slot <= 3);
    return TCG_REG_R0 + slot;
}

#define TCG_REG_TMP  TCG_REG_R12
#define TCG_VEC_TMP  TCG_REG_Q15
#ifndef CONFIG_SOFTMMU
#define TCG_REG_GUEST_BASE  TCG_REG_R11
#endif

typedef enum {
    COND_EQ = 0x0,
    COND_NE = 0x1,
    COND_CS = 0x2,	/* Unsigned greater or equal */
    COND_CC = 0x3,	/* Unsigned less than */
    COND_MI = 0x4,	/* Negative */
    COND_PL = 0x5,	/* Zero or greater */
    COND_VS = 0x6,	/* Overflow */
    COND_VC = 0x7,	/* No overflow */
    COND_HI = 0x8,	/* Unsigned greater than */
    COND_LS = 0x9,	/* Unsigned less or equal */
    COND_GE = 0xa,
    COND_LT = 0xb,
    COND_GT = 0xc,
    COND_LE = 0xd,
    COND_AL = 0xe,
} ARMCond;

#define TO_CPSR (1 << 20)

#define SHIFT_IMM_LSL(im)	(((im) << 7) | 0x00)
#define SHIFT_IMM_LSR(im)	(((im) << 7) | 0x20)
#define SHIFT_IMM_ASR(im)	(((im) << 7) | 0x40)
#define SHIFT_IMM_ROR(im)	(((im) << 7) | 0x60)
#define SHIFT_REG_LSL(rs)	(((rs) << 8) | 0x10)
#define SHIFT_REG_LSR(rs)	(((rs) << 8) | 0x30)
#define SHIFT_REG_ASR(rs)	(((rs) << 8) | 0x50)
#define SHIFT_REG_ROR(rs)	(((rs) << 8) | 0x70)

typedef enum {
    ARITH_AND = 0x0 << 21,
    ARITH_EOR = 0x1 << 21,
    ARITH_SUB = 0x2 << 21,
    ARITH_RSB = 0x3 << 21,
    ARITH_ADD = 0x4 << 21,
    ARITH_ADC = 0x5 << 21,
    ARITH_SBC = 0x6 << 21,
    ARITH_RSC = 0x7 << 21,
    ARITH_TST = 0x8 << 21 | TO_CPSR,
    ARITH_CMP = 0xa << 21 | TO_CPSR,
    ARITH_CMN = 0xb << 21 | TO_CPSR,
    ARITH_ORR = 0xc << 21,
    ARITH_MOV = 0xd << 21,
    ARITH_BIC = 0xe << 21,
    ARITH_MVN = 0xf << 21,

    INSN_B         = 0x0a000000,

    INSN_CLZ       = 0x016f0f10,
    INSN_RBIT      = 0x06ff0f30,

    INSN_LDMIA     = 0x08b00000,
    INSN_STMDB     = 0x09200000,

    INSN_LDR_IMM   = 0x04100000,
    INSN_LDR_REG   = 0x06100000,
    INSN_STR_IMM   = 0x04000000,
    INSN_STR_REG   = 0x06000000,

    INSN_LDRH_IMM  = 0x005000b0,
    INSN_LDRH_REG  = 0x001000b0,
    INSN_LDRSH_IMM = 0x005000f0,
    INSN_LDRSH_REG = 0x001000f0,
    INSN_STRH_IMM  = 0x004000b0,
    INSN_STRH_REG  = 0x000000b0,

    INSN_LDRB_IMM  = 0x04500000,
    INSN_LDRB_REG  = 0x06500000,
    INSN_LDRSB_IMM = 0x005000d0,
    INSN_LDRSB_REG = 0x001000d0,
    INSN_STRB_IMM  = 0x04400000,
    INSN_STRB_REG  = 0x06400000,

    INSN_LDRD_IMM  = 0x004000d0,
    INSN_LDRD_REG  = 0x000000d0,
    INSN_STRD_IMM  = 0x004000f0,
    INSN_STRD_REG  = 0x000000f0,

    INSN_DMB_ISH   = 0xf57ff05b,
    INSN_DMB_MCR   = 0xee070fba,

    /* Architected nop introduced in v6k.  */
    /* ??? This is an MSR (imm) 0,0,0 insn.  Does anyone know whether it
       also just so happened to do nothing on pre-v6k, so that we don't
       need to conditionalize it?  */
    INSN_NOP_v6k   = 0xe320f000,
    /* Otherwise the assembler uses mov r0,r0 */
    INSN_NOP_v4    = (COND_AL << 28) | ARITH_MOV,

    INSN_VADD      = 0xf2000800,
    INSN_VAND      = 0xf2000110,
    INSN_VBIC      = 0xf2100110,
    INSN_VEOR      = 0xf3000110,
    INSN_VORN      = 0xf2300110,
    INSN_VORR      = 0xf2200110,
    INSN_VSUB      = 0xf3000800,
    INSN_VMUL      = 0xf2000910,
    INSN_VQADD     = 0xf2000010,
    INSN_VQADD_U   = 0xf3000010,
    INSN_VQSUB     = 0xf2000210,
    INSN_VQSUB_U   = 0xf3000210,
    INSN_VMAX      = 0xf2000600,
    INSN_VMAX_U    = 0xf3000600,
    INSN_VMIN      = 0xf2000610,
    INSN_VMIN_U    = 0xf3000610,

    INSN_VABS      = 0xf3b10300,
    INSN_VMVN      = 0xf3b00580,
    INSN_VNEG      = 0xf3b10380,

    INSN_VCEQ0     = 0xf3b10100,
    INSN_VCGT0     = 0xf3b10000,
    INSN_VCGE0     = 0xf3b10080,
    INSN_VCLE0     = 0xf3b10180,
    INSN_VCLT0     = 0xf3b10200,

    INSN_VCEQ      = 0xf3000810,
    INSN_VCGE      = 0xf2000310,
    INSN_VCGT      = 0xf2000300,
    INSN_VCGE_U    = 0xf3000310,
    INSN_VCGT_U    = 0xf3000300,

    INSN_VSHLI     = 0xf2800510,  /* VSHL (immediate) */
    INSN_VSARI     = 0xf2800010,  /* VSHR.S */
    INSN_VSHRI     = 0xf3800010,  /* VSHR.U */
    INSN_VSLI      = 0xf3800510,
    INSN_VSHL_S    = 0xf2000400,  /* VSHL.S (register) */
    INSN_VSHL_U    = 0xf3000400,  /* VSHL.U (register) */

    INSN_VBSL      = 0xf3100110,
    INSN_VBIT      = 0xf3200110,
    INSN_VBIF      = 0xf3300110,

    INSN_VTST      = 0xf2000810,

    INSN_VDUP_G    = 0xee800b10,  /* VDUP (ARM core register) */
    INSN_VDUP_S    = 0xf3b00c00,  /* VDUP (scalar) */
    INSN_VLDR_D    = 0xed100b00,  /* VLDR.64 */
    INSN_VLD1      = 0xf4200000,  /* VLD1 (multiple single elements) */
    INSN_VLD1R     = 0xf4a00c00,  /* VLD1 (single element to all lanes) */
    INSN_VST1      = 0xf4000000,  /* VST1 (multiple single elements) */
    INSN_VMOVI     = 0xf2800010,  /* VMOV (immediate) */
} ARMInsn;

#define INSN_NOP   (use_armv7_instructions ? INSN_NOP_v6k : INSN_NOP_v4)

static const uint8_t tcg_cond_to_arm_cond[] = {
    [TCG_COND_EQ] = COND_EQ,
    [TCG_COND_NE] = COND_NE,
    [TCG_COND_LT] = COND_LT,
    [TCG_COND_GE] = COND_GE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_GT,
    /* unsigned */
    [TCG_COND_LTU] = COND_CC,
    [TCG_COND_GEU] = COND_CS,
    [TCG_COND_LEU] = COND_LS,
    [TCG_COND_GTU] = COND_HI,
};

static int encode_imm(uint32_t imm);

/* TCG private relocation type: add with pc+imm8 */
#define R_ARM_PC8  11

/* TCG private relocation type: vldr with imm8 << 2 */
#define R_ARM_PC11 12

static bool reloc_pc24(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t offset = (tcg_ptr_byte_diff(target, src_rx) - 8) >> 2;

    if (offset == sextract32(offset, 0, 24)) {
        *src_rw = deposit32(*src_rw, 0, 24, offset);
        return true;
    }
    return false;
}
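
/*
 * Worked example (editor's note, an illustration rather than original
 * source text): a B instruction at 0x1000 targeting 0x2000 sees PC as
 * 0x1008, so reloc_pc24() encodes (0x2000 - 0x1000 - 8) >> 2 = 0x3fe
 * into the low 24 bits of the instruction word.
 */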

static bool reloc_pc13(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t offset = tcg_ptr_byte_diff(target, src_rx) - 8;

    if (offset >= -0xfff && offset <= 0xfff) {
        tcg_insn_unit insn = *src_rw;
        bool u = (offset >= 0);
        if (!u) {
            offset = -offset;
        }
        insn = deposit32(insn, 23, 1, u);
        insn = deposit32(insn, 0, 12, offset);
        *src_rw = insn;
        return true;
    }
    return false;
}

static bool reloc_pc11(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t offset = (tcg_ptr_byte_diff(target, src_rx) - 8) / 4;

    if (offset >= -0xff && offset <= 0xff) {
        tcg_insn_unit insn = *src_rw;
        bool u = (offset >= 0);
        if (!u) {
            offset = -offset;
        }
        insn = deposit32(insn, 23, 1, u);
        insn = deposit32(insn, 0, 8, offset);
        *src_rw = insn;
        return true;
    }
    return false;
}

static bool reloc_pc8(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t offset = tcg_ptr_byte_diff(target, src_rx) - 8;
    int imm12 = encode_imm(offset);

    if (imm12 >= 0) {
        *src_rw = deposit32(*src_rw, 0, 12, imm12);
        return true;
    }
    return false;
}

static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    tcg_debug_assert(addend == 0);
    switch (type) {
    case R_ARM_PC24:
        return reloc_pc24(code_ptr, (const tcg_insn_unit *)value);
    case R_ARM_PC13:
        return reloc_pc13(code_ptr, (const tcg_insn_unit *)value);
    case R_ARM_PC11:
        return reloc_pc11(code_ptr, (const tcg_insn_unit *)value);
    case R_ARM_PC8:
        return reloc_pc8(code_ptr, (const tcg_insn_unit *)value);
    default:
        g_assert_not_reached();
    }
}

#define TCG_CT_CONST_ARM  0x100
#define TCG_CT_CONST_INV  0x200
#define TCG_CT_CONST_NEG  0x400
#define TCG_CT_CONST_ZERO 0x800
#define TCG_CT_CONST_ORRI 0x1000
#define TCG_CT_CONST_ANDI 0x2000

#define ALL_GENERAL_REGS  0xffffu
#define ALL_VECTOR_REGS   0xffff0000u

/*
 * r0-r2 will be overwritten when reading the tlb entry (softmmu only),
 * and r0-r1 when doing the byte swapping, so don't use these.
 * r3 is removed for softmmu to avoid clashes with helper arguments.
 */
#ifdef CONFIG_SOFTMMU
#define ALL_QLOAD_REGS \
    (ALL_GENERAL_REGS & ~((1 << TCG_REG_R0) | (1 << TCG_REG_R1) | \
                          (1 << TCG_REG_R2) | (1 << TCG_REG_R3) | \
                          (1 << TCG_REG_R14)))
#define ALL_QSTORE_REGS \
    (ALL_GENERAL_REGS & ~((1 << TCG_REG_R0) | (1 << TCG_REG_R1) | \
                          (1 << TCG_REG_R2) | (1 << TCG_REG_R14) | \
                          ((TARGET_LONG_BITS == 64) << TCG_REG_R3)))
#else
#define ALL_QLOAD_REGS   ALL_GENERAL_REGS
#define ALL_QSTORE_REGS \
    (ALL_GENERAL_REGS & ~((1 << TCG_REG_R0) | (1 << TCG_REG_R1)))
#endif

/*
 * ARM immediates for ALU instructions are made of an unsigned 8-bit
 * value right-rotated by an even amount between 0 and 30.
 *
 * Return < 0 if @imm cannot be encoded, else the entire imm12 field.
 */
static int encode_imm(uint32_t imm)
{
    uint32_t rot, imm8;

    /* Simple case, no rotation required. */
    if ((imm & ~0xff) == 0) {
        return imm;
    }

    /* Next, try a simple even shift.  */
    rot = ctz32(imm) & ~1;
    imm8 = imm >> rot;
    rot = 32 - rot;
    if ((imm8 & ~0xff) == 0) {
        goto found;
    }

    /*
     * Finally, try harder with rotations.
     * The ctz test above will have taken care of rotates >= 8.
     */
    for (rot = 2; rot < 8; rot += 2) {
        imm8 = rol32(imm, rot);
        if ((imm8 & ~0xff) == 0) {
            goto found;
        }
    }
    /* Fail: imm cannot be encoded. */
    return -1;

 found:
    /* Note that rot is even, and we discard bit 0 by shifting by 7. */
    return rot << 7 | imm8;
}
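
/*
 * Worked examples for the encoding above (editor's illustration, not
 * part of the original source):
 *   0x000000ff -> 0x0ff  (no rotation)
 *   0x0000ff00 -> 0xcff  (imm8 = 0xff, right-rotation 24, field 0xc)
 *   0x00000101 -> -1     (set bits span more than 8 rotated positions)
 */
#if 0   /* hypothetical self-check, compiled out */
static void encode_imm_selfcheck(void)
{
    tcg_debug_assert(encode_imm(0x000000ff) == 0x0ff);
    tcg_debug_assert(encode_imm(0x0000ff00) == 0xcff);
    tcg_debug_assert(encode_imm(0x00000101) == -1);
}
#endif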

static int encode_imm_nofail(uint32_t imm)
{
    int ret = encode_imm(imm);
    tcg_debug_assert(ret >= 0);
    return ret;
}

static bool check_fit_imm(uint32_t imm)
{
    return encode_imm(imm) >= 0;
}

/* Return true if v16 is a valid 16-bit shifted immediate.  */
static bool is_shimm16(uint16_t v16, int *cmode, int *imm8)
{
    if (v16 == (v16 & 0xff)) {
        *cmode = 0x8;
        *imm8 = v16 & 0xff;
        return true;
    } else if (v16 == (v16 & 0xff00)) {
        *cmode = 0xa;
        *imm8 = v16 >> 8;
        return true;
    }
    return false;
}

/* Return true if v32 is a valid 32-bit shifted immediate.  */
static bool is_shimm32(uint32_t v32, int *cmode, int *imm8)
{
    if (v32 == (v32 & 0xff)) {
        *cmode = 0x0;
        *imm8 = v32 & 0xff;
        return true;
    } else if (v32 == (v32 & 0xff00)) {
        *cmode = 0x2;
        *imm8 = (v32 >> 8) & 0xff;
        return true;
    } else if (v32 == (v32 & 0xff0000)) {
        *cmode = 0x4;
        *imm8 = (v32 >> 16) & 0xff;
        return true;
    } else if (v32 == (v32 & 0xff000000)) {
        *cmode = 0x6;
        *imm8 = v32 >> 24;
        return true;
    }
    return false;
}
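
/*
 * Example (editor's illustration): v32 = 0x00ab0000 matches the third
 * case above, giving cmode = 0x4 and imm8 = 0xab, i.e. the byte 0xab
 * shifted left 16 bits within each 32-bit lane.
 */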

/* Return true if v32 is a valid 32-bit shifting ones immediate.  */
static bool is_soimm32(uint32_t v32, int *cmode, int *imm8)
{
    if ((v32 & 0xffff00ff) == 0xff) {
        *cmode = 0xc;
        *imm8 = (v32 >> 8) & 0xff;
        return true;
    } else if ((v32 & 0xff00ffff) == 0xffff) {
        *cmode = 0xd;
        *imm8 = (v32 >> 16) & 0xff;
        return true;
    }
    return false;
}

/*
 * Return non-zero if v32 can be formed by MOVI+ORR.
 * Place the parameters for MOVI in (cmode, imm8).
 * Return the cmode for ORR; the imm8 can be had via extraction from v32.
 */
static int is_shimm32_pair(uint32_t v32, int *cmode, int *imm8)
{
    int i;

    for (i = 6; i > 0; i -= 2) {
        /* Mask out one byte we can add with ORR.  */
        uint32_t tmp = v32 & ~(0xffu << (i * 4));
        if (is_shimm32(tmp, cmode, imm8) ||
            is_soimm32(tmp, cmode, imm8)) {
            break;
        }
    }
    return i;
}
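
/*
 * Example (editor's illustration): v32 = 0x00ff00ff is not a single
 * shifted immediate, but clearing the byte at bits [23:16] leaves
 * 0x000000ff = (cmode 0x0, imm8 0xff), so the loop returns i = 4 and
 * the caller emits MOVI of 0x000000ff followed by ORR of 0xff into
 * bits [23:16].
 */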

/* Return true if V is a valid 16-bit or 32-bit shifted immediate.  */
static bool is_shimm1632(uint32_t v32, int *cmode, int *imm8)
{
    if (v32 == deposit32(v32, 16, 16, v32)) {
        return is_shimm16(v32, cmode, imm8);
    } else {
        return is_shimm32(v32, cmode, imm8);
    }
}

/* Test if a constant matches the constraint.
 * TODO: define constraints for:
 *
 * ldr/str offset:   between -0xfff and 0xfff
 * ldrh/strh offset: between -0xff and 0xff
 * mov operand2:     values represented with x << (2 * y), x < 0x100
 * add, sub, eor...: ditto
 */
static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
{
    if (ct & TCG_CT_CONST) {
        return 1;
    } else if ((ct & TCG_CT_CONST_ARM) && check_fit_imm(val)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_INV) && check_fit_imm(~val)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_NEG) && check_fit_imm(-val)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return 1;
    }

    switch (ct & (TCG_CT_CONST_ORRI | TCG_CT_CONST_ANDI)) {
    case 0:
        break;
    case TCG_CT_CONST_ANDI:
        val = ~val;
        /* fallthru */
    case TCG_CT_CONST_ORRI:
        if (val == deposit64(val, 32, 32, val)) {
            int cmode, imm8;
            return is_shimm1632(val, &cmode, &imm8);
        }
        break;
    default:
        /* Both bits should not be set for the same insn.  */
        g_assert_not_reached();
    }

    return 0;
}

static void tcg_out_b_imm(TCGContext *s, ARMCond cond, int32_t offset)
{
    tcg_out32(s, (cond << 28) | INSN_B |
                    (((offset - 8) >> 2) & 0x00ffffff));
}

static void tcg_out_bl_imm(TCGContext *s, ARMCond cond, int32_t offset)
{
    tcg_out32(s, (cond << 28) | 0x0b000000 |
                    (((offset - 8) >> 2) & 0x00ffffff));
}

static void tcg_out_blx_reg(TCGContext *s, ARMCond cond, TCGReg rn)
{
    tcg_out32(s, (cond << 28) | 0x012fff30 | rn);
}

static void tcg_out_blx_imm(TCGContext *s, int32_t offset)
{
    tcg_out32(s, 0xfa000000 | ((offset & 2) << 23) |
                (((offset - 8) >> 2) & 0x00ffffff));
}

static void tcg_out_dat_reg(TCGContext *s, ARMCond cond, ARMInsn opc,
                            TCGReg rd, TCGReg rn, TCGReg rm, int shift)
{
    tcg_out32(s, (cond << 28) | (0 << 25) | opc |
                    (rn << 16) | (rd << 12) | shift | rm);
}

static void tcg_out_mov_reg(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rm)
{
    /* Simple reg-reg move, optimising out the 'do nothing' case */
    if (rd != rm) {
        tcg_out_dat_reg(s, cond, ARITH_MOV, rd, 0, rm, SHIFT_IMM_LSL(0));
    }
}

static void tcg_out_bx_reg(TCGContext *s, ARMCond cond, TCGReg rn)
{
    tcg_out32(s, (cond << 28) | 0x012fff10 | rn);
}

static void tcg_out_b_reg(TCGContext *s, ARMCond cond, TCGReg rn)
{
    /*
     * Unless the C portion of QEMU is compiled as thumb, we don't need
     * true BX semantics; merely a branch to an address held in a register.
     */
    tcg_out_bx_reg(s, cond, rn);
}

static void tcg_out_dat_imm(TCGContext *s, ARMCond cond, ARMInsn opc,
                            TCGReg rd, TCGReg rn, int im)
{
    tcg_out32(s, (cond << 28) | (1 << 25) | opc |
                    (rn << 16) | (rd << 12) | im);
}

static void tcg_out_ldstm(TCGContext *s, ARMCond cond, ARMInsn opc,
                          TCGReg rn, uint16_t mask)
{
    tcg_out32(s, (cond << 28) | opc | (rn << 16) | mask);
}

/* Note that this routine is used for both LDR and LDRH formats, so we do
   not wish to include an immediate shift at this point.  */
static void tcg_out_memop_r(TCGContext *s, ARMCond cond, ARMInsn opc, TCGReg rt,
                            TCGReg rn, TCGReg rm, bool u, bool p, bool w)
{
    tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24)
              | (w << 21) | (rn << 16) | (rt << 12) | rm);
}

static void tcg_out_memop_8(TCGContext *s, ARMCond cond, ARMInsn opc, TCGReg rt,
                            TCGReg rn, int imm8, bool p, bool w)
{
    bool u = 1;
    if (imm8 < 0) {
        imm8 = -imm8;
        u = 0;
    }
    tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) | (w << 21) |
              (rn << 16) | (rt << 12) | ((imm8 & 0xf0) << 4) | (imm8 & 0xf));
}

static void tcg_out_memop_12(TCGContext *s, ARMCond cond, ARMInsn opc,
                             TCGReg rt, TCGReg rn, int imm12, bool p, bool w)
{
    bool u = 1;
    if (imm12 < 0) {
        imm12 = -imm12;
        u = 0;
    }
    tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) | (w << 21) |
              (rn << 16) | (rt << 12) | imm12);
}

static void tcg_out_ld32_12(TCGContext *s, ARMCond cond, TCGReg rt,
                            TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_LDR_IMM, rt, rn, imm12, 1, 0);
}

static void tcg_out_st32_12(TCGContext *s, ARMCond cond, TCGReg rt,
                            TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_STR_IMM, rt, rn, imm12, 1, 0);
}

static void tcg_out_ld32_r(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_st32_r(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_ldrd_8(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRD_IMM, rt, rn, imm8, 1, 0);
}

static void tcg_out_ldrd_r(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRD_REG, rt, rn, rm, 1, 1, 0);
}

static void __attribute__((unused))
tcg_out_ldrd_rwb(TCGContext *s, ARMCond cond, TCGReg rt, TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRD_REG, rt, rn, rm, 1, 1, 1);
}

static void tcg_out_strd_8(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_STRD_IMM, rt, rn, imm8, 1, 0);
}

static void tcg_out_strd_r(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STRD_REG, rt, rn, rm, 1, 1, 0);
}

/* Register pre-increment with base writeback.  */
static void tcg_out_ld32_rwb(TCGContext *s, ARMCond cond, TCGReg rt,
                             TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 1);
}

static void tcg_out_st32_rwb(TCGContext *s, ARMCond cond, TCGReg rt,
                             TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 1);
}

static void tcg_out_ld16u_8(TCGContext *s, ARMCond cond, TCGReg rt,
                            TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRH_IMM, rt, rn, imm8, 1, 0);
}

static void tcg_out_st16_8(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_STRH_IMM, rt, rn, imm8, 1, 0);
}

static void tcg_out_ld16u_r(TCGContext *s, ARMCond cond, TCGReg rt,
                            TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRH_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_st16_r(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STRH_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_ld16s_8(TCGContext *s, ARMCond cond, TCGReg rt,
                            TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRSH_IMM, rt, rn, imm8, 1, 0);
}

static void tcg_out_ld16s_r(TCGContext *s, ARMCond cond, TCGReg rt,
                            TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRSH_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_ld8_12(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_LDRB_IMM, rt, rn, imm12, 1, 0);
}

static void tcg_out_st8_12(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_STRB_IMM, rt, rn, imm12, 1, 0);
}

static void tcg_out_ld8_r(TCGContext *s, ARMCond cond, TCGReg rt,
                          TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRB_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_st8_r(TCGContext *s, ARMCond cond, TCGReg rt,
                          TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STRB_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_ld8s_8(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRSB_IMM, rt, rn, imm8, 1, 0);
}

static void tcg_out_ld8s_r(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRSB_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_movi_pool(TCGContext *s, ARMCond cond,
                              TCGReg rd, uint32_t arg)
{
    new_pool_label(s, arg, R_ARM_PC13, s->code_ptr, 0);
    tcg_out_ld32_12(s, cond, rd, TCG_REG_PC, 0);
}

static void tcg_out_movi32(TCGContext *s, ARMCond cond,
                           TCGReg rd, uint32_t arg)
{
    int imm12, diff, opc, sh1, sh2;
    uint32_t tt0, tt1, tt2;

    /* Check a single MOV/MVN before anything else.  */
    imm12 = encode_imm(arg);
    if (imm12 >= 0) {
        tcg_out_dat_imm(s, cond, ARITH_MOV, rd, 0, imm12);
        return;
    }
    imm12 = encode_imm(~arg);
    if (imm12 >= 0) {
        tcg_out_dat_imm(s, cond, ARITH_MVN, rd, 0, imm12);
        return;
    }

    /* Check for a pc-relative address.  This will usually be the TB,
       or within the TB, which is immediately before the code block.  */
    diff = tcg_pcrel_diff(s, (void *)arg) - 8;
    if (diff >= 0) {
        imm12 = encode_imm(diff);
        if (imm12 >= 0) {
            tcg_out_dat_imm(s, cond, ARITH_ADD, rd, TCG_REG_PC, imm12);
            return;
        }
    } else {
        imm12 = encode_imm(-diff);
        if (imm12 >= 0) {
            tcg_out_dat_imm(s, cond, ARITH_SUB, rd, TCG_REG_PC, imm12);
            return;
        }
    }

    /* Use movw + movt.  */
    if (use_armv7_instructions) {
        /* movw */
        tcg_out32(s, (cond << 28) | 0x03000000 | (rd << 12)
                  | ((arg << 4) & 0x000f0000) | (arg & 0xfff));
        if (arg & 0xffff0000) {
            /* movt */
            tcg_out32(s, (cond << 28) | 0x03400000 | (rd << 12)
                      | ((arg >> 12) & 0x000f0000) | ((arg >> 16) & 0xfff));
        }
        return;
    }

    /* Look for sequences of two insns.  If we have lots of 1's, we can
       shorten the sequence by beginning with mvn and then clearing
       higher bits with eor.  */
    tt0 = arg;
    opc = ARITH_MOV;
    if (ctpop32(arg) > 16) {
        tt0 = ~arg;
        opc = ARITH_MVN;
    }
    sh1 = ctz32(tt0) & ~1;
    tt1 = tt0 & ~(0xff << sh1);
    sh2 = ctz32(tt1) & ~1;
    tt2 = tt1 & ~(0xff << sh2);
    if (tt2 == 0) {
        int rot;

        rot = ((32 - sh1) << 7) & 0xf00;
        tcg_out_dat_imm(s, cond, opc, rd,  0, ((tt0 >> sh1) & 0xff) | rot);
        rot = ((32 - sh2) << 7) & 0xf00;
        tcg_out_dat_imm(s, cond, ARITH_EOR, rd, rd,
                        ((tt0 >> sh2) & 0xff) | rot);
        return;
    }

    /* Otherwise, drop it into the constant pool.  */
    tcg_out_movi_pool(s, cond, rd, arg);
}
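
/*
 * Example of the pre-v7 two-insn path (editor's illustration):
 * arg = 0xff0000ff has ctpop = 16, so opc stays ARITH_MOV; sh1 = 0
 * strips the low byte and sh2 = 24 strips the high byte, leaving
 * tt2 = 0.  The emitted sequence is:
 *     mov  rd, #0xff
 *     eor  rd, rd, #0xff000000
 */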

/*
 * Emit either the reg,imm or reg,reg form of a data-processing insn.
 * rhs must satisfy the "rI" constraint.
 */
static void tcg_out_dat_rI(TCGContext *s, ARMCond cond, ARMInsn opc,
                           TCGReg dst, TCGReg lhs, TCGArg rhs, int rhs_is_const)
{
    if (rhs_is_const) {
        tcg_out_dat_imm(s, cond, opc, dst, lhs, encode_imm_nofail(rhs));
    } else {
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
    }
}

/*
 * Emit either the reg,imm or reg,reg form of a data-processing insn.
 * rhs must satisfy the "rIK" constraint.
 */
static void tcg_out_dat_rIK(TCGContext *s, ARMCond cond, ARMInsn opc,
                            ARMInsn opinv, TCGReg dst, TCGReg lhs, TCGArg rhs,
                            bool rhs_is_const)
{
    if (rhs_is_const) {
        int imm12 = encode_imm(rhs);
        if (imm12 < 0) {
            imm12 = encode_imm_nofail(~rhs);
            opc = opinv;
        }
        tcg_out_dat_imm(s, cond, opc, dst, lhs, imm12);
    } else {
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
    }
}

static void tcg_out_dat_rIN(TCGContext *s, ARMCond cond, ARMInsn opc,
                            ARMInsn opneg, TCGReg dst, TCGReg lhs, TCGArg rhs,
                            bool rhs_is_const)
{
    /* Emit either the reg,imm or reg,reg form of a data-processing insn.
     * rhs must satisfy the "rIN" constraint.
     */
    if (rhs_is_const) {
        int imm12 = encode_imm(rhs);
        if (imm12 < 0) {
            imm12 = encode_imm_nofail(-rhs);
            opc = opneg;
        }
        tcg_out_dat_imm(s, cond, opc, dst, lhs, imm12);
    } else {
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
    }
}

static void tcg_out_mul32(TCGContext *s, ARMCond cond, TCGReg rd,
                          TCGReg rn, TCGReg rm)
{
    /* mul */
    tcg_out32(s, (cond << 28) | 0x90 | (rd << 16) | (rm << 8) | rn);
}

static void tcg_out_umull32(TCGContext *s, ARMCond cond, TCGReg rd0,
                            TCGReg rd1, TCGReg rn, TCGReg rm)
{
    /* umull */
    tcg_out32(s, (cond << 28) | 0x00800090 |
              (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn);
}

static void tcg_out_smull32(TCGContext *s, ARMCond cond, TCGReg rd0,
                            TCGReg rd1, TCGReg rn, TCGReg rm)
{
    /* smull */
    tcg_out32(s, (cond << 28) | 0x00c00090 |
              (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn);
}

static void tcg_out_sdiv(TCGContext *s, ARMCond cond,
                         TCGReg rd, TCGReg rn, TCGReg rm)
{
    tcg_out32(s, 0x0710f010 | (cond << 28) | (rd << 16) | rn | (rm << 8));
}

static void tcg_out_udiv(TCGContext *s, ARMCond cond,
                         TCGReg rd, TCGReg rn, TCGReg rm)
{
    tcg_out32(s, 0x0730f010 | (cond << 28) | (rd << 16) | rn | (rm << 8));
}

static void tcg_out_ext8s(TCGContext *s, TCGType t, TCGReg rd, TCGReg rn)
{
    /* sxtb */
    tcg_out32(s, 0x06af0070 | (COND_AL << 28) | (rd << 12) | rn);
}

static void tcg_out_ext8u(TCGContext *s, TCGReg rd, TCGReg rn)
{
    tcg_out_dat_imm(s, COND_AL, ARITH_AND, rd, rn, 0xff);
}

static void __attribute__((unused))
tcg_out_ext8u_cond(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn)
{
    tcg_out_dat_imm(s, cond, ARITH_AND, rd, rn, 0xff);
}

static void tcg_out_ext16s(TCGContext *s, TCGType t, TCGReg rd, TCGReg rn)
{
    /* sxth */
    tcg_out32(s, 0x06bf0070 | (COND_AL << 28) | (rd << 12) | rn);
}

static void tcg_out_ext16u_cond(TCGContext *s, ARMCond cond,
                                TCGReg rd, TCGReg rn)
{
    /* uxth */
    tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | rn);
}

static void tcg_out_ext16u(TCGContext *s, TCGReg rd, TCGReg rn)
{
    tcg_out_ext16u_cond(s, COND_AL, rd, rn);
}

static void tcg_out_ext32s(TCGContext *s, TCGReg rd, TCGReg rn)
{
    g_assert_not_reached();
}

static void tcg_out_ext32u(TCGContext *s, TCGReg rd, TCGReg rn)
{
    g_assert_not_reached();
}

static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg rd, TCGReg rn)
{
    g_assert_not_reached();
}

static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg rd, TCGReg rn)
{
    g_assert_not_reached();
}

static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rn)
{
    g_assert_not_reached();
}

static void tcg_out_bswap16(TCGContext *s, ARMCond cond,
                            TCGReg rd, TCGReg rn, int flags)
{
    if (flags & TCG_BSWAP_OS) {
        /* revsh */
        tcg_out32(s, 0x06ff0fb0 | (cond << 28) | (rd << 12) | rn);
        return;
    }

    /* rev16 */
    tcg_out32(s, 0x06bf0fb0 | (cond << 28) | (rd << 12) | rn);
    if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
        /* uxth */
        tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | rd);
    }
}
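
/*
 * Example (editor's illustration): a bswap16 with only TCG_BSWAP_OZ
 * emits "rev16 rd, rn" followed by "uxth rd, rd" to re-zero bits
 * [31:16]; when TCG_BSWAP_IZ is also set, rev16 of a zero-extended
 * input is already zero-extended, so the uxth is elided.
 */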

static void tcg_out_bswap32(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn)
{
    /* rev */
    tcg_out32(s, 0x06bf0f30 | (cond << 28) | (rd << 12) | rn);
}

static void tcg_out_deposit(TCGContext *s, ARMCond cond, TCGReg rd,
                            TCGArg a1, int ofs, int len, bool const_a1)
{
    if (const_a1) {
        /* bfi becomes bfc with rn == 15.  */
        a1 = 15;
    }
    /* bfi/bfc */
    tcg_out32(s, 0x07c00010 | (cond << 28) | (rd << 12) | a1
              | (ofs << 7) | ((ofs + len - 1) << 16));
}

static void tcg_out_extract(TCGContext *s, ARMCond cond, TCGReg rd,
                            TCGReg rn, int ofs, int len)
{
    /* ubfx */
    tcg_out32(s, 0x07e00050 | (cond << 28) | (rd << 12) | rn
              | (ofs << 7) | ((len - 1) << 16));
}

static void tcg_out_sextract(TCGContext *s, ARMCond cond, TCGReg rd,
                             TCGReg rn, int ofs, int len)
{
    /* sbfx */
    tcg_out32(s, 0x07a00050 | (cond << 28) | (rd << 12) | rn
              | (ofs << 7) | ((len - 1) << 16));
}

static void tcg_out_ld32u(TCGContext *s, ARMCond cond,
                          TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld32_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_ld32_12(s, cond, rd, rn, offset);
    }
}

static void tcg_out_st32(TCGContext *s, ARMCond cond,
                         TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_st32_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_st32_12(s, cond, rd, rn, offset);
    }
}

static void tcg_out_ld16u(TCGContext *s, ARMCond cond,
                          TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld16u_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_ld16u_8(s, cond, rd, rn, offset);
    }
}

static void tcg_out_ld16s(TCGContext *s, ARMCond cond,
                          TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld16s_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_ld16s_8(s, cond, rd, rn, offset);
    }
}

static void tcg_out_st16(TCGContext *s, ARMCond cond,
                         TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_st16_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_st16_8(s, cond, rd, rn, offset);
    }
}

static void tcg_out_ld8u(TCGContext *s, ARMCond cond,
                         TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld8_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_ld8_12(s, cond, rd, rn, offset);
    }
}

static void tcg_out_ld8s(TCGContext *s, ARMCond cond,
                         TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld8s_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_ld8s_8(s, cond, rd, rn, offset);
    }
}

static void tcg_out_st8(TCGContext *s, ARMCond cond,
                        TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_st8_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_st8_12(s, cond, rd, rn, offset);
    }
}

/*
 * The _goto case is normally between TBs within the same code buffer, and
 * with the code buffer limited to 16MB we wouldn't need the long case.
 * But we also use it for the tail-call to the qemu_ld/st helpers, which does.
 */
static void tcg_out_goto(TCGContext *s, ARMCond cond, const tcg_insn_unit *addr)
{
    intptr_t addri = (intptr_t)addr;
    ptrdiff_t disp = tcg_pcrel_diff(s, addr);
    bool arm_mode = !(addri & 1);

    if (arm_mode && disp - 8 < 0x01fffffd && disp - 8 > -0x01fffffd) {
        tcg_out_b_imm(s, cond, disp);
        return;
    }

    /* LDR is interworking from v5t. */
    tcg_out_movi_pool(s, cond, TCG_REG_PC, addri);
}

/*
 * The call case is mostly used for helpers - so it's not unreasonable
 * for them to be beyond branch range.
 */
static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *addr)
{
    intptr_t addri = (intptr_t)addr;
    ptrdiff_t disp = tcg_pcrel_diff(s, addr);
    bool arm_mode = !(addri & 1);

    if (disp - 8 < 0x02000000 && disp - 8 >= -0x02000000) {
        if (arm_mode) {
            tcg_out_bl_imm(s, COND_AL, disp);
        } else {
            tcg_out_blx_imm(s, disp);
        }
        return;
    }

    tcg_out_movi32(s, COND_AL, TCG_REG_TMP, addri);
    tcg_out_blx_reg(s, COND_AL, TCG_REG_TMP);
}

static void tcg_out_call(TCGContext *s, const tcg_insn_unit *addr,
                         const TCGHelperInfo *info)
{
    tcg_out_call_int(s, addr);
}

static void tcg_out_goto_label(TCGContext *s, ARMCond cond, TCGLabel *l)
{
    if (l->has_value) {
        tcg_out_goto(s, cond, l->u.value_ptr);
    } else {
        tcg_out_reloc(s, s->code_ptr, R_ARM_PC24, l, 0);
        tcg_out_b_imm(s, cond, 0);
    }
}

static void tcg_out_mb(TCGContext *s, TCGArg a0)
{
    if (use_armv7_instructions) {
        tcg_out32(s, INSN_DMB_ISH);
    } else {
        tcg_out32(s, INSN_DMB_MCR);
    }
}

static TCGCond tcg_out_cmp2(TCGContext *s, const TCGArg *args,
                            const int *const_args)
{
    TCGReg al = args[0];
    TCGReg ah = args[1];
    TCGArg bl = args[2];
    TCGArg bh = args[3];
    TCGCond cond = args[4];
    int const_bl = const_args[2];
    int const_bh = const_args[3];

    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
    case TCG_COND_LTU:
    case TCG_COND_LEU:
    case TCG_COND_GTU:
    case TCG_COND_GEU:
        /* We perform a conditional comparison.  If the high half is
           equal, then overwrite the flags with the comparison of the
           low half.  The resulting flags cover the whole.  */
        tcg_out_dat_rI(s, COND_AL, ARITH_CMP, 0, ah, bh, const_bh);
        tcg_out_dat_rI(s, COND_EQ, ARITH_CMP, 0, al, bl, const_bl);
        return cond;

    case TCG_COND_LT:
    case TCG_COND_GE:
        /* We perform a double-word subtraction and examine the result.
           We do not actually need the result of the subtract, so the
           low part "subtract" is a compare.  For the high half we have
           no choice but to compute into a temporary.  */
        tcg_out_dat_rI(s, COND_AL, ARITH_CMP, 0, al, bl, const_bl);
        tcg_out_dat_rI(s, COND_AL, ARITH_SBC | TO_CPSR,
                       TCG_REG_TMP, ah, bh, const_bh);
        return cond;

    case TCG_COND_LE:
    case TCG_COND_GT:
        /* Similar, but with swapped arguments, via reversed subtract.  */
        tcg_out_dat_rI(s, COND_AL, ARITH_RSB | TO_CPSR,
                       TCG_REG_TMP, al, bl, const_bl);
        tcg_out_dat_rI(s, COND_AL, ARITH_RSC | TO_CPSR,
                       TCG_REG_TMP, ah, bh, const_bh);
        return tcg_swap_cond(cond);

    default:
        g_assert_not_reached();
    }
}
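
/*
 * Example (editor's illustration): for TCG_COND_LTU on ah:al vs bh:bl,
 * the pair "cmp ah, bh; cmpeq al, bl" leaves the carry flag clear
 * exactly when the 64-bit value is lower, so the caller tests the
 * returned condition with COND_CC as usual.
 */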

/*
 * Note that TCGReg references Q-registers.
 * D-regno = 2 * Q-regno, so shift left by 1 while inserting.
 */
static uint32_t encode_vd(TCGReg rd)
{
    tcg_debug_assert(rd >= TCG_REG_Q0);
    return (extract32(rd, 3, 1) << 22) | (extract32(rd, 0, 3) << 13);
}

static uint32_t encode_vn(TCGReg rn)
{
    tcg_debug_assert(rn >= TCG_REG_Q0);
    return (extract32(rn, 3, 1) << 7) | (extract32(rn, 0, 3) << 17);
}

static uint32_t encode_vm(TCGReg rm)
{
    tcg_debug_assert(rm >= TCG_REG_Q0);
    return (extract32(rm, 3, 1) << 5) | (extract32(rm, 0, 3) << 1);
}
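
/*
 * Example (editor's illustration, assuming the usual TCG_REG_Q0 base):
 * TCG_REG_Q5 maps to D10 = 0b01010.  encode_vd() places Q-regno bits
 * [2:0] (= 5) at bit 13 and bit [3] (= 0) at bit 22, i.e. the D:Vd
 * split with Vd's low bit forced to zero -- a Q register always
 * begins at an even D register.
 */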

static void tcg_out_vreg2(TCGContext *s, ARMInsn insn, int q, int vece,
                          TCGReg d, TCGReg m)
{
    tcg_out32(s, insn | (vece << 18) | (q << 6) |
              encode_vd(d) | encode_vm(m));
}

static void tcg_out_vreg3(TCGContext *s, ARMInsn insn, int q, int vece,
                          TCGReg d, TCGReg n, TCGReg m)
{
    tcg_out32(s, insn | (vece << 20) | (q << 6) |
              encode_vd(d) | encode_vn(n) | encode_vm(m));
}

static void tcg_out_vmovi(TCGContext *s, TCGReg rd,
                          int q, int op, int cmode, uint8_t imm8)
{
    tcg_out32(s, INSN_VMOVI | encode_vd(rd) | (q << 6) | (op << 5)
              | (cmode << 8) | extract32(imm8, 0, 4)
              | (extract32(imm8, 4, 3) << 16)
              | (extract32(imm8, 7, 1) << 24));
}

static void tcg_out_vshifti(TCGContext *s, ARMInsn insn, int q,
                            TCGReg rd, TCGReg rm, int l_imm6)
{
    tcg_out32(s, insn | (q << 6) | encode_vd(rd) | encode_vm(rm) |
              (extract32(l_imm6, 6, 1) << 7) |
              (extract32(l_imm6, 0, 6) << 16));
}

static void tcg_out_vldst(TCGContext *s, ARMInsn insn,
                          TCGReg rd, TCGReg rn, int offset)
{
    if (offset != 0) {
        if (check_fit_imm(offset) || check_fit_imm(-offset)) {
            tcg_out_dat_rIN(s, COND_AL, ARITH_ADD, ARITH_SUB,
                            TCG_REG_TMP, rn, offset, true);
        } else {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, offset);
            tcg_out_dat_reg(s, COND_AL, ARITH_ADD,
                            TCG_REG_TMP, TCG_REG_TMP, rn, 0);
        }
        rn = TCG_REG_TMP;
    }
    tcg_out32(s, insn | (rn << 16) | encode_vd(rd) | 0xf);
}

#ifdef CONFIG_SOFTMMU
/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
 *                                     int mmu_idx, uintptr_t ra)
 */
static void * const qemu_ld_helpers[MO_SSIZE + 1] = {
    [MO_UB]   = helper_ret_ldub_mmu,
    [MO_SB]   = helper_ret_ldsb_mmu,
#if HOST_BIG_ENDIAN
    [MO_UW] = helper_be_lduw_mmu,
    [MO_UL] = helper_be_ldul_mmu,
    [MO_UQ] = helper_be_ldq_mmu,
    [MO_SW] = helper_be_ldsw_mmu,
    [MO_SL] = helper_be_ldul_mmu,
#else
    [MO_UW] = helper_le_lduw_mmu,
    [MO_UL] = helper_le_ldul_mmu,
    [MO_UQ] = helper_le_ldq_mmu,
    [MO_SW] = helper_le_ldsw_mmu,
    [MO_SL] = helper_le_ldul_mmu,
#endif
};

/* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
 *                                     uintxx_t val, int mmu_idx, uintptr_t ra)
 */
static void * const qemu_st_helpers[MO_SIZE + 1] = {
    [MO_8]   = helper_ret_stb_mmu,
#if HOST_BIG_ENDIAN
    [MO_16] = helper_be_stw_mmu,
    [MO_32] = helper_be_stl_mmu,
    [MO_64] = helper_be_stq_mmu,
#else
    [MO_16] = helper_le_stw_mmu,
    [MO_32] = helper_le_stl_mmu,
    [MO_64] = helper_le_stq_mmu,
#endif
};

/* Helper routines for marshalling helper function arguments into
 * the correct registers and stack.
 * argreg is where we want to put this argument, arg is the argument itself.
 * Return value is the updated argreg ready for the next call.
 * Note that argregs 0..3 are real registers, and 4+ are stack slots.
 *
 * We provide routines for arguments which are: immediate, 32 bit
 * value in register, 16 and 8 bit values in register (which must be zero
 * extended before use) and 64 bit value in a lo:hi register pair.
 */
#define DEFINE_TCG_OUT_ARG(NAME, ARGTYPE, MOV_ARG, EXT_ARG)                \
static TCGReg NAME(TCGContext *s, TCGReg argreg, ARGTYPE arg)              \
{                                                                          \
    if (argreg < 4) {                                                      \
        MOV_ARG(s, COND_AL, argreg, arg);                                  \
    } else {                                                               \
        int ofs = (argreg - 4) * 4;                                        \
        EXT_ARG;                                                           \
        tcg_debug_assert(ofs + 4 <= TCG_STATIC_CALL_ARGS_SIZE);            \
        tcg_out_st32_12(s, COND_AL, arg, TCG_REG_CALL_STACK, ofs);         \
    }                                                                      \
    return argreg + 1;                                                     \
}

DEFINE_TCG_OUT_ARG(tcg_out_arg_imm32, uint32_t, tcg_out_movi32,
    (tcg_out_movi32(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP))
DEFINE_TCG_OUT_ARG(tcg_out_arg_reg8, TCGReg, tcg_out_ext8u_cond,
    (tcg_out_ext8u_cond(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP))
DEFINE_TCG_OUT_ARG(tcg_out_arg_reg16, TCGReg, tcg_out_ext16u_cond,
    (tcg_out_ext16u_cond(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP))
DEFINE_TCG_OUT_ARG(tcg_out_arg_reg32, TCGReg, tcg_out_mov_reg, )

static TCGReg tcg_out_arg_reg64(TCGContext *s, TCGReg argreg,
                                TCGReg arglo, TCGReg arghi)
{
    /* 64 bit arguments must go in even/odd register pairs
     * and in 8-aligned stack slots.
     */
    if (argreg & 1) {
        argreg++;
    }
    if (argreg >= 4 && (arglo & 1) == 0 && arghi == arglo + 1) {
        tcg_out_strd_8(s, COND_AL, arglo,
                       TCG_REG_CALL_STACK, (argreg - 4) * 4);
        return argreg + 2;
    } else {
        argreg = tcg_out_arg_reg32(s, argreg, arglo);
        argreg = tcg_out_arg_reg32(s, argreg, arghi);
        return argreg;
    }
}
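
/*
 * Example (editor's illustration): with TARGET_LONG_BITS == 64, the
 * load helper's arguments land as env -> r0, addr -> r2:r3 (r1 is
 * skipped by the even/odd pairing rule), then oi and the return
 * address spill to stack slots 0 and 1.
 */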

#define TLB_SHIFT	(CPU_TLB_ENTRY_BITS + CPU_TLB_BITS)

/* We expect to use a 9-bit sign-magnitude negative offset from ENV.  */
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -256);

/* These offsets are built into the LDRD below.  */
QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, mask) != 0);
QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, table) != 4);

/* Load and compare a TLB entry, leaving the flags set.  Returns the register
   containing the addend of the tlb entry.  Clobbers R0, R1, R2, TMP.  */

static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
                               MemOp opc, int mem_index, bool is_load)
{
    int cmp_off = (is_load ? offsetof(CPUTLBEntry, addr_read)
                   : offsetof(CPUTLBEntry, addr_write));
    int fast_off = TLB_MASK_TABLE_OFS(mem_index);
    unsigned s_mask = (1 << (opc & MO_SIZE)) - 1;
    unsigned a_mask = (1 << get_alignment_bits(opc)) - 1;
    TCGReg t_addr;

    /* Load env_tlb(env)->f[mmu_idx].{mask,table} into {r0,r1}.  */
    tcg_out_ldrd_8(s, COND_AL, TCG_REG_R0, TCG_AREG0, fast_off);

    /* Extract the tlb index from the address into R0.  */
    tcg_out_dat_reg(s, COND_AL, ARITH_AND, TCG_REG_R0, TCG_REG_R0, addrlo,
                    SHIFT_IMM_LSR(TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS));

    /*
     * Add the tlb_table pointer, creating the CPUTLBEntry address in R1.
     * Load the tlb comparator into R2/R3 and the fast path addend into R1.
     */
    if (cmp_off == 0) {
        if (TARGET_LONG_BITS == 64) {
            tcg_out_ldrd_rwb(s, COND_AL, TCG_REG_R2, TCG_REG_R1, TCG_REG_R0);
        } else {
            tcg_out_ld32_rwb(s, COND_AL, TCG_REG_R2, TCG_REG_R1, TCG_REG_R0);
        }
    } else {
        tcg_out_dat_reg(s, COND_AL, ARITH_ADD,
                        TCG_REG_R1, TCG_REG_R1, TCG_REG_R0, 0);
        if (TARGET_LONG_BITS == 64) {
            tcg_out_ldrd_8(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off);
        } else {
            tcg_out_ld32_12(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off);
        }
    }

    /* Load the tlb addend.  */
    tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R1,
                    offsetof(CPUTLBEntry, addend));

    /*
     * Check alignment, check comparators.
     * Do this in 2-4 insns.  Use MOVW for v7, if possible,
     * to reduce the number of sequential conditional instructions.
     * Almost all guests have at least 4k pages, which means that we need
     * to clear at least 9 bits even for an 8-byte access, which means it
     * isn't worth checking for an immediate operand for BIC.
     *
     * For unaligned accesses, test the page of the last unit of alignment.
     * This leaves the least significant alignment bits unchanged, and they
     * must of course be zero.
     */
    t_addr = addrlo;
    if (a_mask < s_mask) {
        t_addr = TCG_REG_R0;
        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, t_addr,
                        addrlo, s_mask - a_mask);
    }
    if (use_armv7_instructions && TARGET_PAGE_BITS <= 16) {
        tcg_out_movi32(s, COND_AL, TCG_REG_TMP, ~(TARGET_PAGE_MASK | a_mask));
        tcg_out_dat_reg(s, COND_AL, ARITH_BIC, TCG_REG_TMP,
                        t_addr, TCG_REG_TMP, 0);
        tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R2, TCG_REG_TMP, 0);
    } else {
        if (a_mask) {
            tcg_debug_assert(a_mask <= 0xff);
            tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo, a_mask);
        }
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0, t_addr,
                        SHIFT_IMM_LSR(TARGET_PAGE_BITS));
        tcg_out_dat_reg(s, (a_mask ? COND_EQ : COND_AL), ARITH_CMP,
                        0, TCG_REG_R2, TCG_REG_TMP,
                        SHIFT_IMM_LSL(TARGET_PAGE_BITS));
    }

    if (TARGET_LONG_BITS == 64) {
        tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0, TCG_REG_R3, addrhi, 0);
    }

    return TCG_REG_R1;
}
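
/*
 * Sketch of one possible emitted fast path, for a 32-bit guest with
 * cmp_off == 0, no alignment check, and the pre-v7 comparison
 * (editor's illustration):
 *
 *     ldrd  r0, r1, [env + fast_off]        @ f[mmu_idx].{mask,table}
 *     and   r0, r0, addrlo, lsr #(TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS)
 *     ldr   r2, [r1, r0]!                   @ comparator; r1 = &entry
 *     ldr   r1, [r1, #addend_off]           @ addend
 *     mov   tmp, addrlo, lsr #TARGET_PAGE_BITS
 *     cmp   r2, tmp, lsl #TARGET_PAGE_BITS
 *
 * leaving COND_EQ true on a TLB hit and the addend in r1.
 */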
1525
1526/* Record the context of a call to the out of line helper code for the slow
1527   path for a load or store, so that we can later generate the correct
1528   helper code.  */
1529static void add_qemu_ldst_label(TCGContext *s, bool is_ld, MemOpIdx oi,
1530                                TCGReg datalo, TCGReg datahi, TCGReg addrlo,
1531                                TCGReg addrhi, tcg_insn_unit *raddr,
1532                                tcg_insn_unit *label_ptr)
1533{
1534    TCGLabelQemuLdst *label = new_ldst_label(s);
1535
1536    label->is_ld = is_ld;
1537    label->oi = oi;
1538    label->datalo_reg = datalo;
1539    label->datahi_reg = datahi;
1540    label->addrlo_reg = addrlo;
1541    label->addrhi_reg = addrhi;
1542    label->raddr = tcg_splitwx_to_rx(raddr);
1543    label->label_ptr[0] = label_ptr;
1544}
1545
1546static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
1547{
1548    TCGReg argreg, datalo, datahi;
1549    MemOpIdx oi = lb->oi;
1550    MemOp opc = get_memop(oi);
1551
1552    if (!reloc_pc24(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
1553        return false;
1554    }
1555
1556    argreg = tcg_out_arg_reg32(s, TCG_REG_R0, TCG_AREG0);
1557    if (TARGET_LONG_BITS == 64) {
1558        argreg = tcg_out_arg_reg64(s, argreg, lb->addrlo_reg, lb->addrhi_reg);
1559    } else {
1560        argreg = tcg_out_arg_reg32(s, argreg, lb->addrlo_reg);
1561    }
1562    argreg = tcg_out_arg_imm32(s, argreg, oi);
1563    argreg = tcg_out_arg_reg32(s, argreg, TCG_REG_R14);
1564
1565    /* Use the canonical unsigned helpers and minimize icache usage. */
1566    tcg_out_call_int(s, qemu_ld_helpers[opc & MO_SIZE]);
1567
1568    datalo = lb->datalo_reg;
1569    datahi = lb->datahi_reg;
1570    if ((opc & MO_SIZE) == MO_64) {
1571        if (datalo != TCG_REG_R1) {
1572            tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_R0);
1573            tcg_out_mov_reg(s, COND_AL, datahi, TCG_REG_R1);
1574        } else if (datahi != TCG_REG_R0) {
1575            tcg_out_mov_reg(s, COND_AL, datahi, TCG_REG_R1);
1576            tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_R0);
1577        } else {
1578            tcg_out_mov_reg(s, COND_AL, TCG_REG_TMP, TCG_REG_R0);
1579            tcg_out_mov_reg(s, COND_AL, datahi, TCG_REG_R1);
1580            tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_TMP);
1581        }
1582    } else {
1583        tcg_out_movext(s, TCG_TYPE_I32, datalo,
1584                       TCG_TYPE_I32, opc & MO_SSIZE, TCG_REG_R0);
1585    }
1586
1587    tcg_out_goto(s, COND_AL, lb->raddr);
1588    return true;
1589}
1590
1591static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
1592{
1593    TCGReg argreg, datalo, datahi;
1594    MemOpIdx oi = lb->oi;
1595    MemOp opc = get_memop(oi);
1596
1597    if (!reloc_pc24(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
1598        return false;
1599    }
1600
1601    argreg = TCG_REG_R0;
1602    argreg = tcg_out_arg_reg32(s, argreg, TCG_AREG0);
1603    if (TARGET_LONG_BITS == 64) {
1604        argreg = tcg_out_arg_reg64(s, argreg, lb->addrlo_reg, lb->addrhi_reg);
1605    } else {
1606        argreg = tcg_out_arg_reg32(s, argreg, lb->addrlo_reg);
1607    }
1608
1609    datalo = lb->datalo_reg;
1610    datahi = lb->datahi_reg;
1611    switch (opc & MO_SIZE) {
1612    case MO_8:
1613        argreg = tcg_out_arg_reg8(s, argreg, datalo);
1614        break;
1615    case MO_16:
1616        argreg = tcg_out_arg_reg16(s, argreg, datalo);
1617        break;
1618    case MO_32:
1619    default:
1620        argreg = tcg_out_arg_reg32(s, argreg, datalo);
1621        break;
1622    case MO_64:
1623        argreg = tcg_out_arg_reg64(s, argreg, datalo, datahi);
1624        break;
1625    }
1626
1627    argreg = tcg_out_arg_imm32(s, argreg, oi);
1628    argreg = tcg_out_arg_reg32(s, argreg, TCG_REG_R14);
1629
1630    /* Tail-call to the helper, which will return to the fast path.  */
1631    tcg_out_goto(s, COND_AL, qemu_st_helpers[opc & MO_SIZE]);
1632    return true;
1633}
1634#else
1635
1636static void tcg_out_test_alignment(TCGContext *s, bool is_ld, TCGReg addrlo,
1637                                   TCGReg addrhi, unsigned a_bits)
1638{
1639    unsigned a_mask = (1 << a_bits) - 1;
1640    TCGLabelQemuLdst *label = new_ldst_label(s);
1641
1642    label->is_ld = is_ld;
1643    label->addrlo_reg = addrlo;
1644    label->addrhi_reg = addrhi;
1645
1646    /* We are expecting a_bits to max out at 7, and can easily support 8. */
1647    tcg_debug_assert(a_mask <= 0xff);
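    /* e.g. a_bits == 3 (an 8-byte-aligned access) gives a_mask == 7. */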
1648    /* tst addr, #mask */
1649    tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo, a_mask);
1650
1651    /* blne slow_path */
1652    label->label_ptr[0] = s->code_ptr;
1653    tcg_out_bl_imm(s, COND_NE, 0);
1654
1655    label->raddr = tcg_splitwx_to_rx(s->code_ptr);
1656}
1657
1658static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l)
1659{
1660    if (!reloc_pc24(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
1661        return false;
1662    }
1663
1664    if (TARGET_LONG_BITS == 64) {
1665        /* 64-bit target address is aligned into R2:R3. */
1666        if (l->addrhi_reg != TCG_REG_R2) {
1667            tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R2, l->addrlo_reg);
1668            tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R3, l->addrhi_reg);
1669        } else if (l->addrlo_reg != TCG_REG_R3) {
1670            tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R3, l->addrhi_reg);
1671            tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R2, l->addrlo_reg);
1672        } else {
1673            tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R1, TCG_REG_R2);
1674            tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R2, TCG_REG_R3);
1675            tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R3, TCG_REG_R1);
1676        }
1677    } else {
1678        tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R1, l->addrlo_reg);
1679    }
1680    tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_AREG0);
1681
1682    /*
1683     * Tail call to the helper, with the return address back inline,
1684     * just for the clarity of the debugging traceback -- the helper
1685     * cannot return.  We have used BLNE to arrive here, so LR is
1686     * already set.
1687     */
1688    tcg_out_goto(s, COND_AL, (const void *)
1689                 (l->is_ld ? helper_unaligned_ld : helper_unaligned_st));
1690    return true;
1691}
1692
1693static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
1694{
1695    return tcg_out_fail_alignment(s, l);
1696}
1697
1698static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
1699{
1700    return tcg_out_fail_alignment(s, l);
1701}
1702#endif /* SOFTMMU */
1703
1704static void tcg_out_qemu_ld_index(TCGContext *s, MemOp opc,
1705                                  TCGReg datalo, TCGReg datahi,
1706                                  TCGReg addrlo, TCGReg addend,
1707                                  bool scratch_addend)
1708{
1709    /* Byte swapping is left to middle-end expansion. */
1710    tcg_debug_assert((opc & MO_BSWAP) == 0);
1711
1712    switch (opc & MO_SSIZE) {
1713    case MO_UB:
1714        tcg_out_ld8_r(s, COND_AL, datalo, addrlo, addend);
1715        break;
1716    case MO_SB:
1717        tcg_out_ld8s_r(s, COND_AL, datalo, addrlo, addend);
1718        break;
1719    case MO_UW:
1720        tcg_out_ld16u_r(s, COND_AL, datalo, addrlo, addend);
1721        break;
1722    case MO_SW:
1723        tcg_out_ld16s_r(s, COND_AL, datalo, addrlo, addend);
1724        break;
1725    case MO_UL:
1726        tcg_out_ld32_r(s, COND_AL, datalo, addrlo, addend);
1727        break;
1728    case MO_UQ:
1729        /* We used pair allocation for datalo, so it should already be aligned. */
1730        tcg_debug_assert((datalo & 1) == 0);
1731        tcg_debug_assert(datahi == datalo + 1);
1732        /* LDRD requires alignment; double-check that. */
1733        if (get_alignment_bits(opc) >= MO_64) {
1734            /*
1735             * Rm (the second address op) must not overlap Rt or Rt + 1.
1736             * Since datalo is aligned, we can simplify the test via alignment.
1737             * Flip the two address arguments if that works.
1738             */
1739            if ((addend & ~1) != datalo) {
1740                tcg_out_ldrd_r(s, COND_AL, datalo, addrlo, addend);
1741                break;
1742            }
1743            if ((addrlo & ~1) != datalo) {
1744                tcg_out_ldrd_r(s, COND_AL, datalo, addend, addrlo);
1745                break;
1746            }
1747        }
1748        if (scratch_addend) {
1749            tcg_out_ld32_rwb(s, COND_AL, datalo, addend, addrlo);
1750            tcg_out_ld32_12(s, COND_AL, datahi, addend, 4);
1751        } else {
1752            tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_TMP,
1753                            addend, addrlo, SHIFT_IMM_LSL(0));
1754            tcg_out_ld32_12(s, COND_AL, datalo, TCG_REG_TMP, 0);
1755            tcg_out_ld32_12(s, COND_AL, datahi, TCG_REG_TMP, 4);
1756        }
1757        break;
1758    default:
1759        g_assert_not_reached();
1760    }
1761}
1762
1763#ifndef CONFIG_SOFTMMU
1764static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg datalo,
1765                                   TCGReg datahi, TCGReg addrlo)
1766{
1767    /* Byte swapping is left to middle-end expansion. */
1768    tcg_debug_assert((opc & MO_BSWAP) == 0);
1769
1770    switch (opc & MO_SSIZE) {
1771    case MO_UB:
1772        tcg_out_ld8_12(s, COND_AL, datalo, addrlo, 0);
1773        break;
1774    case MO_SB:
1775        tcg_out_ld8s_8(s, COND_AL, datalo, addrlo, 0);
1776        break;
1777    case MO_UW:
1778        tcg_out_ld16u_8(s, COND_AL, datalo, addrlo, 0);
1779        break;
1780    case MO_SW:
1781        tcg_out_ld16s_8(s, COND_AL, datalo, addrlo, 0);
1782        break;
1783    case MO_UL:
1784        tcg_out_ld32_12(s, COND_AL, datalo, addrlo, 0);
1785        break;
1786    case MO_UQ:
1787        /* We used pair allocation for datalo, so it should already be aligned. */
1788        tcg_debug_assert((datalo & 1) == 0);
1789        tcg_debug_assert(datahi == datalo + 1);
1790        /* LDRD requires alignment; double-check that. */
1791        if (get_alignment_bits(opc) >= MO_64) {
1792            tcg_out_ldrd_8(s, COND_AL, datalo, addrlo, 0);
1793        } else if (datalo == addrlo) {
1794            tcg_out_ld32_12(s, COND_AL, datahi, addrlo, 4);
1795            tcg_out_ld32_12(s, COND_AL, datalo, addrlo, 0);
1796        } else {
1797            tcg_out_ld32_12(s, COND_AL, datalo, addrlo, 0);
1798            tcg_out_ld32_12(s, COND_AL, datahi, addrlo, 4);
1799        }
1800        break;
1801    default:
1802        g_assert_not_reached();
1803    }
1804}
1805#endif
1806
1807static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
1808{
1809    TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused));
1810    MemOpIdx oi;
1811    MemOp opc;
1812#ifdef CONFIG_SOFTMMU
1813    int mem_index;
1814    TCGReg addend;
1815    tcg_insn_unit *label_ptr;
1816#else
1817    unsigned a_bits;
1818#endif
1819
1820    datalo = *args++;
1821    datahi = (is64 ? *args++ : 0);
1822    addrlo = *args++;
1823    addrhi = (TARGET_LONG_BITS == 64 ? *args++ : 0);
1824    oi = *args++;
1825    opc = get_memop(oi);
1826
1827#ifdef CONFIG_SOFTMMU
1828    mem_index = get_mmuidx(oi);
1829    addend = tcg_out_tlb_read(s, addrlo, addrhi, opc, mem_index, 1);
1830
2831    /* This is a conditional BL, used only to load a pointer within this opcode
2832       into LR for the slow path.  We will not be using the value for a tail call.  */
1833    label_ptr = s->code_ptr;
1834    tcg_out_bl_imm(s, COND_NE, 0);
1835
1836    tcg_out_qemu_ld_index(s, opc, datalo, datahi, addrlo, addend, true);
1837
1838    add_qemu_ldst_label(s, true, oi, datalo, datahi, addrlo, addrhi,
1839                        s->code_ptr, label_ptr);
1840#else /* !CONFIG_SOFTMMU */
1841    a_bits = get_alignment_bits(opc);
1842    if (a_bits) {
1843        tcg_out_test_alignment(s, true, addrlo, addrhi, a_bits);
1844    }
1845    if (guest_base) {
1846        tcg_out_qemu_ld_index(s, opc, datalo, datahi,
1847                              addrlo, TCG_REG_GUEST_BASE, false);
1848    } else {
1849        tcg_out_qemu_ld_direct(s, opc, datalo, datahi, addrlo);
1850    }
1851#endif
1852}
1853
1854static void tcg_out_qemu_st_index(TCGContext *s, ARMCond cond, MemOp opc,
1855                                  TCGReg datalo, TCGReg datahi,
1856                                  TCGReg addrlo, TCGReg addend,
1857                                  bool scratch_addend)
1858{
1859    /* Byte swapping is left to middle-end expansion. */
1860    tcg_debug_assert((opc & MO_BSWAP) == 0);
1861
1862    switch (opc & MO_SIZE) {
1863    case MO_8:
1864        tcg_out_st8_r(s, cond, datalo, addrlo, addend);
1865        break;
1866    case MO_16:
1867        tcg_out_st16_r(s, cond, datalo, addrlo, addend);
1868        break;
1869    case MO_32:
1870        tcg_out_st32_r(s, cond, datalo, addrlo, addend);
1871        break;
1872    case MO_64:
1873        /* We used pair allocation for datalo, so it should already be aligned. */
1874        tcg_debug_assert((datalo & 1) == 0);
1875        tcg_debug_assert(datahi == datalo + 1);
1876        /* STRD requires alignment; double-check that. */
1877        if (get_alignment_bits(opc) >= MO_64) {
1878            tcg_out_strd_r(s, cond, datalo, addrlo, addend);
1879        } else if (scratch_addend) {
1880            tcg_out_st32_rwb(s, cond, datalo, addend, addrlo);
1881            tcg_out_st32_12(s, cond, datahi, addend, 4);
1882        } else {
1883            tcg_out_dat_reg(s, cond, ARITH_ADD, TCG_REG_TMP,
1884                            addend, addrlo, SHIFT_IMM_LSL(0));
1885            tcg_out_st32_12(s, cond, datalo, TCG_REG_TMP, 0);
1886            tcg_out_st32_12(s, cond, datahi, TCG_REG_TMP, 4);
1887        }
1888        break;
1889    default:
1890        g_assert_not_reached();
1891    }
1892}
1893
1894#ifndef CONFIG_SOFTMMU
1895static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg datalo,
1896                                   TCGReg datahi, TCGReg addrlo)
1897{
1898    /* Byte swapping is left to middle-end expansion. */
1899    tcg_debug_assert((opc & MO_BSWAP) == 0);
1900
1901    switch (opc & MO_SIZE) {
1902    case MO_8:
1903        tcg_out_st8_12(s, COND_AL, datalo, addrlo, 0);
1904        break;
1905    case MO_16:
1906        tcg_out_st16_8(s, COND_AL, datalo, addrlo, 0);
1907        break;
1908    case MO_32:
1909        tcg_out_st32_12(s, COND_AL, datalo, addrlo, 0);
1910        break;
1911    case MO_64:
1912        /* We used pair allocation for datalo, so it should already be aligned. */
1913        tcg_debug_assert((datalo & 1) == 0);
1914        tcg_debug_assert(datahi == datalo + 1);
1915        /* STRD requires alignment; double-check that. */
1916        if (get_alignment_bits(opc) >= MO_64) {
1917            tcg_out_strd_8(s, COND_AL, datalo, addrlo, 0);
1918        } else {
1919            tcg_out_st32_12(s, COND_AL, datalo, addrlo, 0);
1920            tcg_out_st32_12(s, COND_AL, datahi, addrlo, 4);
1921        }
1922        break;
1923    default:
1924        g_assert_not_reached();
1925    }
1926}
1927#endif
1928
1929static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
1930{
1931    TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused));
1932    MemOpIdx oi;
1933    MemOp opc;
1934#ifdef CONFIG_SOFTMMU
1935    int mem_index;
1936    TCGReg addend;
1937    tcg_insn_unit *label_ptr;
1938#else
1939    unsigned a_bits;
1940#endif
1941
1942    datalo = *args++;
1943    datahi = (is64 ? *args++ : 0);
1944    addrlo = *args++;
1945    addrhi = (TARGET_LONG_BITS == 64 ? *args++ : 0);
1946    oi = *args++;
1947    opc = get_memop(oi);
1948
1949#ifdef CONFIG_SOFTMMU
1950    mem_index = get_mmuidx(oi);
1951    addend = tcg_out_tlb_read(s, addrlo, addrhi, opc, mem_index, 0);
1952
1953    tcg_out_qemu_st_index(s, COND_EQ, opc, datalo, datahi,
1954                          addrlo, addend, true);
1955
1956    /* The conditional call must come last, as we're going to return here.  */
1957    label_ptr = s->code_ptr;
1958    tcg_out_bl_imm(s, COND_NE, 0);
1959
1960    add_qemu_ldst_label(s, false, oi, datalo, datahi, addrlo, addrhi,
1961                        s->code_ptr, label_ptr);
1962#else /* !CONFIG_SOFTMMU */
1963    a_bits = get_alignment_bits(opc);
1964    if (a_bits) {
1965        tcg_out_test_alignment(s, false, addrlo, addrhi, a_bits);
1966    }
1967    if (guest_base) {
1968        tcg_out_qemu_st_index(s, COND_AL, opc, datalo, datahi,
1969                              addrlo, TCG_REG_GUEST_BASE, false);
1970    } else {
1971        tcg_out_qemu_st_direct(s, opc, datalo, datahi, addrlo);
1972    }
1973#endif
1974}
1975
1976static void tcg_out_epilogue(TCGContext *s);
1977
1978static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg)
1979{
1980    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, arg);
1981    tcg_out_epilogue(s);
1982}
1983
1984static void tcg_out_goto_tb(TCGContext *s, int which)
1985{
1986    uintptr_t i_addr;
1987    intptr_t i_disp;
1988
1989    /* Direct branch will be patched by tb_target_set_jmp_target. */
1990    set_jmp_insn_offset(s, which);
1991    tcg_out32(s, INSN_NOP);
1992
1993    /* When branch is out of range, fall through to indirect. */
1994    i_addr = get_jmp_target_addr(s, which);
1995    i_disp = tcg_pcrel_diff(s, (void *)i_addr) - 8;
1996    tcg_debug_assert(i_disp < 0);
1997    if (i_disp >= -0xfff) {
1998        tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, i_disp);
1999    } else {
2000        /*
2001         * The TB is close, but outside the 12 bits addressable by
2002         * the load.  We can extend this to 20 bits with a sub of a
2003         * shifted immediate from pc.
2004         */
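        /*
         * Illustrative example with assumed values: i_disp = -0x12345
         * gives h = 0x12345 and l = 0x345; h - l = 0x12000 encodes as a
         * rotated 8-bit immediate, so we emit
         *     sub r0, pc, #0x12000
         *     ldr pc, [r0, #-0x345]
         * which loads the indirect branch target from pc + i_disp.
         */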
2005        int h = -i_disp;
2006        int l = h & 0xfff;
2007
2008        h = encode_imm_nofail(h - l);
2009        tcg_out_dat_imm(s, COND_AL, ARITH_SUB, TCG_REG_R0, TCG_REG_PC, h);
2010        tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_R0, -l);
2011    }
2012    set_jmp_reset_offset(s, which);
2013}
2014
2015void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
2016                              uintptr_t jmp_rx, uintptr_t jmp_rw)
2017{
2018    uintptr_t addr = tb->jmp_target_addr[n];
2019    ptrdiff_t offset = addr - (jmp_rx + 8);
2020    tcg_insn_unit insn;
2021
2022    /* Either directly branch, or fall through to indirect branch. */
2023    if (offset == sextract64(offset, 0, 26)) {
2024        /* B <addr> */
2025        insn = deposit32((COND_AL << 28) | INSN_B, 0, 24, offset >> 2);
2026    } else {
2027        insn = INSN_NOP;
2028    }
2029
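    /*
     * The patched branch is a single aligned 4-byte word, so the store
     * below replaces it atomically with respect to other threads that
     * may be executing this TB.
     */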
2030    qatomic_set((uint32_t *)jmp_rw, insn);
2031    flush_idcache_range(jmp_rx, jmp_rw, 4);
2032}
2033
2034static void tcg_out_op(TCGContext *s, TCGOpcode opc,
2035                       const TCGArg args[TCG_MAX_OP_ARGS],
2036                       const int const_args[TCG_MAX_OP_ARGS])
2037{
2038    TCGArg a0, a1, a2, a3, a4, a5;
2039    int c;
2040
2041    switch (opc) {
2042    case INDEX_op_goto_ptr:
2043        tcg_out_b_reg(s, COND_AL, args[0]);
2044        break;
2045    case INDEX_op_br:
2046        tcg_out_goto_label(s, COND_AL, arg_label(args[0]));
2047        break;
2048
2049    case INDEX_op_ld8u_i32:
2050        tcg_out_ld8u(s, COND_AL, args[0], args[1], args[2]);
2051        break;
2052    case INDEX_op_ld8s_i32:
2053        tcg_out_ld8s(s, COND_AL, args[0], args[1], args[2]);
2054        break;
2055    case INDEX_op_ld16u_i32:
2056        tcg_out_ld16u(s, COND_AL, args[0], args[1], args[2]);
2057        break;
2058    case INDEX_op_ld16s_i32:
2059        tcg_out_ld16s(s, COND_AL, args[0], args[1], args[2]);
2060        break;
2061    case INDEX_op_ld_i32:
2062        tcg_out_ld32u(s, COND_AL, args[0], args[1], args[2]);
2063        break;
2064    case INDEX_op_st8_i32:
2065        tcg_out_st8(s, COND_AL, args[0], args[1], args[2]);
2066        break;
2067    case INDEX_op_st16_i32:
2068        tcg_out_st16(s, COND_AL, args[0], args[1], args[2]);
2069        break;
2070    case INDEX_op_st_i32:
2071        tcg_out_st32(s, COND_AL, args[0], args[1], args[2]);
2072        break;
2073
2074    case INDEX_op_movcond_i32:
2075        /* Constraints mean that v2 is always in the same register as dest,
2076         * so we only need to do "if condition passed, move v1 to dest".
2077         */
2078        tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
2079                        args[1], args[2], const_args[2]);
2080        tcg_out_dat_rIK(s, tcg_cond_to_arm_cond[args[5]], ARITH_MOV,
2081                        ARITH_MVN, args[0], 0, args[3], const_args[3]);
2082        break;
2083    case INDEX_op_add_i32:
2084        tcg_out_dat_rIN(s, COND_AL, ARITH_ADD, ARITH_SUB,
2085                        args[0], args[1], args[2], const_args[2]);
2086        break;
2087    case INDEX_op_sub_i32:
2088        if (const_args[1]) {
2089            if (const_args[2]) {
2090                tcg_out_movi32(s, COND_AL, args[0], args[1] - args[2]);
2091            } else {
2092                tcg_out_dat_rI(s, COND_AL, ARITH_RSB,
2093                               args[0], args[2], args[1], 1);
2094            }
2095        } else {
2096            tcg_out_dat_rIN(s, COND_AL, ARITH_SUB, ARITH_ADD,
2097                            args[0], args[1], args[2], const_args[2]);
2098        }
2099        break;
2100    case INDEX_op_and_i32:
2101        tcg_out_dat_rIK(s, COND_AL, ARITH_AND, ARITH_BIC,
2102                        args[0], args[1], args[2], const_args[2]);
2103        break;
2104    case INDEX_op_andc_i32:
2105        tcg_out_dat_rIK(s, COND_AL, ARITH_BIC, ARITH_AND,
2106                        args[0], args[1], args[2], const_args[2]);
2107        break;
2108    case INDEX_op_or_i32:
2109        c = ARITH_ORR;
2110        goto gen_arith;
2111    case INDEX_op_xor_i32:
2112        c = ARITH_EOR;
2113        /* Fall through.  */
2114    gen_arith:
2115        tcg_out_dat_rI(s, COND_AL, c, args[0], args[1], args[2], const_args[2]);
2116        break;
2117    case INDEX_op_add2_i32:
2118        a0 = args[0], a1 = args[1], a2 = args[2];
2119        a3 = args[3], a4 = args[4], a5 = args[5];
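        /*
         * If the low output would overwrite an input still needed by the
         * high half, compute the low half into TMP and move it at the end.
         */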
2120        if (a0 == a3 || (a0 == a5 && !const_args[5])) {
2121            a0 = TCG_REG_TMP;
2122        }
2123        tcg_out_dat_rIN(s, COND_AL, ARITH_ADD | TO_CPSR, ARITH_SUB | TO_CPSR,
2124                        a0, a2, a4, const_args[4]);
2125        tcg_out_dat_rIK(s, COND_AL, ARITH_ADC, ARITH_SBC,
2126                        a1, a3, a5, const_args[5]);
2127        tcg_out_mov_reg(s, COND_AL, args[0], a0);
2128        break;
2129    case INDEX_op_sub2_i32:
2130        a0 = args[0], a1 = args[1], a2 = args[2];
2131        a3 = args[3], a4 = args[4], a5 = args[5];
2132        if ((a0 == a3 && !const_args[3]) || (a0 == a5 && !const_args[5])) {
2133            a0 = TCG_REG_TMP;
2134        }
2135        if (const_args[2]) {
2136            if (const_args[4]) {
2137                tcg_out_movi32(s, COND_AL, a0, a4);
2138                a4 = a0;
2139            }
2140            tcg_out_dat_rI(s, COND_AL, ARITH_RSB | TO_CPSR, a0, a4, a2, 1);
2141        } else {
2142            tcg_out_dat_rIN(s, COND_AL, ARITH_SUB | TO_CPSR,
2143                            ARITH_ADD | TO_CPSR, a0, a2, a4, const_args[4]);
2144        }
2145        if (const_args[3]) {
2146            if (const_args[5]) {
2147                tcg_out_movi32(s, COND_AL, a1, a5);
2148                a5 = a1;
2149            }
2150            tcg_out_dat_rI(s, COND_AL, ARITH_RSC, a1, a5, a3, 1);
2151        } else {
2152            tcg_out_dat_rIK(s, COND_AL, ARITH_SBC, ARITH_ADC,
2153                            a1, a3, a5, const_args[5]);
2154        }
2155        tcg_out_mov_reg(s, COND_AL, args[0], a0);
2156        break;
2157    case INDEX_op_neg_i32:
2158        tcg_out_dat_imm(s, COND_AL, ARITH_RSB, args[0], args[1], 0);
2159        break;
2160    case INDEX_op_not_i32:
2161        tcg_out_dat_reg(s, COND_AL,
2162                        ARITH_MVN, args[0], 0, args[1], SHIFT_IMM_LSL(0));
2163        break;
2164    case INDEX_op_mul_i32:
2165        tcg_out_mul32(s, COND_AL, args[0], args[1], args[2]);
2166        break;
2167    case INDEX_op_mulu2_i32:
2168        tcg_out_umull32(s, COND_AL, args[0], args[1], args[2], args[3]);
2169        break;
2170    case INDEX_op_muls2_i32:
2171        tcg_out_smull32(s, COND_AL, args[0], args[1], args[2], args[3]);
2172        break;
2173    /* XXX: Perhaps args[2] & 0x1f is wrong */
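    /*
     * Note that for LSR/ASR/ROR an immediate count field of 0 encodes a
     * count of 32 (RRX for ROR), so a masked count of zero must instead
     * be emitted as LSL #0, i.e. a plain move.
     */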
2174    case INDEX_op_shl_i32:
2175        c = const_args[2] ?
2176                SHIFT_IMM_LSL(args[2] & 0x1f) : SHIFT_REG_LSL(args[2]);
2177        goto gen_shift32;
2178    case INDEX_op_shr_i32:
2179        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_LSR(args[2] & 0x1f) :
2180                SHIFT_IMM_LSL(0) : SHIFT_REG_LSR(args[2]);
2181        goto gen_shift32;
2182    case INDEX_op_sar_i32:
2183        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ASR(args[2] & 0x1f) :
2184                SHIFT_IMM_LSL(0) : SHIFT_REG_ASR(args[2]);
2185        goto gen_shift32;
2186    case INDEX_op_rotr_i32:
2187        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ROR(args[2] & 0x1f) :
2188                SHIFT_IMM_LSL(0) : SHIFT_REG_ROR(args[2]);
2189        /* Fall through.  */
2190    gen_shift32:
2191        tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1], c);
2192        break;
2193
2194    case INDEX_op_rotl_i32:
2195        if (const_args[2]) {
2196            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
2197                            ((0x20 - args[2]) & 0x1f) ?
2198                            SHIFT_IMM_ROR((0x20 - args[2]) & 0x1f) :
2199                            SHIFT_IMM_LSL(0));
2200        } else {
2201            tcg_out_dat_imm(s, COND_AL, ARITH_RSB, TCG_REG_TMP, args[2], 0x20);
2202            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
2203                            SHIFT_REG_ROR(TCG_REG_TMP));
2204        }
2205        break;
2206
2207    case INDEX_op_ctz_i32:
2208        tcg_out_dat_reg(s, COND_AL, INSN_RBIT, TCG_REG_TMP, 0, args[1], 0);
2209        a1 = TCG_REG_TMP;
2210        goto do_clz;
2211
2212    case INDEX_op_clz_i32:
2213        a1 = args[1];
2214    do_clz:
2215        a0 = args[0];
2216        a2 = args[2];
2217        c = const_args[2];
2218        if (c && a2 == 32) {
2219            tcg_out_dat_reg(s, COND_AL, INSN_CLZ, a0, 0, a1, 0);
2220            break;
2221        }
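        /* a0 = (a1 != 0 ? clz(a1) : a2), via cmp / clzne / mov(mvn)eq. */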
2222        tcg_out_dat_imm(s, COND_AL, ARITH_CMP, 0, a1, 0);
2223        tcg_out_dat_reg(s, COND_NE, INSN_CLZ, a0, 0, a1, 0);
2224        if (c || a0 != a2) {
2225            tcg_out_dat_rIK(s, COND_EQ, ARITH_MOV, ARITH_MVN, a0, 0, a2, c);
2226        }
2227        break;
2228
2229    case INDEX_op_brcond_i32:
2230        tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
2231                        args[0], args[1], const_args[1]);
2232        tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[2]],
2233                           arg_label(args[3]));
2234        break;
2235    case INDEX_op_setcond_i32:
2236        tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
2237                        args[1], args[2], const_args[2]);
2238        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[args[3]],
2239                        ARITH_MOV, args[0], 0, 1);
2240        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[3])],
2241                        ARITH_MOV, args[0], 0, 0);
2242        break;
2243
2244    case INDEX_op_brcond2_i32:
2245        c = tcg_out_cmp2(s, args, const_args);
2246        tcg_out_goto_label(s, tcg_cond_to_arm_cond[c], arg_label(args[5]));
2247        break;
2248    case INDEX_op_setcond2_i32:
2249        c = tcg_out_cmp2(s, args + 1, const_args + 1);
2250        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[c], ARITH_MOV, args[0], 0, 1);
2251        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(c)],
2252                        ARITH_MOV, args[0], 0, 0);
2253        break;
2254
2255    case INDEX_op_qemu_ld_i32:
2256        tcg_out_qemu_ld(s, args, 0);
2257        break;
2258    case INDEX_op_qemu_ld_i64:
2259        tcg_out_qemu_ld(s, args, 1);
2260        break;
2261    case INDEX_op_qemu_st_i32:
2262        tcg_out_qemu_st(s, args, 0);
2263        break;
2264    case INDEX_op_qemu_st_i64:
2265        tcg_out_qemu_st(s, args, 1);
2266        break;
2267
2268    case INDEX_op_bswap16_i32:
2269        tcg_out_bswap16(s, COND_AL, args[0], args[1], args[2]);
2270        break;
2271    case INDEX_op_bswap32_i32:
2272        tcg_out_bswap32(s, COND_AL, args[0], args[1]);
2273        break;
2274
2275    case INDEX_op_deposit_i32:
2276        tcg_out_deposit(s, COND_AL, args[0], args[2],
2277                        args[3], args[4], const_args[2]);
2278        break;
2279    case INDEX_op_extract_i32:
2280        tcg_out_extract(s, COND_AL, args[0], args[1], args[2], args[3]);
2281        break;
2282    case INDEX_op_sextract_i32:
2283        tcg_out_sextract(s, COND_AL, args[0], args[1], args[2], args[3]);
2284        break;
2285    case INDEX_op_extract2_i32:
2286        /* ??? These optimizations vs zero should be generic.  */
2287        /* ??? But we can't substitute 2 for 1 in the opcode stream yet.  */
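        /* extract2: args[0] = (args[2]:args[1]) >> args[3], keeping 32 bits. */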
2288        if (const_args[1]) {
2289            if (const_args[2]) {
2290                tcg_out_movi(s, TCG_TYPE_REG, args[0], 0);
2291            } else {
2292                tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0,
2293                                args[2], SHIFT_IMM_LSL(32 - args[3]));
2294            }
2295        } else if (const_args[2]) {
2296            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0,
2297                            args[1], SHIFT_IMM_LSR(args[3]));
2298        } else {
2299            /* We can do extract2 in 2 insns, vs the 3 required otherwise.  */
2300            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0,
2301                            args[2], SHIFT_IMM_LSL(32 - args[3]));
2302            tcg_out_dat_reg(s, COND_AL, ARITH_ORR, args[0], TCG_REG_TMP,
2303                            args[1], SHIFT_IMM_LSR(args[3]));
2304        }
2305        break;
2306
2307    case INDEX_op_div_i32:
2308        tcg_out_sdiv(s, COND_AL, args[0], args[1], args[2]);
2309        break;
2310    case INDEX_op_divu_i32:
2311        tcg_out_udiv(s, COND_AL, args[0], args[1], args[2]);
2312        break;
2313
2314    case INDEX_op_mb:
2315        tcg_out_mb(s, args[0]);
2316        break;
2317
2318    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
2319    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
2320    case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb.  */
2321    case INDEX_op_goto_tb:  /* Always emitted via tcg_out_goto_tb.  */
2322    case INDEX_op_ext8s_i32:  /* Always emitted via tcg_reg_alloc_op.  */
2323    case INDEX_op_ext8u_i32:
2324    case INDEX_op_ext16s_i32:
2325    case INDEX_op_ext16u_i32:
2326    default:
2327        g_assert_not_reached();
2328    }
2329}
2330
2331static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
2332{
2333    switch (op) {
2334    case INDEX_op_goto_ptr:
2335        return C_O0_I1(r);
2336
2337    case INDEX_op_ld8u_i32:
2338    case INDEX_op_ld8s_i32:
2339    case INDEX_op_ld16u_i32:
2340    case INDEX_op_ld16s_i32:
2341    case INDEX_op_ld_i32:
2342    case INDEX_op_neg_i32:
2343    case INDEX_op_not_i32:
2344    case INDEX_op_bswap16_i32:
2345    case INDEX_op_bswap32_i32:
2346    case INDEX_op_ext8s_i32:
2347    case INDEX_op_ext16s_i32:
2348    case INDEX_op_ext16u_i32:
2349    case INDEX_op_extract_i32:
2350    case INDEX_op_sextract_i32:
2351        return C_O1_I1(r, r);
2352
2353    case INDEX_op_st8_i32:
2354    case INDEX_op_st16_i32:
2355    case INDEX_op_st_i32:
2356        return C_O0_I2(r, r);
2357
2358    case INDEX_op_add_i32:
2359    case INDEX_op_sub_i32:
2360    case INDEX_op_setcond_i32:
2361        return C_O1_I2(r, r, rIN);
2362
2363    case INDEX_op_and_i32:
2364    case INDEX_op_andc_i32:
2365    case INDEX_op_clz_i32:
2366    case INDEX_op_ctz_i32:
2367        return C_O1_I2(r, r, rIK);
2368
2369    case INDEX_op_mul_i32:
2370    case INDEX_op_div_i32:
2371    case INDEX_op_divu_i32:
2372        return C_O1_I2(r, r, r);
2373
2374    case INDEX_op_mulu2_i32:
2375    case INDEX_op_muls2_i32:
2376        return C_O2_I2(r, r, r, r);
2377
2378    case INDEX_op_or_i32:
2379    case INDEX_op_xor_i32:
2380        return C_O1_I2(r, r, rI);
2381
2382    case INDEX_op_shl_i32:
2383    case INDEX_op_shr_i32:
2384    case INDEX_op_sar_i32:
2385    case INDEX_op_rotl_i32:
2386    case INDEX_op_rotr_i32:
2387        return C_O1_I2(r, r, ri);
2388
2389    case INDEX_op_brcond_i32:
2390        return C_O0_I2(r, rIN);
2391    case INDEX_op_deposit_i32:
2392        return C_O1_I2(r, 0, rZ);
2393    case INDEX_op_extract2_i32:
2394        return C_O1_I2(r, rZ, rZ);
2395    case INDEX_op_movcond_i32:
2396        return C_O1_I4(r, r, rIN, rIK, 0);
2397    case INDEX_op_add2_i32:
2398        return C_O2_I4(r, r, r, r, rIN, rIK);
2399    case INDEX_op_sub2_i32:
2400        return C_O2_I4(r, r, rI, rI, rIN, rIK);
2401    case INDEX_op_brcond2_i32:
2402        return C_O0_I4(r, r, rI, rI);
2403    case INDEX_op_setcond2_i32:
2404        return C_O1_I4(r, r, r, rI, rI);
2405
2406    case INDEX_op_qemu_ld_i32:
2407        return TARGET_LONG_BITS == 32 ? C_O1_I1(r, l) : C_O1_I2(r, l, l);
2408    case INDEX_op_qemu_ld_i64:
2409        return TARGET_LONG_BITS == 32 ? C_O2_I1(e, p, l) : C_O2_I2(e, p, l, l);
2410    case INDEX_op_qemu_st_i32:
2411        return TARGET_LONG_BITS == 32 ? C_O0_I2(s, s) : C_O0_I3(s, s, s);
2412    case INDEX_op_qemu_st_i64:
2413        return TARGET_LONG_BITS == 32 ? C_O0_I3(S, p, s) : C_O0_I4(S, p, s, s);
2414
2415    case INDEX_op_st_vec:
2416        return C_O0_I2(w, r);
2417    case INDEX_op_ld_vec:
2418    case INDEX_op_dupm_vec:
2419        return C_O1_I1(w, r);
2420    case INDEX_op_dup_vec:
2421        return C_O1_I1(w, wr);
2422    case INDEX_op_abs_vec:
2423    case INDEX_op_neg_vec:
2424    case INDEX_op_not_vec:
2425    case INDEX_op_shli_vec:
2426    case INDEX_op_shri_vec:
2427    case INDEX_op_sari_vec:
2428        return C_O1_I1(w, w);
2429    case INDEX_op_dup2_vec:
2430    case INDEX_op_add_vec:
2431    case INDEX_op_mul_vec:
2432    case INDEX_op_smax_vec:
2433    case INDEX_op_smin_vec:
2434    case INDEX_op_ssadd_vec:
2435    case INDEX_op_sssub_vec:
2436    case INDEX_op_sub_vec:
2437    case INDEX_op_umax_vec:
2438    case INDEX_op_umin_vec:
2439    case INDEX_op_usadd_vec:
2440    case INDEX_op_ussub_vec:
2441    case INDEX_op_xor_vec:
2442    case INDEX_op_arm_sshl_vec:
2443    case INDEX_op_arm_ushl_vec:
2444        return C_O1_I2(w, w, w);
2445    case INDEX_op_arm_sli_vec:
2446        return C_O1_I2(w, 0, w);
2447    case INDEX_op_or_vec:
2448    case INDEX_op_andc_vec:
2449        return C_O1_I2(w, w, wO);
2450    case INDEX_op_and_vec:
2451    case INDEX_op_orc_vec:
2452        return C_O1_I2(w, w, wV);
2453    case INDEX_op_cmp_vec:
2454        return C_O1_I2(w, w, wZ);
2455    case INDEX_op_bitsel_vec:
2456        return C_O1_I3(w, w, w, w);
2457    default:
2458        g_assert_not_reached();
2459    }
2460}
2461
2462static void tcg_target_init(TCGContext *s)
2463{
2464    /*
2465     * Only probe for the platform and capabilities if we haven't already
2466     * determined maximum values at compile time.
2467     */
2468#if !defined(use_idiv_instructions) || !defined(use_neon_instructions)
2469    {
2470        unsigned long hwcap = qemu_getauxval(AT_HWCAP);
2471#ifndef use_idiv_instructions
2472        use_idiv_instructions = (hwcap & HWCAP_ARM_IDIVA) != 0;
2473#endif
2474#ifndef use_neon_instructions
2475        use_neon_instructions = (hwcap & HWCAP_ARM_NEON) != 0;
2476#endif
2477    }
2478#endif
2479
2480    if (__ARM_ARCH < 7) {
2481        const char *pl = (const char *)qemu_getauxval(AT_PLATFORM);
2482        if (pl != NULL && pl[0] == 'v' && pl[1] >= '4' && pl[1] <= '9') {
2483            arm_arch = pl[1] - '0';
2484        }
2485
2486        if (arm_arch < 6) {
2487            error_report("TCG: ARMv%d is unsupported; exiting", arm_arch);
2488            exit(EXIT_FAILURE);
2489        }
2490    }
2491
2492    tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS;
2493
2494    tcg_target_call_clobber_regs = 0;
2495    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0);
2496    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R1);
2497    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R2);
2498    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R3);
2499    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R12);
2500    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R14);
2501
2502    if (use_neon_instructions) {
2503        tcg_target_available_regs[TCG_TYPE_V64]  = ALL_VECTOR_REGS;
2504        tcg_target_available_regs[TCG_TYPE_V128] = ALL_VECTOR_REGS;
2505
2506        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q0);
2507        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q1);
2508        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q2);
2509        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q3);
2510        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q8);
2511        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q9);
2512        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q10);
2513        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q11);
2514        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q12);
2515        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q13);
2516        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q14);
2517        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q15);
2518    }
2519
2520    s->reserved_regs = 0;
2521    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
2522    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP);
2523    tcg_regset_set_reg(s->reserved_regs, TCG_REG_PC);
2524    tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP);
2525}
2526
2527static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
2528                       TCGReg arg1, intptr_t arg2)
2529{
2530    switch (type) {
2531    case TCG_TYPE_I32:
2532        tcg_out_ld32u(s, COND_AL, arg, arg1, arg2);
2533        return;
2534    case TCG_TYPE_V64:
2535        /* regs 1; size 8; align 8 */
2536        tcg_out_vldst(s, INSN_VLD1 | 0x7d0, arg, arg1, arg2);
2537        return;
2538    case TCG_TYPE_V128:
2539        /*
2540         * We have only 8-byte alignment for the stack per the ABI.
2541         * Rather than dynamically re-align the stack, it's easier
2542         * to simply not request alignment beyond that.  So:
2543         * regs 2; size 8; align 8
2544         */
2545        tcg_out_vldst(s, INSN_VLD1 | 0xad0, arg, arg1, arg2);
2546        return;
2547    default:
2548        g_assert_not_reached();
2549    }
2550}
2551
2552static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
2553                       TCGReg arg1, intptr_t arg2)
2554{
2555    switch (type) {
2556    case TCG_TYPE_I32:
2557        tcg_out_st32(s, COND_AL, arg, arg1, arg2);
2558        return;
2559    case TCG_TYPE_V64:
2560        /* regs 1; size 8; align 8 */
2561        tcg_out_vldst(s, INSN_VST1 | 0x7d0, arg, arg1, arg2);
2562        return;
2563    case TCG_TYPE_V128:
2564        /* See tcg_out_ld re alignment: regs 2; size 8; align 8 */
2565        tcg_out_vldst(s, INSN_VST1 | 0xad0, arg, arg1, arg2);
2566        return;
2567    default:
2568        g_assert_not_reached();
2569    }
2570}
2571
2572static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
2573                        TCGReg base, intptr_t ofs)
2574{
2575    return false;
2576}
2577
2578static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
2579{
2580    if (ret == arg) {
2581        return true;
2582    }
2583    switch (type) {
2584    case TCG_TYPE_I32:
2585        if (ret < TCG_REG_Q0 && arg < TCG_REG_Q0) {
2586            tcg_out_mov_reg(s, COND_AL, ret, arg);
2587            return true;
2588        }
2589        return false;
2590
2591    case TCG_TYPE_V64:
2592    case TCG_TYPE_V128:
2593        /* "VMOV D,N" is an alias for "VORR D,N,N". */
2594        tcg_out_vreg3(s, INSN_VORR, type - TCG_TYPE_V64, 0, ret, arg, arg);
2595        return true;
2596
2597    default:
2598        g_assert_not_reached();
2599    }
2600}
2601
2602static void tcg_out_movi(TCGContext *s, TCGType type,
2603                         TCGReg ret, tcg_target_long arg)
2604{
2605    tcg_debug_assert(type == TCG_TYPE_I32);
2606    tcg_debug_assert(ret < TCG_REG_Q0);
2607    tcg_out_movi32(s, COND_AL, ret, arg);
2608}
2609
2610static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
2611{
2612    return false;
2613}
2614
2615static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
2616                             tcg_target_long imm)
2617{
2618    int enc, opc = ARITH_ADD;
2619
2620    /* All of the easiest immediates to encode are positive. */
2621    if (imm < 0) {
2622        imm = -imm;
2623        opc = ARITH_SUB;
2624    }
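    /*
     * encode_imm() succeeds for any 8-bit value rotated right by an even
     * amount, e.g. 0x1000; other constants are built in TMP and combined
     * with a register-form add/sub.
     */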
2625    enc = encode_imm(imm);
2626    if (enc >= 0) {
2627        tcg_out_dat_imm(s, COND_AL, opc, rd, rs, enc);
2628    } else {
2629        tcg_out_movi32(s, COND_AL, TCG_REG_TMP, imm);
2630        tcg_out_dat_reg(s, COND_AL, opc, rd, rs,
2631                        TCG_REG_TMP, SHIFT_IMM_LSL(0));
2632    }
2633}
2634
2635/* Type is always V128, with I64 elements.  */
2636static void tcg_out_dup2_vec(TCGContext *s, TCGReg rd, TCGReg rl, TCGReg rh)
2637{
2638    /* Move high element into place first. */
2639    /* VMOV Dd+1, Ds */
2640    tcg_out_vreg3(s, INSN_VORR | (1 << 12), 0, 0, rd, rh, rh);
2641    /* Move low element into place; tcg_out_mov will check for nop. */
2642    tcg_out_mov(s, TCG_TYPE_V64, rd, rl);
2643}
2644
2645static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
2646                            TCGReg rd, TCGReg rs)
2647{
2648    int q = type - TCG_TYPE_V64;
2649
2650    if (vece == MO_64) {
2651        if (type == TCG_TYPE_V128) {
2652            tcg_out_dup2_vec(s, rd, rs, rs);
2653        } else {
2654            tcg_out_mov(s, TCG_TYPE_V64, rd, rs);
2655        }
2656    } else if (rs < TCG_REG_Q0) {
2657        int b = (vece == MO_8);
2658        int e = (vece == MO_16);
2659        tcg_out32(s, INSN_VDUP_G | (b << 22) | (q << 21) | (e << 5) |
2660                  encode_vn(rd) | (rs << 12));
2661    } else {
2662        int imm4 = 1 << vece;
2663        tcg_out32(s, INSN_VDUP_S | (imm4 << 16) | (q << 6) |
2664                  encode_vd(rd) | encode_vm(rs));
2665    }
2666    return true;
2667}
2668
2669static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
2670                             TCGReg rd, TCGReg base, intptr_t offset)
2671{
2672    if (vece == MO_64) {
2673        tcg_out_ld(s, TCG_TYPE_V64, rd, base, offset);
2674        if (type == TCG_TYPE_V128) {
2675            tcg_out_dup2_vec(s, rd, rd, rd);
2676        }
2677    } else {
2678        int q = type - TCG_TYPE_V64;
2679        tcg_out_vldst(s, INSN_VLD1R | (vece << 6) | (q << 5),
2680                      rd, base, offset);
2681    }
2682    return true;
2683}
2684
2685static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
2686                             TCGReg rd, int64_t v64)
2687{
2688    int q = type - TCG_TYPE_V64;
2689    int cmode, imm8, i;
2690
2691    /* Test all bytes equal first.  */
2692    if (vece == MO_8) {
2693        tcg_out_vmovi(s, rd, q, 0, 0xe, v64);
2694        return;
2695    }
2696
2697    /*
2698     * Test all bytes 0x00 or 0xff second.  This can match cases that
2699     * might otherwise take 2 or 3 insns for MO_16 or MO_32 below.
2700     */
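    /* For example, v64 == 0x00ff00ff00ff00ff yields imm8 == 0x55. */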
2701    for (i = imm8 = 0; i < 8; i++) {
2702        uint8_t byte = v64 >> (i * 8);
2703        if (byte == 0xff) {
2704            imm8 |= 1 << i;
2705        } else if (byte != 0) {
2706            goto fail_bytes;
2707        }
2708    }
2709    tcg_out_vmovi(s, rd, q, 1, 0xe, imm8);
2710    return;
2711 fail_bytes:
2712
2713    /*
2714     * Tests for various replications.  For each element width, if we
2715     * cannot find an expansion there's no point checking a larger
2716     * width because we already know by replication it cannot match.
2717     */
2718    if (vece == MO_16) {
2719        uint16_t v16 = v64;
2720
2721        if (is_shimm16(v16, &cmode, &imm8)) {
2722            tcg_out_vmovi(s, rd, q, 0, cmode, imm8);
2723            return;
2724        }
2725        if (is_shimm16(~v16, &cmode, &imm8)) {
2726            tcg_out_vmovi(s, rd, q, 1, cmode, imm8);
2727            return;
2728        }
2729
2730        /*
2731         * Otherwise, all remaining constants can be loaded in two insns:
2732         * rd = v16 & 0xff, rd |= v16 & 0xff00.
2733         */
2734        tcg_out_vmovi(s, rd, q, 0, 0x8, v16 & 0xff);
2735        tcg_out_vmovi(s, rd, q, 0, 0xb, v16 >> 8);   /* VORRI */
2736        return;
2737    }
2738
2739    if (vece == MO_32) {
2740        uint32_t v32 = v64;
2741
2742        if (is_shimm32(v32, &cmode, &imm8) ||
2743            is_soimm32(v32, &cmode, &imm8)) {
2744            tcg_out_vmovi(s, rd, q, 0, cmode, imm8);
2745            return;
2746        }
2747        if (is_shimm32(~v32, &cmode, &imm8) ||
2748            is_soimm32(~v32, &cmode, &imm8)) {
2749            tcg_out_vmovi(s, rd, q, 1, cmode, imm8);
2750            return;
2751        }
2752
2753        /*
2754         * Restrict the set of constants to those we can load with
2755         * two instructions.  Others we load from the pool.
2756         */
2757        i = is_shimm32_pair(v32, &cmode, &imm8);
2758        if (i) {
2759            tcg_out_vmovi(s, rd, q, 0, cmode, imm8);
2760            tcg_out_vmovi(s, rd, q, 0, i | 1, extract32(v32, i * 4, 8));
2761            return;
2762        }
2763        i = is_shimm32_pair(~v32, &cmode, &imm8);
2764        if (i) {
2765            tcg_out_vmovi(s, rd, q, 1, cmode, imm8);
2766            tcg_out_vmovi(s, rd, q, 1, i | 1, extract32(~v32, i * 4, 8));
2767            return;
2768        }
2769    }
2770
2771    /*
2772     * As a last resort, load from the constant pool.
2773     */
2774    if (!q || vece == MO_64) {
2775        new_pool_l2(s, R_ARM_PC11, s->code_ptr, 0, v64, v64 >> 32);
2776        /* VLDR Dd, [pc + offset] */
2777        tcg_out32(s, INSN_VLDR_D | encode_vd(rd) | (0xf << 16));
2778        if (q) {
2779            tcg_out_dup2_vec(s, rd, rd, rd);
2780        }
2781    } else {
2782        new_pool_label(s, (uint32_t)v64, R_ARM_PC8, s->code_ptr, 0);
2783        /* add tmp, pc, offset */
2784        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_TMP, TCG_REG_PC, 0);
2785        tcg_out_dupm_vec(s, type, MO_32, rd, TCG_REG_TMP, 0);
2786    }
2787}
2788
2789static const ARMInsn vec_cmp_insn[16] = {
2790    [TCG_COND_EQ] = INSN_VCEQ,
2791    [TCG_COND_GT] = INSN_VCGT,
2792    [TCG_COND_GE] = INSN_VCGE,
2793    [TCG_COND_GTU] = INSN_VCGT_U,
2794    [TCG_COND_GEU] = INSN_VCGE_U,
2795};
2796
2797static const ARMInsn vec_cmp0_insn[16] = {
2798    [TCG_COND_EQ] = INSN_VCEQ0,
2799    [TCG_COND_GT] = INSN_VCGT0,
2800    [TCG_COND_GE] = INSN_VCGE0,
2801    [TCG_COND_LT] = INSN_VCLT0,
2802    [TCG_COND_LE] = INSN_VCLE0,
2803};
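/*
 * Conditions without an entry above are handled in tcg_out_vec_op:
 * NE specially (VTST, or VCEQ plus VMVN), other missing vec_cmp_insn
 * entries by swapping operands and condition, and missing vec_cmp0_insn
 * entries by materializing zero in a register for the general compare.
 */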
2804
2805static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
2806                           unsigned vecl, unsigned vece,
2807                           const TCGArg args[TCG_MAX_OP_ARGS],
2808                           const int const_args[TCG_MAX_OP_ARGS])
2809{
2810    TCGType type = vecl + TCG_TYPE_V64;
2811    unsigned q = vecl;
2812    TCGArg a0, a1, a2, a3;
2813    int cmode, imm8;
2814
2815    a0 = args[0];
2816    a1 = args[1];
2817    a2 = args[2];
2818
2819    switch (opc) {
2820    case INDEX_op_ld_vec:
2821        tcg_out_ld(s, type, a0, a1, a2);
2822        return;
2823    case INDEX_op_st_vec:
2824        tcg_out_st(s, type, a0, a1, a2);
2825        return;
2826    case INDEX_op_dupm_vec:
2827        tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
2828        return;
2829    case INDEX_op_dup2_vec:
2830        tcg_out_dup2_vec(s, a0, a1, a2);
2831        return;
2832    case INDEX_op_abs_vec:
2833        tcg_out_vreg2(s, INSN_VABS, q, vece, a0, a1);
2834        return;
2835    case INDEX_op_neg_vec:
2836        tcg_out_vreg2(s, INSN_VNEG, q, vece, a0, a1);
2837        return;
2838    case INDEX_op_not_vec:
2839        tcg_out_vreg2(s, INSN_VMVN, q, 0, a0, a1);
2840        return;
2841    case INDEX_op_add_vec:
2842        tcg_out_vreg3(s, INSN_VADD, q, vece, a0, a1, a2);
2843        return;
2844    case INDEX_op_mul_vec:
2845        tcg_out_vreg3(s, INSN_VMUL, q, vece, a0, a1, a2);
2846        return;
2847    case INDEX_op_smax_vec:
2848        tcg_out_vreg3(s, INSN_VMAX, q, vece, a0, a1, a2);
2849        return;
2850    case INDEX_op_smin_vec:
2851        tcg_out_vreg3(s, INSN_VMIN, q, vece, a0, a1, a2);
2852        return;
2853    case INDEX_op_sub_vec:
2854        tcg_out_vreg3(s, INSN_VSUB, q, vece, a0, a1, a2);
2855        return;
2856    case INDEX_op_ssadd_vec:
2857        tcg_out_vreg3(s, INSN_VQADD, q, vece, a0, a1, a2);
2858        return;
2859    case INDEX_op_sssub_vec:
2860        tcg_out_vreg3(s, INSN_VQSUB, q, vece, a0, a1, a2);
2861        return;
2862    case INDEX_op_umax_vec:
2863        tcg_out_vreg3(s, INSN_VMAX_U, q, vece, a0, a1, a2);
2864        return;
2865    case INDEX_op_umin_vec:
2866        tcg_out_vreg3(s, INSN_VMIN_U, q, vece, a0, a1, a2);
2867        return;
2868    case INDEX_op_usadd_vec:
2869        tcg_out_vreg3(s, INSN_VQADD_U, q, vece, a0, a1, a2);
2870        return;
2871    case INDEX_op_ussub_vec:
2872        tcg_out_vreg3(s, INSN_VQSUB_U, q, vece, a0, a1, a2);
2873        return;
2874    case INDEX_op_xor_vec:
2875        tcg_out_vreg3(s, INSN_VEOR, q, 0, a0, a1, a2);
2876        return;
2877    case INDEX_op_arm_sshl_vec:
2878        /*
2879         * Note that Vm is the data and Vn is the shift count,
2880         * therefore the arguments appear reversed.
2881         */
2882        tcg_out_vreg3(s, INSN_VSHL_S, q, vece, a0, a2, a1);
2883        return;
2884    case INDEX_op_arm_ushl_vec:
2885        /* See above. */
2886        tcg_out_vreg3(s, INSN_VSHL_U, q, vece, a0, a2, a1);
2887        return;
2888    case INDEX_op_shli_vec:
2889        tcg_out_vshifti(s, INSN_VSHLI, q, a0, a1, a2 + (8 << vece));
2890        return;
2891    case INDEX_op_shri_vec:
2892        tcg_out_vshifti(s, INSN_VSHRI, q, a0, a1, (16 << vece) - a2);
2893        return;
2894    case INDEX_op_sari_vec:
2895        tcg_out_vshifti(s, INSN_VSARI, q, a0, a1, (16 << vece) - a2);
2896        return;
2897    case INDEX_op_arm_sli_vec:
2898        tcg_out_vshifti(s, INSN_VSLI, q, a0, a2, args[3] + (8 << vece));
2899        return;
2900
2901    case INDEX_op_andc_vec:
2902        if (!const_args[2]) {
2903            tcg_out_vreg3(s, INSN_VBIC, q, 0, a0, a1, a2);
2904            return;
2905        }
2906        a2 = ~a2;
2907        /* fall through */
2908    case INDEX_op_and_vec:
2909        if (const_args[2]) {
2910            is_shimm1632(~a2, &cmode, &imm8);
2911            if (a0 == a1) {
2912                tcg_out_vmovi(s, a0, q, 1, cmode | 1, imm8); /* VBICI */
2913                return;
2914            }
2915            tcg_out_vmovi(s, a0, q, 1, cmode, imm8); /* VMVNI */
2916            a2 = a0;
2917        }
2918        tcg_out_vreg3(s, INSN_VAND, q, 0, a0, a1, a2);
2919        return;
2920
2921    case INDEX_op_orc_vec:
2922        if (!const_args[2]) {
2923            tcg_out_vreg3(s, INSN_VORN, q, 0, a0, a1, a2);
2924            return;
2925        }
2926        a2 = ~a2;
2927        /* fall through */
2928    case INDEX_op_or_vec:
2929        if (const_args[2]) {
2930            is_shimm1632(a2, &cmode, &imm8);
2931            if (a0 == a1) {
2932                tcg_out_vmovi(s, a0, q, 0, cmode | 1, imm8); /* VORRI */
2933                return;
2934            }
2935            tcg_out_vmovi(s, a0, q, 0, cmode, imm8); /* VMOVI */
2936            a2 = a0;
2937        }
2938        tcg_out_vreg3(s, INSN_VORR, q, 0, a0, a1, a2);
2939        return;
2940
2941    case INDEX_op_cmp_vec:
2942        {
2943            TCGCond cond = args[3];
2944
2945            if (cond == TCG_COND_NE) {
2946                if (const_args[2]) {
2947                    tcg_out_vreg3(s, INSN_VTST, q, vece, a0, a1, a1);
2948                } else {
2949                    tcg_out_vreg3(s, INSN_VCEQ, q, vece, a0, a1, a2);
2950                    tcg_out_vreg2(s, INSN_VMVN, q, 0, a0, a0);
2951                }
2952            } else {
2953                ARMInsn insn;
2954
2955                if (const_args[2]) {
2956                    insn = vec_cmp0_insn[cond];
2957                    if (insn) {
2958                        tcg_out_vreg2(s, insn, q, vece, a0, a1);
2959                        return;
2960                    }
2961                    tcg_out_dupi_vec(s, type, MO_8, TCG_VEC_TMP, 0);
2962                    a2 = TCG_VEC_TMP;
2963                }
2964                insn = vec_cmp_insn[cond];
2965                if (insn == 0) {
2966                    TCGArg t;
2967                    t = a1, a1 = a2, a2 = t;
2968                    cond = tcg_swap_cond(cond);
2969                    insn = vec_cmp_insn[cond];
2970                    tcg_debug_assert(insn != 0);
2971                }
2972                tcg_out_vreg3(s, insn, q, vece, a0, a1, a2);
2973            }
2974        }
2975        return;
2976
2977    case INDEX_op_bitsel_vec:
2978        a3 = args[3];
2979        if (a0 == a3) {
2980            tcg_out_vreg3(s, INSN_VBIT, q, 0, a0, a2, a1);
2981        } else if (a0 == a2) {
2982            tcg_out_vreg3(s, INSN_VBIF, q, 0, a0, a3, a1);
2983        } else {
2984            tcg_out_mov(s, type, a0, a1);
2985            tcg_out_vreg3(s, INSN_VBSL, q, 0, a0, a2, a3);
2986        }
2987        return;
2988
2989    case INDEX_op_mov_vec:  /* Always emitted via tcg_out_mov.  */
2990    case INDEX_op_dup_vec:  /* Always emitted via tcg_out_dup_vec.  */
2991    default:
2992        g_assert_not_reached();
2993    }
2994}
2995
2996int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
2997{
2998    switch (opc) {
2999    case INDEX_op_add_vec:
3000    case INDEX_op_sub_vec:
3001    case INDEX_op_and_vec:
3002    case INDEX_op_andc_vec:
3003    case INDEX_op_or_vec:
3004    case INDEX_op_orc_vec:
3005    case INDEX_op_xor_vec:
3006    case INDEX_op_not_vec:
3007    case INDEX_op_shli_vec:
3008    case INDEX_op_shri_vec:
3009    case INDEX_op_sari_vec:
3010    case INDEX_op_ssadd_vec:
3011    case INDEX_op_sssub_vec:
3012    case INDEX_op_usadd_vec:
3013    case INDEX_op_ussub_vec:
3014    case INDEX_op_bitsel_vec:
3015        return 1;
3016    case INDEX_op_abs_vec:
3017    case INDEX_op_cmp_vec:
3018    case INDEX_op_mul_vec:
3019    case INDEX_op_neg_vec:
3020    case INDEX_op_smax_vec:
3021    case INDEX_op_smin_vec:
3022    case INDEX_op_umax_vec:
3023    case INDEX_op_umin_vec:
3024        return vece < MO_64;
3025    case INDEX_op_shlv_vec:
3026    case INDEX_op_shrv_vec:
3027    case INDEX_op_sarv_vec:
3028    case INDEX_op_rotli_vec:
3029    case INDEX_op_rotlv_vec:
3030    case INDEX_op_rotrv_vec:
3031        return -1;
3032    default:
3033        return 0;
3034    }
3035}
3036
3037void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
3038                       TCGArg a0, ...)
3039{
3040    va_list va;
3041    TCGv_vec v0, v1, v2, t1, t2, c1;
3042    TCGArg a2;
3043
3044    va_start(va, a0);
3045    v0 = temp_tcgv_vec(arg_temp(a0));
3046    v1 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
3047    a2 = va_arg(va, TCGArg);
3048    va_end(va);
3049
3050    switch (opc) {
3051    case INDEX_op_shlv_vec:
3052        /*
3053         * Merely propagate shlv_vec to arm_ushl_vec.
3054         * In this way we don't set TCG_TARGET_HAS_shv_vec
3055         * because everything is done via expansion.
3056         */
3057        v2 = temp_tcgv_vec(arg_temp(a2));
3058        vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(v0),
3059                  tcgv_vec_arg(v1), tcgv_vec_arg(v2));
3060        break;
3061
3062    case INDEX_op_shrv_vec:
3063    case INDEX_op_sarv_vec:
3064        /* Right shifts are negative left shifts for NEON.  */
3065        v2 = temp_tcgv_vec(arg_temp(a2));
3066        t1 = tcg_temp_new_vec(type);
3067        tcg_gen_neg_vec(vece, t1, v2);
3068        if (opc == INDEX_op_shrv_vec) {
3069            opc = INDEX_op_arm_ushl_vec;
3070        } else {
3071            opc = INDEX_op_arm_sshl_vec;
3072        }
3073        vec_gen_3(opc, type, vece, tcgv_vec_arg(v0),
3074                  tcgv_vec_arg(v1), tcgv_vec_arg(t1));
3075        tcg_temp_free_vec(t1);
3076        break;
3077
3078    case INDEX_op_rotli_vec:
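        /* rotli: v0 = (v1 >> (w - a2)) merged with (v1 << a2) via SLI. */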
3079        t1 = tcg_temp_new_vec(type);
3080        tcg_gen_shri_vec(vece, t1, v1, -a2 & ((8 << vece) - 1));
3081        vec_gen_4(INDEX_op_arm_sli_vec, type, vece,
3082                  tcgv_vec_arg(v0), tcgv_vec_arg(t1), tcgv_vec_arg(v1), a2);
3083        tcg_temp_free_vec(t1);
3084        break;
3085
3086    case INDEX_op_rotlv_vec:
3087        v2 = temp_tcgv_vec(arg_temp(a2));
3088        t1 = tcg_temp_new_vec(type);
3089        c1 = tcg_constant_vec(type, vece, 8 << vece);
3090        tcg_gen_sub_vec(vece, t1, v2, c1);
3091        /* Right shifts are negative left shifts for NEON.  */
3092        vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(t1),
3093                  tcgv_vec_arg(v1), tcgv_vec_arg(t1));
3094        vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(v0),
3095                  tcgv_vec_arg(v1), tcgv_vec_arg(v2));
3096        tcg_gen_or_vec(vece, v0, v0, t1);
3097        tcg_temp_free_vec(t1);
3098        break;
3099
3100    case INDEX_op_rotrv_vec:
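        /* rotrv: v0 = (v1 >> v2) | (v1 << (w - v2)), both done via VSHL. */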
3101        v2 = temp_tcgv_vec(arg_temp(a2));
3102        t1 = tcg_temp_new_vec(type);
3103        t2 = tcg_temp_new_vec(type);
3104        c1 = tcg_constant_vec(type, vece, 8 << vece);
3105        tcg_gen_neg_vec(vece, t1, v2);
3106        tcg_gen_sub_vec(vece, t2, c1, v2);
3107        /* Right shifts are negative left shifts for NEON.  */
3108        vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(t1),
3109                  tcgv_vec_arg(v1), tcgv_vec_arg(t1));
3110        vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(t2),
3111                  tcgv_vec_arg(v1), tcgv_vec_arg(t2));
3112        tcg_gen_or_vec(vece, v0, t1, t2);
3113        tcg_temp_free_vec(t1);
3114        tcg_temp_free_vec(t2);
3115        break;
3116
3117    default:
3118        g_assert_not_reached();
3119    }
3120}
3121
3122static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
3123{
3124    int i;
3125    for (i = 0; i < count; ++i) {
3126        p[i] = INSN_NOP;
3127    }
3128}
3129
3130/* Compute frame size via macros, to share between tcg_target_qemu_prologue
3131   and tcg_register_jit.  */
3132
3133#define PUSH_SIZE  ((11 - 4 + 1 + 1) * sizeof(tcg_target_long))
3134
3135#define FRAME_SIZE \
3136    ((PUSH_SIZE \
3137      + TCG_STATIC_CALL_ARGS_SIZE \
3138      + CPU_TEMP_BUF_NLONGS * sizeof(long) \
3139      + TCG_TARGET_STACK_ALIGN - 1) \
3140     & -TCG_TARGET_STACK_ALIGN)
3141
3142#define STACK_ADDEND  (FRAME_SIZE - PUSH_SIZE)
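/*
 * PUSH_SIZE covers r4-r11 plus lr: 9 words, 36 bytes.  As a worked
 * example with assumed values (TCG_STATIC_CALL_ARGS_SIZE == 128 and
 * CPU_TEMP_BUF_NLONGS == 128, i.e. 512 bytes of temp buffer),
 * FRAME_SIZE rounds 36 + 128 + 512 = 676 up to 680, and STACK_ADDEND
 * is then 644.
 */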
3143
3144static void tcg_target_qemu_prologue(TCGContext *s)
3145{
3146    /* Calling convention requires us to save r4-r11 and lr.  */
3147    /* stmdb sp!, { r4 - r11, lr } */
3148    tcg_out_ldstm(s, COND_AL, INSN_STMDB, TCG_REG_CALL_STACK,
3149                  (1 << TCG_REG_R4) | (1 << TCG_REG_R5) | (1 << TCG_REG_R6) |
3150                  (1 << TCG_REG_R7) | (1 << TCG_REG_R8) | (1 << TCG_REG_R9) |
3151                  (1 << TCG_REG_R10) | (1 << TCG_REG_R11) | (1 << TCG_REG_R14));
3152
3153    /* Reserve callee argument and tcg temp space.  */
3154    tcg_out_dat_rI(s, COND_AL, ARITH_SUB, TCG_REG_CALL_STACK,
3155                   TCG_REG_CALL_STACK, STACK_ADDEND, 1);
3156    tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE,
3157                  CPU_TEMP_BUF_NLONGS * sizeof(long));
3158
3159    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
3160
3161#ifndef CONFIG_SOFTMMU
3162    if (guest_base) {
3163        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_GUEST_BASE, guest_base);
3164        tcg_regset_set_reg(s->reserved_regs, TCG_REG_GUEST_BASE);
3165    }
3166#endif
3167
3168    tcg_out_b_reg(s, COND_AL, tcg_target_call_iarg_regs[1]);
3169
3170    /*
3171     * Return path for goto_ptr. Set return value to 0, a-la exit_tb,
3172     * and fall through to the rest of the epilogue.
3173     */
3174    tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
3175    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, 0);
3176    tcg_out_epilogue(s);
3177}
3178
3179static void tcg_out_epilogue(TCGContext *s)
3180{
3181    /* Release local stack frame.  */
3182    tcg_out_dat_rI(s, COND_AL, ARITH_ADD, TCG_REG_CALL_STACK,
3183                   TCG_REG_CALL_STACK, STACK_ADDEND, 1);
3184
3185    /* ldmia sp!, { r4 - r11, pc } */
3186    tcg_out_ldstm(s, COND_AL, INSN_LDMIA, TCG_REG_CALL_STACK,
3187                  (1 << TCG_REG_R4) | (1 << TCG_REG_R5) | (1 << TCG_REG_R6) |
3188                  (1 << TCG_REG_R7) | (1 << TCG_REG_R8) | (1 << TCG_REG_R9) |
3189                  (1 << TCG_REG_R10) | (1 << TCG_REG_R11) | (1 << TCG_REG_PC));
3190}
3191
3192typedef struct {
3193    DebugFrameHeader h;
3194    uint8_t fde_def_cfa[4];
3195    uint8_t fde_reg_ofs[18];
3196} DebugFrame;
3197
3198#define ELF_HOST_MACHINE EM_ARM
3199
3200/* We're expecting a 2 byte uleb128 encoded value.  */
3201QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));
3202
3203static const DebugFrame debug_frame = {
3204    .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
3205    .h.cie.id = -1,
3206    .h.cie.version = 1,
3207    .h.cie.code_align = 1,
3208    .h.cie.data_align = 0x7c,             /* sleb128 -4 */
3209    .h.cie.return_column = 14,
3210
3211    /* Total FDE size does not include the "len" member.  */
3212    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),
3213
3214    .fde_def_cfa = {
3215        12, 13,                         /* DW_CFA_def_cfa sp, ... */
3216        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
3217        (FRAME_SIZE >> 7)
3218    },
3219    .fde_reg_ofs = {
3220        /* The following must match the stmdb in the prologue.  */
3221        0x8e, 1,                        /* DW_CFA_offset, lr, -4 */
3222        0x8b, 2,                        /* DW_CFA_offset, r11, -8 */
3223        0x8a, 3,                        /* DW_CFA_offset, r10, -12 */
3224        0x89, 4,                        /* DW_CFA_offset, r9, -16 */
3225        0x88, 5,                        /* DW_CFA_offset, r8, -20 */
3226        0x87, 6,                        /* DW_CFA_offset, r7, -24 */
3227        0x86, 7,                        /* DW_CFA_offset, r6, -28 */
3228        0x85, 8,                        /* DW_CFA_offset, r5, -32 */
3229        0x84, 9,                        /* DW_CFA_offset, r4, -36 */
3230    }
3231};
3232
3233void tcg_register_jit(const void *buf, size_t buf_size)
3234{
3235    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
3236}
3237