1/*
2 * Tiny Code Generator for QEMU
3 *
4 * Copyright (c) 2018 SiFive, Inc
5 * Copyright (c) 2008-2009 Arnaud Patard <arnaud.patard@rtp-net.org>
6 * Copyright (c) 2009 Aurelien Jarno <aurelien@aurel32.net>
7 * Copyright (c) 2008 Fabrice Bellard
8 *
9 * Based on i386/tcg-target.c and mips/tcg-target.c
10 *
11 * Permission is hereby granted, free of charge, to any person obtaining a copy
12 * of this software and associated documentation files (the "Software"), to deal
13 * in the Software without restriction, including without limitation the rights
14 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
15 * copies of the Software, and to permit persons to whom the Software is
16 * furnished to do so, subject to the following conditions:
17 *
18 * The above copyright notice and this permission notice shall be included in
19 * all copies or substantial portions of the Software.
20 *
21 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
22 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
23 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
24 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
25 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
26 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
27 * THE SOFTWARE.
28 */
29
30/* Used for function call generation. */
31#define TCG_REG_CALL_STACK              TCG_REG_SP
32#define TCG_TARGET_STACK_ALIGN          16
33#define TCG_TARGET_CALL_STACK_OFFSET    0
34#define TCG_TARGET_CALL_ARG_I32         TCG_CALL_ARG_NORMAL
35#define TCG_TARGET_CALL_ARG_I64         TCG_CALL_ARG_NORMAL
36#define TCG_TARGET_CALL_ARG_I128        TCG_CALL_ARG_NORMAL
37#define TCG_TARGET_CALL_RET_I128        TCG_CALL_RET_NORMAL
38
39#ifdef CONFIG_DEBUG_TCG
40static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
41    "zero", "ra",  "sp",  "gp",  "tp",  "t0",  "t1",  "t2",
42    "s0",   "s1",  "a0",  "a1",  "a2",  "a3",  "a4",  "a5",
43    "a6",   "a7",  "s2",  "s3",  "s4",  "s5",  "s6",  "s7",
44    "s8",   "s9",  "s10", "s11", "t3",  "t4",  "t5",  "t6",
45    "v0",   "v1",  "v2",  "v3",  "v4",  "v5",  "v6",  "v7",
46    "v8",   "v9",  "v10", "v11", "v12", "v13", "v14", "v15",
47    "v16",  "v17", "v18", "v19", "v20", "v21", "v22", "v23",
48    "v24",  "v25", "v26", "v27", "v28", "v29", "v30", "v31",
49};
50#endif
51
52static const int tcg_target_reg_alloc_order[] = {
53    /* Call saved registers */
54    /* TCG_REG_S0 reserved for TCG_AREG0 */
55    TCG_REG_S1,
56    TCG_REG_S2,
57    TCG_REG_S3,
58    TCG_REG_S4,
59    TCG_REG_S5,
60    TCG_REG_S6,
61    TCG_REG_S7,
62    TCG_REG_S8,
63    TCG_REG_S9,
64    TCG_REG_S10,
65    TCG_REG_S11,
66
67    /* Call clobbered registers */
68    TCG_REG_T0,
69    TCG_REG_T1,
70    TCG_REG_T2,
71    TCG_REG_T3,
72    TCG_REG_T4,
73    TCG_REG_T5,
74    TCG_REG_T6,
75
76    /* Argument registers */
77    TCG_REG_A0,
78    TCG_REG_A1,
79    TCG_REG_A2,
80    TCG_REG_A3,
81    TCG_REG_A4,
82    TCG_REG_A5,
83    TCG_REG_A6,
84    TCG_REG_A7,
85
86    /* Vector registers; TCG_REG_V0 is reserved as the mask register. */
87    TCG_REG_V1,  TCG_REG_V2,  TCG_REG_V3,  TCG_REG_V4,
88    TCG_REG_V5,  TCG_REG_V6,  TCG_REG_V7,  TCG_REG_V8,
89    TCG_REG_V9,  TCG_REG_V10, TCG_REG_V11, TCG_REG_V12,
90    TCG_REG_V13, TCG_REG_V14, TCG_REG_V15, TCG_REG_V16,
91    TCG_REG_V17, TCG_REG_V18, TCG_REG_V19, TCG_REG_V20,
92    TCG_REG_V21, TCG_REG_V22, TCG_REG_V23, TCG_REG_V24,
93    TCG_REG_V25, TCG_REG_V26, TCG_REG_V27, TCG_REG_V28,
94    TCG_REG_V29, TCG_REG_V30, TCG_REG_V31,
95};
96
97static const int tcg_target_call_iarg_regs[] = {
98    TCG_REG_A0,
99    TCG_REG_A1,
100    TCG_REG_A2,
101    TCG_REG_A3,
102    TCG_REG_A4,
103    TCG_REG_A5,
104    TCG_REG_A6,
105    TCG_REG_A7,
106};
107
108static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
109{
110    tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
111    tcg_debug_assert(slot >= 0 && slot <= 1);
112    return TCG_REG_A0 + slot;
113}
114
115#define TCG_CT_CONST_S12     0x100
116#define TCG_CT_CONST_N12     0x200
117#define TCG_CT_CONST_M12     0x400
118#define TCG_CT_CONST_S5      0x800
119#define TCG_CT_CONST_CMP_VI 0x1000
120
121#define ALL_GENERAL_REGS   MAKE_64BIT_MASK(0, 32)
122#define ALL_VECTOR_REGS    MAKE_64BIT_MASK(32, 32)
123#define ALL_DVECTOR_REG_GROUPS 0x5555555500000000
124#define ALL_QVECTOR_REG_GROUPS 0x1111111100000000
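
/*
 * Note for readers: vector registers occupy bits 32..63 of the 64-bit
 * register-set masks above.  The DVECTOR/QVECTOR masks keep only the
 * legal group bases when registers are grouped by LMUL, e.g.:
 *
 *   0x55555555 << 32  ->  v0, v2, v4, ...   (pairs, LMUL = 2)
 *   0x11111111 << 32  ->  v0, v4, v8, ...   (quads, LMUL = 4)
 *
 * since a group of 2^n registers must start on a 2^n-aligned register
 * number per the V extension.
 */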
125
126#define sextreg  sextract64
127
128/*
129 * RISC-V Base ISA opcodes (IM)
130 */
131
132#define V_OPIVV (0x0 << 12)
133#define V_OPFVV (0x1 << 12)
134#define V_OPMVV (0x2 << 12)
135#define V_OPIVI (0x3 << 12)
136#define V_OPIVX (0x4 << 12)
137#define V_OPFVF (0x5 << 12)
138#define V_OPMVX (0x6 << 12)
139#define V_OPCFG (0x7 << 12)
140
141/* NF <= 7 && NF >= 0 */
142#define V_NF(x) (x << 29)
143#define V_UNIT_STRIDE (0x0 << 20)
144#define V_UNIT_STRIDE_WHOLE_REG (0x8 << 20)
145
146typedef enum {
147    VLMUL_M1 = 0, /* LMUL=1 */
148    VLMUL_M2,     /* LMUL=2 */
149    VLMUL_M4,     /* LMUL=4 */
150    VLMUL_M8,     /* LMUL=8 */
151    VLMUL_RESERVED,
152    VLMUL_MF8,    /* LMUL=1/8 */
153    VLMUL_MF4,    /* LMUL=1/4 */
154    VLMUL_MF2,    /* LMUL=1/2 */
155} RISCVVlmul;
156
157typedef enum {
158    OPC_ADD = 0x33,
159    OPC_ADDI = 0x13,
160    OPC_AND = 0x7033,
161    OPC_ANDI = 0x7013,
162    OPC_AUIPC = 0x17,
163    OPC_BEQ = 0x63,
164    OPC_BEXTI = 0x48005013,
165    OPC_BGE = 0x5063,
166    OPC_BGEU = 0x7063,
167    OPC_BLT = 0x4063,
168    OPC_BLTU = 0x6063,
169    OPC_BNE = 0x1063,
170    OPC_DIV = 0x2004033,
171    OPC_DIVU = 0x2005033,
172    OPC_JAL = 0x6f,
173    OPC_JALR = 0x67,
174    OPC_LB = 0x3,
175    OPC_LBU = 0x4003,
176    OPC_LD = 0x3003,
177    OPC_LH = 0x1003,
178    OPC_LHU = 0x5003,
179    OPC_LUI = 0x37,
180    OPC_LW = 0x2003,
181    OPC_LWU = 0x6003,
182    OPC_MUL = 0x2000033,
183    OPC_MULH = 0x2001033,
184    OPC_MULHSU = 0x2002033,
185    OPC_MULHU = 0x2003033,
186    OPC_OR = 0x6033,
187    OPC_ORI = 0x6013,
188    OPC_REM = 0x2006033,
189    OPC_REMU = 0x2007033,
190    OPC_SB = 0x23,
191    OPC_SD = 0x3023,
192    OPC_SH = 0x1023,
193    OPC_SLL = 0x1033,
194    OPC_SLLI = 0x1013,
195    OPC_SLT = 0x2033,
196    OPC_SLTI = 0x2013,
197    OPC_SLTIU = 0x3013,
198    OPC_SLTU = 0x3033,
199    OPC_SRA = 0x40005033,
200    OPC_SRAI = 0x40005013,
201    OPC_SRL = 0x5033,
202    OPC_SRLI = 0x5013,
203    OPC_SUB = 0x40000033,
204    OPC_SW = 0x2023,
205    OPC_XOR = 0x4033,
206    OPC_XORI = 0x4013,
207
208    OPC_ADDIW = 0x1b,
209    OPC_ADDW = 0x3b,
210    OPC_DIVUW = 0x200503b,
211    OPC_DIVW = 0x200403b,
212    OPC_MULW = 0x200003b,
213    OPC_REMUW = 0x200703b,
214    OPC_REMW = 0x200603b,
215    OPC_SLLIW = 0x101b,
216    OPC_SLLW = 0x103b,
217    OPC_SRAIW = 0x4000501b,
218    OPC_SRAW = 0x4000503b,
219    OPC_SRLIW = 0x501b,
220    OPC_SRLW = 0x503b,
221    OPC_SUBW = 0x4000003b,
222
223    OPC_FENCE = 0x0000000f,
224    OPC_NOP   = OPC_ADDI,   /* nop = addi x0,x0,0 */
225
226    /* Zba: Bit manipulation extension, address generation */
227    OPC_ADD_UW = 0x0800003b,
228
229    /* Zbb: Bit manipulation extension, basic bit manipulation */
230    OPC_ANDN   = 0x40007033,
231    OPC_CLZ    = 0x60001013,
232    OPC_CLZW   = 0x6000101b,
233    OPC_CPOP   = 0x60201013,
234    OPC_CPOPW  = 0x6020101b,
235    OPC_CTZ    = 0x60101013,
236    OPC_CTZW   = 0x6010101b,
237    OPC_ORN    = 0x40006033,
238    OPC_REV8   = 0x6b805013,
239    OPC_ROL    = 0x60001033,
240    OPC_ROLW   = 0x6000103b,
241    OPC_ROR    = 0x60005033,
242    OPC_RORW   = 0x6000503b,
243    OPC_RORI   = 0x60005013,
244    OPC_RORIW  = 0x6000501b,
245    OPC_SEXT_B = 0x60401013,
246    OPC_SEXT_H = 0x60501013,
247    OPC_XNOR   = 0x40004033,
248    OPC_ZEXT_H = 0x0800403b,
249
250    /* Zicond: integer conditional operations */
251    OPC_CZERO_EQZ = 0x0e005033,
252    OPC_CZERO_NEZ = 0x0e007033,
253
254    /* V: Vector extension 1.0 */
255    OPC_VSETVLI  = 0x57 | V_OPCFG,
256    OPC_VSETIVLI = 0xc0000057 | V_OPCFG,
257    OPC_VSETVL   = 0x80000057 | V_OPCFG,
258
259    OPC_VLE8_V  = 0x7 | V_UNIT_STRIDE,
260    OPC_VLE16_V = 0x5007 | V_UNIT_STRIDE,
261    OPC_VLE32_V = 0x6007 | V_UNIT_STRIDE,
262    OPC_VLE64_V = 0x7007 | V_UNIT_STRIDE,
263    OPC_VSE8_V  = 0x27 | V_UNIT_STRIDE,
264    OPC_VSE16_V = 0x5027 | V_UNIT_STRIDE,
265    OPC_VSE32_V = 0x6027 | V_UNIT_STRIDE,
266    OPC_VSE64_V = 0x7027 | V_UNIT_STRIDE,
267
268    OPC_VL1RE64_V = 0x2007007 | V_UNIT_STRIDE_WHOLE_REG | V_NF(0),
269    OPC_VL2RE64_V = 0x2007007 | V_UNIT_STRIDE_WHOLE_REG | V_NF(1),
270    OPC_VL4RE64_V = 0x2007007 | V_UNIT_STRIDE_WHOLE_REG | V_NF(3),
271    OPC_VL8RE64_V = 0x2007007 | V_UNIT_STRIDE_WHOLE_REG | V_NF(7),
272
273    OPC_VS1R_V = 0x2000027 | V_UNIT_STRIDE_WHOLE_REG | V_NF(0),
274    OPC_VS2R_V = 0x2000027 | V_UNIT_STRIDE_WHOLE_REG | V_NF(1),
275    OPC_VS4R_V = 0x2000027 | V_UNIT_STRIDE_WHOLE_REG | V_NF(3),
276    OPC_VS8R_V = 0x2000027 | V_UNIT_STRIDE_WHOLE_REG | V_NF(7),
277
278    OPC_VMERGE_VIM = 0x5c000057 | V_OPIVI,
279    OPC_VMERGE_VVM = 0x5c000057 | V_OPIVV,
280
281    OPC_VADD_VV = 0x57 | V_OPIVV,
282    OPC_VADD_VI = 0x57 | V_OPIVI,
283    OPC_VSUB_VV = 0x8000057 | V_OPIVV,
284    OPC_VRSUB_VI = 0xc000057 | V_OPIVI,
285    OPC_VAND_VV = 0x24000057 | V_OPIVV,
286    OPC_VAND_VI = 0x24000057 | V_OPIVI,
287    OPC_VOR_VV = 0x28000057 | V_OPIVV,
288    OPC_VOR_VI = 0x28000057 | V_OPIVI,
289    OPC_VXOR_VV = 0x2c000057 | V_OPIVV,
290    OPC_VXOR_VI = 0x2c000057 | V_OPIVI,
291
292    OPC_VMUL_VV = 0x94000057 | V_OPMVV,
293    OPC_VSADD_VV = 0x84000057 | V_OPIVV,
294    OPC_VSADD_VI = 0x84000057 | V_OPIVI,
295    OPC_VSSUB_VV = 0x8c000057 | V_OPIVV,
296    OPC_VSSUB_VI = 0x8c000057 | V_OPIVI,
297    OPC_VSADDU_VV = 0x80000057 | V_OPIVV,
298    OPC_VSADDU_VI = 0x80000057 | V_OPIVI,
299    OPC_VSSUBU_VV = 0x88000057 | V_OPIVV,
300    OPC_VSSUBU_VI = 0x88000057 | V_OPIVI,
301
302    OPC_VMAX_VV = 0x1c000057 | V_OPIVV,
303    OPC_VMAX_VI = 0x1c000057 | V_OPIVI,
304    OPC_VMAXU_VV = 0x18000057 | V_OPIVV,
305    OPC_VMAXU_VI = 0x18000057 | V_OPIVI,
306    OPC_VMIN_VV = 0x14000057 | V_OPIVV,
307    OPC_VMIN_VI = 0x14000057 | V_OPIVI,
308    OPC_VMINU_VV = 0x10000057 | V_OPIVV,
309    OPC_VMINU_VI = 0x10000057 | V_OPIVI,
310
311    OPC_VMSEQ_VV = 0x60000057 | V_OPIVV,
312    OPC_VMSEQ_VI = 0x60000057 | V_OPIVI,
313    OPC_VMSEQ_VX = 0x60000057 | V_OPIVX,
314    OPC_VMSNE_VV = 0x64000057 | V_OPIVV,
315    OPC_VMSNE_VI = 0x64000057 | V_OPIVI,
316    OPC_VMSNE_VX = 0x64000057 | V_OPIVX,
317
318    OPC_VMSLTU_VV = 0x68000057 | V_OPIVV,
319    OPC_VMSLTU_VX = 0x68000057 | V_OPIVX,
320    OPC_VMSLT_VV = 0x6c000057 | V_OPIVV,
321    OPC_VMSLT_VX = 0x6c000057 | V_OPIVX,
322    OPC_VMSLEU_VV = 0x70000057 | V_OPIVV,
323    OPC_VMSLEU_VX = 0x70000057 | V_OPIVX,
324    OPC_VMSLE_VV = 0x74000057 | V_OPIVV,
325    OPC_VMSLE_VX = 0x74000057 | V_OPIVX,
326
327    OPC_VMSLEU_VI = 0x70000057 | V_OPIVI,
328    OPC_VMSLE_VI = 0x74000057 | V_OPIVI,
329    OPC_VMSGTU_VI = 0x78000057 | V_OPIVI,
330    OPC_VMSGTU_VX = 0x78000057 | V_OPIVX,
331    OPC_VMSGT_VI = 0x7c000057 | V_OPIVI,
332    OPC_VMSGT_VX = 0x7c000057 | V_OPIVX,
333
334    OPC_VSLL_VV = 0x94000057 | V_OPIVV,
335    OPC_VSLL_VI = 0x94000057 | V_OPIVI,
336    OPC_VSLL_VX = 0x94000057 | V_OPIVX,
337    OPC_VSRL_VV = 0xa0000057 | V_OPIVV,
338    OPC_VSRL_VI = 0xa0000057 | V_OPIVI,
339    OPC_VSRL_VX = 0xa0000057 | V_OPIVX,
340    OPC_VSRA_VV = 0xa4000057 | V_OPIVV,
341    OPC_VSRA_VI = 0xa4000057 | V_OPIVI,
342    OPC_VSRA_VX = 0xa4000057 | V_OPIVX,
343
344    OPC_VMV_V_V = 0x5e000057 | V_OPIVV,
345    OPC_VMV_V_I = 0x5e000057 | V_OPIVI,
346    OPC_VMV_V_X = 0x5e000057 | V_OPIVX,
347
348    OPC_VMVNR_V = 0x9e000057 | V_OPIVI,
349} RISCVInsn;
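
/*
 * Reading guide (not used by the code): each OPC_* value above is the
 * instruction with all register/immediate fields zero, i.e. just the
 * opcode, funct3 and funct7 bits.  E.g. OPC_ADD = 0x33 is opcode 0110011
 * with funct3=0/funct7=0, and OPC_SUB = 0x40000033 is the same plus
 * bit 30 (funct7 = 0100000).  The encode_* helpers below OR the operand
 * fields into these templates.
 */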
350
351static const struct {
352    RISCVInsn op;
353    bool swap;
354} tcg_cmpcond_to_rvv_vv[] = {
355    [TCG_COND_EQ] =  { OPC_VMSEQ_VV,  false },
356    [TCG_COND_NE] =  { OPC_VMSNE_VV,  false },
357    [TCG_COND_LT] =  { OPC_VMSLT_VV,  false },
358    [TCG_COND_GE] =  { OPC_VMSLE_VV,  true  },
359    [TCG_COND_GT] =  { OPC_VMSLT_VV,  true  },
360    [TCG_COND_LE] =  { OPC_VMSLE_VV,  false },
361    [TCG_COND_LTU] = { OPC_VMSLTU_VV, false },
362    [TCG_COND_GEU] = { OPC_VMSLEU_VV, true  },
363    [TCG_COND_GTU] = { OPC_VMSLTU_VV, true  },
364    [TCG_COND_LEU] = { OPC_VMSLEU_VV, false }
365};
366
367static const struct {
368    RISCVInsn op;
369    int min;
370    int max;
371    bool adjust;
372}  tcg_cmpcond_to_rvv_vi[] = {
373    [TCG_COND_EQ]  = { OPC_VMSEQ_VI,  -16, 15, false },
374    [TCG_COND_NE]  = { OPC_VMSNE_VI,  -16, 15, false },
375    [TCG_COND_GT]  = { OPC_VMSGT_VI,  -16, 15, false },
376    [TCG_COND_LE]  = { OPC_VMSLE_VI,  -16, 15, false },
377    [TCG_COND_LT]  = { OPC_VMSLE_VI,  -15, 16, true  },
378    [TCG_COND_GE]  = { OPC_VMSGT_VI,  -15, 16, true  },
379    [TCG_COND_LEU] = { OPC_VMSLEU_VI,   0, 15, false },
380    [TCG_COND_GTU] = { OPC_VMSGTU_VI,   0, 15, false },
381    [TCG_COND_LTU] = { OPC_VMSLEU_VI,   1, 16, true  },
382    [TCG_COND_GEU] = { OPC_VMSGTU_VI,   1, 16, true  },
383};
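
/*
 * Rough worked example of the 'adjust' trick above (explanatory only):
 * RVV has no vmslt.vi/vmsge.vi, so a signed "x < 5" is rewritten as
 * "x <= 4" and emitted as vmsle.vi with the constant decremented by 1
 * (see tcg_out_cmpsel below, which applies "cmp2 - adjust").  The min/max
 * bounds are shifted by one for those rows so that the adjusted constant
 * still fits the 5-bit immediate field.
 */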
384
385/* test if a constant matches the constraint */
386static bool tcg_target_const_match(int64_t val, int ct,
387                                   TCGType type, TCGCond cond, int vece)
388{
389    if (ct & TCG_CT_CONST) {
390        return 1;
391    }
392    if (type >= TCG_TYPE_V64) {
393        /* Val is replicated by VECE; extract the highest element. */
394        val >>= (-8 << vece) & 63;
395    }
396    /*
397     * Sign extended from 12 bits: [-0x800, 0x7ff].
398     * Used for most arithmetic, as this is the ISA's 12-bit immediate field.
399     */
400    if ((ct & TCG_CT_CONST_S12) && val >= -0x800 && val <= 0x7ff) {
401        return 1;
402    }
403    /*
404     * Sign extended from 12 bits, negated: [-0x7ff, 0x800].
405     * Used for subtraction, where a constant must be handled by ADDI.
406     */
407    if ((ct & TCG_CT_CONST_N12) && val >= -0x7ff && val <= 0x800) {
408        return 1;
409    }
410    /*
411     * Sign extended from 12 bits, +/- matching: [-0x7ff, 0x7ff].
412     * Used by addsub2 and movcond, which may need the negative value,
413     * and requires the modified constant to be representable.
414     */
415    if ((ct & TCG_CT_CONST_M12) && val >= -0x7ff && val <= 0x7ff) {
416        return 1;
417    }
418    /*
419     * Sign extended from 5 bits: [-0x10, 0x0f].
420     * Used for vector-immediate.
421     */
422    if ((ct & TCG_CT_CONST_S5) && val >= -0x10 && val <= 0x0f) {
423        return 1;
424    }
425    /*
426     * Used for vector compare OPIVI instructions.
427     */
428    if ((ct & TCG_CT_CONST_CMP_VI) &&
429        val >= tcg_cmpcond_to_rvv_vi[cond].min &&
430        val <= tcg_cmpcond_to_rvv_vi[cond].max) {
431        return true;
432    }
433    return 0;
434}
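
/*
 * Illustration of the "val >>= (-8 << vece) & 63" step above: a dupi
 * constant arrives with the element replicated across 64 bits, so the
 * shift amount (56 for MO_8, 48 for MO_16, 32 for MO_32, 0 for MO_64)
 * drops all but the highest copy, leaving one sign-extended element to
 * test against the 5- or 12-bit immediate ranges.
 */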
435
436/*
437 * RISC-V immediate and instruction encoders (excludes 16-bit RVC)
438 */
439
440/* Type-R */
441
442static int32_t encode_r(RISCVInsn opc, TCGReg rd, TCGReg rs1, TCGReg rs2)
443{
444    return opc | (rd & 0x1f) << 7 | (rs1 & 0x1f) << 15 | (rs2 & 0x1f) << 20;
445}
446
447/* Type-I */
448
449static int32_t encode_imm12(uint32_t imm)
450{
451    return (imm & 0xfff) << 20;
452}
453
454static int32_t encode_i(RISCVInsn opc, TCGReg rd, TCGReg rs1, uint32_t imm)
455{
456    return opc | (rd & 0x1f) << 7 | (rs1 & 0x1f) << 15 | encode_imm12(imm);
457}
458
459/* Type-S */
460
461static int32_t encode_simm12(uint32_t imm)
462{
463    int32_t ret = 0;
464
465    ret |= (imm & 0xFE0) << 20;
466    ret |= (imm & 0x1F) << 7;
467
468    return ret;
469}
470
471static int32_t encode_s(RISCVInsn opc, TCGReg rs1, TCGReg rs2, uint32_t imm)
472{
473    return opc | (rs1 & 0x1f) << 15 | (rs2 & 0x1f) << 20 | encode_simm12(imm);
474}
475
476/* Type-SB */
477
478static int32_t encode_sbimm12(uint32_t imm)
479{
480    int32_t ret = 0;
481
482    ret |= (imm & 0x1000) << 19;
483    ret |= (imm & 0x7e0) << 20;
484    ret |= (imm & 0x1e) << 7;
485    ret |= (imm & 0x800) >> 4;
486
487    return ret;
488}
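
/*
 * Sketch of the B-type immediate scatter done above (for reference only):
 *
 *   insn[31]    = imm[12]      (0x1000 << 19)
 *   insn[30:25] = imm[10:5]    (0x7e0  << 20)
 *   insn[11:8]  = imm[4:1]     (0x1e   << 7)
 *   insn[7]     = imm[11]      (0x800  >> 4)
 *
 * e.g. a branch offset of +8 sets only imm[3], which lands in insn bit 10;
 * imm[0] has no encoding slot because branch targets are 2-byte aligned.
 */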
489
490static int32_t encode_sb(RISCVInsn opc, TCGReg rs1, TCGReg rs2, uint32_t imm)
491{
492    return opc | (rs1 & 0x1f) << 15 | (rs2 & 0x1f) << 20 | encode_sbimm12(imm);
493}
494
495/* Type-U */
496
497static int32_t encode_uimm20(uint32_t imm)
498{
499    return imm & 0xfffff000;
500}
501
502static int32_t encode_u(RISCVInsn opc, TCGReg rd, uint32_t imm)
503{
504    return opc | (rd & 0x1f) << 7 | encode_uimm20(imm);
505}
506
507/* Type-UJ */
508
509static int32_t encode_ujimm20(uint32_t imm)
510{
511    int32_t ret = 0;
512
513    ret |= (imm & 0x0007fe) << (21 - 1);
514    ret |= (imm & 0x000800) << (20 - 11);
515    ret |= (imm & 0x0ff000) << (12 - 12);
516    ret |= (imm & 0x100000) << (31 - 20);
517
518    return ret;
519}
520
521static int32_t encode_uj(RISCVInsn opc, TCGReg rd, uint32_t imm)
522{
523    return opc | (rd & 0x1f) << 7 | encode_ujimm20(imm);
524}
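
/*
 * The J-type scatter above, spelled out (explanatory only):
 *
 *   insn[31]    = imm[20]
 *   insn[30:21] = imm[10:1]
 *   insn[20]    = imm[11]
 *   insn[19:12] = imm[19:12]
 *
 * which is why a JAL can reach roughly +/-1 MiB: 20 encoded bits plus
 * the implicit zero in imm[0].
 */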
525
526
527/* Type-OPIVI */
528
529static int32_t encode_vi(RISCVInsn opc, TCGReg rd, int32_t imm,
530                         TCGReg vs2, bool vm)
531{
532    return opc | (rd & 0x1f) << 7 | (imm & 0x1f) << 15 |
533           (vs2 & 0x1f) << 20 | (vm << 25);
534}
535
536/* Type-OPIVV/OPMVV/OPIVX/OPMVX, Vector load and store */
537
538static int32_t encode_v(RISCVInsn opc, TCGReg d, TCGReg s1,
539                        TCGReg s2, bool vm)
540{
541    return opc | (d & 0x1f) << 7 | (s1 & 0x1f) << 15 |
542           (s2 & 0x1f) << 20 | (vm << 25);
543}
544
545/* Vector vtype */
546
547static uint32_t encode_vtype(bool vta, bool vma,
548                            MemOp vsew, RISCVVlmul vlmul)
549{
550    return vma << 7 | vta << 6 | vsew << 3 | vlmul;
551}
552
553static int32_t encode_vset(RISCVInsn opc, TCGReg rd,
554                           TCGArg rs1, uint32_t vtype)
555{
556    return opc | (rd & 0x1f) << 7 | (rs1 & 0x1f) << 15 | (vtype & 0x7ff) << 20;
557}
558
559static int32_t encode_vseti(RISCVInsn opc, TCGReg rd,
560                            uint32_t uimm, uint32_t vtype)
561{
562    return opc | (rd & 0x1f) << 7 | (uimm & 0x1f) << 15 | (vtype & 0x3ff) << 20;
563}
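
/*
 * Worked example of the vtype encoders above (illustration only):
 * encode_vtype(true, true, MO_32, VLMUL_M1) gives
 *   vma<<7 | vta<<6 | vsew<<3 | vlmul = 0x80 | 0x40 | 0x10 | 0 = 0xd0,
 * i.e. the "e32, m1, ta, ma" setting that encode_vset/encode_vseti then
 * place in the upper immediate field of vsetvl(i)/vsetivli.
 */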
564
565/*
566 * RISC-V instruction emitters
567 */
568
569static void tcg_out_opc_reg(TCGContext *s, RISCVInsn opc,
570                            TCGReg rd, TCGReg rs1, TCGReg rs2)
571{
572    tcg_out32(s, encode_r(opc, rd, rs1, rs2));
573}
574
575static void tcg_out_opc_imm(TCGContext *s, RISCVInsn opc,
576                            TCGReg rd, TCGReg rs1, TCGArg imm)
577{
578    tcg_out32(s, encode_i(opc, rd, rs1, imm));
579}
580
581static void tcg_out_opc_store(TCGContext *s, RISCVInsn opc,
582                              TCGReg rs1, TCGReg rs2, uint32_t imm)
583{
584    tcg_out32(s, encode_s(opc, rs1, rs2, imm));
585}
586
587static void tcg_out_opc_branch(TCGContext *s, RISCVInsn opc,
588                               TCGReg rs1, TCGReg rs2, uint32_t imm)
589{
590    tcg_out32(s, encode_sb(opc, rs1, rs2, imm));
591}
592
593static void tcg_out_opc_upper(TCGContext *s, RISCVInsn opc,
594                              TCGReg rd, uint32_t imm)
595{
596    tcg_out32(s, encode_u(opc, rd, imm));
597}
598
599static void tcg_out_opc_jump(TCGContext *s, RISCVInsn opc,
600                             TCGReg rd, uint32_t imm)
601{
602    tcg_out32(s, encode_uj(opc, rd, imm));
603}
604
605static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
606{
607    int i;
608    for (i = 0; i < count; ++i) {
609        p[i] = OPC_NOP;
610    }
611}
612
613/*
614 * Relocations
615 */
616
617static bool reloc_sbimm12(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
618{
619    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
620    intptr_t offset = (intptr_t)target - (intptr_t)src_rx;
621
622    tcg_debug_assert((offset & 1) == 0);
623    if (offset == sextreg(offset, 0, 12)) {
624        *src_rw |= encode_sbimm12(offset);
625        return true;
626    }
627
628    return false;
629}
630
631static bool reloc_jimm20(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
632{
633    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
634    intptr_t offset = (intptr_t)target - (intptr_t)src_rx;
635
636    tcg_debug_assert((offset & 1) == 0);
637    if (offset == sextreg(offset, 0, 20)) {
638        *src_rw |= encode_ujimm20(offset);
639        return true;
640    }
641
642    return false;
643}
644
645static bool reloc_call(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
646{
647    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
648    intptr_t offset = (intptr_t)target - (intptr_t)src_rx;
649    int32_t lo = sextreg(offset, 0, 12);
650    int32_t hi = offset - lo;
651
652    if (offset == hi + lo) {
653        src_rw[0] |= encode_uimm20(hi);
654        src_rw[1] |= encode_imm12(lo);
655        return true;
656    }
657
658    return false;
659}
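
/*
 * Worked example of the AUIPC+ADDI split above (for reference): with
 * offset = 0x12345f00, lo = sextract(offset, 0, 12) = -0x100 and
 * hi = offset - lo = 0x12346000, so the pair becomes
 *     auipc rd, 0x12346
 *     addi  rd, rd, -0x100
 * i.e. the upper part is rounded up whenever bit 11 of the offset is set,
 * and the sign-extended 12-bit low part corrects it back down.
 */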
660
661static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
662                        intptr_t value, intptr_t addend)
663{
664    tcg_debug_assert(addend == 0);
665    switch (type) {
666    case R_RISCV_BRANCH:
667        return reloc_sbimm12(code_ptr, (tcg_insn_unit *)value);
668    case R_RISCV_JAL:
669        return reloc_jimm20(code_ptr, (tcg_insn_unit *)value);
670    case R_RISCV_CALL:
671        return reloc_call(code_ptr, (tcg_insn_unit *)value);
672    default:
673        g_assert_not_reached();
674    }
675}
676
677/*
678 * RISC-V vector instruction emitters
679 */
680
681/*
682 * Vector registers use the same low 5 bits as GPR registers,
683 * and vm=0 (vm = false) means vector masking ENABLED.
684 * With RVV 1.0, vs2 is the first operand, while rs1/imm is the
685 * second operand.
686 */
687static void tcg_out_opc_vv(TCGContext *s, RISCVInsn opc,
688                           TCGReg vd, TCGReg vs2, TCGReg vs1)
689{
690    tcg_out32(s, encode_v(opc, vd, vs1, vs2, true));
691}
692
693static void tcg_out_opc_vx(TCGContext *s, RISCVInsn opc,
694                           TCGReg vd, TCGReg vs2, TCGReg rs1)
695{
696    tcg_out32(s, encode_v(opc, vd, rs1, vs2, true));
697}
698
699static void tcg_out_opc_vi(TCGContext *s, RISCVInsn opc,
700                           TCGReg vd, TCGReg vs2, int32_t imm)
701{
702    tcg_out32(s, encode_vi(opc, vd, imm, vs2, true));
703}
704
705static void tcg_out_opc_vv_vi(TCGContext *s, RISCVInsn o_vv, RISCVInsn o_vi,
706                              TCGReg vd, TCGReg vs2, TCGArg vi1, int c_vi1)
707{
708    if (c_vi1) {
709        tcg_out_opc_vi(s, o_vi, vd, vs2, vi1);
710    } else {
711        tcg_out_opc_vv(s, o_vv, vd, vs2, vi1);
712    }
713}
714
715static void tcg_out_opc_vim_mask(TCGContext *s, RISCVInsn opc, TCGReg vd,
716                                 TCGReg vs2, int32_t imm)
717{
718    tcg_out32(s, encode_vi(opc, vd, imm, vs2, false));
719}
720
721static void tcg_out_opc_vvm_mask(TCGContext *s, RISCVInsn opc, TCGReg vd,
722                                 TCGReg vs2, TCGReg vs1)
723{
724    tcg_out32(s, encode_v(opc, vd, vs1, vs2, false));
725}
726
727typedef struct VsetCache {
728    uint32_t movi_insn;
729    uint32_t vset_insn;
730} VsetCache;
731
732static VsetCache riscv_vset_cache[3][4];
733
734static void set_vtype(TCGContext *s, TCGType type, MemOp vsew)
735{
736    const VsetCache *p = &riscv_vset_cache[type - TCG_TYPE_V64][vsew];
737
738    s->riscv_cur_type = type;
739    s->riscv_cur_vsew = vsew;
740
741    if (p->movi_insn) {
742        tcg_out32(s, p->movi_insn);
743    }
744    tcg_out32(s, p->vset_insn);
745}
746
747static MemOp set_vtype_len(TCGContext *s, TCGType type)
748{
749    if (type != s->riscv_cur_type) {
750        set_vtype(s, type, MO_64);
751    }
752    return s->riscv_cur_vsew;
753}
754
755static void set_vtype_len_sew(TCGContext *s, TCGType type, MemOp vsew)
756{
757    if (type != s->riscv_cur_type || vsew != s->riscv_cur_vsew) {
758        set_vtype(s, type, vsew);
759    }
760}
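
/*
 * Note: the backend tracks the current vtype lazily.  riscv_vset_cache is
 * assumed to hold, per (type, SEW) pair, a pre-encoded vset* instruction
 * (plus an optional movi for the AVL when a short immediate does not
 * suffice), so the helpers above only re-emit a vtype change when the
 * required type or element width actually differs from the current one.
 */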
761
762/*
763 * TCG intrinsics
764 */
765
766static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
767{
768    if (ret == arg) {
769        return true;
770    }
771    switch (type) {
772    case TCG_TYPE_I32:
773    case TCG_TYPE_I64:
774        tcg_out_opc_imm(s, OPC_ADDI, ret, arg, 0);
775        break;
776    case TCG_TYPE_V64:
777    case TCG_TYPE_V128:
778    case TCG_TYPE_V256:
779        {
780            int lmul = type - riscv_lg2_vlenb;
781            int nf = 1 << MAX(lmul, 0);
782            tcg_out_opc_vi(s, OPC_VMVNR_V, ret, arg, nf - 1);
783        }
784        break;
785    default:
786        g_assert_not_reached();
787    }
788    return true;
789}
790
791static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd,
792                         tcg_target_long val)
793{
794    tcg_target_long lo, hi, tmp;
795    int shift, ret;
796
797    if (type == TCG_TYPE_I32) {
798        val = (int32_t)val;
799    }
800
801    lo = sextreg(val, 0, 12);
802    if (val == lo) {
803        tcg_out_opc_imm(s, OPC_ADDI, rd, TCG_REG_ZERO, lo);
804        return;
805    }
806
807    hi = val - lo;
808    if (val == (int32_t)val) {
809        tcg_out_opc_upper(s, OPC_LUI, rd, hi);
810        if (lo != 0) {
811            tcg_out_opc_imm(s, OPC_ADDIW, rd, rd, lo);
812        }
813        return;
814    }
815
816    tmp = tcg_pcrel_diff(s, (void *)val);
817    if (tmp == (int32_t)tmp) {
818        tcg_out_opc_upper(s, OPC_AUIPC, rd, 0);
819        tcg_out_opc_imm(s, OPC_ADDI, rd, rd, 0);
820        ret = reloc_call(s->code_ptr - 2, (const tcg_insn_unit *)val);
821        tcg_debug_assert(ret == true);
822        return;
823    }
824
825    /* Look for a single 20-bit section.  */
826    shift = ctz64(val);
827    tmp = val >> shift;
828    if (tmp == sextreg(tmp, 0, 20)) {
829        tcg_out_opc_upper(s, OPC_LUI, rd, tmp << 12);
830        if (shift > 12) {
831            tcg_out_opc_imm(s, OPC_SLLI, rd, rd, shift - 12);
832        } else {
833            tcg_out_opc_imm(s, OPC_SRAI, rd, rd, 12 - shift);
834        }
835        return;
836    }
837
838    /* Look for a few high zero bits, with lots of bits set in the middle.  */
839    shift = clz64(val);
840    tmp = val << shift;
841    if (tmp == sextreg(tmp, 12, 20) << 12) {
842        tcg_out_opc_upper(s, OPC_LUI, rd, tmp);
843        tcg_out_opc_imm(s, OPC_SRLI, rd, rd, shift);
844        return;
845    } else if (tmp == sextreg(tmp, 0, 12)) {
846        tcg_out_opc_imm(s, OPC_ADDI, rd, TCG_REG_ZERO, tmp);
847        tcg_out_opc_imm(s, OPC_SRLI, rd, rd, shift);
848        return;
849    }
850
851    /* Drop into the constant pool.  */
852    new_pool_label(s, val, R_RISCV_CALL, s->code_ptr, 0);
853    tcg_out_opc_upper(s, OPC_AUIPC, rd, 0);
854    tcg_out_opc_imm(s, OPC_LD, rd, rd, 0);
855}
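
/*
 * Examples of the strategies above (illustration, not exhaustive):
 *   0x7ff            -> addi  rd, zero, 0x7ff
 *   0x12345678       -> lui   rd, 0x12345 ; addiw rd, rd, 0x678
 *   0x7ffff0000000   -> lui   rd, 0x7ffff ; slli  rd, rd, 16
 * with a pc-relative auipc+addi tried for values near the generated code,
 * and an auipc+ld from the constant pool as the final fallback.
 */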
856
857static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
858{
859    return false;
860}
861
862static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
863                             tcg_target_long imm)
864{
865    /* This function is only used for passing structs by reference. */
866    g_assert_not_reached();
867}
868
869static void tcg_out_ext8u(TCGContext *s, TCGReg ret, TCGReg arg)
870{
871    tcg_out_opc_imm(s, OPC_ANDI, ret, arg, 0xff);
872}
873
874static void tcg_out_ext16u(TCGContext *s, TCGReg ret, TCGReg arg)
875{
876    if (cpuinfo & CPUINFO_ZBB) {
877        tcg_out_opc_reg(s, OPC_ZEXT_H, ret, arg, TCG_REG_ZERO);
878    } else {
879        tcg_out_opc_imm(s, OPC_SLLIW, ret, arg, 16);
880        tcg_out_opc_imm(s, OPC_SRLIW, ret, ret, 16);
881    }
882}
883
884static void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg)
885{
886    if (cpuinfo & CPUINFO_ZBA) {
887        tcg_out_opc_reg(s, OPC_ADD_UW, ret, arg, TCG_REG_ZERO);
888    } else {
889        tcg_out_opc_imm(s, OPC_SLLI, ret, arg, 32);
890        tcg_out_opc_imm(s, OPC_SRLI, ret, ret, 32);
891    }
892}
893
894static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
895{
896    if (cpuinfo & CPUINFO_ZBB) {
897        tcg_out_opc_imm(s, OPC_SEXT_B, ret, arg, 0);
898    } else {
899        tcg_out_opc_imm(s, OPC_SLLIW, ret, arg, 24);
900        tcg_out_opc_imm(s, OPC_SRAIW, ret, ret, 24);
901    }
902}
903
904static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
905{
906    if (cpuinfo & CPUINFO_ZBB) {
907        tcg_out_opc_imm(s, OPC_SEXT_H, ret, arg, 0);
908    } else {
909        tcg_out_opc_imm(s, OPC_SLLIW, ret, arg, 16);
910        tcg_out_opc_imm(s, OPC_SRAIW, ret, ret, 16);
911    }
912}
913
914static void tcg_out_ext32s(TCGContext *s, TCGReg ret, TCGReg arg)
915{
916    tcg_out_opc_imm(s, OPC_ADDIW, ret, arg, 0);
917}
918
919static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg)
920{
921    if (ret != arg) {
922        tcg_out_ext32s(s, ret, arg);
923    }
924}
925
926static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg)
927{
928    tcg_out_ext32u(s, ret, arg);
929}
930
931static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg ret, TCGReg arg)
932{
933    tcg_out_ext32s(s, ret, arg);
934}
935
936static void tcg_out_ldst(TCGContext *s, RISCVInsn opc, TCGReg data,
937                         TCGReg addr, intptr_t offset)
938{
939    intptr_t imm12 = sextreg(offset, 0, 12);
940
941    if (offset != imm12) {
942        intptr_t diff = tcg_pcrel_diff(s, (void *)offset);
943
944        if (addr == TCG_REG_ZERO && diff == (int32_t)diff) {
945            imm12 = sextreg(diff, 0, 12);
946            tcg_out_opc_upper(s, OPC_AUIPC, TCG_REG_TMP2, diff - imm12);
947        } else {
948            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP2, offset - imm12);
949            if (addr != TCG_REG_ZERO) {
950                tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP2, TCG_REG_TMP2, addr);
951            }
952        }
953        addr = TCG_REG_TMP2;
954    }
955
956    switch (opc) {
957    case OPC_SB:
958    case OPC_SH:
959    case OPC_SW:
960    case OPC_SD:
961        tcg_out_opc_store(s, opc, addr, data, imm12);
962        break;
963    case OPC_LB:
964    case OPC_LBU:
965    case OPC_LH:
966    case OPC_LHU:
967    case OPC_LW:
968    case OPC_LWU:
969    case OPC_LD:
970        tcg_out_opc_imm(s, opc, data, addr, imm12);
971        break;
972    default:
973        g_assert_not_reached();
974    }
975}
976
977static void tcg_out_vec_ldst(TCGContext *s, RISCVInsn opc, TCGReg data,
978                             TCGReg addr, intptr_t offset)
979{
980    tcg_debug_assert(data >= TCG_REG_V0);
981    tcg_debug_assert(addr < TCG_REG_V0);
982
983    if (offset) {
984        tcg_debug_assert(addr != TCG_REG_ZERO);
985        if (offset == sextreg(offset, 0, 12)) {
986            tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_TMP0, addr, offset);
987        } else {
988            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, offset);
989            tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_REG_TMP0, addr);
990        }
991        addr = TCG_REG_TMP0;
992    }
993    tcg_out32(s, encode_v(opc, data, addr, 0, true));
994}
995
996static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
997                       TCGReg arg1, intptr_t arg2)
998{
999    RISCVInsn insn;
1000
1001    switch (type) {
1002    case TCG_TYPE_I32:
1003        tcg_out_ldst(s, OPC_LW, arg, arg1, arg2);
1004        break;
1005    case TCG_TYPE_I64:
1006        tcg_out_ldst(s, OPC_LD, arg, arg1, arg2);
1007        break;
1008    case TCG_TYPE_V64:
1009    case TCG_TYPE_V128:
1010    case TCG_TYPE_V256:
1011        if (type >= riscv_lg2_vlenb) {
1012            static const RISCVInsn whole_reg_ld[] = {
1013                OPC_VL1RE64_V, OPC_VL2RE64_V, OPC_VL4RE64_V, OPC_VL8RE64_V
1014            };
1015            unsigned idx = type - riscv_lg2_vlenb;
1016
1017            tcg_debug_assert(idx < ARRAY_SIZE(whole_reg_ld));
1018            insn = whole_reg_ld[idx];
1019        } else {
1020            static const RISCVInsn unit_stride_ld[] = {
1021                OPC_VLE8_V, OPC_VLE16_V, OPC_VLE32_V, OPC_VLE64_V
1022            };
1023            MemOp prev_vsew = set_vtype_len(s, type);
1024
1025            tcg_debug_assert(prev_vsew < ARRAY_SIZE(unit_stride_ld));
1026            insn = unit_stride_ld[prev_vsew];
1027        }
1028        tcg_out_vec_ldst(s, insn, arg, arg1, arg2);
1029        break;
1030    default:
1031        g_assert_not_reached();
1032    }
1033}
1034
1035static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
1036                       TCGReg arg1, intptr_t arg2)
1037{
1038    RISCVInsn insn;
1039
1040    switch (type) {
1041    case TCG_TYPE_I32:
1042        tcg_out_ldst(s, OPC_SW, arg, arg1, arg2);
1043        break;
1044    case TCG_TYPE_I64:
1045        tcg_out_ldst(s, OPC_SD, arg, arg1, arg2);
1046        break;
1047    case TCG_TYPE_V64:
1048    case TCG_TYPE_V128:
1049    case TCG_TYPE_V256:
1050        if (type >= riscv_lg2_vlenb) {
1051            static const RISCVInsn whole_reg_st[] = {
1052                OPC_VS1R_V, OPC_VS2R_V, OPC_VS4R_V, OPC_VS8R_V
1053            };
1054            unsigned idx = type - riscv_lg2_vlenb;
1055
1056            tcg_debug_assert(idx < ARRAY_SIZE(whole_reg_st));
1057            insn = whole_reg_st[idx];
1058        } else {
1059            static const RISCVInsn unit_stride_st[] = {
1060                OPC_VSE8_V, OPC_VSE16_V, OPC_VSE32_V, OPC_VSE64_V
1061            };
1062            MemOp prev_vsew = set_vtype_len(s, type);
1063
1064            tcg_debug_assert(prev_vsew < ARRAY_SIZE(unit_stride_st));
1065            insn = unit_stride_st[prev_vsew];
1066        }
1067        tcg_out_vec_ldst(s, insn, arg, arg1, arg2);
1068        break;
1069    default:
1070        g_assert_not_reached();
1071    }
1072}
1073
1074static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
1075                        TCGReg base, intptr_t ofs)
1076{
1077    if (val == 0) {
1078        tcg_out_st(s, type, TCG_REG_ZERO, base, ofs);
1079        return true;
1080    }
1081    return false;
1082}
1083
1084static void tcg_out_addsub2(TCGContext *s,
1085                            TCGReg rl, TCGReg rh,
1086                            TCGReg al, TCGReg ah,
1087                            TCGArg bl, TCGArg bh,
1088                            bool cbl, bool cbh, bool is_sub, bool is32bit)
1089{
1090    const RISCVInsn opc_add = is32bit ? OPC_ADDW : OPC_ADD;
1091    const RISCVInsn opc_addi = is32bit ? OPC_ADDIW : OPC_ADDI;
1092    const RISCVInsn opc_sub = is32bit ? OPC_SUBW : OPC_SUB;
1093    TCGReg th = TCG_REG_TMP1;
1094
1095    /* If we have a negative constant such that negating it would
1096       make the high part zero, we can (usually) eliminate one insn.  */
1097    if (cbl && cbh && bh == -1 && bl != 0) {
1098        bl = -bl;
1099        bh = 0;
1100        is_sub = !is_sub;
1101    }
1102
1103    /* By operating on the high part first, we get to use the final
1104       carry operation to move back from the temporary.  */
1105    if (!cbh) {
1106        tcg_out_opc_reg(s, (is_sub ? opc_sub : opc_add), th, ah, bh);
1107    } else if (bh != 0 || ah == rl) {
1108        tcg_out_opc_imm(s, opc_addi, th, ah, (is_sub ? -bh : bh));
1109    } else {
1110        th = ah;
1111    }
1112
1113    /* Note that tcg optimization should eliminate the bl == 0 case.  */
1114    if (is_sub) {
1115        if (cbl) {
1116            tcg_out_opc_imm(s, OPC_SLTIU, TCG_REG_TMP0, al, bl);
1117            tcg_out_opc_imm(s, opc_addi, rl, al, -bl);
1118        } else {
1119            tcg_out_opc_reg(s, OPC_SLTU, TCG_REG_TMP0, al, bl);
1120            tcg_out_opc_reg(s, opc_sub, rl, al, bl);
1121        }
1122        tcg_out_opc_reg(s, opc_sub, rh, th, TCG_REG_TMP0);
1123    } else {
1124        if (cbl) {
1125            tcg_out_opc_imm(s, opc_addi, rl, al, bl);
1126            tcg_out_opc_imm(s, OPC_SLTIU, TCG_REG_TMP0, rl, bl);
1127        } else if (al == bl) {
1128            /*
1129             * If the input regs overlap, this is a simple doubling
1130             * and carry-out is the input msb.  This special case is
1131             * required when the output reg overlaps the input,
1132             * but we might as well use it always.
1133             */
1134            tcg_out_opc_imm(s, OPC_SLTI, TCG_REG_TMP0, al, 0);
1135            tcg_out_opc_reg(s, opc_add, rl, al, al);
1136        } else {
1137            tcg_out_opc_reg(s, opc_add, rl, al, bl);
1138            tcg_out_opc_reg(s, OPC_SLTU, TCG_REG_TMP0,
1139                            rl, (rl == bl ? al : bl));
1140        }
1141        tcg_out_opc_reg(s, opc_add, rh, th, TCG_REG_TMP0);
1142    }
1143}
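
/*
 * The carry/borrow trick above in one line each (explanatory only):
 *   add: rl = al + bl;  carry  = (rl <u bl)  -- unsigned overflow iff the
 *        sum wrapped below an addend;
 *   sub: borrow = (al <u bl);  rl = al - bl  -- tested before the subtract
 *        so both inputs are still intact.
 * The high half goes into a temporary first, so the final carry/borrow
 * add/sub also moves the result into rh even when rh aliases an input.
 */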
1144
1145static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
1146                                   TCGReg dst, TCGReg src)
1147{
1148    set_vtype_len_sew(s, type, vece);
1149    tcg_out_opc_vx(s, OPC_VMV_V_X, dst, 0, src);
1150    return true;
1151}
1152
1153static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
1154                                    TCGReg dst, TCGReg base, intptr_t offset)
1155{
1156    tcg_out_ld(s, TCG_TYPE_REG, TCG_REG_TMP0, base, offset);
1157    return tcg_out_dup_vec(s, type, vece, dst, TCG_REG_TMP0);
1158}
1159
1160static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
1161                                    TCGReg dst, int64_t arg)
1162{
1163    /* Arg is replicated by VECE; extract the highest element. */
1164    arg >>= (-8 << vece) & 63;
1165
1166    if (arg >= -16 && arg < 16) {
1167        if (arg == 0 || arg == -1) {
1168            set_vtype_len(s, type);
1169        } else {
1170            set_vtype_len_sew(s, type, vece);
1171        }
1172        tcg_out_opc_vi(s, OPC_VMV_V_I, dst, 0, arg);
1173        return;
1174    }
1175    tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP0, arg);
1176    tcg_out_dup_vec(s, type, vece, dst, TCG_REG_TMP0);
1177}
1178
1179static const struct {
1180    RISCVInsn op;
1181    bool swap;
1182} tcg_brcond_to_riscv[] = {
1183    [TCG_COND_EQ] =  { OPC_BEQ,  false },
1184    [TCG_COND_NE] =  { OPC_BNE,  false },
1185    [TCG_COND_LT] =  { OPC_BLT,  false },
1186    [TCG_COND_GE] =  { OPC_BGE,  false },
1187    [TCG_COND_LE] =  { OPC_BGE,  true  },
1188    [TCG_COND_GT] =  { OPC_BLT,  true  },
1189    [TCG_COND_LTU] = { OPC_BLTU, false },
1190    [TCG_COND_GEU] = { OPC_BGEU, false },
1191    [TCG_COND_LEU] = { OPC_BGEU, true  },
1192    [TCG_COND_GTU] = { OPC_BLTU, true  }
1193};
1194
1195static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1,
1196                           TCGReg arg2, TCGLabel *l)
1197{
1198    RISCVInsn op = tcg_brcond_to_riscv[cond].op;
1199
1200    tcg_debug_assert(op != 0);
1201
1202    if (tcg_brcond_to_riscv[cond].swap) {
1203        TCGReg t = arg1;
1204        arg1 = arg2;
1205        arg2 = t;
1206    }
1207
1208    tcg_out_reloc(s, s->code_ptr, R_RISCV_BRANCH, l, 0);
1209    tcg_out_opc_branch(s, op, arg1, arg2, 0);
1210}
1211
1212#define SETCOND_INV    TCG_TARGET_NB_REGS
1213#define SETCOND_NEZ    (SETCOND_INV << 1)
1214#define SETCOND_FLAGS  (SETCOND_INV | SETCOND_NEZ)
1215
1216static int tcg_out_setcond_int(TCGContext *s, TCGCond cond, TCGReg ret,
1217                               TCGReg arg1, tcg_target_long arg2, bool c2)
1218{
1219    int flags = 0;
1220
1221    switch (cond) {
1222    case TCG_COND_EQ:    /* -> NE  */
1223    case TCG_COND_GE:    /* -> LT  */
1224    case TCG_COND_GEU:   /* -> LTU */
1225    case TCG_COND_GT:    /* -> LE  */
1226    case TCG_COND_GTU:   /* -> LEU */
1227        cond = tcg_invert_cond(cond);
1228        flags ^= SETCOND_INV;
1229        break;
1230    default:
1231        break;
1232    }
1233
1234    switch (cond) {
1235    case TCG_COND_LE:
1236    case TCG_COND_LEU:
1237        /*
1238         * If we have a constant input, the most efficient way to implement
1239         * LE is by adding 1 and using LT.  Watch out for wrap around for LEU.
1240         * We don't need to care for this for LE because the constant input
1241         * is constrained to signed 12-bit, and 0x800 is representable in the
1242         * temporary register.
1243         */
1244        if (c2) {
1245            if (cond == TCG_COND_LEU) {
1246                /* unsigned <= -1 is true */
1247                if (arg2 == -1) {
1248                    tcg_out_movi(s, TCG_TYPE_REG, ret, !(flags & SETCOND_INV));
1249                    return ret;
1250                }
1251                cond = TCG_COND_LTU;
1252            } else {
1253                cond = TCG_COND_LT;
1254            }
1255            tcg_debug_assert(arg2 <= 0x7ff);
1256            if (++arg2 == 0x800) {
1257                tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_TMP0, arg2);
1258                arg2 = TCG_REG_TMP0;
1259                c2 = false;
1260            }
1261        } else {
1262            TCGReg tmp = arg2;
1263            arg2 = arg1;
1264            arg1 = tmp;
1265            cond = tcg_swap_cond(cond);    /* LE -> GE */
1266            cond = tcg_invert_cond(cond);  /* GE -> LT */
1267            flags ^= SETCOND_INV;
1268        }
1269        break;
1270    default:
1271        break;
1272    }
1273
1274    switch (cond) {
1275    case TCG_COND_NE:
1276        flags |= SETCOND_NEZ;
1277        if (!c2) {
1278            tcg_out_opc_reg(s, OPC_XOR, ret, arg1, arg2);
1279        } else if (arg2 == 0) {
1280            ret = arg1;
1281        } else {
1282            tcg_out_opc_imm(s, OPC_XORI, ret, arg1, arg2);
1283        }
1284        break;
1285
1286    case TCG_COND_LT:
1287        if (c2) {
1288            tcg_out_opc_imm(s, OPC_SLTI, ret, arg1, arg2);
1289        } else {
1290            tcg_out_opc_reg(s, OPC_SLT, ret, arg1, arg2);
1291        }
1292        break;
1293
1294    case TCG_COND_LTU:
1295        if (c2) {
1296            tcg_out_opc_imm(s, OPC_SLTIU, ret, arg1, arg2);
1297        } else {
1298            tcg_out_opc_reg(s, OPC_SLTU, ret, arg1, arg2);
1299        }
1300        break;
1301
1302    default:
1303        g_assert_not_reached();
1304    }
1305
1306    return ret | flags;
1307}
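
/*
 * The return value above packs a register number in the low bits plus
 * SETCOND_INV/SETCOND_NEZ above TCG_TARGET_NB_REGS.  For example,
 * "setcond eq x, 0" comes back as (x | SETCOND_NEZ | SETCOND_INV):
 * x itself is the intermediate, it is non-zero exactly when the condition
 * is false, and the callers below materialise the final 0/1 (or 0/-1, or
 * branch) from those flags.
 */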
1308
1309static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret,
1310                            TCGReg arg1, tcg_target_long arg2, bool c2)
1311{
1312    int tmpflags = tcg_out_setcond_int(s, cond, ret, arg1, arg2, c2);
1313
1314    if (tmpflags != ret) {
1315        TCGReg tmp = tmpflags & ~SETCOND_FLAGS;
1316
1317        switch (tmpflags & SETCOND_FLAGS) {
1318        case SETCOND_INV:
1319            /* Intermediate result is boolean: simply invert. */
1320            tcg_out_opc_imm(s, OPC_XORI, ret, tmp, 1);
1321            break;
1322        case SETCOND_NEZ:
1323            /* Intermediate result is zero/non-zero: test != 0. */
1324            tcg_out_opc_reg(s, OPC_SLTU, ret, TCG_REG_ZERO, tmp);
1325            break;
1326        case SETCOND_NEZ | SETCOND_INV:
1327            /* Intermediate result is zero/non-zero: test == 0. */
1328            tcg_out_opc_imm(s, OPC_SLTIU, ret, tmp, 1);
1329            break;
1330        default:
1331            g_assert_not_reached();
1332        }
1333    }
1334}
1335
1336static void tcg_out_negsetcond(TCGContext *s, TCGCond cond, TCGReg ret,
1337                               TCGReg arg1, tcg_target_long arg2, bool c2)
1338{
1339    int tmpflags;
1340    TCGReg tmp;
1341
1342    /* For LT/GE comparison against 0, replicate the sign bit. */
1343    if (c2 && arg2 == 0) {
1344        switch (cond) {
1345        case TCG_COND_GE:
1346            tcg_out_opc_imm(s, OPC_XORI, ret, arg1, -1);
1347            arg1 = ret;
1348            /* fall through */
1349        case TCG_COND_LT:
1350            tcg_out_opc_imm(s, OPC_SRAI, ret, arg1, TCG_TARGET_REG_BITS - 1);
1351            return;
1352        default:
1353            break;
1354        }
1355    }
1356
1357    tmpflags = tcg_out_setcond_int(s, cond, ret, arg1, arg2, c2);
1358    tmp = tmpflags & ~SETCOND_FLAGS;
1359
1360    /* If intermediate result is zero/non-zero: test != 0. */
1361    if (tmpflags & SETCOND_NEZ) {
1362        tcg_out_opc_reg(s, OPC_SLTU, ret, TCG_REG_ZERO, tmp);
1363        tmp = ret;
1364    }
1365
1366    /* Produce the 0/-1 result. */
1367    if (tmpflags & SETCOND_INV) {
1368        tcg_out_opc_imm(s, OPC_ADDI, ret, tmp, -1);
1369    } else {
1370        tcg_out_opc_reg(s, OPC_SUB, ret, TCG_REG_ZERO, tmp);
1371    }
1372}
1373
1374static void tcg_out_movcond_zicond(TCGContext *s, TCGReg ret, TCGReg test_ne,
1375                                   int val1, bool c_val1,
1376                                   int val2, bool c_val2)
1377{
1378    if (val1 == 0) {
1379        if (c_val2) {
1380            tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_TMP1, val2);
1381            val2 = TCG_REG_TMP1;
1382        }
1383        tcg_out_opc_reg(s, OPC_CZERO_NEZ, ret, val2, test_ne);
1384        return;
1385    }
1386
1387    if (val2 == 0) {
1388        if (c_val1) {
1389            tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_TMP1, val1);
1390            val1 = TCG_REG_TMP1;
1391        }
1392        tcg_out_opc_reg(s, OPC_CZERO_EQZ, ret, val1, test_ne);
1393        return;
1394    }
1395
1396    if (c_val2) {
1397        if (c_val1) {
1398            tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_TMP1, val1 - val2);
1399        } else {
1400            tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_TMP1, val1, -val2);
1401        }
1402        tcg_out_opc_reg(s, OPC_CZERO_EQZ, ret, TCG_REG_TMP1, test_ne);
1403        tcg_out_opc_imm(s, OPC_ADDI, ret, ret, val2);
1404        return;
1405    }
1406
1407    if (c_val1) {
1408        tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_TMP1, val2, -val1);
1409        tcg_out_opc_reg(s, OPC_CZERO_NEZ, ret, TCG_REG_TMP1, test_ne);
1410        tcg_out_opc_imm(s, OPC_ADDI, ret, ret, val1);
1411        return;
1412    }
1413
1414    tcg_out_opc_reg(s, OPC_CZERO_NEZ, TCG_REG_TMP1, val2, test_ne);
1415    tcg_out_opc_reg(s, OPC_CZERO_EQZ, TCG_REG_TMP0, val1, test_ne);
1416    tcg_out_opc_reg(s, OPC_OR, ret, TCG_REG_TMP0, TCG_REG_TMP1);
1417}
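
/*
 * The Zicond identity used above, for reference: with c = (test != 0),
 *     czero.eqz t0, a, c      # t0 = c ? a : 0
 *     czero.nez t1, b, c      # t1 = c ? 0 : b
 *     or        rd, t0, t1    # rd = c ? a : b
 * The earlier special cases just fold one side into an addi when an
 * operand is zero or constant, saving the final OR.
 */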
1418
1419static void tcg_out_movcond_br1(TCGContext *s, TCGCond cond, TCGReg ret,
1420                                TCGReg cmp1, TCGReg cmp2,
1421                                int val, bool c_val)
1422{
1423    RISCVInsn op;
1424    int disp = 8;
1425
1426    tcg_debug_assert((unsigned)cond < ARRAY_SIZE(tcg_brcond_to_riscv));
1427    op = tcg_brcond_to_riscv[cond].op;
1428    tcg_debug_assert(op != 0);
1429
1430    if (tcg_brcond_to_riscv[cond].swap) {
1431        tcg_out_opc_branch(s, op, cmp2, cmp1, disp);
1432    } else {
1433        tcg_out_opc_branch(s, op, cmp1, cmp2, disp);
1434    }
1435    if (c_val) {
1436        tcg_out_opc_imm(s, OPC_ADDI, ret, TCG_REG_ZERO, val);
1437    } else {
1438        tcg_out_opc_imm(s, OPC_ADDI, ret, val, 0);
1439    }
1440}
1441
1442static void tcg_out_movcond_br2(TCGContext *s, TCGCond cond, TCGReg ret,
1443                                TCGReg cmp1, TCGReg cmp2,
1444                                int val1, bool c_val1,
1445                                int val2, bool c_val2)
1446{
1447    TCGReg tmp;
1448
1449    /* TCG optimizer reorders to prefer ret matching val2. */
1450    if (!c_val2 && ret == val2) {
1451        cond = tcg_invert_cond(cond);
1452        tcg_out_movcond_br1(s, cond, ret, cmp1, cmp2, val1, c_val1);
1453        return;
1454    }
1455
1456    if (!c_val1 && ret == val1) {
1457        tcg_out_movcond_br1(s, cond, ret, cmp1, cmp2, val2, c_val2);
1458        return;
1459    }
1460
1461    tmp = (ret == cmp1 || ret == cmp2 ? TCG_REG_TMP1 : ret);
1462    if (c_val1) {
1463        tcg_out_movi(s, TCG_TYPE_REG, tmp, val1);
1464    } else {
1465        tcg_out_mov(s, TCG_TYPE_REG, tmp, val1);
1466    }
1467    tcg_out_movcond_br1(s, cond, tmp, cmp1, cmp2, val2, c_val2);
1468    tcg_out_mov(s, TCG_TYPE_REG, ret, tmp);
1469}
1470
1471static void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGReg ret,
1472                            TCGReg cmp1, int cmp2, bool c_cmp2,
1473                            TCGReg val1, bool c_val1,
1474                            TCGReg val2, bool c_val2)
1475{
1476    int tmpflags;
1477    TCGReg t;
1478
1479    if (!(cpuinfo & CPUINFO_ZICOND) && (!c_cmp2 || cmp2 == 0)) {
1480        tcg_out_movcond_br2(s, cond, ret, cmp1, cmp2,
1481                            val1, c_val1, val2, c_val2);
1482        return;
1483    }
1484
1485    tmpflags = tcg_out_setcond_int(s, cond, TCG_REG_TMP0, cmp1, cmp2, c_cmp2);
1486    t = tmpflags & ~SETCOND_FLAGS;
1487
1488    if (cpuinfo & CPUINFO_ZICOND) {
1489        if (tmpflags & SETCOND_INV) {
1490            tcg_out_movcond_zicond(s, ret, t, val2, c_val2, val1, c_val1);
1491        } else {
1492            tcg_out_movcond_zicond(s, ret, t, val1, c_val1, val2, c_val2);
1493        }
1494    } else {
1495        cond = tmpflags & SETCOND_INV ? TCG_COND_EQ : TCG_COND_NE;
1496        tcg_out_movcond_br2(s, cond, ret, t, TCG_REG_ZERO,
1497                            val1, c_val1, val2, c_val2);
1498    }
1499}
1500
1501static void tcg_out_cltz(TCGContext *s, TCGType type, RISCVInsn insn,
1502                         TCGReg ret, TCGReg src1, int src2, bool c_src2)
1503{
1504    tcg_out_opc_imm(s, insn, ret, src1, 0);
1505
1506    if (!c_src2 || src2 != (type == TCG_TYPE_I32 ? 32 : 64)) {
1507        /*
1508         * The requested zero result does not match the insn, so adjust.
1509         * Note that constraints put 'ret' in a new register, so the
1510         * computation above did not clobber either 'src1' or 'src2'.
1511         */
1512        tcg_out_movcond(s, TCG_COND_EQ, ret, src1, 0, true,
1513                        src2, c_src2, ret, false);
1514    }
1515}
1516
1517static void tcg_out_cmpsel(TCGContext *s, TCGType type, unsigned vece,
1518                           TCGCond cond, TCGReg ret,
1519                           TCGReg cmp1, TCGReg cmp2, bool c_cmp2,
1520                           TCGReg val1, bool c_val1,
1521                           TCGReg val2, bool c_val2)
1522{
1523    set_vtype_len_sew(s, type, vece);
1524
1525    /* Use only vmerge_vim if possible, by inverting the test. */
1526    if (c_val2 && !c_val1) {
1527        TCGArg temp = val1;
1528        cond = tcg_invert_cond(cond);
1529        val1 = val2;
1530        val2 = temp;
1531        c_val1 = true;
1532        c_val2 = false;
1533    }
1534
1535    /* Perform the comparison into V0 mask. */
1536    if (c_cmp2) {
1537        tcg_out_opc_vi(s, tcg_cmpcond_to_rvv_vi[cond].op, TCG_REG_V0, cmp1,
1538                       cmp2 - tcg_cmpcond_to_rvv_vi[cond].adjust);
1539    } else if (tcg_cmpcond_to_rvv_vv[cond].swap) {
1540        tcg_out_opc_vv(s, tcg_cmpcond_to_rvv_vv[cond].op,
1541                       TCG_REG_V0, cmp2, cmp1);
1542    } else {
1543        tcg_out_opc_vv(s, tcg_cmpcond_to_rvv_vv[cond].op,
1544                       TCG_REG_V0, cmp1, cmp2);
1545    }
1546    if (c_val1) {
1547        if (c_val2) {
1548            tcg_out_opc_vi(s, OPC_VMV_V_I, ret, 0, val2);
1549            val2 = ret;
1550        }
1551        /* vd[i] = v0.mask[i] ? imm : vs2[i] */
1552        tcg_out_opc_vim_mask(s, OPC_VMERGE_VIM, ret, val2, val1);
1553    } else {
1554        /* vd[i] = v0.mask[i] ? vs1[i] : vs2[i] */
1555        tcg_out_opc_vvm_mask(s, OPC_VMERGE_VVM, ret, val2, val1);
1556    }
1557}
1558
1559static void tcg_out_vshifti(TCGContext *s, RISCVInsn opc_vi, RISCVInsn opc_vx,
1560                             TCGReg dst, TCGReg src, unsigned imm)
1561{
1562    if (imm < 32) {
1563        tcg_out_opc_vi(s, opc_vi, dst, src, imm);
1564    } else {
1565        tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP0, imm);
1566        tcg_out_opc_vx(s, opc_vx, dst, src, TCG_REG_TMP0);
1567    }
1568}
1569
1570static void init_setting_vtype(TCGContext *s)
1571{
1572    s->riscv_cur_type = TCG_TYPE_COUNT;
1573}
1574
1575static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *arg, bool tail)
1576{
1577    TCGReg link = tail ? TCG_REG_ZERO : TCG_REG_RA;
1578    ptrdiff_t offset = tcg_pcrel_diff(s, arg);
1579    int ret;
1580
1581    init_setting_vtype(s);
1582
1583    tcg_debug_assert((offset & 1) == 0);
1584    if (offset == sextreg(offset, 0, 20)) {
1585        /* short jump: -2097150 to 2097152 */
1586        tcg_out_opc_jump(s, OPC_JAL, link, offset);
1587    } else if (offset == (int32_t)offset) {
1588        /* long jump: -2147483646 to 2147483648 */
1589        tcg_out_opc_upper(s, OPC_AUIPC, TCG_REG_TMP0, 0);
1590        tcg_out_opc_imm(s, OPC_JALR, link, TCG_REG_TMP0, 0);
1591        ret = reloc_call(s->code_ptr - 2, arg);
1592        tcg_debug_assert(ret == true);
1593    } else {
1594        /* far jump: 64-bit */
1595        tcg_target_long imm = sextreg((tcg_target_long)arg, 0, 12);
1596        tcg_target_long base = (tcg_target_long)arg - imm;
1597        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, base);
1598        tcg_out_opc_imm(s, OPC_JALR, link, TCG_REG_TMP0, imm);
1599    }
1600}
1601
1602static void tcg_out_call(TCGContext *s, const tcg_insn_unit *arg,
1603                         const TCGHelperInfo *info)
1604{
1605    tcg_out_call_int(s, arg, false);
1606}
1607
1608static void tcg_out_mb(TCGContext *s, TCGArg a0)
1609{
1610    tcg_insn_unit insn = OPC_FENCE;
1611
1612    if (a0 & TCG_MO_LD_LD) {
1613        insn |= 0x02200000;
1614    }
1615    if (a0 & TCG_MO_ST_LD) {
1616        insn |= 0x01200000;
1617    }
1618    if (a0 & TCG_MO_LD_ST) {
1619        insn |= 0x02100000;
1620    }
1621    if (a0 & TCG_MO_ST_ST) {
1622        insn |= 0x01100000;
1623    }
1624    tcg_out32(s, insn);
1625}
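
/*
 * How the masks above map onto FENCE (for reference): the predecessor set
 * lives in insn bits 27:24 (I/O/R/W) and the successor set in bits 23:20,
 * so e.g. TCG_MO_LD_LD ORs in pred.R|succ.R (0x02200000), and a full
 * TCG_MO_ALL barrier ends up as "fence rw, rw".
 */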
1626
1627/*
1628 * Load/store and TLB
1629 */
1630
1631static void tcg_out_goto(TCGContext *s, const tcg_insn_unit *target)
1632{
1633    tcg_out_opc_jump(s, OPC_JAL, TCG_REG_ZERO, 0);
1634    bool ok = reloc_jimm20(s->code_ptr - 1, target);
1635    tcg_debug_assert(ok);
1636}
1637
1638bool tcg_target_has_memory_bswap(MemOp memop)
1639{
1640    return false;
1641}
1642
1643/* We have three temps, we might as well expose them. */
1644static const TCGLdstHelperParam ldst_helper_param = {
1645    .ntmp = 3, .tmp = { TCG_REG_TMP0, TCG_REG_TMP1, TCG_REG_TMP2 }
1646};
1647
1648static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
1649{
1650    MemOp opc = get_memop(l->oi);
1651
1652    /* resolve label address */
1653    if (!reloc_sbimm12(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
1654        return false;
1655    }
1656
1657    /* call load helper */
1658    tcg_out_ld_helper_args(s, l, &ldst_helper_param);
1659    tcg_out_call_int(s, qemu_ld_helpers[opc & MO_SSIZE], false);
1660    tcg_out_ld_helper_ret(s, l, true, &ldst_helper_param);
1661
1662    tcg_out_goto(s, l->raddr);
1663    return true;
1664}
1665
1666static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
1667{
1668    MemOp opc = get_memop(l->oi);
1669
1670    /* resolve label address */
1671    if (!reloc_sbimm12(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
1672        return false;
1673    }
1674
1675    /* call store helper */
1676    tcg_out_st_helper_args(s, l, &ldst_helper_param);
1677    tcg_out_call_int(s, qemu_st_helpers[opc & MO_SIZE], false);
1678
1679    tcg_out_goto(s, l->raddr);
1680    return true;
1681}
1682
1683/* We expect to use a 12-bit negative offset from ENV.  */
1684#define MIN_TLB_MASK_TABLE_OFS  -(1 << 11)
1685
1686/*
1687 * For system-mode, perform the TLB load and compare.
1688 * For user-mode, perform any required alignment tests.
1689 * In both cases, return a TCGLabelQemuLdst structure if the slow path
1690 * is required and fill in @h with the host address for the fast path.
1691 */
1692static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, TCGReg *pbase,
1693                                           TCGReg addr_reg, MemOpIdx oi,
1694                                           bool is_ld)
1695{
1696    TCGType addr_type = s->addr_type;
1697    TCGLabelQemuLdst *ldst = NULL;
1698    MemOp opc = get_memop(oi);
1699    TCGAtomAlign aa;
1700    unsigned a_mask;
1701
1702    aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, false);
1703    a_mask = (1u << aa.align) - 1;
1704
1705    if (tcg_use_softmmu) {
1706        unsigned s_bits = opc & MO_SIZE;
1707        unsigned s_mask = (1u << s_bits) - 1;
1708        int mem_index = get_mmuidx(oi);
1709        int fast_ofs = tlb_mask_table_ofs(s, mem_index);
1710        int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask);
1711        int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table);
1712        int compare_mask;
1713        TCGReg addr_adj;
1714
1715        ldst = new_ldst_label(s);
1716        ldst->is_ld = is_ld;
1717        ldst->oi = oi;
1718        ldst->addr_reg = addr_reg;
1719
1720        init_setting_vtype(s);
1721
1722        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_AREG0, mask_ofs);
1723        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, table_ofs);
1724
1725        tcg_out_opc_imm(s, OPC_SRLI, TCG_REG_TMP2, addr_reg,
1726                        s->page_bits - CPU_TLB_ENTRY_BITS);
1727        tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0);
1728        tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1);
1729
1730        /*
1731         * For aligned accesses, we check the first byte and include the
1732         * alignment bits within the address.  For unaligned access, we
1733         * check that we don't cross pages using the address of the last
1734         * byte of the access.
1735         */
1736        addr_adj = addr_reg;
1737        if (a_mask < s_mask) {
1738            addr_adj = TCG_REG_TMP0;
1739            tcg_out_opc_imm(s, addr_type == TCG_TYPE_I32 ? OPC_ADDIW : OPC_ADDI,
1740                            addr_adj, addr_reg, s_mask - a_mask);
1741        }
1742        compare_mask = s->page_mask | a_mask;
1743        if (compare_mask == sextreg(compare_mask, 0, 12)) {
1744            tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addr_adj, compare_mask);
1745        } else {
1746            tcg_out_movi(s, addr_type, TCG_REG_TMP1, compare_mask);
1747            tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP1, TCG_REG_TMP1, addr_adj);
1748        }
1749
1750        /* Load the tlb comparator and the addend.  */
1751        QEMU_BUILD_BUG_ON(HOST_BIG_ENDIAN);
1752        tcg_out_ld(s, addr_type, TCG_REG_TMP0, TCG_REG_TMP2,
1753                   is_ld ? offsetof(CPUTLBEntry, addr_read)
1754                         : offsetof(CPUTLBEntry, addr_write));
1755        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2,
1756                   offsetof(CPUTLBEntry, addend));
1757
1758        /* Compare masked address with the TLB entry. */
1759        ldst->label_ptr[0] = s->code_ptr;
1760        tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP0, TCG_REG_TMP1, 0);
1761
1762        /* TLB Hit - translate address using addend.  */
1763        if (addr_type != TCG_TYPE_I32) {
1764            tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, addr_reg, TCG_REG_TMP2);
1765        } else if (cpuinfo & CPUINFO_ZBA) {
1766            tcg_out_opc_reg(s, OPC_ADD_UW, TCG_REG_TMP0,
1767                            addr_reg, TCG_REG_TMP2);
1768        } else {
1769            tcg_out_ext32u(s, TCG_REG_TMP0, addr_reg);
1770            tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0,
1771                            TCG_REG_TMP0, TCG_REG_TMP2);
1772        }
1773        *pbase = TCG_REG_TMP0;
1774    } else {
1775        TCGReg base;
1776
1777        if (a_mask) {
1778            ldst = new_ldst_label(s);
1779            ldst->is_ld = is_ld;
1780            ldst->oi = oi;
1781            ldst->addr_reg = addr_reg;
1782
1783            init_setting_vtype(s);
1784
1785            /* We expect an alignment mask of at most 7 (8-byte alignment), so andi always suffices. */
1786            tcg_debug_assert(a_mask == sextreg(a_mask, 0, 12));
1787            tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addr_reg, a_mask);
1788
1789            ldst->label_ptr[0] = s->code_ptr;
1790            tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP1, TCG_REG_ZERO, 0);
1791        }
1792
1793        if (guest_base != 0) {
1794            base = TCG_REG_TMP0;
1795            if (addr_type != TCG_TYPE_I32) {
1796                tcg_out_opc_reg(s, OPC_ADD, base, addr_reg,
1797                                TCG_GUEST_BASE_REG);
1798            } else if (cpuinfo & CPUINFO_ZBA) {
1799                tcg_out_opc_reg(s, OPC_ADD_UW, base, addr_reg,
1800                                TCG_GUEST_BASE_REG);
1801            } else {
1802                tcg_out_ext32u(s, base, addr_reg);
1803                tcg_out_opc_reg(s, OPC_ADD, base, base, TCG_GUEST_BASE_REG);
1804            }
1805        } else if (addr_type != TCG_TYPE_I32) {
1806            base = addr_reg;
1807        } else {
1808            base = TCG_REG_TMP0;
1809            tcg_out_ext32u(s, base, addr_reg);
1810        }
1811        *pbase = base;
1812    }
1813
1814    return ldst;
1815}
1816
1817static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg val,
1818                                   TCGReg base, MemOp opc, TCGType type)
1819{
1820    /* Byte swapping is left to middle-end expansion. */
1821    tcg_debug_assert((opc & MO_BSWAP) == 0);
1822
1823    switch (opc & (MO_SSIZE)) {
1824    case MO_UB:
1825        tcg_out_opc_imm(s, OPC_LBU, val, base, 0);
1826        break;
1827    case MO_SB:
1828        tcg_out_opc_imm(s, OPC_LB, val, base, 0);
1829        break;
1830    case MO_UW:
1831        tcg_out_opc_imm(s, OPC_LHU, val, base, 0);
1832        break;
1833    case MO_SW:
1834        tcg_out_opc_imm(s, OPC_LH, val, base, 0);
1835        break;
1836    case MO_UL:
1837        if (type == TCG_TYPE_I64) {
1838            tcg_out_opc_imm(s, OPC_LWU, val, base, 0);
1839            break;
1840        }
1841        /* FALLTHRU */
1842    case MO_SL:
1843        tcg_out_opc_imm(s, OPC_LW, val, base, 0);
1844        break;
1845    case MO_UQ:
1846        tcg_out_opc_imm(s, OPC_LD, val, base, 0);
1847        break;
1848    default:
1849        g_assert_not_reached();
1850    }
1851}
1852
1853static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
1854                            MemOpIdx oi, TCGType data_type)
1855{
1856    TCGLabelQemuLdst *ldst;
1857    TCGReg base;
1858
1859    ldst = prepare_host_addr(s, &base, addr_reg, oi, true);
1860    tcg_out_qemu_ld_direct(s, data_reg, base, get_memop(oi), data_type);
1861
1862    if (ldst) {
1863        ldst->type = data_type;
1864        ldst->datalo_reg = data_reg;
1865        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
1866    }
1867}
1868
1869static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg val,
1870                                   TCGReg base, MemOp opc)
1871{
1872    /* Byte swapping is left to middle-end expansion. */
1873    tcg_debug_assert((opc & MO_BSWAP) == 0);
1874
1875    switch (opc & (MO_SSIZE)) {
1876    case MO_8:
1877        tcg_out_opc_store(s, OPC_SB, base, val, 0);
1878        break;
1879    case MO_16:
1880        tcg_out_opc_store(s, OPC_SH, base, val, 0);
1881        break;
1882    case MO_32:
1883        tcg_out_opc_store(s, OPC_SW, base, val, 0);
1884        break;
1885    case MO_64:
1886        tcg_out_opc_store(s, OPC_SD, base, val, 0);
1887        break;
1888    default:
1889        g_assert_not_reached();
1890    }
1891}
1892
1893static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
1894                            MemOpIdx oi, TCGType data_type)
1895{
1896    TCGLabelQemuLdst *ldst;
1897    TCGReg base;
1898
1899    ldst = prepare_host_addr(s, &base, addr_reg, oi, false);
1900    tcg_out_qemu_st_direct(s, data_reg, base, get_memop(oi));
1901
1902    if (ldst) {
1903        ldst->type = data_type;
1904        ldst->datalo_reg = data_reg;
1905        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
1906    }
1907}
1908
1909static const tcg_insn_unit *tb_ret_addr;
1910
1911static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
1912{
1913    /* Reuse the a0-zeroing return path that exists for goto_ptr.  */
1914    if (a0 == 0) {
1915        tcg_out_call_int(s, tcg_code_gen_epilogue, true);
1916    } else {
1917        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A0, a0);
1918        tcg_out_call_int(s, tb_ret_addr, true);
1919    }
1920}
1921
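/*
 * goto_tb emits a patchable slot: a direct JAL when the target is in
 * range (patched by tb_target_set_jmp_target below), otherwise a NOP
 * that falls through to an indirect load + jump via this TB's jump
 * target address slot.
 */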
1922static void tcg_out_goto_tb(TCGContext *s, int which)
1923{
1924    /* Direct branch will be patched by tb_target_set_jmp_target. */
1925    set_jmp_insn_offset(s, which);
1926    tcg_out32(s, OPC_JAL);
1927
1928    /* When branch is out of range, fall through to indirect. */
1929    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_REG_ZERO,
1930               get_jmp_target_addr(s, which));
1931    tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, TCG_REG_TMP0, 0);
1932    set_jmp_reset_offset(s, which);
1933}
1934
1935void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
1936                              uintptr_t jmp_rx, uintptr_t jmp_rw)
1937{
1938    uintptr_t addr = tb->jmp_target_addr[n];
1939    ptrdiff_t offset = addr - jmp_rx;
1940    tcg_insn_unit insn;
1941
1942    /* Either directly branch, or fall through to indirect branch. */
1943    if (offset == sextreg(offset, 0, 20)) {
1944        insn = encode_uj(OPC_JAL, TCG_REG_ZERO, offset);
1945    } else {
1946        insn = OPC_NOP;
1947    }
1948    qatomic_set((uint32_t *)jmp_rw, insn);
1949    flush_idcache_range(jmp_rx, jmp_rw, 4);
1950}
1951
1952
1953static void tgen_add(TCGContext *s, TCGType type,
1954                     TCGReg a0, TCGReg a1, TCGReg a2)
1955{
1956    RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_ADDW : OPC_ADD;
1957    tcg_out_opc_reg(s, insn, a0, a1, a2);
1958}
1959
1960static void tgen_addi(TCGContext *s, TCGType type,
1961                      TCGReg a0, TCGReg a1, tcg_target_long a2)
1962{
1963    RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_ADDIW : OPC_ADDI;
1964    tcg_out_opc_imm(s, insn, a0, a1, a2);
1965}
1966
1967static const TCGOutOpBinary outop_add = {
1968    .base.static_constraint = C_O1_I2(r, r, rI),
1969    .out_rrr = tgen_add,
1970    .out_rri = tgen_addi,
1971};
1972
1973static void tgen_and(TCGContext *s, TCGType type,
1974                     TCGReg a0, TCGReg a1, TCGReg a2)
1975{
1976    tcg_out_opc_reg(s, OPC_AND, a0, a1, a2);
1977}
1978
1979static void tgen_andi(TCGContext *s, TCGType type,
1980                      TCGReg a0, TCGReg a1, tcg_target_long a2)
1981{
1982    tcg_out_opc_imm(s, OPC_ANDI, a0, a1, a2);
1983}
1984
1985static const TCGOutOpBinary outop_and = {
1986    .base.static_constraint = C_O1_I2(r, r, rI),
1987    .out_rrr = tgen_and,
1988    .out_rri = tgen_andi,
1989};
1990
1991static void tgen_andc(TCGContext *s, TCGType type,
1992                      TCGReg a0, TCGReg a1, TCGReg a2)
1993{
1994    tcg_out_opc_reg(s, OPC_ANDN, a0, a1, a2);
1995}
1996
1997static TCGConstraintSetIndex cset_zbb_rrr(TCGType type, unsigned flags)
1998{
1999    return cpuinfo & CPUINFO_ZBB ? C_O1_I2(r, r, r) : C_NotImplemented;
2000}
2001
2002static const TCGOutOpBinary outop_andc = {
2003    .base.static_constraint = C_Dynamic,
2004    .base.dynamic_constraint = cset_zbb_rrr,
2005    .out_rrr = tgen_andc,
2006};
2007
2008static void tgen_eqv(TCGContext *s, TCGType type,
2009                     TCGReg a0, TCGReg a1, TCGReg a2)
2010{
2011    tcg_out_opc_reg(s, OPC_XNOR, a0, a1, a2);
2012}
2013
2014static const TCGOutOpBinary outop_eqv = {
2015    .base.static_constraint = C_Dynamic,
2016    .base.dynamic_constraint = cset_zbb_rrr,
2017    .out_rrr = tgen_eqv,
2018};
2019
2020static void tgen_or(TCGContext *s, TCGType type,
2021                    TCGReg a0, TCGReg a1, TCGReg a2)
2022{
2023    tcg_out_opc_reg(s, OPC_OR, a0, a1, a2);
2024}
2025
2026static void tgen_ori(TCGContext *s, TCGType type,
2027                     TCGReg a0, TCGReg a1, tcg_target_long a2)
2028{
2029    tcg_out_opc_imm(s, OPC_ORI, a0, a1, a2);
2030}
2031
2032static const TCGOutOpBinary outop_or = {
2033    .base.static_constraint = C_O1_I2(r, r, rI),
2034    .out_rrr = tgen_or,
2035    .out_rri = tgen_ori,
2036};
2037
2038static void tgen_orc(TCGContext *s, TCGType type,
2039                     TCGReg a0, TCGReg a1, TCGReg a2)
2040{
2041    tcg_out_opc_reg(s, OPC_ORN, a0, a1, a2);
2042}
2043
2044static const TCGOutOpBinary outop_orc = {
2045    .base.static_constraint = C_Dynamic,
2046    .base.dynamic_constraint = cset_zbb_rrr,
2047    .out_rrr = tgen_orc,
2048};
2049
2050static void tgen_xor(TCGContext *s, TCGType type,
2051                     TCGReg a0, TCGReg a1, TCGReg a2)
2052{
2053    tcg_out_opc_reg(s, OPC_XOR, a0, a1, a2);
2054}
2055
2056static void tgen_xori(TCGContext *s, TCGType type,
2057                      TCGReg a0, TCGReg a1, tcg_target_long a2)
2058{
2059    tcg_out_opc_imm(s, OPC_XORI, a0, a1, a2);
2060}
2061
2062static const TCGOutOpBinary outop_xor = {
2063    .base.static_constraint = C_O1_I2(r, r, rI),
2064    .out_rrr = tgen_xor,
2065    .out_rri = tgen_xori,
2066};
2067
2068
2069static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
2070                       const TCGArg args[TCG_MAX_OP_ARGS],
2071                       const int const_args[TCG_MAX_OP_ARGS])
2072{
2073    TCGArg a0 = args[0];
2074    TCGArg a1 = args[1];
2075    TCGArg a2 = args[2];
2076    int c2 = const_args[2];
2077
2078    switch (opc) {
2079    case INDEX_op_goto_ptr:
2080        tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, a0, 0);
2081        break;
2082
2083    case INDEX_op_br:
2084        tcg_out_reloc(s, s->code_ptr, R_RISCV_JAL, arg_label(a0), 0);
2085        tcg_out_opc_jump(s, OPC_JAL, TCG_REG_ZERO, 0);
2086        break;
2087
2088    case INDEX_op_ld8u_i32:
2089    case INDEX_op_ld8u_i64:
2090        tcg_out_ldst(s, OPC_LBU, a0, a1, a2);
2091        break;
2092    case INDEX_op_ld8s_i32:
2093    case INDEX_op_ld8s_i64:
2094        tcg_out_ldst(s, OPC_LB, a0, a1, a2);
2095        break;
2096    case INDEX_op_ld16u_i32:
2097    case INDEX_op_ld16u_i64:
2098        tcg_out_ldst(s, OPC_LHU, a0, a1, a2);
2099        break;
2100    case INDEX_op_ld16s_i32:
2101    case INDEX_op_ld16s_i64:
2102        tcg_out_ldst(s, OPC_LH, a0, a1, a2);
2103        break;
2104    case INDEX_op_ld32u_i64:
2105        tcg_out_ldst(s, OPC_LWU, a0, a1, a2);
2106        break;
2107    case INDEX_op_ld_i32:
2108    case INDEX_op_ld32s_i64:
2109        tcg_out_ldst(s, OPC_LW, a0, a1, a2);
2110        break;
2111    case INDEX_op_ld_i64:
2112        tcg_out_ldst(s, OPC_LD, a0, a1, a2);
2113        break;
2114
2115    case INDEX_op_st8_i32:
2116    case INDEX_op_st8_i64:
2117        tcg_out_ldst(s, OPC_SB, a0, a1, a2);
2118        break;
2119    case INDEX_op_st16_i32:
2120    case INDEX_op_st16_i64:
2121        tcg_out_ldst(s, OPC_SH, a0, a1, a2);
2122        break;
2123    case INDEX_op_st_i32:
2124    case INDEX_op_st32_i64:
2125        tcg_out_ldst(s, OPC_SW, a0, a1, a2);
2126        break;
2127    case INDEX_op_st_i64:
2128        tcg_out_ldst(s, OPC_SD, a0, a1, a2);
2129        break;
2130
2131    case INDEX_op_sub_i32:
2132        if (c2) {
2133            tcg_out_opc_imm(s, OPC_ADDIW, a0, a1, -a2);
2134        } else {
2135            tcg_out_opc_reg(s, OPC_SUBW, a0, a1, a2);
2136        }
2137        break;
2138    case INDEX_op_sub_i64:
2139        if (c2) {
2140            tcg_out_opc_imm(s, OPC_ADDI, a0, a1, -a2);
2141        } else {
2142            tcg_out_opc_reg(s, OPC_SUB, a0, a1, a2);
2143        }
2144        break;
2145
2146    case INDEX_op_not_i32:
2147    case INDEX_op_not_i64:
2148        tcg_out_opc_imm(s, OPC_XORI, a0, a1, -1);
2149        break;
2150
2151    case INDEX_op_neg_i32:
2152        tcg_out_opc_reg(s, OPC_SUBW, a0, TCG_REG_ZERO, a1);
2153        break;
2154    case INDEX_op_neg_i64:
2155        tcg_out_opc_reg(s, OPC_SUB, a0, TCG_REG_ZERO, a1);
2156        break;
2157
2158    case INDEX_op_mul_i32:
2159        tcg_out_opc_reg(s, OPC_MULW, a0, a1, a2);
2160        break;
2161    case INDEX_op_mul_i64:
2162        tcg_out_opc_reg(s, OPC_MUL, a0, a1, a2);
2163        break;
2164
2165    case INDEX_op_div_i32:
2166        tcg_out_opc_reg(s, OPC_DIVW, a0, a1, a2);
2167        break;
2168    case INDEX_op_div_i64:
2169        tcg_out_opc_reg(s, OPC_DIV, a0, a1, a2);
2170        break;
2171
2172    case INDEX_op_divu_i32:
2173        tcg_out_opc_reg(s, OPC_DIVUW, a0, a1, a2);
2174        break;
2175    case INDEX_op_divu_i64:
2176        tcg_out_opc_reg(s, OPC_DIVU, a0, a1, a2);
2177        break;
2178
2179    case INDEX_op_rem_i32:
2180        tcg_out_opc_reg(s, OPC_REMW, a0, a1, a2);
2181        break;
2182    case INDEX_op_rem_i64:
2183        tcg_out_opc_reg(s, OPC_REM, a0, a1, a2);
2184        break;
2185
2186    case INDEX_op_remu_i32:
2187        tcg_out_opc_reg(s, OPC_REMUW, a0, a1, a2);
2188        break;
2189    case INDEX_op_remu_i64:
2190        tcg_out_opc_reg(s, OPC_REMU, a0, a1, a2);
2191        break;
2192
2193    case INDEX_op_shl_i32:
2194        if (c2) {
2195            tcg_out_opc_imm(s, OPC_SLLIW, a0, a1, a2 & 0x1f);
2196        } else {
2197            tcg_out_opc_reg(s, OPC_SLLW, a0, a1, a2);
2198        }
2199        break;
2200    case INDEX_op_shl_i64:
2201        if (c2) {
2202            tcg_out_opc_imm(s, OPC_SLLI, a0, a1, a2 & 0x3f);
2203        } else {
2204            tcg_out_opc_reg(s, OPC_SLL, a0, a1, a2);
2205        }
2206        break;
2207
2208    case INDEX_op_shr_i32:
2209        if (c2) {
2210            tcg_out_opc_imm(s, OPC_SRLIW, a0, a1, a2 & 0x1f);
2211        } else {
2212            tcg_out_opc_reg(s, OPC_SRLW, a0, a1, a2);
2213        }
2214        break;
2215    case INDEX_op_shr_i64:
2216        if (c2) {
2217            tcg_out_opc_imm(s, OPC_SRLI, a0, a1, a2 & 0x3f);
2218        } else {
2219            tcg_out_opc_reg(s, OPC_SRL, a0, a1, a2);
2220        }
2221        break;
2222
2223    case INDEX_op_sar_i32:
2224        if (c2) {
2225            tcg_out_opc_imm(s, OPC_SRAIW, a0, a1, a2 & 0x1f);
2226        } else {
2227            tcg_out_opc_reg(s, OPC_SRAW, a0, a1, a2);
2228        }
2229        break;
2230    case INDEX_op_sar_i64:
2231        if (c2) {
2232            tcg_out_opc_imm(s, OPC_SRAI, a0, a1, a2 & 0x3f);
2233        } else {
2234            tcg_out_opc_reg(s, OPC_SRA, a0, a1, a2);
2235        }
2236        break;
2237
2238    case INDEX_op_rotl_i32:
2239        if (c2) {
2240            tcg_out_opc_imm(s, OPC_RORIW, a0, a1, -a2 & 0x1f);
2241        } else {
2242            tcg_out_opc_reg(s, OPC_ROLW, a0, a1, a2);
2243        }
2244        break;
2245    case INDEX_op_rotl_i64:
2246        if (c2) {
2247            tcg_out_opc_imm(s, OPC_RORI, a0, a1, -a2 & 0x3f);
2248        } else {
2249            tcg_out_opc_reg(s, OPC_ROL, a0, a1, a2);
2250        }
2251        break;
2252
2253    case INDEX_op_rotr_i32:
2254        if (c2) {
2255            tcg_out_opc_imm(s, OPC_RORIW, a0, a1, a2 & 0x1f);
2256        } else {
2257            tcg_out_opc_reg(s, OPC_RORW, a0, a1, a2);
2258        }
2259        break;
2260    case INDEX_op_rotr_i64:
2261        if (c2) {
2262            tcg_out_opc_imm(s, OPC_RORI, a0, a1, a2 & 0x3f);
2263        } else {
2264            tcg_out_opc_reg(s, OPC_ROR, a0, a1, a2);
2265        }
2266        break;
2267
2268    case INDEX_op_bswap64_i64:
2269        tcg_out_opc_imm(s, OPC_REV8, a0, a1, 0);
2270        break;
2271    case INDEX_op_bswap32_i32:
2272        a2 = 0;
2273        /* fall through */
2274    case INDEX_op_bswap32_i64:
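        /*
         * rev8 byte-swaps the entire 64-bit register, which leaves the
         * swapped 32-bit value in the upper half; shift it back down,
         * zero- or sign-extending according to the bswap flags.
         */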
2275        tcg_out_opc_imm(s, OPC_REV8, a0, a1, 0);
2276        if (a2 & TCG_BSWAP_OZ) {
2277            tcg_out_opc_imm(s, OPC_SRLI, a0, a0, 32);
2278        } else {
2279            tcg_out_opc_imm(s, OPC_SRAI, a0, a0, 32);
2280        }
2281        break;
2282    case INDEX_op_bswap16_i64:
2283    case INDEX_op_bswap16_i32:
2284        tcg_out_opc_imm(s, OPC_REV8, a0, a1, 0);
2285        if (a2 & TCG_BSWAP_OZ) {
2286            tcg_out_opc_imm(s, OPC_SRLI, a0, a0, 48);
2287        } else {
2288            tcg_out_opc_imm(s, OPC_SRAI, a0, a0, 48);
2289        }
2290        break;
2291
2292    case INDEX_op_ctpop_i32:
2293        tcg_out_opc_imm(s, OPC_CPOPW, a0, a1, 0);
2294        break;
2295    case INDEX_op_ctpop_i64:
2296        tcg_out_opc_imm(s, OPC_CPOP, a0, a1, 0);
2297        break;
2298
2299    case INDEX_op_clz_i32:
2300        tcg_out_cltz(s, TCG_TYPE_I32, OPC_CLZW, a0, a1, a2, c2);
2301        break;
2302    case INDEX_op_clz_i64:
2303        tcg_out_cltz(s, TCG_TYPE_I64, OPC_CLZ, a0, a1, a2, c2);
2304        break;
2305    case INDEX_op_ctz_i32:
2306        tcg_out_cltz(s, TCG_TYPE_I32, OPC_CTZW, a0, a1, a2, c2);
2307        break;
2308    case INDEX_op_ctz_i64:
2309        tcg_out_cltz(s, TCG_TYPE_I64, OPC_CTZ, a0, a1, a2, c2);
2310        break;
2311
2312    case INDEX_op_add2_i32:
2313        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
2314                        const_args[4], const_args[5], false, true);
2315        break;
2316    case INDEX_op_add2_i64:
2317        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
2318                        const_args[4], const_args[5], false, false);
2319        break;
2320    case INDEX_op_sub2_i32:
2321        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
2322                        const_args[4], const_args[5], true, true);
2323        break;
2324    case INDEX_op_sub2_i64:
2325        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
2326                        const_args[4], const_args[5], true, false);
2327        break;
2328
2329    case INDEX_op_brcond_i32:
2330    case INDEX_op_brcond_i64:
2331        tcg_out_brcond(s, a2, a0, a1, arg_label(args[3]));
2332        break;
2333
2334    case INDEX_op_setcond_i32:
2335    case INDEX_op_setcond_i64:
2336        tcg_out_setcond(s, args[3], a0, a1, a2, c2);
2337        break;
2338
2339    case INDEX_op_negsetcond_i32:
2340    case INDEX_op_negsetcond_i64:
2341        tcg_out_negsetcond(s, args[3], a0, a1, a2, c2);
2342        break;
2343
2344    case INDEX_op_movcond_i32:
2345    case INDEX_op_movcond_i64:
2346        tcg_out_movcond(s, args[5], a0, a1, a2, c2,
2347                        args[3], const_args[3], args[4], const_args[4]);
2348        break;
2349
2350    case INDEX_op_qemu_ld_i32:
2351        tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32);
2352        break;
2353    case INDEX_op_qemu_ld_i64:
2354        tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64);
2355        break;
2356    case INDEX_op_qemu_st_i32:
2357        tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32);
2358        break;
2359    case INDEX_op_qemu_st_i64:
2360        tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64);
2361        break;
2362
2363    case INDEX_op_extrh_i64_i32:
2364        tcg_out_opc_imm(s, OPC_SRAI, a0, a1, 32);
2365        break;
2366
2367    case INDEX_op_mulsh_i32:
2368    case INDEX_op_mulsh_i64:
2369        tcg_out_opc_reg(s, OPC_MULH, a0, a1, a2);
2370        break;
2371
2372    case INDEX_op_muluh_i32:
2373    case INDEX_op_muluh_i64:
2374        tcg_out_opc_reg(s, OPC_MULHU, a0, a1, a2);
2375        break;
2376
2377    case INDEX_op_mb:
2378        tcg_out_mb(s, a0);
2379        break;
2380
2381    case INDEX_op_extract_i64:
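        /*
         * An extract whose field ends at bit 31 reduces to a
         * zero-extending 32-bit (word) shift.
         */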
2382        if (a2 + args[3] == 32) {
2383            if (a2 == 0) {
2384                tcg_out_ext32u(s, a0, a1);
2385            } else {
2386                tcg_out_opc_imm(s, OPC_SRLIW, a0, a1, a2);
2387            }
2388            break;
2389        }
2390        /* FALLTHRU */
2391    case INDEX_op_extract_i32:
2392        switch (args[3]) {
2393        case 1:
2394            tcg_out_opc_imm(s, OPC_BEXTI, a0, a1, a2);
2395            break;
2396        case 16:
2397            tcg_debug_assert(a2 == 0);
2398            tcg_out_ext16u(s, a0, a1);
2399            break;
2400        default:
2401            g_assert_not_reached();
2402        }
2403        break;
2404
2405    case INDEX_op_sextract_i64:
2406        if (a2 + args[3] == 32) {
2407            if (a2 == 0) {
2408                tcg_out_ext32s(s, a0, a1);
2409            } else {
2410                tcg_out_opc_imm(s, OPC_SRAIW, a0, a1, a2);
2411            }
2412            break;
2413        }
2414        /* FALLTHRU */
2415    case INDEX_op_sextract_i32:
2416        if (a2 == 0 && args[3] == 8) {
2417            tcg_out_ext8s(s, TCG_TYPE_REG, a0, a1);
2418        } else if (a2 == 0 && args[3] == 16) {
2419            tcg_out_ext16s(s, TCG_TYPE_REG, a0, a1);
2420        } else {
2421            g_assert_not_reached();
2422        }
2423        break;
2424
2425    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
2426    case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb.  */
2427    case INDEX_op_goto_tb:  /* Always emitted via tcg_out_goto_tb.  */
2428    case INDEX_op_ext_i32_i64:  /* Always emitted via tcg_reg_alloc_op.  */
2429    case INDEX_op_extu_i32_i64:
2430    case INDEX_op_extrl_i64_i32:
2431    default:
2432        g_assert_not_reached();
2433    }
2434}
2435
2436static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
2437                           unsigned vecl, unsigned vece,
2438                           const TCGArg args[TCG_MAX_OP_ARGS],
2439                           const int const_args[TCG_MAX_OP_ARGS])
2440{
2441    TCGType type = vecl + TCG_TYPE_V64;
2442    TCGArg a0, a1, a2;
2443    int c2;
2444
2445    a0 = args[0];
2446    a1 = args[1];
2447    a2 = args[2];
2448    c2 = const_args[2];
2449
2450    switch (opc) {
2451    case INDEX_op_dupm_vec:
2452        tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
2453        break;
2454    case INDEX_op_ld_vec:
2455        tcg_out_ld(s, type, a0, a1, a2);
2456        break;
2457    case INDEX_op_st_vec:
2458        tcg_out_st(s, type, a0, a1, a2);
2459        break;
2460    case INDEX_op_add_vec:
2461        set_vtype_len_sew(s, type, vece);
2462        tcg_out_opc_vv_vi(s, OPC_VADD_VV, OPC_VADD_VI, a0, a1, a2, c2);
2463        break;
2464    case INDEX_op_sub_vec:
2465        set_vtype_len_sew(s, type, vece);
2466        if (const_args[1]) {
2467            tcg_out_opc_vi(s, OPC_VRSUB_VI, a0, a2, a1);
2468        } else {
2469            tcg_out_opc_vv(s, OPC_VSUB_VV, a0, a1, a2);
2470        }
2471        break;
2472    case INDEX_op_and_vec:
2473        set_vtype_len(s, type);
2474        tcg_out_opc_vv_vi(s, OPC_VAND_VV, OPC_VAND_VI, a0, a1, a2, c2);
2475        break;
2476    case INDEX_op_or_vec:
2477        set_vtype_len(s, type);
2478        tcg_out_opc_vv_vi(s, OPC_VOR_VV, OPC_VOR_VI, a0, a1, a2, c2);
2479        break;
2480    case INDEX_op_xor_vec:
2481        set_vtype_len(s, type);
2482        tcg_out_opc_vv_vi(s, OPC_VXOR_VV, OPC_VXOR_VI, a0, a1, a2, c2);
2483        break;
2484    case INDEX_op_not_vec:
2485        set_vtype_len(s, type);
2486        tcg_out_opc_vi(s, OPC_VXOR_VI, a0, a1, -1);
2487        break;
2488    case INDEX_op_neg_vec:
2489        set_vtype_len_sew(s, type, vece);
2490        tcg_out_opc_vi(s, OPC_VRSUB_VI, a0, a1, 0);
2491        break;
2492    case INDEX_op_mul_vec:
2493        set_vtype_len_sew(s, type, vece);
2494        tcg_out_opc_vv(s, OPC_VMUL_VV, a0, a1, a2);
2495        break;
2496    case INDEX_op_ssadd_vec:
2497        set_vtype_len_sew(s, type, vece);
2498        tcg_out_opc_vv_vi(s, OPC_VSADD_VV, OPC_VSADD_VI, a0, a1, a2, c2);
2499        break;
2500    case INDEX_op_sssub_vec:
2501        set_vtype_len_sew(s, type, vece);
2502        tcg_out_opc_vv_vi(s, OPC_VSSUB_VV, OPC_VSSUB_VI, a0, a1, a2, c2);
2503        break;
2504    case INDEX_op_usadd_vec:
2505        set_vtype_len_sew(s, type, vece);
2506        tcg_out_opc_vv_vi(s, OPC_VSADDU_VV, OPC_VSADDU_VI, a0, a1, a2, c2);
2507        break;
2508    case INDEX_op_ussub_vec:
2509        set_vtype_len_sew(s, type, vece);
2510        tcg_out_opc_vv_vi(s, OPC_VSSUBU_VV, OPC_VSSUBU_VI, a0, a1, a2, c2);
2511        break;
2512    case INDEX_op_smax_vec:
2513        set_vtype_len_sew(s, type, vece);
2514        tcg_out_opc_vv_vi(s, OPC_VMAX_VV, OPC_VMAX_VI, a0, a1, a2, c2);
2515        break;
2516    case INDEX_op_smin_vec:
2517        set_vtype_len_sew(s, type, vece);
2518        tcg_out_opc_vv_vi(s, OPC_VMIN_VV, OPC_VMIN_VI, a0, a1, a2, c2);
2519        break;
2520    case INDEX_op_umax_vec:
2521        set_vtype_len_sew(s, type, vece);
2522        tcg_out_opc_vv_vi(s, OPC_VMAXU_VV, OPC_VMAXU_VI, a0, a1, a2, c2);
2523        break;
2524    case INDEX_op_umin_vec:
2525        set_vtype_len_sew(s, type, vece);
2526        tcg_out_opc_vv_vi(s, OPC_VMINU_VV, OPC_VMINU_VI, a0, a1, a2, c2);
2527        break;
2528    case INDEX_op_shls_vec:
2529        set_vtype_len_sew(s, type, vece);
2530        tcg_out_opc_vx(s, OPC_VSLL_VX, a0, a1, a2);
2531        break;
2532    case INDEX_op_shrs_vec:
2533        set_vtype_len_sew(s, type, vece);
2534        tcg_out_opc_vx(s, OPC_VSRL_VX, a0, a1, a2);
2535        break;
2536    case INDEX_op_sars_vec:
2537        set_vtype_len_sew(s, type, vece);
2538        tcg_out_opc_vx(s, OPC_VSRA_VX, a0, a1, a2);
2539        break;
2540    case INDEX_op_shlv_vec:
2541        set_vtype_len_sew(s, type, vece);
2542        tcg_out_opc_vv(s, OPC_VSLL_VV, a0, a1, a2);
2543        break;
2544    case INDEX_op_shrv_vec:
2545        set_vtype_len_sew(s, type, vece);
2546        tcg_out_opc_vv(s, OPC_VSRL_VV, a0, a1, a2);
2547        break;
2548    case INDEX_op_sarv_vec:
2549        set_vtype_len_sew(s, type, vece);
2550        tcg_out_opc_vv(s, OPC_VSRA_VV, a0, a1, a2);
2551        break;
2552    case INDEX_op_shli_vec:
2553        set_vtype_len_sew(s, type, vece);
2554        tcg_out_vshifti(s, OPC_VSLL_VI, OPC_VSLL_VX, a0, a1, a2);
2555        break;
2556    case INDEX_op_shri_vec:
2557        set_vtype_len_sew(s, type, vece);
2558        tcg_out_vshifti(s, OPC_VSRL_VI, OPC_VSRL_VX, a0, a1, a2);
2559        break;
2560    case INDEX_op_sari_vec:
2561        set_vtype_len_sew(s, type, vece);
2562        tcg_out_vshifti(s, OPC_VSRA_VI, OPC_VSRA_VX, a0, a1, a2);
2563        break;
2564    case INDEX_op_rotli_vec:
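        /*
         * The base vector extension has no rotate; build rotl(x, n) as
         * (x << n) | (x >> (width - n)), with v0 as a temporary.
         */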
2565        set_vtype_len_sew(s, type, vece);
2566        tcg_out_vshifti(s, OPC_VSLL_VI, OPC_VSLL_VX, TCG_REG_V0, a1, a2);
2567        tcg_out_vshifti(s, OPC_VSRL_VI, OPC_VSRL_VX, a0, a1,
2568                        -a2 & ((8 << vece) - 1));
2569        tcg_out_opc_vv(s, OPC_VOR_VV, a0, a0, TCG_REG_V0);
2570        break;
2571    case INDEX_op_rotls_vec:
2572        set_vtype_len_sew(s, type, vece);
2573        tcg_out_opc_vx(s, OPC_VSLL_VX, TCG_REG_V0, a1, a2);
2574        tcg_out_opc_reg(s, OPC_SUBW, TCG_REG_TMP0, TCG_REG_ZERO, a2);
2575        tcg_out_opc_vx(s, OPC_VSRL_VX, a0, a1, TCG_REG_TMP0);
2576        tcg_out_opc_vv(s, OPC_VOR_VV, a0, a0, TCG_REG_V0);
2577        break;
2578    case INDEX_op_rotlv_vec:
2579        set_vtype_len_sew(s, type, vece);
2580        tcg_out_opc_vi(s, OPC_VRSUB_VI, TCG_REG_V0, a2, 0);
2581        tcg_out_opc_vv(s, OPC_VSRL_VV, TCG_REG_V0, a1, TCG_REG_V0);
2582        tcg_out_opc_vv(s, OPC_VSLL_VV, a0, a1, a2);
2583        tcg_out_opc_vv(s, OPC_VOR_VV, a0, a0, TCG_REG_V0);
2584        break;
2585    case INDEX_op_rotrv_vec:
2586        set_vtype_len_sew(s, type, vece);
2587        tcg_out_opc_vi(s, OPC_VRSUB_VI, TCG_REG_V0, a2, 0);
2588        tcg_out_opc_vv(s, OPC_VSLL_VV, TCG_REG_V0, a1, TCG_REG_V0);
2589        tcg_out_opc_vv(s, OPC_VSRL_VV, a0, a1, a2);
2590        tcg_out_opc_vv(s, OPC_VOR_VV, a0, a0, TCG_REG_V0);
2591        break;
2592    case INDEX_op_cmp_vec:
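        /* A bare compare is cmpsel selecting the constants -1 and 0. */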
2593        tcg_out_cmpsel(s, type, vece, args[3], a0, a1, a2, c2,
2594                       -1, true, 0, true);
2595        break;
2596    case INDEX_op_cmpsel_vec:
2597        tcg_out_cmpsel(s, type, vece, args[5], a0, a1, a2, c2,
2598                       args[3], const_args[3], args[4], const_args[4]);
2599        break;
2600    case INDEX_op_mov_vec: /* Always emitted via tcg_out_mov.  */
2601    case INDEX_op_dup_vec: /* Always emitted via tcg_out_dup_vec.  */
2602    default:
2603        g_assert_not_reached();
2604    }
2605}
2606
2607void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
2608                       TCGArg a0, ...)
2609{
2610    g_assert_not_reached();
2611}
2612
2613int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
2614{
2615    switch (opc) {
2616    case INDEX_op_add_vec:
2617    case INDEX_op_sub_vec:
2618    case INDEX_op_and_vec:
2619    case INDEX_op_or_vec:
2620    case INDEX_op_xor_vec:
2621    case INDEX_op_not_vec:
2622    case INDEX_op_neg_vec:
2623    case INDEX_op_mul_vec:
2624    case INDEX_op_ssadd_vec:
2625    case INDEX_op_sssub_vec:
2626    case INDEX_op_usadd_vec:
2627    case INDEX_op_ussub_vec:
2628    case INDEX_op_smax_vec:
2629    case INDEX_op_smin_vec:
2630    case INDEX_op_umax_vec:
2631    case INDEX_op_umin_vec:
2632    case INDEX_op_shls_vec:
2633    case INDEX_op_shrs_vec:
2634    case INDEX_op_sars_vec:
2635    case INDEX_op_shlv_vec:
2636    case INDEX_op_shrv_vec:
2637    case INDEX_op_sarv_vec:
2638    case INDEX_op_shri_vec:
2639    case INDEX_op_shli_vec:
2640    case INDEX_op_sari_vec:
2641    case INDEX_op_rotls_vec:
2642    case INDEX_op_rotlv_vec:
2643    case INDEX_op_rotrv_vec:
2644    case INDEX_op_rotli_vec:
2645    case INDEX_op_cmp_vec:
2646    case INDEX_op_cmpsel_vec:
2647        return 1;
2648    default:
2649        return 0;
2650    }
2651}
2652
2653static TCGConstraintSetIndex
2654tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
2655{
2656    switch (op) {
2657    case INDEX_op_goto_ptr:
2658        return C_O0_I1(r);
2659
2660    case INDEX_op_ld8u_i32:
2661    case INDEX_op_ld8s_i32:
2662    case INDEX_op_ld16u_i32:
2663    case INDEX_op_ld16s_i32:
2664    case INDEX_op_ld_i32:
2665    case INDEX_op_not_i32:
2666    case INDEX_op_neg_i32:
2667    case INDEX_op_ld8u_i64:
2668    case INDEX_op_ld8s_i64:
2669    case INDEX_op_ld16u_i64:
2670    case INDEX_op_ld16s_i64:
2671    case INDEX_op_ld32s_i64:
2672    case INDEX_op_ld32u_i64:
2673    case INDEX_op_ld_i64:
2674    case INDEX_op_not_i64:
2675    case INDEX_op_neg_i64:
2676    case INDEX_op_extu_i32_i64:
2677    case INDEX_op_extrl_i64_i32:
2678    case INDEX_op_extrh_i64_i32:
2679    case INDEX_op_ext_i32_i64:
2680    case INDEX_op_extract_i32:
2681    case INDEX_op_extract_i64:
2682    case INDEX_op_sextract_i32:
2683    case INDEX_op_sextract_i64:
2684    case INDEX_op_bswap16_i32:
2685    case INDEX_op_bswap32_i32:
2686    case INDEX_op_bswap16_i64:
2687    case INDEX_op_bswap32_i64:
2688    case INDEX_op_bswap64_i64:
2689    case INDEX_op_ctpop_i32:
2690    case INDEX_op_ctpop_i64:
2691        return C_O1_I1(r, r);
2692
2693    case INDEX_op_st8_i32:
2694    case INDEX_op_st16_i32:
2695    case INDEX_op_st_i32:
2696    case INDEX_op_st8_i64:
2697    case INDEX_op_st16_i64:
2698    case INDEX_op_st32_i64:
2699    case INDEX_op_st_i64:
2700        return C_O0_I2(rz, r);
2701
2702    case INDEX_op_setcond_i32:
2703    case INDEX_op_setcond_i64:
2704    case INDEX_op_negsetcond_i32:
2705    case INDEX_op_negsetcond_i64:
2706        return C_O1_I2(r, r, rI);
2707
2708    case INDEX_op_sub_i32:
2709    case INDEX_op_sub_i64:
2710        return C_O1_I2(r, rz, rN);
2711
2712    case INDEX_op_mul_i32:
2713    case INDEX_op_mulsh_i32:
2714    case INDEX_op_muluh_i32:
2715    case INDEX_op_div_i32:
2716    case INDEX_op_divu_i32:
2717    case INDEX_op_rem_i32:
2718    case INDEX_op_remu_i32:
2719    case INDEX_op_mul_i64:
2720    case INDEX_op_mulsh_i64:
2721    case INDEX_op_muluh_i64:
2722    case INDEX_op_div_i64:
2723    case INDEX_op_divu_i64:
2724    case INDEX_op_rem_i64:
2725    case INDEX_op_remu_i64:
2726        return C_O1_I2(r, rz, rz);
2727
2728    case INDEX_op_shl_i32:
2729    case INDEX_op_shr_i32:
2730    case INDEX_op_sar_i32:
2731    case INDEX_op_rotl_i32:
2732    case INDEX_op_rotr_i32:
2733    case INDEX_op_shl_i64:
2734    case INDEX_op_shr_i64:
2735    case INDEX_op_sar_i64:
2736    case INDEX_op_rotl_i64:
2737    case INDEX_op_rotr_i64:
2738        return C_O1_I2(r, r, ri);
2739
2740    case INDEX_op_clz_i32:
2741    case INDEX_op_clz_i64:
2742    case INDEX_op_ctz_i32:
2743    case INDEX_op_ctz_i64:
2744        return C_N1_I2(r, r, rM);
2745
2746    case INDEX_op_brcond_i32:
2747    case INDEX_op_brcond_i64:
2748        return C_O0_I2(rz, rz);
2749
2750    case INDEX_op_movcond_i32:
2751    case INDEX_op_movcond_i64:
2752        return C_O1_I4(r, r, rI, rM, rM);
2753
2754    case INDEX_op_add2_i32:
2755    case INDEX_op_add2_i64:
2756    case INDEX_op_sub2_i32:
2757    case INDEX_op_sub2_i64:
2758        return C_O2_I4(r, r, rz, rz, rM, rM);
2759
2760    case INDEX_op_qemu_ld_i32:
2761    case INDEX_op_qemu_ld_i64:
2762        return C_O1_I1(r, r);
2763    case INDEX_op_qemu_st_i32:
2764    case INDEX_op_qemu_st_i64:
2765        return C_O0_I2(rz, r);
2766
2767    case INDEX_op_st_vec:
2768        return C_O0_I2(v, r);
2769    case INDEX_op_dup_vec:
2770    case INDEX_op_dupm_vec:
2771    case INDEX_op_ld_vec:
2772        return C_O1_I1(v, r);
2773    case INDEX_op_neg_vec:
2774    case INDEX_op_not_vec:
2775    case INDEX_op_shli_vec:
2776    case INDEX_op_shri_vec:
2777    case INDEX_op_sari_vec:
2778    case INDEX_op_rotli_vec:
2779        return C_O1_I1(v, v);
2780    case INDEX_op_add_vec:
2781    case INDEX_op_and_vec:
2782    case INDEX_op_or_vec:
2783    case INDEX_op_xor_vec:
2784    case INDEX_op_ssadd_vec:
2785    case INDEX_op_sssub_vec:
2786    case INDEX_op_usadd_vec:
2787    case INDEX_op_ussub_vec:
2788    case INDEX_op_smax_vec:
2789    case INDEX_op_smin_vec:
2790    case INDEX_op_umax_vec:
2791    case INDEX_op_umin_vec:
2792        return C_O1_I2(v, v, vK);
2793    case INDEX_op_sub_vec:
2794        return C_O1_I2(v, vK, v);
2795    case INDEX_op_mul_vec:
2796    case INDEX_op_shlv_vec:
2797    case INDEX_op_shrv_vec:
2798    case INDEX_op_sarv_vec:
2799    case INDEX_op_rotlv_vec:
2800    case INDEX_op_rotrv_vec:
2801        return C_O1_I2(v, v, v);
2802    case INDEX_op_shls_vec:
2803    case INDEX_op_shrs_vec:
2804    case INDEX_op_sars_vec:
2805    case INDEX_op_rotls_vec:
2806        return C_O1_I2(v, v, r);
2807    case INDEX_op_cmp_vec:
2808        return C_O1_I2(v, v, vL);
2809    case INDEX_op_cmpsel_vec:
2810        return C_O1_I4(v, v, vL, vK, vK);
2811    default:
2812        return C_NotImplemented;
2813    }
2814}
2815
2816static const int tcg_target_callee_save_regs[] = {
2817    TCG_REG_S0,       /* used for the global env (TCG_AREG0) */
2818    TCG_REG_S1,
2819    TCG_REG_S2,
2820    TCG_REG_S3,
2821    TCG_REG_S4,
2822    TCG_REG_S5,
2823    TCG_REG_S6,
2824    TCG_REG_S7,
2825    TCG_REG_S8,
2826    TCG_REG_S9,
2827    TCG_REG_S10,
2828    TCG_REG_S11,
2829    TCG_REG_RA,       /* should be last for ABI compliance */
2830};
2831
2832/* Stack frame parameters.  */
2833#define REG_SIZE   (TCG_TARGET_REG_BITS / 8)
2834#define SAVE_SIZE  ((int)ARRAY_SIZE(tcg_target_callee_save_regs) * REG_SIZE)
2835#define TEMP_SIZE  (CPU_TEMP_BUF_NLONGS * (int)sizeof(long))
2836#define FRAME_SIZE ((TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE + SAVE_SIZE \
2837                     + TCG_TARGET_STACK_ALIGN - 1) \
2838                    & -TCG_TARGET_STACK_ALIGN)
2839#define SAVE_OFS   (TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE)
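
/*
 * Resulting frame layout, from low to high address:
 *   sp + 0         : TCG_STATIC_CALL_ARGS_SIZE bytes of outgoing call args
 *   sp + args size : TEMP_SIZE bytes of TCG temporary buffer
 *   sp + SAVE_OFS  : the callee-saved registers listed above
 * with the total rounded up to TCG_TARGET_STACK_ALIGN.
 */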
2840
2841/* We expect FRAME_SIZE to fit in the signed 12-bit immediate that ADDI uses for frame allocation.  */
2842QEMU_BUILD_BUG_ON(FRAME_SIZE > 0x7ff);
2843
2844/* Generate global QEMU prologue and epilogue code */
2845static void tcg_target_qemu_prologue(TCGContext *s)
2846{
2847    int i;
2848
2849    tcg_set_frame(s, TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE, TEMP_SIZE);
2850
2851    /* TB prologue */
2852    tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_SP, TCG_REG_SP, -FRAME_SIZE);
2853    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
2854        tcg_out_st(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
2855                   TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
2856    }
2857
2858    if (!tcg_use_softmmu && guest_base) {
2859        tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base);
2860        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
2861    }
2862
2863    /* Call generated code */
2864    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
2865    tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, tcg_target_call_iarg_regs[1], 0);
2866
2867    /* Return path for goto_ptr. Set return value to 0 */
2868    tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
2869    tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_A0, TCG_REG_ZERO);
2870
2871    /* TB epilogue */
2872    tb_ret_addr = tcg_splitwx_to_rx(s->code_ptr);
2873    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
2874        tcg_out_ld(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
2875                   TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
2876    }
2877
2878    tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_SP, TCG_REG_SP, FRAME_SIZE);
2879    tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, TCG_REG_RA, 0);
2880}
2881
2882static void tcg_out_tb_start(TCGContext *s)
2883{
2884    init_setting_vtype(s);
2885}
2886
2887static bool vtype_check(unsigned vtype)
2888{
2889    unsigned long tmp;
2890
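    /*
     * Written with .insn so that the assembler need not support the V
     * extension.  If the requested vtype is unsupported, the hardware
     * sets vill and the returned vl reads as zero, which is what the
     * check below relies on.
     */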
2891    /* vsetvl tmp, zero, vtype */
2892    asm(".insn r 0x57, 7, 0x40, %0, zero, %1" : "=r"(tmp) : "r"(vtype));
2893    return tmp != 0;
2894}
2895
2896static void probe_frac_lmul_1(TCGType type, MemOp vsew)
2897{
2898    VsetCache *p = &riscv_vset_cache[type - TCG_TYPE_V64][vsew];
2899    unsigned avl = tcg_type_size(type) >> vsew;
2900    int lmul = type - riscv_lg2_vlenb;
2901    unsigned vtype = encode_vtype(true, true, vsew, lmul & 7);
2902    bool lmul_eq_avl = true;
2903
2904    /* Guaranteed by Zve64x. */
2905    assert(lmul < 3);
2906
2907    /*
2908     * For LMUL < -3, the host vector size is so large that TYPE
2909     * is smaller than the minimum 1/8 fraction.
2910     *
2911     * For other fractional LMUL settings, implementations must
2912     * support SEW settings between SEW_MIN and LMUL * ELEN, inclusive.
2913     * So if ELEN = 64, LMUL = 1/2, then SEW will support e8, e16, e32,
2914     * but e64 may not be supported. In other words, the hardware only
2915     * guarantees SEW_MIN <= SEW <= LMUL * ELEN.  Check.
2916     */
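    /*
     * For example, with VLEN = 128 (riscv_lg2_vlenb = 4), TCG_TYPE_V64
     * gives lmul = 3 - 4 = -1, i.e. LMUL = 1/2.  A SEW of e64 then
     * exceeds LMUL * ELEN = 32, so vtype_check() may fail, and we fall
     * back to LMUL = 1 with an AVL smaller than VLMAX.
     */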
2917    if (lmul < 0 && (lmul < -3 || !vtype_check(vtype))) {
2918        vtype = encode_vtype(true, true, vsew, VLMUL_M1);
2919        lmul_eq_avl = false;
2920    }
2921
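    /* vsetivli's AVL is a 5-bit immediate, hence the 32-element limit. */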
2922    if (avl < 32) {
2923        p->vset_insn = encode_vseti(OPC_VSETIVLI, TCG_REG_ZERO, avl, vtype);
2924    } else if (lmul_eq_avl) {
2925        /* rd != 0 and rs1 == 0 uses vlmax */
2926        p->vset_insn = encode_vset(OPC_VSETVLI, TCG_REG_TMP0, TCG_REG_ZERO, vtype);
2927    } else {
2928        p->movi_insn = encode_i(OPC_ADDI, TCG_REG_TMP0, TCG_REG_ZERO, avl);
2929        p->vset_insn = encode_vset(OPC_VSETVLI, TCG_REG_ZERO, TCG_REG_TMP0, vtype);
2930    }
2931}
2932
2933static void probe_frac_lmul(void)
2934{
2935    /* TCG_TYPE_V64 must be 3, lg2 of 8 bytes, to line up with riscv_lg2_vlenb. */
2936    QEMU_BUILD_BUG_ON(TCG_TYPE_V64 != 3);
2937
2938    for (TCGType t = TCG_TYPE_V64; t <= TCG_TYPE_V256; t++) {
2939        for (MemOp e = MO_8; e <= MO_64; e++) {
2940            probe_frac_lmul_1(t, e);
2941        }
2942    }
2943}
2944
2945static void tcg_target_init(TCGContext *s)
2946{
2947    tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffff;
2948    tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffff;
2949
2950    tcg_target_call_clobber_regs = -1;
2951    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S0);
2952    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S1);
2953    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S2);
2954    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S3);
2955    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S4);
2956    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S5);
2957    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S6);
2958    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S7);
2959    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S8);
2960    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S9);
2961    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S10);
2962    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S11);
2963
2964    s->reserved_regs = 0;
2965    tcg_regset_set_reg(s->reserved_regs, TCG_REG_ZERO);
2966    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP0);
2967    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1);
2968    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP2);
2969    tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP);
2970    tcg_regset_set_reg(s->reserved_regs, TCG_REG_GP);
2971    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TP);
2972
2973    if (cpuinfo & CPUINFO_ZVE64X) {
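        /*
         * Types wider than VLEN use LMUL > 1 register groups, which
         * must start at register numbers that are multiples of the
         * group size, so restrict the allocatable sets accordingly.
         */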
2974        switch (riscv_lg2_vlenb) {
2975        case TCG_TYPE_V64:
2976            tcg_target_available_regs[TCG_TYPE_V64] = ALL_VECTOR_REGS;
2977            tcg_target_available_regs[TCG_TYPE_V128] = ALL_DVECTOR_REG_GROUPS;
2978            tcg_target_available_regs[TCG_TYPE_V256] = ALL_QVECTOR_REG_GROUPS;
2979            s->reserved_regs |= (~ALL_QVECTOR_REG_GROUPS & ALL_VECTOR_REGS);
2980            break;
2981        case TCG_TYPE_V128:
2982            tcg_target_available_regs[TCG_TYPE_V64] = ALL_VECTOR_REGS;
2983            tcg_target_available_regs[TCG_TYPE_V128] = ALL_VECTOR_REGS;
2984            tcg_target_available_regs[TCG_TYPE_V256] = ALL_DVECTOR_REG_GROUPS;
2985            s->reserved_regs |= (~ALL_DVECTOR_REG_GROUPS & ALL_VECTOR_REGS);
2986            break;
2987        default:
2988            /* Guaranteed by Zve64x. */
2989            tcg_debug_assert(riscv_lg2_vlenb >= TCG_TYPE_V256);
2990            tcg_target_available_regs[TCG_TYPE_V64] = ALL_VECTOR_REGS;
2991            tcg_target_available_regs[TCG_TYPE_V128] = ALL_VECTOR_REGS;
2992            tcg_target_available_regs[TCG_TYPE_V256] = ALL_VECTOR_REGS;
2993            break;
2994        }
2995        tcg_regset_set_reg(s->reserved_regs, TCG_REG_V0);
2996        probe_frac_lmul();
2997    }
2998}
2999
3000typedef struct {
3001    DebugFrameHeader h;
3002    uint8_t fde_def_cfa[4];
3003    uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2];
3004} DebugFrame;
3005
3006#define ELF_HOST_MACHINE EM_RISCV
3007
3008static const DebugFrame debug_frame = {
3009    .h.cie.len = sizeof(DebugFrameCIE) - 4, /* length after .len member */
3010    .h.cie.id = -1,
3011    .h.cie.version = 1,
3012    .h.cie.code_align = 1,
3013    .h.cie.data_align = -(TCG_TARGET_REG_BITS / 8) & 0x7f, /* sleb128 */
3014    .h.cie.return_column = TCG_REG_RA,
3015
3016    /* Total FDE size does not include the "len" member.  */
3017    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),
3018
3019    .fde_def_cfa = {
3020        12, TCG_REG_SP,                 /* DW_CFA_def_cfa sp, ... */
3021        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
3022        (FRAME_SIZE >> 7)
3023    },
3024    .fde_reg_ofs = {
3025        0x80 + 9,  12,                  /* DW_CFA_offset, s1,  -96 */
3026        0x80 + 18, 11,                  /* DW_CFA_offset, s2,  -88 */
3027        0x80 + 19, 10,                  /* DW_CFA_offset, s3,  -80 */
3028        0x80 + 20, 9,                   /* DW_CFA_offset, s4,  -72 */
3029        0x80 + 21, 8,                   /* DW_CFA_offset, s5,  -64 */
3030        0x80 + 22, 7,                   /* DW_CFA_offset, s6,  -56 */
3031        0x80 + 23, 6,                   /* DW_CFA_offset, s7,  -48 */
3032        0x80 + 24, 5,                   /* DW_CFA_offset, s8,  -40 */
3033        0x80 + 25, 4,                   /* DW_CFA_offset, s9,  -32 */
3034        0x80 + 26, 3,                   /* DW_CFA_offset, s10, -24 */
3035        0x80 + 27, 2,                   /* DW_CFA_offset, s11, -16 */
3036        0x80 + 1 , 1,                   /* DW_CFA_offset, ra,  -8 */
3037    }
3038};
3039
3040void tcg_register_jit(const void *buf, size_t buf_size)
3041{
3042    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
3043}
3044