xref: /openbmc/qemu/tcg/riscv/tcg-target.c.inc (revision a363e1e179445102d7940e92d394d6c00c126f13)
1/*
2 * Tiny Code Generator for QEMU
3 *
4 * Copyright (c) 2018 SiFive, Inc
5 * Copyright (c) 2008-2009 Arnaud Patard <arnaud.patard@rtp-net.org>
6 * Copyright (c) 2009 Aurelien Jarno <aurelien@aurel32.net>
7 * Copyright (c) 2008 Fabrice Bellard
8 *
9 * Based on i386/tcg-target.c and mips/tcg-target.c
10 *
11 * Permission is hereby granted, free of charge, to any person obtaining a copy
12 * of this software and associated documentation files (the "Software"), to deal
13 * in the Software without restriction, including without limitation the rights
14 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
15 * copies of the Software, and to permit persons to whom the Software is
16 * furnished to do so, subject to the following conditions:
17 *
18 * The above copyright notice and this permission notice shall be included in
19 * all copies or substantial portions of the Software.
20 *
21 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
22 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
23 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
24 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
25 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
26 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
27 * THE SOFTWARE.
28 */
29
30/* Used for function call generation. */
31#define TCG_REG_CALL_STACK              TCG_REG_SP
32#define TCG_TARGET_STACK_ALIGN          16
33#define TCG_TARGET_CALL_STACK_OFFSET    0
34#define TCG_TARGET_CALL_ARG_I32         TCG_CALL_ARG_NORMAL
35#define TCG_TARGET_CALL_ARG_I64         TCG_CALL_ARG_NORMAL
36#define TCG_TARGET_CALL_ARG_I128        TCG_CALL_ARG_NORMAL
37#define TCG_TARGET_CALL_RET_I128        TCG_CALL_RET_NORMAL
38
39#ifdef CONFIG_DEBUG_TCG
40static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
41    "zero", "ra",  "sp",  "gp",  "tp",  "t0",  "t1",  "t2",
42    "s0",   "s1",  "a0",  "a1",  "a2",  "a3",  "a4",  "a5",
43    "a6",   "a7",  "s2",  "s3",  "s4",  "s5",  "s6",  "s7",
44    "s8",   "s9",  "s10", "s11", "t3",  "t4",  "t5",  "t6",
45    "v0",   "v1",  "v2",  "v3",  "v4",  "v5",  "v6",  "v7",
46    "v8",   "v9",  "v10", "v11", "v12", "v13", "v14", "v15",
47    "v16",  "v17", "v18", "v19", "v20", "v21", "v22", "v23",
48    "v24",  "v25", "v26", "v27", "v28", "v29", "v30", "v31",
49};
50#endif
51
52static const int tcg_target_reg_alloc_order[] = {
53    /* Call saved registers */
54    /* TCG_REG_S0 reserved for TCG_AREG0 */
55    TCG_REG_S1,
56    TCG_REG_S2,
57    TCG_REG_S3,
58    TCG_REG_S4,
59    TCG_REG_S5,
60    TCG_REG_S6,
61    TCG_REG_S7,
62    TCG_REG_S8,
63    TCG_REG_S9,
64    TCG_REG_S10,
65    TCG_REG_S11,
66
67    /* Call clobbered registers */
68    TCG_REG_T0,
69    TCG_REG_T1,
70    TCG_REG_T2,
71    TCG_REG_T3,
72    TCG_REG_T4,
73    TCG_REG_T5,
74    TCG_REG_T6,
75
76    /* Argument registers */
77    TCG_REG_A0,
78    TCG_REG_A1,
79    TCG_REG_A2,
80    TCG_REG_A3,
81    TCG_REG_A4,
82    TCG_REG_A5,
83    TCG_REG_A6,
84    TCG_REG_A7,
85
86    /* Vector registers; TCG_REG_V0 is reserved as the mask register. */
87    TCG_REG_V1,  TCG_REG_V2,  TCG_REG_V3,  TCG_REG_V4,
88    TCG_REG_V5,  TCG_REG_V6,  TCG_REG_V7,  TCG_REG_V8,
89    TCG_REG_V9,  TCG_REG_V10, TCG_REG_V11, TCG_REG_V12,
90    TCG_REG_V13, TCG_REG_V14, TCG_REG_V15, TCG_REG_V16,
91    TCG_REG_V17, TCG_REG_V18, TCG_REG_V19, TCG_REG_V20,
92    TCG_REG_V21, TCG_REG_V22, TCG_REG_V23, TCG_REG_V24,
93    TCG_REG_V25, TCG_REG_V26, TCG_REG_V27, TCG_REG_V28,
94    TCG_REG_V29, TCG_REG_V30, TCG_REG_V31,
95};
96
97static const int tcg_target_call_iarg_regs[] = {
98    TCG_REG_A0,
99    TCG_REG_A1,
100    TCG_REG_A2,
101    TCG_REG_A3,
102    TCG_REG_A4,
103    TCG_REG_A5,
104    TCG_REG_A6,
105    TCG_REG_A7,
106};
107
108static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
109{
110    tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
111    tcg_debug_assert(slot >= 0 && slot <= 1);
112    return TCG_REG_A0 + slot;
113}
114
115#define TCG_CT_CONST_S12     0x100
116#define TCG_CT_CONST_M12     0x200
117#define TCG_CT_CONST_S5      0x400
118#define TCG_CT_CONST_CMP_VI  0x800
119
120#define ALL_GENERAL_REGS   MAKE_64BIT_MASK(0, 32)
121#define ALL_VECTOR_REGS    MAKE_64BIT_MASK(32, 32)
122#define ALL_DVECTOR_REG_GROUPS 0x5555555500000000
123#define ALL_QVECTOR_REG_GROUPS 0x1111111100000000
124
125#define sextreg  sextract64
126
127/*
128 * RISC-V opcodes and encoding fields: base ISA (IM) plus the Zb*, Zicond and V extensions used below
129 */
130
131#define V_OPIVV (0x0 << 12)
132#define V_OPFVV (0x1 << 12)
133#define V_OPMVV (0x2 << 12)
134#define V_OPIVI (0x3 << 12)
135#define V_OPIVX (0x4 << 12)
136#define V_OPFVF (0x5 << 12)
137#define V_OPMVX (0x6 << 12)
138#define V_OPCFG (0x7 << 12)
139
140/* NF <= 7 && NF >= 0 */
141#define V_NF(x) ((x) << 29)
142#define V_UNIT_STRIDE (0x0 << 20)
143#define V_UNIT_STRIDE_WHOLE_REG (0x8 << 20)
144
145typedef enum {
146    VLMUL_M1 = 0, /* LMUL=1 */
147    VLMUL_M2,     /* LMUL=2 */
148    VLMUL_M4,     /* LMUL=4 */
149    VLMUL_M8,     /* LMUL=8 */
150    VLMUL_RESERVED,
151    VLMUL_MF8,    /* LMUL=1/8 */
152    VLMUL_MF4,    /* LMUL=1/4 */
153    VLMUL_MF2,    /* LMUL=1/2 */
154} RISCVVlmul;
155
156typedef enum {
157    OPC_ADD = 0x33,
158    OPC_ADDI = 0x13,
159    OPC_AND = 0x7033,
160    OPC_ANDI = 0x7013,
161    OPC_AUIPC = 0x17,
162    OPC_BEQ = 0x63,
163    OPC_BEXTI = 0x48005013,
164    OPC_BGE = 0x5063,
165    OPC_BGEU = 0x7063,
166    OPC_BLT = 0x4063,
167    OPC_BLTU = 0x6063,
168    OPC_BNE = 0x1063,
169    OPC_DIV = 0x2004033,
170    OPC_DIVU = 0x2005033,
171    OPC_JAL = 0x6f,
172    OPC_JALR = 0x67,
173    OPC_LB = 0x3,
174    OPC_LBU = 0x4003,
175    OPC_LD = 0x3003,
176    OPC_LH = 0x1003,
177    OPC_LHU = 0x5003,
178    OPC_LUI = 0x37,
179    OPC_LW = 0x2003,
180    OPC_LWU = 0x6003,
181    OPC_MUL = 0x2000033,
182    OPC_MULH = 0x2001033,
183    OPC_MULHSU = 0x2002033,
184    OPC_MULHU = 0x2003033,
185    OPC_OR = 0x6033,
186    OPC_ORI = 0x6013,
187    OPC_REM = 0x2006033,
188    OPC_REMU = 0x2007033,
189    OPC_SB = 0x23,
190    OPC_SD = 0x3023,
191    OPC_SH = 0x1023,
192    OPC_SLL = 0x1033,
193    OPC_SLLI = 0x1013,
194    OPC_SLT = 0x2033,
195    OPC_SLTI = 0x2013,
196    OPC_SLTIU = 0x3013,
197    OPC_SLTU = 0x3033,
198    OPC_SRA = 0x40005033,
199    OPC_SRAI = 0x40005013,
200    OPC_SRL = 0x5033,
201    OPC_SRLI = 0x5013,
202    OPC_SUB = 0x40000033,
203    OPC_SW = 0x2023,
204    OPC_XOR = 0x4033,
205    OPC_XORI = 0x4013,
206
207    OPC_ADDIW = 0x1b,
208    OPC_ADDW = 0x3b,
209    OPC_DIVUW = 0x200503b,
210    OPC_DIVW = 0x200403b,
211    OPC_MULW = 0x200003b,
212    OPC_REMUW = 0x200703b,
213    OPC_REMW = 0x200603b,
214    OPC_SLLIW = 0x101b,
215    OPC_SLLW = 0x103b,
216    OPC_SRAIW = 0x4000501b,
217    OPC_SRAW = 0x4000503b,
218    OPC_SRLIW = 0x501b,
219    OPC_SRLW = 0x503b,
220    OPC_SUBW = 0x4000003b,
221
222    OPC_FENCE = 0x0000000f,
223    OPC_NOP   = OPC_ADDI,   /* nop = addi x0,x0,0 */
224
225    /* Zba: Bit manipulation extension, address generation */
226    OPC_ADD_UW = 0x0800003b,
227
228    /* Zbb: Bit manipulation extension, basic bit manipulation */
229    OPC_ANDN   = 0x40007033,
230    OPC_CLZ    = 0x60001013,
231    OPC_CLZW   = 0x6000101b,
232    OPC_CPOP   = 0x60201013,
233    OPC_CPOPW  = 0x6020101b,
234    OPC_CTZ    = 0x60101013,
235    OPC_CTZW   = 0x6010101b,
236    OPC_ORN    = 0x40006033,
237    OPC_REV8   = 0x6b805013,
238    OPC_ROL    = 0x60001033,
239    OPC_ROLW   = 0x6000103b,
240    OPC_ROR    = 0x60005033,
241    OPC_RORW   = 0x6000503b,
242    OPC_RORI   = 0x60005013,
243    OPC_RORIW  = 0x6000501b,
244    OPC_SEXT_B = 0x60401013,
245    OPC_SEXT_H = 0x60501013,
246    OPC_XNOR   = 0x40004033,
247    OPC_ZEXT_H = 0x0800403b,
248
249    /* Zicond: integer conditional operations */
250    OPC_CZERO_EQZ = 0x0e005033,
251    OPC_CZERO_NEZ = 0x0e007033,
252
253    /* V: Vector extension 1.0 */
254    OPC_VSETVLI  = 0x57 | V_OPCFG,
255    OPC_VSETIVLI = 0xc0000057 | V_OPCFG,
256    OPC_VSETVL   = 0x80000057 | V_OPCFG,
257
258    OPC_VLE8_V  = 0x7 | V_UNIT_STRIDE,
259    OPC_VLE16_V = 0x5007 | V_UNIT_STRIDE,
260    OPC_VLE32_V = 0x6007 | V_UNIT_STRIDE,
261    OPC_VLE64_V = 0x7007 | V_UNIT_STRIDE,
262    OPC_VSE8_V  = 0x27 | V_UNIT_STRIDE,
263    OPC_VSE16_V = 0x5027 | V_UNIT_STRIDE,
264    OPC_VSE32_V = 0x6027 | V_UNIT_STRIDE,
265    OPC_VSE64_V = 0x7027 | V_UNIT_STRIDE,
266
267    OPC_VL1RE64_V = 0x2007007 | V_UNIT_STRIDE_WHOLE_REG | V_NF(0),
268    OPC_VL2RE64_V = 0x2007007 | V_UNIT_STRIDE_WHOLE_REG | V_NF(1),
269    OPC_VL4RE64_V = 0x2007007 | V_UNIT_STRIDE_WHOLE_REG | V_NF(3),
270    OPC_VL8RE64_V = 0x2007007 | V_UNIT_STRIDE_WHOLE_REG | V_NF(7),
271
272    OPC_VS1R_V = 0x2000027 | V_UNIT_STRIDE_WHOLE_REG | V_NF(0),
273    OPC_VS2R_V = 0x2000027 | V_UNIT_STRIDE_WHOLE_REG | V_NF(1),
274    OPC_VS4R_V = 0x2000027 | V_UNIT_STRIDE_WHOLE_REG | V_NF(3),
275    OPC_VS8R_V = 0x2000027 | V_UNIT_STRIDE_WHOLE_REG | V_NF(7),
276
277    OPC_VMERGE_VIM = 0x5c000057 | V_OPIVI,
278    OPC_VMERGE_VVM = 0x5c000057 | V_OPIVV,
279
280    OPC_VADD_VV = 0x57 | V_OPIVV,
281    OPC_VADD_VI = 0x57 | V_OPIVI,
282    OPC_VSUB_VV = 0x8000057 | V_OPIVV,
283    OPC_VRSUB_VI = 0xc000057 | V_OPIVI,
284    OPC_VAND_VV = 0x24000057 | V_OPIVV,
285    OPC_VAND_VI = 0x24000057 | V_OPIVI,
286    OPC_VOR_VV = 0x28000057 | V_OPIVV,
287    OPC_VOR_VI = 0x28000057 | V_OPIVI,
288    OPC_VXOR_VV = 0x2c000057 | V_OPIVV,
289    OPC_VXOR_VI = 0x2c000057 | V_OPIVI,
290
291    OPC_VMUL_VV = 0x94000057 | V_OPMVV,
292    OPC_VSADD_VV = 0x84000057 | V_OPIVV,
293    OPC_VSADD_VI = 0x84000057 | V_OPIVI,
294    OPC_VSSUB_VV = 0x8c000057 | V_OPIVV,
295    OPC_VSSUB_VI = 0x8c000057 | V_OPIVI,
296    OPC_VSADDU_VV = 0x80000057 | V_OPIVV,
297    OPC_VSADDU_VI = 0x80000057 | V_OPIVI,
298    OPC_VSSUBU_VV = 0x88000057 | V_OPIVV,
299    OPC_VSSUBU_VI = 0x88000057 | V_OPIVI,
300
301    OPC_VMAX_VV = 0x1c000057 | V_OPIVV,
302    OPC_VMAX_VI = 0x1c000057 | V_OPIVI,
303    OPC_VMAXU_VV = 0x18000057 | V_OPIVV,
304    OPC_VMAXU_VI = 0x18000057 | V_OPIVI,
305    OPC_VMIN_VV = 0x14000057 | V_OPIVV,
306    OPC_VMIN_VI = 0x14000057 | V_OPIVI,
307    OPC_VMINU_VV = 0x10000057 | V_OPIVV,
308    OPC_VMINU_VI = 0x10000057 | V_OPIVI,
309
310    OPC_VMSEQ_VV = 0x60000057 | V_OPIVV,
311    OPC_VMSEQ_VI = 0x60000057 | V_OPIVI,
312    OPC_VMSEQ_VX = 0x60000057 | V_OPIVX,
313    OPC_VMSNE_VV = 0x64000057 | V_OPIVV,
314    OPC_VMSNE_VI = 0x64000057 | V_OPIVI,
315    OPC_VMSNE_VX = 0x64000057 | V_OPIVX,
316
317    OPC_VMSLTU_VV = 0x68000057 | V_OPIVV,
318    OPC_VMSLTU_VX = 0x68000057 | V_OPIVX,
319    OPC_VMSLT_VV = 0x6c000057 | V_OPIVV,
320    OPC_VMSLT_VX = 0x6c000057 | V_OPIVX,
321    OPC_VMSLEU_VV = 0x70000057 | V_OPIVV,
322    OPC_VMSLEU_VX = 0x70000057 | V_OPIVX,
323    OPC_VMSLE_VV = 0x74000057 | V_OPIVV,
324    OPC_VMSLE_VX = 0x74000057 | V_OPIVX,
325
326    OPC_VMSLEU_VI = 0x70000057 | V_OPIVI,
327    OPC_VMSLE_VI = 0x74000057 | V_OPIVI,
328    OPC_VMSGTU_VI = 0x78000057 | V_OPIVI,
329    OPC_VMSGTU_VX = 0x78000057 | V_OPIVX,
330    OPC_VMSGT_VI = 0x7c000057 | V_OPIVI,
331    OPC_VMSGT_VX = 0x7c000057 | V_OPIVX,
332
333    OPC_VSLL_VV = 0x94000057 | V_OPIVV,
334    OPC_VSLL_VI = 0x94000057 | V_OPIVI,
335    OPC_VSLL_VX = 0x94000057 | V_OPIVX,
336    OPC_VSRL_VV = 0xa0000057 | V_OPIVV,
337    OPC_VSRL_VI = 0xa0000057 | V_OPIVI,
338    OPC_VSRL_VX = 0xa0000057 | V_OPIVX,
339    OPC_VSRA_VV = 0xa4000057 | V_OPIVV,
340    OPC_VSRA_VI = 0xa4000057 | V_OPIVI,
341    OPC_VSRA_VX = 0xa4000057 | V_OPIVX,
342
343    OPC_VMV_V_V = 0x5e000057 | V_OPIVV,
344    OPC_VMV_V_I = 0x5e000057 | V_OPIVI,
345    OPC_VMV_V_X = 0x5e000057 | V_OPIVX,
346
347    OPC_VMVNR_V = 0x9e000057 | V_OPIVI,
348} RISCVInsn;
349
350static const struct {
351    RISCVInsn op;
352    bool swap;
353} tcg_cmpcond_to_rvv_vv[] = {
354    [TCG_COND_EQ] =  { OPC_VMSEQ_VV,  false },
355    [TCG_COND_NE] =  { OPC_VMSNE_VV,  false },
356    [TCG_COND_LT] =  { OPC_VMSLT_VV,  false },
357    [TCG_COND_GE] =  { OPC_VMSLE_VV,  true  },
358    [TCG_COND_GT] =  { OPC_VMSLT_VV,  true  },
359    [TCG_COND_LE] =  { OPC_VMSLE_VV,  false },
360    [TCG_COND_LTU] = { OPC_VMSLTU_VV, false },
361    [TCG_COND_GEU] = { OPC_VMSLEU_VV, true  },
362    [TCG_COND_GTU] = { OPC_VMSLTU_VV, true  },
363    [TCG_COND_LEU] = { OPC_VMSLEU_VV, false }
364};
365
366static const struct {
367    RISCVInsn op;
368    int min;
369    int max;
370    bool adjust;
371}  tcg_cmpcond_to_rvv_vi[] = {
372    [TCG_COND_EQ]  = { OPC_VMSEQ_VI,  -16, 15, false },
373    [TCG_COND_NE]  = { OPC_VMSNE_VI,  -16, 15, false },
374    [TCG_COND_GT]  = { OPC_VMSGT_VI,  -16, 15, false },
375    [TCG_COND_LE]  = { OPC_VMSLE_VI,  -16, 15, false },
376    [TCG_COND_LT]  = { OPC_VMSLE_VI,  -15, 16, true  },
377    [TCG_COND_GE]  = { OPC_VMSGT_VI,  -15, 16, true  },
378    [TCG_COND_LEU] = { OPC_VMSLEU_VI,   0, 15, false },
379    [TCG_COND_GTU] = { OPC_VMSGTU_VI,   0, 15, false },
380    [TCG_COND_LTU] = { OPC_VMSLEU_VI,   1, 16, true  },
381    [TCG_COND_GEU] = { OPC_VMSGTU_VI,   1, 16, true  },
382};
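
/*
 * Illustrative note: RVV has no vmslt.vi/vmsltu.vi, so a constant
 * signed "x < C" is emitted as vmsle.vi with C - 1 ("x <= C-1"); the
 * 'adjust' flag above tells tcg_out_cmpsel() to subtract 1 before
 * encoding, and min/max shift from [-16, 15] to [-15, 16] (or to
 * [1, 16] for the unsigned forms) accordingly.
 */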
383
384/* test if a constant matches the constraint */
385static bool tcg_target_const_match(int64_t val, int ct,
386                                   TCGType type, TCGCond cond, int vece)
387{
388    if (ct & TCG_CT_CONST) {
389        return 1;
390    }
391    if (type >= TCG_TYPE_V64) {
392        /* Val is replicated by VECE; extract the highest element. */
393        val >>= (-8 << vece) & 63;
394    }
395    /*
396     * Sign extended from 12 bits: [-0x800, 0x7ff].
397     * Used for most arithmetic, as this is the ISA immediate field.
398     */
399    if ((ct & TCG_CT_CONST_S12) && val >= -0x800 && val <= 0x7ff) {
400        return 1;
401    }
402    /*
403     * Sign extended from 12 bits, +/- matching: [-0x7ff, 0x7ff].
404     * Used by addsub2 and movcond, which may need the negative value,
405     * and requires the modified constant to be representable.
406     */
407    if ((ct & TCG_CT_CONST_M12) && val >= -0x7ff && val <= 0x7ff) {
408        return 1;
409    }
410    /*
411     * Sign extended from 5 bits: [-0x10, 0x0f].
412     * Used for vector-immediate.
413     */
414    if ((ct & TCG_CT_CONST_S5) && val >= -0x10 && val <= 0x0f) {
415        return 1;
416    }
417    /*
418     * Used for vector compare OPIVI instructions.
419     */
420    if ((ct & TCG_CT_CONST_CMP_VI) &&
421        val >= tcg_cmpcond_to_rvv_vi[cond].min &&
422        val <= tcg_cmpcond_to_rvv_vi[cond].max) {
423        return true;
424    }
425    return 0;
426}
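
/*
 * Illustrative examples of the checks above: 2047 satisfies
 * TCG_CT_CONST_S12 but 2048 does not; -2048 satisfies S12 yet fails
 * TCG_CT_CONST_M12 because its negation is not representable; only
 * -16..15 satisfy TCG_CT_CONST_S5 (e.g. for vadd.vi).  For a vector
 * constant with vece == MO_8, val arrives with the byte replicated
 * across 64 bits, and the shift above reduces it to a single element
 * before these range checks.
 */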
427
428/*
429 * RISC-V immediate and instruction encoders (excludes 16-bit RVC)
430 */
431
432/* Type-R */
433
434static int32_t encode_r(RISCVInsn opc, TCGReg rd, TCGReg rs1, TCGReg rs2)
435{
436    return opc | (rd & 0x1f) << 7 | (rs1 & 0x1f) << 15 | (rs2 & 0x1f) << 20;
437}
438
439/* Type-I */
440
441static int32_t encode_imm12(uint32_t imm)
442{
443    return (imm & 0xfff) << 20;
444}
445
446static int32_t encode_i(RISCVInsn opc, TCGReg rd, TCGReg rs1, uint32_t imm)
447{
448    return opc | (rd & 0x1f) << 7 | (rs1 & 0x1f) << 15 | encode_imm12(imm);
449}
450
451/* Type-S */
452
453static int32_t encode_simm12(uint32_t imm)
454{
455    int32_t ret = 0;
456
457    ret |= (imm & 0xFE0) << 20;
458    ret |= (imm & 0x1F) << 7;
459
460    return ret;
461}
462
463static int32_t encode_s(RISCVInsn opc, TCGReg rs1, TCGReg rs2, uint32_t imm)
464{
465    return opc | (rs1 & 0x1f) << 15 | (rs2 & 0x1f) << 20 | encode_simm12(imm);
466}
467
468/* Type-SB */
469
470static int32_t encode_sbimm12(uint32_t imm)
471{
472    int32_t ret = 0;
473
474    ret |= (imm & 0x1000) << 19;
475    ret |= (imm & 0x7e0) << 20;
476    ret |= (imm & 0x1e) << 7;
477    ret |= (imm & 0x800) >> 4;
478
479    return ret;
480}
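
#if 0
/*
 * Illustrative sanity check for the B-type immediate scatter above,
 * kept out of the build: a forward displacement of +8 has only
 * imm[3] set, which the B-type layout stores in instruction bit 10.
 */
static void example_check_encode_sbimm12(void)
{
    g_assert(encode_sbimm12(8) == 0x400);
}
#endif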
481
482static int32_t encode_sb(RISCVInsn opc, TCGReg rs1, TCGReg rs2, uint32_t imm)
483{
484    return opc | (rs1 & 0x1f) << 15 | (rs2 & 0x1f) << 20 | encode_sbimm12(imm);
485}
486
487/* Type-U */
488
489static int32_t encode_uimm20(uint32_t imm)
490{
491    return imm & 0xfffff000;
492}
493
494static int32_t encode_u(RISCVInsn opc, TCGReg rd, uint32_t imm)
495{
496    return opc | (rd & 0x1f) << 7 | encode_uimm20(imm);
497}
498
499/* Type-UJ */
500
501static int32_t encode_ujimm20(uint32_t imm)
502{
503    int32_t ret = 0;
504
505    ret |= (imm & 0x0007fe) << (21 - 1);
506    ret |= (imm & 0x000800) << (20 - 11);
507    ret |= (imm & 0x0ff000) << (12 - 12);
508    ret |= (imm & 0x100000) << (31 - 20);
509
510    return ret;
511}
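
/*
 * Illustrative example: a displacement of +8 has only imm[3] set,
 * which the J-type layout places in instruction bit 23, so
 * encode_ujimm20(8) == 0x00800000; a displacement of +0x1000 keeps
 * imm[19:12] in place, so encode_ujimm20(0x1000) == 0x00001000.
 */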
512
513static int32_t encode_uj(RISCVInsn opc, TCGReg rd, uint32_t imm)
514{
515    return opc | (rd & 0x1f) << 7 | encode_ujimm20(imm);
516}
517
518
519/* Type-OPIVI */
520
521static int32_t encode_vi(RISCVInsn opc, TCGReg rd, int32_t imm,
522                         TCGReg vs2, bool vm)
523{
524    return opc | (rd & 0x1f) << 7 | (imm & 0x1f) << 15 |
525           (vs2 & 0x1f) << 20 | (vm << 25);
526}
527
528/* Type-OPIVV/OPMVV/OPIVX/OPMVX, Vector load and store */
529
530static int32_t encode_v(RISCVInsn opc, TCGReg d, TCGReg s1,
531                        TCGReg s2, bool vm)
532{
533    return opc | (d & 0x1f) << 7 | (s1 & 0x1f) << 15 |
534           (s2 & 0x1f) << 20 | (vm << 25);
535}
536
537/* Vector vtype */
538
539static uint32_t encode_vtype(bool vta, bool vma,
540                            MemOp vsew, RISCVVlmul vlmul)
541{
542    return vma << 7 | vta << 6 | vsew << 3 | vlmul;
543}
544
545static int32_t encode_vset(RISCVInsn opc, TCGReg rd,
546                           TCGArg rs1, uint32_t vtype)
547{
548    return opc | (rd & 0x1f) << 7 | (rs1 & 0x1f) << 15 | (vtype & 0x7ff) << 20;
549}
550
551static int32_t encode_vseti(RISCVInsn opc, TCGReg rd,
552                            uint32_t uimm, uint32_t vtype)
553{
554    return opc | (rd & 0x1f) << 7 | (uimm & 0x1f) << 15 | (vtype & 0x3ff) << 20;
555}
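
#if 0
/*
 * Illustrative use of the vtype/vset encoders above, kept out of the
 * build: request four 32-bit elements with tail/mask agnostic policy,
 * i.e. "vsetivli zero, 4, e32, m1, ta, ma".  The vl of 4 is an
 * arbitrary example value.
 */
static void example_emit_vsetivli_e32m1(TCGContext *s)
{
    uint32_t vtype = encode_vtype(true, true, MO_32, VLMUL_M1);  /* 0xd0 */
    tcg_out32(s, encode_vseti(OPC_VSETIVLI, TCG_REG_ZERO, 4, vtype));
}
#endif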
556
557/*
558 * RISC-V instruction emitters
559 */
560
561static void tcg_out_opc_reg(TCGContext *s, RISCVInsn opc,
562                            TCGReg rd, TCGReg rs1, TCGReg rs2)
563{
564    tcg_out32(s, encode_r(opc, rd, rs1, rs2));
565}
566
567static void tcg_out_opc_imm(TCGContext *s, RISCVInsn opc,
568                            TCGReg rd, TCGReg rs1, TCGArg imm)
569{
570    tcg_out32(s, encode_i(opc, rd, rs1, imm));
571}
572
573static void tcg_out_opc_store(TCGContext *s, RISCVInsn opc,
574                              TCGReg rs1, TCGReg rs2, uint32_t imm)
575{
576    tcg_out32(s, encode_s(opc, rs1, rs2, imm));
577}
578
579static void tcg_out_opc_branch(TCGContext *s, RISCVInsn opc,
580                               TCGReg rs1, TCGReg rs2, uint32_t imm)
581{
582    tcg_out32(s, encode_sb(opc, rs1, rs2, imm));
583}
584
585static void tcg_out_opc_upper(TCGContext *s, RISCVInsn opc,
586                              TCGReg rd, uint32_t imm)
587{
588    tcg_out32(s, encode_u(opc, rd, imm));
589}
590
591static void tcg_out_opc_jump(TCGContext *s, RISCVInsn opc,
592                             TCGReg rd, uint32_t imm)
593{
594    tcg_out32(s, encode_uj(opc, rd, imm));
595}
596
597static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
598{
599    int i;
600    for (i = 0; i < count; ++i) {
601        p[i] = OPC_NOP;
602    }
603}
604
605/*
606 * Relocations
607 */
608
609static bool reloc_sbimm12(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
610{
611    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
612    intptr_t offset = (intptr_t)target - (intptr_t)src_rx;
613
614    tcg_debug_assert((offset & 1) == 0);
615    if (offset == sextreg(offset, 0, 12)) {
616        *src_rw |= encode_sbimm12(offset);
617        return true;
618    }
619
620    return false;
621}
622
623static bool reloc_jimm20(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
624{
625    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
626    intptr_t offset = (intptr_t)target - (intptr_t)src_rx;
627
628    tcg_debug_assert((offset & 1) == 0);
629    if (offset == sextreg(offset, 0, 20)) {
630        *src_rw |= encode_ujimm20(offset);
631        return true;
632    }
633
634    return false;
635}
636
637static bool reloc_call(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
638{
639    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
640    intptr_t offset = (intptr_t)target - (intptr_t)src_rx;
641    int32_t lo = sextreg(offset, 0, 12);
642    int32_t hi = offset - lo;
643
644    if (offset == hi + lo) {
645        src_rw[0] |= encode_uimm20(hi);
646        src_rw[1] |= encode_imm12(lo);
647        return true;
648    }
649
650    return false;
651}
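
/*
 * Illustrative split: for offset 0x12345678, lo = 0x678 and
 * hi = 0x12345000, so the auipc gets upper immediate 0x12345 and the
 * following addi gets 0x678.  When bit 11 of the offset is set, lo
 * goes negative and hi absorbs the carry, e.g. offset 0x1800 becomes
 * hi = 0x2000, lo = -0x800.
 */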
652
653static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
654                        intptr_t value, intptr_t addend)
655{
656    tcg_debug_assert(addend == 0);
657    switch (type) {
658    case R_RISCV_BRANCH:
659        return reloc_sbimm12(code_ptr, (tcg_insn_unit *)value);
660    case R_RISCV_JAL:
661        return reloc_jimm20(code_ptr, (tcg_insn_unit *)value);
662    case R_RISCV_CALL:
663        return reloc_call(code_ptr, (tcg_insn_unit *)value);
664    default:
665        g_assert_not_reached();
666    }
667}
668
669/*
670 * RISC-V vector instruction emitters
671 */
672
673/*
674 * Vector registers use the same lower 5 bits of the encoding as GPRs,
675 * and vm=0 (vm == false) means vector masking is ENABLED.
676 * With RVV 1.0, vs2 is the first operand, while rs1/imm is the
677 * second operand.
678 */
679static void tcg_out_opc_vv(TCGContext *s, RISCVInsn opc,
680                           TCGReg vd, TCGReg vs2, TCGReg vs1)
681{
682    tcg_out32(s, encode_v(opc, vd, vs1, vs2, true));
683}
684
685static void tcg_out_opc_vx(TCGContext *s, RISCVInsn opc,
686                           TCGReg vd, TCGReg vs2, TCGReg rs1)
687{
688    tcg_out32(s, encode_v(opc, vd, rs1, vs2, true));
689}
690
691static void tcg_out_opc_vi(TCGContext *s, RISCVInsn opc,
692                           TCGReg vd, TCGReg vs2, int32_t imm)
693{
694    tcg_out32(s, encode_vi(opc, vd, imm, vs2, true));
695}
696
697static void tcg_out_opc_vv_vi(TCGContext *s, RISCVInsn o_vv, RISCVInsn o_vi,
698                              TCGReg vd, TCGReg vs2, TCGArg vi1, int c_vi1)
699{
700    if (c_vi1) {
701        tcg_out_opc_vi(s, o_vi, vd, vs2, vi1);
702    } else {
703        tcg_out_opc_vv(s, o_vv, vd, vs2, vi1);
704    }
705}
706
707static void tcg_out_opc_vim_mask(TCGContext *s, RISCVInsn opc, TCGReg vd,
708                                 TCGReg vs2, int32_t imm)
709{
710    tcg_out32(s, encode_vi(opc, vd, imm, vs2, false));
711}
712
713static void tcg_out_opc_vvm_mask(TCGContext *s, RISCVInsn opc, TCGReg vd,
714                                 TCGReg vs2, TCGReg vs1)
715{
716    tcg_out32(s, encode_v(opc, vd, vs1, vs2, false));
717}
718
719typedef struct VsetCache {
720    uint32_t movi_insn;
721    uint32_t vset_insn;
722} VsetCache;
723
724static VsetCache riscv_vset_cache[3][4];
725
726static void set_vtype(TCGContext *s, TCGType type, MemOp vsew)
727{
728    const VsetCache *p = &riscv_vset_cache[type - TCG_TYPE_V64][vsew];
729
730    s->riscv_cur_type = type;
731    s->riscv_cur_vsew = vsew;
732
733    if (p->movi_insn) {
734        tcg_out32(s, p->movi_insn);
735    }
736    tcg_out32(s, p->vset_insn);
737}
738
739static MemOp set_vtype_len(TCGContext *s, TCGType type)
740{
741    if (type != s->riscv_cur_type) {
742        set_vtype(s, type, MO_64);
743    }
744    return s->riscv_cur_vsew;
745}
746
747static void set_vtype_len_sew(TCGContext *s, TCGType type, MemOp vsew)
748{
749    if (type != s->riscv_cur_type || vsew != s->riscv_cur_vsew) {
750        set_vtype(s, type, vsew);
751    }
752}
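
/*
 * Illustrative note on the cache above: each entry holds up to two
 * pre-encoded instructions for one (type, SEW) pair -- e.g. a single
 * vsetivli when the desired vl fits the 5-bit immediate, or a li of
 * vl into a scratch register (movi_insn) followed by a vsetvli
 * (vset_insn) otherwise.  The entries are computed outside this
 * excerpt, so set_vtype() only has to replay them.
 */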
753
754/*
755 * TCG intrinsics
756 */
757
758static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
759{
760    if (ret == arg) {
761        return true;
762    }
763    switch (type) {
764    case TCG_TYPE_I32:
765    case TCG_TYPE_I64:
766        tcg_out_opc_imm(s, OPC_ADDI, ret, arg, 0);
767        break;
768    case TCG_TYPE_V64:
769    case TCG_TYPE_V128:
770    case TCG_TYPE_V256:
771        {
772            int lmul = type - riscv_lg2_vlenb;
773            int nf = 1 << MAX(lmul, 0);
774            tcg_out_opc_vi(s, OPC_VMVNR_V, ret, arg, nf - 1);
775        }
776        break;
777    default:
778        g_assert_not_reached();
779    }
780    return true;
781}
782
783static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd,
784                         tcg_target_long val)
785{
786    tcg_target_long lo, hi, tmp;
787    int shift, ret;
788
789    if (type == TCG_TYPE_I32) {
790        val = (int32_t)val;
791    }
792
793    lo = sextreg(val, 0, 12);
794    if (val == lo) {
795        tcg_out_opc_imm(s, OPC_ADDI, rd, TCG_REG_ZERO, lo);
796        return;
797    }
798
799    hi = val - lo;
800    if (val == (int32_t)val) {
801        tcg_out_opc_upper(s, OPC_LUI, rd, hi);
802        if (lo != 0) {
803            tcg_out_opc_imm(s, OPC_ADDIW, rd, rd, lo);
804        }
805        return;
806    }
807
808    tmp = tcg_pcrel_diff(s, (void *)val);
809    if (tmp == (int32_t)tmp) {
810        tcg_out_opc_upper(s, OPC_AUIPC, rd, 0);
811        tcg_out_opc_imm(s, OPC_ADDI, rd, rd, 0);
812        ret = reloc_call(s->code_ptr - 2, (const tcg_insn_unit *)val);
813        tcg_debug_assert(ret == true);
814        return;
815    }
816
817    /* Look for a single 20-bit section.  */
818    shift = ctz64(val);
819    tmp = val >> shift;
820    if (tmp == sextreg(tmp, 0, 20)) {
821        tcg_out_opc_upper(s, OPC_LUI, rd, tmp << 12);
822        if (shift > 12) {
823            tcg_out_opc_imm(s, OPC_SLLI, rd, rd, shift - 12);
824        } else {
825            tcg_out_opc_imm(s, OPC_SRAI, rd, rd, 12 - shift);
826        }
827        return;
828    }
829
830    /* Look for a few high zero bits, with lots of bits set in the middle.  */
831    shift = clz64(val);
832    tmp = val << shift;
833    if (tmp == sextreg(tmp, 12, 20) << 12) {
834        tcg_out_opc_upper(s, OPC_LUI, rd, tmp);
835        tcg_out_opc_imm(s, OPC_SRLI, rd, rd, shift);
836        return;
837    } else if (tmp == sextreg(tmp, 0, 12)) {
838        tcg_out_opc_imm(s, OPC_ADDI, rd, TCG_REG_ZERO, tmp);
839        tcg_out_opc_imm(s, OPC_SRLI, rd, rd, shift);
840        return;
841    }
842
843    /* Drop into the constant pool.  */
844    new_pool_label(s, val, R_RISCV_CALL, s->code_ptr, 0);
845    tcg_out_opc_upper(s, OPC_AUIPC, rd, 0);
846    tcg_out_opc_imm(s, OPC_LD, rd, rd, 0);
847}
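
/*
 * Illustrative decompositions: 0x12345678 is built as LUI of
 * 0x12345000 followed by ADDIW of 0x678; assuming the pc-relative
 * case does not apply, a value such as 0x7fff00000000 hits the
 * "single 20-bit section" path and is built as LUI of 0x07fff000
 * followed by SLLI by 20.  Anything matching none of the patterns
 * falls through to the constant pool load at the end.
 */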
848
849static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
850{
851    return false;
852}
853
854static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
855                             tcg_target_long imm)
856{
857    /* This function is only used for passing structs by reference. */
858    g_assert_not_reached();
859}
860
861static void tcg_out_ext8u(TCGContext *s, TCGReg ret, TCGReg arg)
862{
863    tcg_out_opc_imm(s, OPC_ANDI, ret, arg, 0xff);
864}
865
866static void tcg_out_ext16u(TCGContext *s, TCGReg ret, TCGReg arg)
867{
868    if (cpuinfo & CPUINFO_ZBB) {
869        tcg_out_opc_reg(s, OPC_ZEXT_H, ret, arg, TCG_REG_ZERO);
870    } else {
871        tcg_out_opc_imm(s, OPC_SLLIW, ret, arg, 16);
872        tcg_out_opc_imm(s, OPC_SRLIW, ret, ret, 16);
873    }
874}
875
876static void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg)
877{
878    if (cpuinfo & CPUINFO_ZBA) {
879        tcg_out_opc_reg(s, OPC_ADD_UW, ret, arg, TCG_REG_ZERO);
880    } else {
881        tcg_out_opc_imm(s, OPC_SLLI, ret, arg, 32);
882        tcg_out_opc_imm(s, OPC_SRLI, ret, ret, 32);
883    }
884}
885
886static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
887{
888    if (cpuinfo & CPUINFO_ZBB) {
889        tcg_out_opc_imm(s, OPC_SEXT_B, ret, arg, 0);
890    } else {
891        tcg_out_opc_imm(s, OPC_SLLIW, ret, arg, 24);
892        tcg_out_opc_imm(s, OPC_SRAIW, ret, ret, 24);
893    }
894}
895
896static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
897{
898    if (cpuinfo & CPUINFO_ZBB) {
899        tcg_out_opc_imm(s, OPC_SEXT_H, ret, arg, 0);
900    } else {
901        tcg_out_opc_imm(s, OPC_SLLIW, ret, arg, 16);
902        tcg_out_opc_imm(s, OPC_SRAIW, ret, ret, 16);
903    }
904}
905
906static void tcg_out_ext32s(TCGContext *s, TCGReg ret, TCGReg arg)
907{
908    tcg_out_opc_imm(s, OPC_ADDIW, ret, arg, 0);
909}
910
911static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg)
912{
913    if (ret != arg) {
914        tcg_out_ext32s(s, ret, arg);
915    }
916}
917
918static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg)
919{
920    tcg_out_ext32u(s, ret, arg);
921}
922
923static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg ret, TCGReg arg)
924{
925    tcg_out_ext32s(s, ret, arg);
926}
927
928static void tcg_out_ldst(TCGContext *s, RISCVInsn opc, TCGReg data,
929                         TCGReg addr, intptr_t offset)
930{
931    intptr_t imm12 = sextreg(offset, 0, 12);
932
933    if (offset != imm12) {
934        intptr_t diff = tcg_pcrel_diff(s, (void *)offset);
935
936        if (addr == TCG_REG_ZERO && diff == (int32_t)diff) {
937            imm12 = sextreg(diff, 0, 12);
938            tcg_out_opc_upper(s, OPC_AUIPC, TCG_REG_TMP2, diff - imm12);
939        } else {
940            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP2, offset - imm12);
941            if (addr != TCG_REG_ZERO) {
942                tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP2, TCG_REG_TMP2, addr);
943            }
944        }
945        addr = TCG_REG_TMP2;
946    }
947
948    switch (opc) {
949    case OPC_SB:
950    case OPC_SH:
951    case OPC_SW:
952    case OPC_SD:
953        tcg_out_opc_store(s, opc, addr, data, imm12);
954        break;
955    case OPC_LB:
956    case OPC_LBU:
957    case OPC_LH:
958    case OPC_LHU:
959    case OPC_LW:
960    case OPC_LWU:
961    case OPC_LD:
962        tcg_out_opc_imm(s, opc, data, addr, imm12);
963        break;
964    default:
965        g_assert_not_reached();
966    }
967}
968
969static void tcg_out_vec_ldst(TCGContext *s, RISCVInsn opc, TCGReg data,
970                             TCGReg addr, intptr_t offset)
971{
972    tcg_debug_assert(data >= TCG_REG_V0);
973    tcg_debug_assert(addr < TCG_REG_V0);
974
975    if (offset) {
976        tcg_debug_assert(addr != TCG_REG_ZERO);
977        if (offset == sextreg(offset, 0, 12)) {
978            tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_TMP0, addr, offset);
979        } else {
980            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, offset);
981            tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_REG_TMP0, addr);
982        }
983        addr = TCG_REG_TMP0;
984    }
985    tcg_out32(s, encode_v(opc, data, addr, 0, true));
986}
987
988static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
989                       TCGReg arg1, intptr_t arg2)
990{
991    RISCVInsn insn;
992
993    switch (type) {
994    case TCG_TYPE_I32:
995        tcg_out_ldst(s, OPC_LW, arg, arg1, arg2);
996        break;
997    case TCG_TYPE_I64:
998        tcg_out_ldst(s, OPC_LD, arg, arg1, arg2);
999        break;
1000    case TCG_TYPE_V64:
1001    case TCG_TYPE_V128:
1002    case TCG_TYPE_V256:
1003        if (type >= riscv_lg2_vlenb) {
1004            static const RISCVInsn whole_reg_ld[] = {
1005                OPC_VL1RE64_V, OPC_VL2RE64_V, OPC_VL4RE64_V, OPC_VL8RE64_V
1006            };
1007            unsigned idx = type - riscv_lg2_vlenb;
1008
1009            tcg_debug_assert(idx < ARRAY_SIZE(whole_reg_ld));
1010            insn = whole_reg_ld[idx];
1011        } else {
1012            static const RISCVInsn unit_stride_ld[] = {
1013                OPC_VLE8_V, OPC_VLE16_V, OPC_VLE32_V, OPC_VLE64_V
1014            };
1015            MemOp prev_vsew = set_vtype_len(s, type);
1016
1017            tcg_debug_assert(prev_vsew < ARRAY_SIZE(unit_stride_ld));
1018            insn = unit_stride_ld[prev_vsew];
1019        }
1020        tcg_out_vec_ldst(s, insn, arg, arg1, arg2);
1021        break;
1022    default:
1023        g_assert_not_reached();
1024    }
1025}
1026
1027static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
1028                       TCGReg arg1, intptr_t arg2)
1029{
1030    RISCVInsn insn;
1031
1032    switch (type) {
1033    case TCG_TYPE_I32:
1034        tcg_out_ldst(s, OPC_SW, arg, arg1, arg2);
1035        break;
1036    case TCG_TYPE_I64:
1037        tcg_out_ldst(s, OPC_SD, arg, arg1, arg2);
1038        break;
1039    case TCG_TYPE_V64:
1040    case TCG_TYPE_V128:
1041    case TCG_TYPE_V256:
1042        if (type >= riscv_lg2_vlenb) {
1043            static const RISCVInsn whole_reg_st[] = {
1044                OPC_VS1R_V, OPC_VS2R_V, OPC_VS4R_V, OPC_VS8R_V
1045            };
1046            unsigned idx = type - riscv_lg2_vlenb;
1047
1048            tcg_debug_assert(idx < ARRAY_SIZE(whole_reg_st));
1049            insn = whole_reg_st[idx];
1050        } else {
1051            static const RISCVInsn unit_stride_st[] = {
1052                OPC_VSE8_V, OPC_VSE16_V, OPC_VSE32_V, OPC_VSE64_V
1053            };
1054            MemOp prev_vsew = set_vtype_len(s, type);
1055
1056            tcg_debug_assert(prev_vsew < ARRAY_SIZE(unit_stride_st));
1057            insn = unit_stride_st[prev_vsew];
1058        }
1059        tcg_out_vec_ldst(s, insn, arg, arg1, arg2);
1060        break;
1061    default:
1062        g_assert_not_reached();
1063    }
1064}
1065
1066static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
1067                        TCGReg base, intptr_t ofs)
1068{
1069    if (val == 0) {
1070        tcg_out_st(s, type, TCG_REG_ZERO, base, ofs);
1071        return true;
1072    }
1073    return false;
1074}
1075
1076static void tcg_out_addsub2(TCGContext *s,
1077                            TCGReg rl, TCGReg rh,
1078                            TCGReg al, TCGReg ah,
1079                            TCGArg bl, TCGArg bh,
1080                            bool cbl, bool cbh, bool is_sub, bool is32bit)
1081{
1082    const RISCVInsn opc_add = is32bit ? OPC_ADDW : OPC_ADD;
1083    const RISCVInsn opc_addi = is32bit ? OPC_ADDIW : OPC_ADDI;
1084    const RISCVInsn opc_sub = is32bit ? OPC_SUBW : OPC_SUB;
1085    TCGReg th = TCG_REG_TMP1;
1086
1087    /* If we have a negative constant such that negating it would
1088       make the high part zero, we can (usually) eliminate one insn.  */
1089    if (cbl && cbh && bh == -1 && bl != 0) {
1090        bl = -bl;
1091        bh = 0;
1092        is_sub = !is_sub;
1093    }
1094
1095    /* By operating on the high part first, we get to use the final
1096       carry operation to move back from the temporary.  */
1097    if (!cbh) {
1098        tcg_out_opc_reg(s, (is_sub ? opc_sub : opc_add), th, ah, bh);
1099    } else if (bh != 0 || ah == rl) {
1100        tcg_out_opc_imm(s, opc_addi, th, ah, (is_sub ? -bh : bh));
1101    } else {
1102        th = ah;
1103    }
1104
1105    /* Note that tcg optimization should eliminate the bl == 0 case.  */
1106    if (is_sub) {
1107        if (cbl) {
1108            tcg_out_opc_imm(s, OPC_SLTIU, TCG_REG_TMP0, al, bl);
1109            tcg_out_opc_imm(s, opc_addi, rl, al, -bl);
1110        } else {
1111            tcg_out_opc_reg(s, OPC_SLTU, TCG_REG_TMP0, al, bl);
1112            tcg_out_opc_reg(s, opc_sub, rl, al, bl);
1113        }
1114        tcg_out_opc_reg(s, opc_sub, rh, th, TCG_REG_TMP0);
1115    } else {
1116        if (cbl) {
1117            tcg_out_opc_imm(s, opc_addi, rl, al, bl);
1118            tcg_out_opc_imm(s, OPC_SLTIU, TCG_REG_TMP0, rl, bl);
1119        } else if (al == bl) {
1120            /*
1121             * If the input regs overlap, this is a simple doubling
1122             * and carry-out is the input msb.  This special case is
1123             * required when the output reg overlaps the input,
1124             * but we might as well use it always.
1125             */
1126            tcg_out_opc_imm(s, OPC_SLTI, TCG_REG_TMP0, al, 0);
1127            tcg_out_opc_reg(s, opc_add, rl, al, al);
1128        } else {
1129            tcg_out_opc_reg(s, opc_add, rl, al, bl);
1130            tcg_out_opc_reg(s, OPC_SLTU, TCG_REG_TMP0,
1131                            rl, (rl == bl ? al : bl));
1132        }
1133        tcg_out_opc_reg(s, opc_add, rh, th, TCG_REG_TMP0);
1134    }
1135}
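
/*
 * Illustrative expansion for the register/register add case (rl not
 * aliasing bl):
 *     add  tmp1, ah, bh        high halves first, into the temp
 *     add  rl,   al, bl
 *     sltu tmp0, rl, bl        carry out of the low half
 *     add  rh,   tmp1, tmp0
 * Working on the high part first lets the final carry add move the
 * result out of the temporary even when rh overlaps an input.
 */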
1136
1137static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
1138                                   TCGReg dst, TCGReg src)
1139{
1140    set_vtype_len_sew(s, type, vece);
1141    tcg_out_opc_vx(s, OPC_VMV_V_X, dst, 0, src);
1142    return true;
1143}
1144
1145static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
1146                                    TCGReg dst, TCGReg base, intptr_t offset)
1147{
1148    tcg_out_ld(s, TCG_TYPE_REG, TCG_REG_TMP0, base, offset);
1149    return tcg_out_dup_vec(s, type, vece, dst, TCG_REG_TMP0);
1150}
1151
1152static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
1153                                    TCGReg dst, int64_t arg)
1154{
1155    /* Arg is replicated by VECE; extract the highest element. */
1156    arg >>= (-8 << vece) & 63;
1157
1158    if (arg >= -16 && arg < 16) {
1159        if (arg == 0 || arg == -1) {
1160            set_vtype_len(s, type);
1161        } else {
1162            set_vtype_len_sew(s, type, vece);
1163        }
1164        tcg_out_opc_vi(s, OPC_VMV_V_I, dst, 0, arg);
1165        return;
1166    }
1167    tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP0, arg);
1168    tcg_out_dup_vec(s, type, vece, dst, TCG_REG_TMP0);
1169}
1170
1171static const struct {
1172    RISCVInsn op;
1173    bool swap;
1174} tcg_brcond_to_riscv[] = {
1175    [TCG_COND_EQ] =  { OPC_BEQ,  false },
1176    [TCG_COND_NE] =  { OPC_BNE,  false },
1177    [TCG_COND_LT] =  { OPC_BLT,  false },
1178    [TCG_COND_GE] =  { OPC_BGE,  false },
1179    [TCG_COND_LE] =  { OPC_BGE,  true  },
1180    [TCG_COND_GT] =  { OPC_BLT,  true  },
1181    [TCG_COND_LTU] = { OPC_BLTU, false },
1182    [TCG_COND_GEU] = { OPC_BGEU, false },
1183    [TCG_COND_LEU] = { OPC_BGEU, true  },
1184    [TCG_COND_GTU] = { OPC_BLTU, true  }
1185};
1186
1187static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1,
1188                           TCGReg arg2, TCGLabel *l)
1189{
1190    RISCVInsn op = tcg_brcond_to_riscv[cond].op;
1191
1192    tcg_debug_assert(op != 0);
1193
1194    if (tcg_brcond_to_riscv[cond].swap) {
1195        TCGReg t = arg1;
1196        arg1 = arg2;
1197        arg2 = t;
1198    }
1199
1200    tcg_out_reloc(s, s->code_ptr, R_RISCV_BRANCH, l, 0);
1201    tcg_out_opc_branch(s, op, arg1, arg2, 0);
1202}
1203
1204#define SETCOND_INV    TCG_TARGET_NB_REGS
1205#define SETCOND_NEZ    (SETCOND_INV << 1)
1206#define SETCOND_FLAGS  (SETCOND_INV | SETCOND_NEZ)
1207
1208static int tcg_out_setcond_int(TCGContext *s, TCGCond cond, TCGReg ret,
1209                               TCGReg arg1, tcg_target_long arg2, bool c2)
1210{
1211    int flags = 0;
1212
1213    switch (cond) {
1214    case TCG_COND_EQ:    /* -> NE  */
1215    case TCG_COND_GE:    /* -> LT  */
1216    case TCG_COND_GEU:   /* -> LTU */
1217    case TCG_COND_GT:    /* -> LE  */
1218    case TCG_COND_GTU:   /* -> LEU */
1219        cond = tcg_invert_cond(cond);
1220        flags ^= SETCOND_INV;
1221        break;
1222    default:
1223        break;
1224    }
1225
1226    switch (cond) {
1227    case TCG_COND_LE:
1228    case TCG_COND_LEU:
1229        /*
1230         * If we have a constant input, the most efficient way to implement
1231         * LE is by adding 1 and using LT.  Watch out for wrap around for LEU.
1232         * We don't need to care for this for LE because the constant input
1233         * is constrained to signed 12-bit, and 0x800 is representable in the
1234         * temporary register.
1235         */
1236        if (c2) {
1237            if (cond == TCG_COND_LEU) {
1238                /* unsigned <= -1 is true */
1239                if (arg2 == -1) {
1240                    tcg_out_movi(s, TCG_TYPE_REG, ret, !(flags & SETCOND_INV));
1241                    return ret;
1242                }
1243                cond = TCG_COND_LTU;
1244            } else {
1245                cond = TCG_COND_LT;
1246            }
1247            tcg_debug_assert(arg2 <= 0x7ff);
1248            if (++arg2 == 0x800) {
1249                tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_TMP0, arg2);
1250                arg2 = TCG_REG_TMP0;
1251                c2 = false;
1252            }
1253        } else {
1254            TCGReg tmp = arg2;
1255            arg2 = arg1;
1256            arg1 = tmp;
1257            cond = tcg_swap_cond(cond);    /* LE -> GE */
1258            cond = tcg_invert_cond(cond);  /* GE -> LT */
1259            flags ^= SETCOND_INV;
1260        }
1261        break;
1262    default:
1263        break;
1264    }
1265
1266    switch (cond) {
1267    case TCG_COND_NE:
1268        flags |= SETCOND_NEZ;
1269        if (!c2) {
1270            tcg_out_opc_reg(s, OPC_XOR, ret, arg1, arg2);
1271        } else if (arg2 == 0) {
1272            ret = arg1;
1273        } else {
1274            tcg_out_opc_imm(s, OPC_XORI, ret, arg1, arg2);
1275        }
1276        break;
1277
1278    case TCG_COND_LT:
1279        if (c2) {
1280            tcg_out_opc_imm(s, OPC_SLTI, ret, arg1, arg2);
1281        } else {
1282            tcg_out_opc_reg(s, OPC_SLT, ret, arg1, arg2);
1283        }
1284        break;
1285
1286    case TCG_COND_LTU:
1287        if (c2) {
1288            tcg_out_opc_imm(s, OPC_SLTIU, ret, arg1, arg2);
1289        } else {
1290            tcg_out_opc_reg(s, OPC_SLTU, ret, arg1, arg2);
1291        }
1292        break;
1293
1294    default:
1295        g_assert_not_reached();
1296    }
1297
1298    return ret | flags;
1299}
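
/*
 * Illustrative lowerings for register operands: setcond GTU d,a,b
 * reduces to a single "sltu d, b, a" (GTU is inverted to LEU above,
 * then the LEU case swaps the operands and inverts again, cancelling
 * SETCOND_INV); setcond EQ d,a,b returns NEZ|INV over "xor d, a, b",
 * which tcg_out_setcond() below finishes with "sltiu d, d, 1".
 */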
1300
1301static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret,
1302                            TCGReg arg1, tcg_target_long arg2, bool c2)
1303{
1304    int tmpflags = tcg_out_setcond_int(s, cond, ret, arg1, arg2, c2);
1305
1306    if (tmpflags != ret) {
1307        TCGReg tmp = tmpflags & ~SETCOND_FLAGS;
1308
1309        switch (tmpflags & SETCOND_FLAGS) {
1310        case SETCOND_INV:
1311            /* Intermediate result is boolean: simply invert. */
1312            tcg_out_opc_imm(s, OPC_XORI, ret, tmp, 1);
1313            break;
1314        case SETCOND_NEZ:
1315            /* Intermediate result is zero/non-zero: test != 0. */
1316            tcg_out_opc_reg(s, OPC_SLTU, ret, TCG_REG_ZERO, tmp);
1317            break;
1318        case SETCOND_NEZ | SETCOND_INV:
1319            /* Intermediate result is zero/non-zero: test == 0. */
1320            tcg_out_opc_imm(s, OPC_SLTIU, ret, tmp, 1);
1321            break;
1322        default:
1323            g_assert_not_reached();
1324        }
1325    }
1326}
1327
1328static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond,
1329                         TCGReg dest, TCGReg arg1, TCGReg arg2)
1330{
1331    tcg_out_setcond(s, cond, dest, arg1, arg2, false);
1332}
1333
1334static void tgen_setcondi(TCGContext *s, TCGType type, TCGCond cond,
1335                          TCGReg dest, TCGReg arg1, tcg_target_long arg2)
1336{
1337    tcg_out_setcond(s, cond, dest, arg1, arg2, true);
1338}
1339
1340static const TCGOutOpSetcond outop_setcond = {
1341    .base.static_constraint = C_O1_I2(r, r, rI),
1342    .out_rrr = tgen_setcond,
1343    .out_rri = tgen_setcondi,
1344};
1345
1346static void tcg_out_negsetcond(TCGContext *s, TCGCond cond, TCGReg ret,
1347                               TCGReg arg1, tcg_target_long arg2, bool c2)
1348{
1349    int tmpflags;
1350    TCGReg tmp;
1351
1352    /* For LT/GE comparison against 0, replicate the sign bit. */
1353    if (c2 && arg2 == 0) {
1354        switch (cond) {
1355        case TCG_COND_GE:
1356            tcg_out_opc_imm(s, OPC_XORI, ret, arg1, -1);
1357            arg1 = ret;
1358            /* fall through */
1359        case TCG_COND_LT:
1360            tcg_out_opc_imm(s, OPC_SRAI, ret, arg1, TCG_TARGET_REG_BITS - 1);
1361            return;
1362        default:
1363            break;
1364        }
1365    }
1366
1367    tmpflags = tcg_out_setcond_int(s, cond, ret, arg1, arg2, c2);
1368    tmp = tmpflags & ~SETCOND_FLAGS;
1369
1370    /* If intermediate result is zero/non-zero: test != 0. */
1371    if (tmpflags & SETCOND_NEZ) {
1372        tcg_out_opc_reg(s, OPC_SLTU, ret, TCG_REG_ZERO, tmp);
1373        tmp = ret;
1374    }
1375
1376    /* Produce the 0/-1 result. */
1377    if (tmpflags & SETCOND_INV) {
1378        tcg_out_opc_imm(s, OPC_ADDI, ret, tmp, -1);
1379    } else {
1380        tcg_out_opc_reg(s, OPC_SUB, ret, TCG_REG_ZERO, tmp);
1381    }
1382}
1383
1384static void tgen_negsetcond(TCGContext *s, TCGType type, TCGCond cond,
1385                            TCGReg dest, TCGReg arg1, TCGReg arg2)
1386{
1387    tcg_out_negsetcond(s, cond, dest, arg1, arg2, false);
1388}
1389
1390static void tgen_negsetcondi(TCGContext *s, TCGType type, TCGCond cond,
1391                             TCGReg dest, TCGReg arg1, tcg_target_long arg2)
1392{
1393    tcg_out_negsetcond(s, cond, dest, arg1, arg2, true);
1394}
1395
1396static const TCGOutOpSetcond outop_negsetcond = {
1397    .base.static_constraint = C_O1_I2(r, r, rI),
1398    .out_rrr = tgen_negsetcond,
1399    .out_rri = tgen_negsetcondi,
1400};
1401
1402static void tcg_out_movcond_zicond(TCGContext *s, TCGReg ret, TCGReg test_ne,
1403                                   int val1, bool c_val1,
1404                                   int val2, bool c_val2)
1405{
1406    if (val1 == 0) {
1407        if (c_val2) {
1408            tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_TMP1, val2);
1409            val2 = TCG_REG_TMP1;
1410        }
1411        tcg_out_opc_reg(s, OPC_CZERO_NEZ, ret, val2, test_ne);
1412        return;
1413    }
1414
1415    if (val2 == 0) {
1416        if (c_val1) {
1417            tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_TMP1, val1);
1418            val1 = TCG_REG_TMP1;
1419        }
1420        tcg_out_opc_reg(s, OPC_CZERO_EQZ, ret, val1, test_ne);
1421        return;
1422    }
1423
1424    if (c_val2) {
1425        if (c_val1) {
1426            tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_TMP1, val1 - val2);
1427        } else {
1428            tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_TMP1, val1, -val2);
1429        }
1430        tcg_out_opc_reg(s, OPC_CZERO_EQZ, ret, TCG_REG_TMP1, test_ne);
1431        tcg_out_opc_imm(s, OPC_ADDI, ret, ret, val2);
1432        return;
1433    }
1434
1435    if (c_val1) {
1436        tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_TMP1, val2, -val1);
1437        tcg_out_opc_reg(s, OPC_CZERO_NEZ, ret, TCG_REG_TMP1, test_ne);
1438        tcg_out_opc_imm(s, OPC_ADDI, ret, ret, val1);
1439        return;
1440    }
1441
1442    tcg_out_opc_reg(s, OPC_CZERO_NEZ, TCG_REG_TMP1, val2, test_ne);
1443    tcg_out_opc_reg(s, OPC_CZERO_EQZ, TCG_REG_TMP0, val1, test_ne);
1444    tcg_out_opc_reg(s, OPC_OR, ret, TCG_REG_TMP0, TCG_REG_TMP1);
1445}
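
/*
 * Illustrative Zicond expansion for the two-constant case above
 * (result is val1 when 'test_ne' is non-zero, else val2):
 *     li         tmp1, val1 - val2
 *     czero.eqz  ret,  tmp1, test_ne
 *     addi       ret,  ret,  val2
 * The register/register fallthrough instead ORs together a czero.nez
 * of val2 and a czero.eqz of val1.
 */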
1446
1447static void tcg_out_movcond_br1(TCGContext *s, TCGCond cond, TCGReg ret,
1448                                TCGReg cmp1, TCGReg cmp2,
1449                                int val, bool c_val)
1450{
1451    RISCVInsn op;
1452    int disp = 8;
1453
1454    tcg_debug_assert((unsigned)cond < ARRAY_SIZE(tcg_brcond_to_riscv));
1455    op = tcg_brcond_to_riscv[cond].op;
1456    tcg_debug_assert(op != 0);
1457
1458    if (tcg_brcond_to_riscv[cond].swap) {
1459        tcg_out_opc_branch(s, op, cmp2, cmp1, disp);
1460    } else {
1461        tcg_out_opc_branch(s, op, cmp1, cmp2, disp);
1462    }
1463    if (c_val) {
1464        tcg_out_opc_imm(s, OPC_ADDI, ret, TCG_REG_ZERO, val);
1465    } else {
1466        tcg_out_opc_imm(s, OPC_ADDI, ret, val, 0);
1467    }
1468}
1469
1470static void tcg_out_movcond_br2(TCGContext *s, TCGCond cond, TCGReg ret,
1471                                TCGReg cmp1, TCGReg cmp2,
1472                                int val1, bool c_val1,
1473                                int val2, bool c_val2)
1474{
1475    TCGReg tmp;
1476
1477    /* TCG optimizer reorders to prefer ret matching val2. */
1478    if (!c_val2 && ret == val2) {
1479        cond = tcg_invert_cond(cond);
1480        tcg_out_movcond_br1(s, cond, ret, cmp1, cmp2, val1, c_val1);
1481        return;
1482    }
1483
1484    if (!c_val1 && ret == val1) {
1485        tcg_out_movcond_br1(s, cond, ret, cmp1, cmp2, val2, c_val2);
1486        return;
1487    }
1488
1489    tmp = (ret == cmp1 || ret == cmp2 ? TCG_REG_TMP1 : ret);
1490    if (c_val1) {
1491        tcg_out_movi(s, TCG_TYPE_REG, tmp, val1);
1492    } else {
1493        tcg_out_mov(s, TCG_TYPE_REG, tmp, val1);
1494    }
1495    tcg_out_movcond_br1(s, cond, tmp, cmp1, cmp2, val2, c_val2);
1496    tcg_out_mov(s, TCG_TYPE_REG, ret, tmp);
1497}
1498
1499static void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGReg ret,
1500                            TCGReg cmp1, int cmp2, bool c_cmp2,
1501                            TCGReg val1, bool c_val1,
1502                            TCGReg val2, bool c_val2)
1503{
1504    int tmpflags;
1505    TCGReg t;
1506
1507    if (!(cpuinfo & CPUINFO_ZICOND) && (!c_cmp2 || cmp2 == 0)) {
1508        tcg_out_movcond_br2(s, cond, ret, cmp1, cmp2,
1509                            val1, c_val1, val2, c_val2);
1510        return;
1511    }
1512
1513    tmpflags = tcg_out_setcond_int(s, cond, TCG_REG_TMP0, cmp1, cmp2, c_cmp2);
1514    t = tmpflags & ~SETCOND_FLAGS;
1515
1516    if (cpuinfo & CPUINFO_ZICOND) {
1517        if (tmpflags & SETCOND_INV) {
1518            tcg_out_movcond_zicond(s, ret, t, val2, c_val2, val1, c_val1);
1519        } else {
1520            tcg_out_movcond_zicond(s, ret, t, val1, c_val1, val2, c_val2);
1521        }
1522    } else {
1523        cond = tmpflags & SETCOND_INV ? TCG_COND_EQ : TCG_COND_NE;
1524        tcg_out_movcond_br2(s, cond, ret, t, TCG_REG_ZERO,
1525                            val1, c_val1, val2, c_val2);
1526    }
1527}
1528
1529static void tcg_out_cltz(TCGContext *s, TCGType type, RISCVInsn insn,
1530                         TCGReg ret, TCGReg src1, int src2, bool c_src2)
1531{
1532    tcg_out_opc_imm(s, insn, ret, src1, 0);
1533
1534    if (!c_src2 || src2 != (type == TCG_TYPE_I32 ? 32 : 64)) {
1535        /*
1536         * The requested zero result does not match the insn, so adjust.
1537         * Note that constraints put 'ret' in a new register, so the
1538         * computation above did not clobber either 'src1' or 'src2'.
1539         */
1540        tcg_out_movcond(s, TCG_COND_EQ, ret, src1, 0, true,
1541                        src2, c_src2, ret, false);
1542    }
1543}
1544
1545static void tcg_out_cmpsel(TCGContext *s, TCGType type, unsigned vece,
1546                           TCGCond cond, TCGReg ret,
1547                           TCGReg cmp1, TCGReg cmp2, bool c_cmp2,
1548                           TCGReg val1, bool c_val1,
1549                           TCGReg val2, bool c_val2)
1550{
1551    set_vtype_len_sew(s, type, vece);
1552
1553    /* Use only vmerge_vim if possible, by inverting the test. */
1554    if (c_val2 && !c_val1) {
1555        TCGArg temp = val1;
1556        cond = tcg_invert_cond(cond);
1557        val1 = val2;
1558        val2 = temp;
1559        c_val1 = true;
1560        c_val2 = false;
1561    }
1562
1563    /* Perform the comparison into V0 mask. */
1564    if (c_cmp2) {
1565        tcg_out_opc_vi(s, tcg_cmpcond_to_rvv_vi[cond].op, TCG_REG_V0, cmp1,
1566                       cmp2 - tcg_cmpcond_to_rvv_vi[cond].adjust);
1567    } else if (tcg_cmpcond_to_rvv_vv[cond].swap) {
1568        tcg_out_opc_vv(s, tcg_cmpcond_to_rvv_vv[cond].op,
1569                       TCG_REG_V0, cmp2, cmp1);
1570    } else {
1571        tcg_out_opc_vv(s, tcg_cmpcond_to_rvv_vv[cond].op,
1572                       TCG_REG_V0, cmp1, cmp2);
1573    }
1574    if (c_val1) {
1575        if (c_val2) {
1576            tcg_out_opc_vi(s, OPC_VMV_V_I, ret, 0, val2);
1577            val2 = ret;
1578        }
1579        /* vd[i] = v0.mask[i] ? imm : vs2[i] */
1580        tcg_out_opc_vim_mask(s, OPC_VMERGE_VIM, ret, val2, val1);
1581    } else {
1582        /* vd[i] = v0.mask[i] ? vs1[i] : vs2[i] */
1583        tcg_out_opc_vvm_mask(s, OPC_VMERGE_VVM, ret, val2, val1);
1584    }
1585}
1586
1587static void tcg_out_vshifti(TCGContext *s, RISCVInsn opc_vi, RISCVInsn opc_vx,
1588                             TCGReg dst, TCGReg src, unsigned imm)
1589{
1590    if (imm < 32) {
1591        tcg_out_opc_vi(s, opc_vi, dst, src, imm);
1592    } else {
1593        tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP0, imm);
1594        tcg_out_opc_vx(s, opc_vx, dst, src, TCG_REG_TMP0);
1595    }
1596}
1597
1598static void init_setting_vtype(TCGContext *s)
1599{
1600    s->riscv_cur_type = TCG_TYPE_COUNT;
1601}
1602
1603static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *arg, bool tail)
1604{
1605    TCGReg link = tail ? TCG_REG_ZERO : TCG_REG_RA;
1606    ptrdiff_t offset = tcg_pcrel_diff(s, arg);
1607    int ret;
1608
1609    init_setting_vtype(s);
1610
1611    tcg_debug_assert((offset & 1) == 0);
1612    if (offset == sextreg(offset, 0, 20)) {
1613        /* short jump: offset fits in a signed 20-bit displacement */
1614        tcg_out_opc_jump(s, OPC_JAL, link, offset);
1615    } else if (offset == (int32_t)offset) {
1616        /* long jump: offset fits in a signed 32-bit displacement */
1617        tcg_out_opc_upper(s, OPC_AUIPC, TCG_REG_TMP0, 0);
1618        tcg_out_opc_imm(s, OPC_JALR, link, TCG_REG_TMP0, 0);
1619        ret = reloc_call(s->code_ptr - 2, arg);
1620        tcg_debug_assert(ret == true);
1621    } else {
1622        /* far jump: 64-bit */
1623        tcg_target_long imm = sextreg((tcg_target_long)arg, 0, 12);
1624        tcg_target_long base = (tcg_target_long)arg - imm;
1625        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, base);
1626        tcg_out_opc_imm(s, OPC_JALR, link, TCG_REG_TMP0, imm);
1627    }
1628}
1629
1630static void tcg_out_call(TCGContext *s, const tcg_insn_unit *arg,
1631                         const TCGHelperInfo *info)
1632{
1633    tcg_out_call_int(s, arg, false);
1634}
1635
1636static void tcg_out_mb(TCGContext *s, TCGArg a0)
1637{
1638    tcg_insn_unit insn = OPC_FENCE;
1639
1640    if (a0 & TCG_MO_LD_LD) {
1641        insn |= 0x02200000;
1642    }
1643    if (a0 & TCG_MO_ST_LD) {
1644        insn |= 0x01200000;
1645    }
1646    if (a0 & TCG_MO_LD_ST) {
1647        insn |= 0x02100000;
1648    }
1649    if (a0 & TCG_MO_ST_ST) {
1650        insn |= 0x01100000;
1651    }
1652    tcg_out32(s, insn);
1653}
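
/*
 * Illustrative encoding note: the masks above set the FENCE
 * predecessor (bits 27:24) and successor (bits 23:20) fields, R being
 * bits 25/21 and W bits 24/20.  TCG_MO_LD_LD alone therefore emits
 * "fence r, r" (0x0220000f), while a full barrier with all four bits
 * of a0 set emits "fence rw, rw" (0x0330000f).
 */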
1654
1655/*
1656 * Load/store and TLB
1657 */
1658
1659static void tcg_out_goto(TCGContext *s, const tcg_insn_unit *target)
1660{
1661    tcg_out_opc_jump(s, OPC_JAL, TCG_REG_ZERO, 0);
1662    bool ok = reloc_jimm20(s->code_ptr - 1, target);
1663    tcg_debug_assert(ok);
1664}
1665
1666bool tcg_target_has_memory_bswap(MemOp memop)
1667{
1668    return false;
1669}
1670
1671/* We have three temps, we might as well expose them. */
1672static const TCGLdstHelperParam ldst_helper_param = {
1673    .ntmp = 3, .tmp = { TCG_REG_TMP0, TCG_REG_TMP1, TCG_REG_TMP2 }
1674};
1675
1676static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
1677{
1678    MemOp opc = get_memop(l->oi);
1679
1680    /* resolve label address */
1681    if (!reloc_sbimm12(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
1682        return false;
1683    }
1684
1685    /* call load helper */
1686    tcg_out_ld_helper_args(s, l, &ldst_helper_param);
1687    tcg_out_call_int(s, qemu_ld_helpers[opc & MO_SSIZE], false);
1688    tcg_out_ld_helper_ret(s, l, true, &ldst_helper_param);
1689
1690    tcg_out_goto(s, l->raddr);
1691    return true;
1692}
1693
1694static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
1695{
1696    MemOp opc = get_memop(l->oi);
1697
1698    /* resolve label address */
1699    if (!reloc_sbimm12(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
1700        return false;
1701    }
1702
1703    /* call store helper */
1704    tcg_out_st_helper_args(s, l, &ldst_helper_param);
1705    tcg_out_call_int(s, qemu_st_helpers[opc & MO_SIZE], false);
1706
1707    tcg_out_goto(s, l->raddr);
1708    return true;
1709}
1710
1711/* We expect to use a 12-bit negative offset from ENV.  */
1712#define MIN_TLB_MASK_TABLE_OFS  -(1 << 11)
1713
1714/*
1715 * For system-mode, perform the TLB load and compare.
1716 * For user-mode, perform any required alignment tests.
1717 * In both cases, return a TCGLabelQemuLdst structure if the slow path
1718 * is required and fill in @h with the host address for the fast path.
1719 */
1720static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, TCGReg *pbase,
1721                                           TCGReg addr_reg, MemOpIdx oi,
1722                                           bool is_ld)
1723{
1724    TCGType addr_type = s->addr_type;
1725    TCGLabelQemuLdst *ldst = NULL;
1726    MemOp opc = get_memop(oi);
1727    TCGAtomAlign aa;
1728    unsigned a_mask;
1729
1730    aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, false);
1731    a_mask = (1u << aa.align) - 1;
1732
1733    if (tcg_use_softmmu) {
1734        unsigned s_bits = opc & MO_SIZE;
1735        unsigned s_mask = (1u << s_bits) - 1;
1736        int mem_index = get_mmuidx(oi);
1737        int fast_ofs = tlb_mask_table_ofs(s, mem_index);
1738        int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask);
1739        int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table);
1740        int compare_mask;
1741        TCGReg addr_adj;
1742
1743        ldst = new_ldst_label(s);
1744        ldst->is_ld = is_ld;
1745        ldst->oi = oi;
1746        ldst->addr_reg = addr_reg;
1747
1748        init_setting_vtype(s);
1749
1750        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_AREG0, mask_ofs);
1751        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, table_ofs);
1752
1753        tcg_out_opc_imm(s, OPC_SRLI, TCG_REG_TMP2, addr_reg,
1754                        s->page_bits - CPU_TLB_ENTRY_BITS);
1755        tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0);
1756        tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1);
1757
1758        /*
1759         * For aligned accesses, we check the first byte and include the
1760         * alignment bits within the address.  For unaligned access, we
1761         * check that we don't cross pages using the address of the last
1762         * byte of the access.
1763         */
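        /*
         * E.g. a 4-byte access with no alignment requirement has
         * a_mask = 0 and s_mask = 3, so we add 3 and the page-crossing
         * comparison below is made on the last byte of the access;
         * a naturally aligned 4-byte access has a_mask == s_mask == 3
         * and the address is used unmodified.
         */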
1764        addr_adj = addr_reg;
1765        if (a_mask < s_mask) {
1766            addr_adj = TCG_REG_TMP0;
1767            tcg_out_opc_imm(s, addr_type == TCG_TYPE_I32 ? OPC_ADDIW : OPC_ADDI,
1768                            addr_adj, addr_reg, s_mask - a_mask);
1769        }
1770        compare_mask = s->page_mask | a_mask;
1771        if (compare_mask == sextreg(compare_mask, 0, 12)) {
1772            tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addr_adj, compare_mask);
1773        } else {
1774            tcg_out_movi(s, addr_type, TCG_REG_TMP1, compare_mask);
1775            tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP1, TCG_REG_TMP1, addr_adj);
1776        }
1777
1778        /* Load the tlb comparator and the addend.  */
1779        QEMU_BUILD_BUG_ON(HOST_BIG_ENDIAN);
1780        tcg_out_ld(s, addr_type, TCG_REG_TMP0, TCG_REG_TMP2,
1781                   is_ld ? offsetof(CPUTLBEntry, addr_read)
1782                         : offsetof(CPUTLBEntry, addr_write));
1783        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2,
1784                   offsetof(CPUTLBEntry, addend));
1785
1786        /* Compare masked address with the TLB entry. */
1787        ldst->label_ptr[0] = s->code_ptr;
1788        tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP0, TCG_REG_TMP1, 0);
1789
1790        /* TLB Hit - translate address using addend.  */
1791        if (addr_type != TCG_TYPE_I32) {
1792            tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, addr_reg, TCG_REG_TMP2);
1793        } else if (cpuinfo & CPUINFO_ZBA) {
1794            tcg_out_opc_reg(s, OPC_ADD_UW, TCG_REG_TMP0,
1795                            addr_reg, TCG_REG_TMP2);
1796        } else {
1797            tcg_out_ext32u(s, TCG_REG_TMP0, addr_reg);
1798            tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0,
1799                            TCG_REG_TMP0, TCG_REG_TMP2);
1800        }
1801        *pbase = TCG_REG_TMP0;
1802    } else {
1803        TCGReg base;
1804
1805        if (a_mask) {
1806            ldst = new_ldst_label(s);
1807            ldst->is_ld = is_ld;
1808            ldst->oi = oi;
1809            ldst->addr_reg = addr_reg;
1810
1811            init_setting_vtype(s);
1812
1813            /* We expect an alignment mask of at most 7, so andi always suffices. */
1814            tcg_debug_assert(a_mask == sextreg(a_mask, 0, 12));
1815            tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addr_reg, a_mask);
1816
1817            ldst->label_ptr[0] = s->code_ptr;
1818            tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP1, TCG_REG_ZERO, 0);
1819        }
1820
1821        if (guest_base != 0) {
1822            base = TCG_REG_TMP0;
1823            if (addr_type != TCG_TYPE_I32) {
1824                tcg_out_opc_reg(s, OPC_ADD, base, addr_reg,
1825                                TCG_GUEST_BASE_REG);
1826            } else if (cpuinfo & CPUINFO_ZBA) {
1827                tcg_out_opc_reg(s, OPC_ADD_UW, base, addr_reg,
1828                                TCG_GUEST_BASE_REG);
1829            } else {
1830                tcg_out_ext32u(s, base, addr_reg);
1831                tcg_out_opc_reg(s, OPC_ADD, base, base, TCG_GUEST_BASE_REG);
1832            }
1833        } else if (addr_type != TCG_TYPE_I32) {
1834            base = addr_reg;
1835        } else {
1836            base = TCG_REG_TMP0;
1837            tcg_out_ext32u(s, base, addr_reg);
1838        }
1839        *pbase = base;
1840    }
1841
1842    return ldst;
1843}
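/*
 * Shape of the soft-MMU fast path emitted above (illustrative; the
 * temporaries are TCG_REG_TMP0..TMP2 and some steps are conditional):
 *
 *     ld    tmp0, mask_ofs(env)
 *     ld    tmp1, table_ofs(env)
 *     srli  tmp2, addr, page_bits - CPU_TLB_ENTRY_BITS
 *     and   tmp2, tmp2, tmp0
 *     add   tmp2, tmp2, tmp1             # &CPUTLBEntry
 *     addi  adj, addr, s_mask - a_mask   # only if a_mask < s_mask
 *     andi  tmp1, adj, compare_mask
 *     ld    tmp0, addr_read/addr_write(tmp2)
 *     ld    tmp2, addend(tmp2)
 *     bne   tmp0, tmp1, slow_path
 *     add   tmp0, addr, tmp2             # host address
 */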
1844
1845static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg val,
1846                                   TCGReg base, MemOp opc, TCGType type)
1847{
1848    /* Byte swapping is left to middle-end expansion. */
1849    tcg_debug_assert((opc & MO_BSWAP) == 0);
1850
1851    switch (opc & (MO_SSIZE)) {
1852    case MO_UB:
1853        tcg_out_opc_imm(s, OPC_LBU, val, base, 0);
1854        break;
1855    case MO_SB:
1856        tcg_out_opc_imm(s, OPC_LB, val, base, 0);
1857        break;
1858    case MO_UW:
1859        tcg_out_opc_imm(s, OPC_LHU, val, base, 0);
1860        break;
1861    case MO_SW:
1862        tcg_out_opc_imm(s, OPC_LH, val, base, 0);
1863        break;
1864    case MO_UL:
1865        if (type == TCG_TYPE_I64) {
1866            tcg_out_opc_imm(s, OPC_LWU, val, base, 0);
1867            break;
1868        }
1869        /* FALLTHRU */
1870    case MO_SL:
1871        tcg_out_opc_imm(s, OPC_LW, val, base, 0);
1872        break;
1873    case MO_UQ:
1874        tcg_out_opc_imm(s, OPC_LD, val, base, 0);
1875        break;
1876    default:
1877        g_assert_not_reached();
1878    }
1879}
1880
1881static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
1882                            MemOpIdx oi, TCGType data_type)
1883{
1884    TCGLabelQemuLdst *ldst;
1885    TCGReg base;
1886
1887    ldst = prepare_host_addr(s, &base, addr_reg, oi, true);
1888    tcg_out_qemu_ld_direct(s, data_reg, base, get_memop(oi), data_type);
1889
1890    if (ldst) {
1891        ldst->type = data_type;
1892        ldst->datalo_reg = data_reg;
1893        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
1894    }
1895}
1896
1897static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg val,
1898                                   TCGReg base, MemOp opc)
1899{
1900    /* Byte swapping is left to middle-end expansion. */
1901    tcg_debug_assert((opc & MO_BSWAP) == 0);
1902
1903    switch (opc & (MO_SSIZE)) {
1904    case MO_8:
1905        tcg_out_opc_store(s, OPC_SB, base, val, 0);
1906        break;
1907    case MO_16:
1908        tcg_out_opc_store(s, OPC_SH, base, val, 0);
1909        break;
1910    case MO_32:
1911        tcg_out_opc_store(s, OPC_SW, base, val, 0);
1912        break;
1913    case MO_64:
1914        tcg_out_opc_store(s, OPC_SD, base, val, 0);
1915        break;
1916    default:
1917        g_assert_not_reached();
1918    }
1919}
1920
1921static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
1922                            MemOpIdx oi, TCGType data_type)
1923{
1924    TCGLabelQemuLdst *ldst;
1925    TCGReg base;
1926
1927    ldst = prepare_host_addr(s, &base, addr_reg, oi, false);
1928    tcg_out_qemu_st_direct(s, data_reg, base, get_memop(oi));
1929
1930    if (ldst) {
1931        ldst->type = data_type;
1932        ldst->datalo_reg = data_reg;
1933        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
1934    }
1935}
1936
1937static const tcg_insn_unit *tb_ret_addr;
1938
1939static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
1940{
1941    /* Reuse the zeroing that exists for goto_ptr.  */
1942    if (a0 == 0) {
1943        tcg_out_call_int(s, tcg_code_gen_epilogue, true);
1944    } else {
1945        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A0, a0);
1946        tcg_out_call_int(s, tb_ret_addr, true);
1947    }
1948}
1949
1950static void tcg_out_goto_tb(TCGContext *s, int which)
1951{
1952    /* Direct branch will be patched by tb_target_set_jmp_target. */
1953    set_jmp_insn_offset(s, which);
1954    tcg_out32(s, OPC_JAL);
1955
1956    /* When the branch is out of range, fall through to the indirect jump. */
1957    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_REG_ZERO,
1958               get_jmp_target_addr(s, which));
1959    tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, TCG_REG_TMP0, 0);
1960    set_jmp_reset_offset(s, which);
1961}
1962
1963void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
1964                              uintptr_t jmp_rx, uintptr_t jmp_rw)
1965{
1966    uintptr_t addr = tb->jmp_target_addr[n];
1967    ptrdiff_t offset = addr - jmp_rx;
1968    tcg_insn_unit insn;
1969
1970    /* Either directly branch, or fall through to indirect branch. */
1971    if (offset == sextreg(offset, 0, 20)) {
1972        insn = encode_uj(OPC_JAL, TCG_REG_ZERO, offset);
1973    } else {
1974        insn = OPC_NOP;
1975    }
1976    qatomic_set((uint32_t *)jmp_rw, insn);
1977    flush_idcache_range(jmp_rx, jmp_rw, 4);
1978}
1979
1981static void tgen_add(TCGContext *s, TCGType type,
1982                     TCGReg a0, TCGReg a1, TCGReg a2)
1983{
1984    RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_ADDW : OPC_ADD;
1985    tcg_out_opc_reg(s, insn, a0, a1, a2);
1986}
1987
1988static void tgen_addi(TCGContext *s, TCGType type,
1989                      TCGReg a0, TCGReg a1, tcg_target_long a2)
1990{
1991    RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_ADDIW : OPC_ADDI;
1992    tcg_out_opc_imm(s, insn, a0, a1, a2);
1993}
1994
1995static const TCGOutOpBinary outop_add = {
1996    .base.static_constraint = C_O1_I2(r, r, rI),
1997    .out_rrr = tgen_add,
1998    .out_rri = tgen_addi,
1999};
2000
2001static void tgen_and(TCGContext *s, TCGType type,
2002                     TCGReg a0, TCGReg a1, TCGReg a2)
2003{
2004    tcg_out_opc_reg(s, OPC_AND, a0, a1, a2);
2005}
2006
2007static void tgen_andi(TCGContext *s, TCGType type,
2008                      TCGReg a0, TCGReg a1, tcg_target_long a2)
2009{
2010    tcg_out_opc_imm(s, OPC_ANDI, a0, a1, a2);
2011}
2012
2013static const TCGOutOpBinary outop_and = {
2014    .base.static_constraint = C_O1_I2(r, r, rI),
2015    .out_rrr = tgen_and,
2016    .out_rri = tgen_andi,
2017};
2018
2019static void tgen_andc(TCGContext *s, TCGType type,
2020                      TCGReg a0, TCGReg a1, TCGReg a2)
2021{
2022    tcg_out_opc_reg(s, OPC_ANDN, a0, a1, a2);
2023}
2024
2025static TCGConstraintSetIndex cset_zbb_rrr(TCGType type, unsigned flags)
2026{
2027    return cpuinfo & CPUINFO_ZBB ? C_O1_I2(r, r, r) : C_NotImplemented;
2028}
2029
2030static const TCGOutOpBinary outop_andc = {
2031    .base.static_constraint = C_Dynamic,
2032    .base.dynamic_constraint = cset_zbb_rrr,
2033    .out_rrr = tgen_andc,
2034};
2035
2036static void tgen_clz(TCGContext *s, TCGType type,
2037                     TCGReg a0, TCGReg a1, TCGReg a2)
2038{
2039    RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_CLZW : OPC_CLZ;
2040    tcg_out_cltz(s, type, insn, a0, a1, a2, false);
2041}
2042
2043static void tgen_clzi(TCGContext *s, TCGType type,
2044                      TCGReg a0, TCGReg a1, tcg_target_long a2)
2045{
2046    RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_CLZW : OPC_CLZ;
2047    tcg_out_cltz(s, type, insn, a0, a1, a2, true);
2048}
2049
2050static TCGConstraintSetIndex cset_clzctz(TCGType type, unsigned flags)
2051{
2052    return cpuinfo & CPUINFO_ZBB ? C_N1_I2(r, r, rM) : C_NotImplemented;
2053}
2054
2055static const TCGOutOpBinary outop_clz = {
2056    .base.static_constraint = C_Dynamic,
2057    .base.dynamic_constraint = cset_clzctz,
2058    .out_rrr = tgen_clz,
2059    .out_rri = tgen_clzi,
2060};
2061
2062static void tgen_ctpop(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
2063{
2064    RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_CPOPW : OPC_CPOP;
2065    tcg_out_opc_imm(s, insn, a0, a1, 0);
2066}
2067
2068static TCGConstraintSetIndex cset_ctpop(TCGType type, unsigned flags)
2069{
2070    return cpuinfo & CPUINFO_ZBB ? C_O1_I1(r, r) : C_NotImplemented;
2071}
2072
2073static const TCGOutOpUnary outop_ctpop = {
2074    .base.static_constraint = C_Dynamic,
2075    .base.dynamic_constraint = cset_ctpop,
2076    .out_rr = tgen_ctpop,
2077};
2078
2079static void tgen_ctz(TCGContext *s, TCGType type,
2080                     TCGReg a0, TCGReg a1, TCGReg a2)
2081{
2082    RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_CTZW : OPC_CTZ;
2083    tcg_out_cltz(s, type, insn, a0, a1, a2, false);
2084}
2085
2086static void tgen_ctzi(TCGContext *s, TCGType type,
2087                      TCGReg a0, TCGReg a1, tcg_target_long a2)
2088{
2089    RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_CTZW : OPC_CTZ;
2090    tcg_out_cltz(s, type, insn, a0, a1, a2, true);
2091}
2092
2093static const TCGOutOpBinary outop_ctz = {
2094    .base.static_constraint = C_Dynamic,
2095    .base.dynamic_constraint = cset_clzctz,
2096    .out_rrr = tgen_ctz,
2097    .out_rri = tgen_ctzi,
2098};
2099
2100static void tgen_divs(TCGContext *s, TCGType type,
2101                      TCGReg a0, TCGReg a1, TCGReg a2)
2102{
2103    RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_DIVW : OPC_DIV;
2104    tcg_out_opc_reg(s, insn, a0, a1, a2);
2105}
2106
2107static const TCGOutOpBinary outop_divs = {
2108    .base.static_constraint = C_O1_I2(r, r, r),
2109    .out_rrr = tgen_divs,
2110};
2111
2112static const TCGOutOpDivRem outop_divs2 = {
2113    .base.static_constraint = C_NotImplemented,
2114};
2115
2116static void tgen_divu(TCGContext *s, TCGType type,
2117                      TCGReg a0, TCGReg a1, TCGReg a2)
2118{
2119    RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_DIVUW : OPC_DIVU;
2120    tcg_out_opc_reg(s, insn, a0, a1, a2);
2121}
2122
2123static const TCGOutOpBinary outop_divu = {
2124    .base.static_constraint = C_O1_I2(r, r, r),
2125    .out_rrr = tgen_divu,
2126};
2127
2128static const TCGOutOpDivRem outop_divu2 = {
2129    .base.static_constraint = C_NotImplemented,
2130};
2131
2132static void tgen_eqv(TCGContext *s, TCGType type,
2133                     TCGReg a0, TCGReg a1, TCGReg a2)
2134{
2135    tcg_out_opc_reg(s, OPC_XNOR, a0, a1, a2);
2136}
2137
2138static const TCGOutOpBinary outop_eqv = {
2139    .base.static_constraint = C_Dynamic,
2140    .base.dynamic_constraint = cset_zbb_rrr,
2141    .out_rrr = tgen_eqv,
2142};
2143
2144static void tgen_mul(TCGContext *s, TCGType type,
2145                     TCGReg a0, TCGReg a1, TCGReg a2)
2146{
2147    RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_MULW : OPC_MUL;
2148    tcg_out_opc_reg(s, insn, a0, a1, a2);
2149}
2150
2151static const TCGOutOpBinary outop_mul = {
2152    .base.static_constraint = C_O1_I2(r, r, r),
2153    .out_rrr = tgen_mul,
2154};
2155
2156static const TCGOutOpMul2 outop_muls2 = {
2157    .base.static_constraint = C_NotImplemented,
2158};
2159
2160static TCGConstraintSetIndex cset_mulh(TCGType type, unsigned flags)
2161{
2162    return type == TCG_TYPE_I32 ? C_NotImplemented : C_O1_I2(r, r, r);
2163}
2164
2165static void tgen_mulsh(TCGContext *s, TCGType type,
2166                       TCGReg a0, TCGReg a1, TCGReg a2)
2167{
2168    tcg_out_opc_reg(s, OPC_MULH, a0, a1, a2);
2169}
2170
2171static const TCGOutOpBinary outop_mulsh = {
2172    .base.static_constraint = C_Dynamic,
2173    .base.dynamic_constraint = cset_mulh,
2174    .out_rrr = tgen_mulsh,
2175};
2176
2177static const TCGOutOpMul2 outop_mulu2 = {
2178    .base.static_constraint = C_NotImplemented,
2179};
2180
2181static void tgen_muluh(TCGContext *s, TCGType type,
2182                       TCGReg a0, TCGReg a1, TCGReg a2)
2183{
2184    tcg_out_opc_reg(s, OPC_MULHU, a0, a1, a2);
2185}
2186
2187static const TCGOutOpBinary outop_muluh = {
2188    .base.static_constraint = C_Dynamic,
2189    .base.dynamic_constraint = cset_mulh,
2190    .out_rrr = tgen_muluh,
2191};
2192
2193static const TCGOutOpBinary outop_nand = {
2194    .base.static_constraint = C_NotImplemented,
2195};
2196
2197static const TCGOutOpBinary outop_nor = {
2198    .base.static_constraint = C_NotImplemented,
2199};
2200
2201static void tgen_or(TCGContext *s, TCGType type,
2202                    TCGReg a0, TCGReg a1, TCGReg a2)
2203{
2204    tcg_out_opc_reg(s, OPC_OR, a0, a1, a2);
2205}
2206
2207static void tgen_ori(TCGContext *s, TCGType type,
2208                     TCGReg a0, TCGReg a1, tcg_target_long a2)
2209{
2210    tcg_out_opc_imm(s, OPC_ORI, a0, a1, a2);
2211}
2212
2213static const TCGOutOpBinary outop_or = {
2214    .base.static_constraint = C_O1_I2(r, r, rI),
2215    .out_rrr = tgen_or,
2216    .out_rri = tgen_ori,
2217};
2218
2219static void tgen_orc(TCGContext *s, TCGType type,
2220                     TCGReg a0, TCGReg a1, TCGReg a2)
2221{
2222    tcg_out_opc_reg(s, OPC_ORN, a0, a1, a2);
2223}
2224
2225static const TCGOutOpBinary outop_orc = {
2226    .base.static_constraint = C_Dynamic,
2227    .base.dynamic_constraint = cset_zbb_rrr,
2228    .out_rrr = tgen_orc,
2229};
2230
2231static void tgen_rems(TCGContext *s, TCGType type,
2232                      TCGReg a0, TCGReg a1, TCGReg a2)
2233{
2234    RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_REMW : OPC_REM;
2235    tcg_out_opc_reg(s, insn, a0, a1, a2);
2236}
2237
2238static const TCGOutOpBinary outop_rems = {
2239    .base.static_constraint = C_O1_I2(r, r, r),
2240    .out_rrr = tgen_rems,
2241};
2242
2243static void tgen_remu(TCGContext *s, TCGType type,
2244                      TCGReg a0, TCGReg a1, TCGReg a2)
2245{
2246    RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_REMUW : OPC_REMU;
2247    tcg_out_opc_reg(s, insn, a0, a1, a2);
2248}
2249
2250static const TCGOutOpBinary outop_remu = {
2251    .base.static_constraint = C_O1_I2(r, r, r),
2252    .out_rrr = tgen_remu,
2253};
2254
2255static TCGConstraintSetIndex cset_rot(TCGType type, unsigned flags)
2256{
2257    return cpuinfo & CPUINFO_ZBB ? C_O1_I2(r, r, ri) : C_NotImplemented;
2258}
2259
2260static void tgen_rotr(TCGContext *s, TCGType type,
2261                      TCGReg a0, TCGReg a1, TCGReg a2)
2262{
2263    RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_RORW : OPC_ROR;
2264    tcg_out_opc_reg(s, insn, a0, a1, a2);
2265}
2266
2267static void tgen_rotri(TCGContext *s, TCGType type,
2268                       TCGReg a0, TCGReg a1, tcg_target_long a2)
2269{
2270    RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_RORIW : OPC_RORI;
2271    unsigned mask = type == TCG_TYPE_I32 ? 31 : 63;
2272    tcg_out_opc_imm(s, insn, a0, a1, a2 & mask);
2273}
2274
2275static const TCGOutOpBinary outop_rotr = {
2276    .base.static_constraint = C_Dynamic,
2277    .base.dynamic_constraint = cset_rot,
2278    .out_rrr = tgen_rotr,
2279    .out_rri = tgen_rotri,
2280};
2281
2282static void tgen_rotl(TCGContext *s, TCGType type,
2283                      TCGReg a0, TCGReg a1, TCGReg a2)
2284{
2285    RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_ROLW : OPC_ROL;
2286    tcg_out_opc_reg(s, insn, a0, a1, a2);
2287}
2288
2289static void tgen_rotli(TCGContext *s, TCGType type,
2290                       TCGReg a0, TCGReg a1, tcg_target_long a2)
2291{
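    /*
     * Rotate left by C is rotate right by -C; tgen_rotri masks the
     * count to the operand width, so e.g. a 32-bit rotl by 8 becomes
     * "roriw a0, a1, 24".
     */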
2292    tgen_rotri(s, type, a0, a1, -a2);
2293}
2294
2295static const TCGOutOpBinary outop_rotl = {
2296    .base.static_constraint = C_Dynamic,
2297    .base.dynamic_constraint = cset_rot,
2298    .out_rrr = tgen_rotl,
2299    .out_rri = tgen_rotli,
2300};
2301
2302static void tgen_sar(TCGContext *s, TCGType type,
2303                     TCGReg a0, TCGReg a1, TCGReg a2)
2304{
2305    RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_SRAW : OPC_SRA;
2306    tcg_out_opc_reg(s, insn, a0, a1, a2);
2307}
2308
2309static void tgen_sari(TCGContext *s, TCGType type,
2310                      TCGReg a0, TCGReg a1, tcg_target_long a2)
2311{
2312    RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_SRAIW : OPC_SRAI;
2313    unsigned mask = type == TCG_TYPE_I32 ? 31 : 63;
2314    tcg_out_opc_imm(s, insn, a0, a1, a2 & mask);
2315}
2316
2317static const TCGOutOpBinary outop_sar = {
2318    .base.static_constraint = C_O1_I2(r, r, ri),
2319    .out_rrr = tgen_sar,
2320    .out_rri = tgen_sari,
2321};
2322
2323static void tgen_shl(TCGContext *s, TCGType type,
2324                     TCGReg a0, TCGReg a1, TCGReg a2)
2325{
2326    RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_SLLW : OPC_SLL;
2327    tcg_out_opc_reg(s, insn, a0, a1, a2);
2328}
2329
2330static void tgen_shli(TCGContext *s, TCGType type,
2331                      TCGReg a0, TCGReg a1, tcg_target_long a2)
2332{
2333    RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_SLLIW : OPC_SLLI;
2334    unsigned mask = type == TCG_TYPE_I32 ? 31 : 63;
2335    tcg_out_opc_imm(s, insn, a0, a1, a2 & mask);
2336}
2337
2338static const TCGOutOpBinary outop_shl = {
2339    .base.static_constraint = C_O1_I2(r, r, ri),
2340    .out_rrr = tgen_shl,
2341    .out_rri = tgen_shli,
2342};
2343
2344static void tgen_shr(TCGContext *s, TCGType type,
2345                     TCGReg a0, TCGReg a1, TCGReg a2)
2346{
2347    RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_SRLW : OPC_SRL;
2348    tcg_out_opc_reg(s, insn, a0, a1, a2);
2349}
2350
2351static void tgen_shri(TCGContext *s, TCGType type,
2352                      TCGReg a0, TCGReg a1, tcg_target_long a2)
2353{
2354    RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_SRLIW : OPC_SRLI;
2355    unsigned mask = type == TCG_TYPE_I32 ? 31 : 63;
2356    tcg_out_opc_imm(s, insn, a0, a1, a2 & mask);
2357}
2358
2359static const TCGOutOpBinary outop_shr = {
2360    .base.static_constraint = C_O1_I2(r, r, ri),
2361    .out_rrr = tgen_shr,
2362    .out_rri = tgen_shri,
2363};
2364
2365static void tgen_sub(TCGContext *s, TCGType type,
2366                     TCGReg a0, TCGReg a1, TCGReg a2)
2367{
2368    RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_SUBW : OPC_SUB;
2369    tcg_out_opc_reg(s, insn, a0, a1, a2);
2370}
2371
2372static const TCGOutOpSubtract outop_sub = {
2373    .base.static_constraint = C_O1_I2(r, r, r),
2374    .out_rrr = tgen_sub,
2375};
2376
2377static void tgen_xor(TCGContext *s, TCGType type,
2378                     TCGReg a0, TCGReg a1, TCGReg a2)
2379{
2380    tcg_out_opc_reg(s, OPC_XOR, a0, a1, a2);
2381}
2382
2383static void tgen_xori(TCGContext *s, TCGType type,
2384                      TCGReg a0, TCGReg a1, tcg_target_long a2)
2385{
2386    tcg_out_opc_imm(s, OPC_XORI, a0, a1, a2);
2387}
2388
2389static const TCGOutOpBinary outop_xor = {
2390    .base.static_constraint = C_O1_I2(r, r, rI),
2391    .out_rrr = tgen_xor,
2392    .out_rri = tgen_xori,
2393};
2394
2395static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
2396{
2397    tgen_sub(s, type, a0, TCG_REG_ZERO, a1);
2398}
2399
2400static const TCGOutOpUnary outop_neg = {
2401    .base.static_constraint = C_O1_I1(r, r),
2402    .out_rr = tgen_neg,
2403};
2404
2405static void tgen_not(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
2406{
2407    tgen_xori(s, type, a0, a1, -1);
2408}
2409
2410static const TCGOutOpUnary outop_not = {
2411    .base.static_constraint = C_O1_I1(r, r),
2412    .out_rr = tgen_not,
2413};
2414
2416static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
2417                       const TCGArg args[TCG_MAX_OP_ARGS],
2418                       const int const_args[TCG_MAX_OP_ARGS])
2419{
2420    TCGArg a0 = args[0];
2421    TCGArg a1 = args[1];
2422    TCGArg a2 = args[2];
2423    int c2 = const_args[2];
2424
2425    switch (opc) {
2426    case INDEX_op_goto_ptr:
2427        tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, a0, 0);
2428        break;
2429
2430    case INDEX_op_br:
2431        tcg_out_reloc(s, s->code_ptr, R_RISCV_JAL, arg_label(a0), 0);
2432        tcg_out_opc_jump(s, OPC_JAL, TCG_REG_ZERO, 0);
2433        break;
2434
2435    case INDEX_op_ld8u_i32:
2436    case INDEX_op_ld8u_i64:
2437        tcg_out_ldst(s, OPC_LBU, a0, a1, a2);
2438        break;
2439    case INDEX_op_ld8s_i32:
2440    case INDEX_op_ld8s_i64:
2441        tcg_out_ldst(s, OPC_LB, a0, a1, a2);
2442        break;
2443    case INDEX_op_ld16u_i32:
2444    case INDEX_op_ld16u_i64:
2445        tcg_out_ldst(s, OPC_LHU, a0, a1, a2);
2446        break;
2447    case INDEX_op_ld16s_i32:
2448    case INDEX_op_ld16s_i64:
2449        tcg_out_ldst(s, OPC_LH, a0, a1, a2);
2450        break;
2451    case INDEX_op_ld32u_i64:
2452        tcg_out_ldst(s, OPC_LWU, a0, a1, a2);
2453        break;
2454    case INDEX_op_ld_i32:
2455    case INDEX_op_ld32s_i64:
2456        tcg_out_ldst(s, OPC_LW, a0, a1, a2);
2457        break;
2458    case INDEX_op_ld_i64:
2459        tcg_out_ldst(s, OPC_LD, a0, a1, a2);
2460        break;
2461
2462    case INDEX_op_st8_i32:
2463    case INDEX_op_st8_i64:
2464        tcg_out_ldst(s, OPC_SB, a0, a1, a2);
2465        break;
2466    case INDEX_op_st16_i32:
2467    case INDEX_op_st16_i64:
2468        tcg_out_ldst(s, OPC_SH, a0, a1, a2);
2469        break;
2470    case INDEX_op_st_i32:
2471    case INDEX_op_st32_i64:
2472        tcg_out_ldst(s, OPC_SW, a0, a1, a2);
2473        break;
2474    case INDEX_op_st_i64:
2475        tcg_out_ldst(s, OPC_SD, a0, a1, a2);
2476        break;
2477
2478    case INDEX_op_bswap64_i64:
2479        tcg_out_opc_imm(s, OPC_REV8, a0, a1, 0);
2480        break;
2481    case INDEX_op_bswap32_i32:
2482        a2 = 0;
2483        /* fall through */
2484    case INDEX_op_bswap32_i64:
2485        tcg_out_opc_imm(s, OPC_REV8, a0, a1, 0);
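        /*
         * Zbb REV8 reverses all eight bytes of the register, leaving
         * the swapped 32-bit value in the upper half; shift it back
         * down with SRLI for a zero-extended result or SRAI for a
         * sign-extended one.
         */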
2486        if (a2 & TCG_BSWAP_OZ) {
2487            tcg_out_opc_imm(s, OPC_SRLI, a0, a0, 32);
2488        } else {
2489            tcg_out_opc_imm(s, OPC_SRAI, a0, a0, 32);
2490        }
2491        break;
2492    case INDEX_op_bswap16_i64:
2493    case INDEX_op_bswap16_i32:
2494        tcg_out_opc_imm(s, OPC_REV8, a0, a1, 0);
2495        if (a2 & TCG_BSWAP_OZ) {
2496            tcg_out_opc_imm(s, OPC_SRLI, a0, a0, 48);
2497        } else {
2498            tcg_out_opc_imm(s, OPC_SRAI, a0, a0, 48);
2499        }
2500        break;
2501
2502    case INDEX_op_add2_i32:
2503        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
2504                        const_args[4], const_args[5], false, true);
2505        break;
2506    case INDEX_op_add2_i64:
2507        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
2508                        const_args[4], const_args[5], false, false);
2509        break;
2510    case INDEX_op_sub2_i32:
2511        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
2512                        const_args[4], const_args[5], true, true);
2513        break;
2514    case INDEX_op_sub2_i64:
2515        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
2516                        const_args[4], const_args[5], true, false);
2517        break;
2518
2519    case INDEX_op_brcond_i32:
2520    case INDEX_op_brcond_i64:
2521        tcg_out_brcond(s, a2, a0, a1, arg_label(args[3]));
2522        break;
2523
2524    case INDEX_op_movcond_i32:
2525    case INDEX_op_movcond_i64:
2526        tcg_out_movcond(s, args[5], a0, a1, a2, c2,
2527                        args[3], const_args[3], args[4], const_args[4]);
2528        break;
2529
2530    case INDEX_op_qemu_ld_i32:
2531        tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32);
2532        break;
2533    case INDEX_op_qemu_ld_i64:
2534        tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64);
2535        break;
2536    case INDEX_op_qemu_st_i32:
2537        tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32);
2538        break;
2539    case INDEX_op_qemu_st_i64:
2540        tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64);
2541        break;
2542
2543    case INDEX_op_extrh_i64_i32:
2544        tcg_out_opc_imm(s, OPC_SRAI, a0, a1, 32);
2545        break;
2546
2547    case INDEX_op_mb:
2548        tcg_out_mb(s, a0);
2549        break;
2550
2551    case INDEX_op_extract_i64:
2552        if (a2 + args[3] == 32) {
2553            if (a2 == 0) {
2554                tcg_out_ext32u(s, a0, a1);
2555            } else {
2556                tcg_out_opc_imm(s, OPC_SRLIW, a0, a1, a2);
2557            }
2558            break;
2559        }
2560        /* FALLTHRU */
2561    case INDEX_op_extract_i32:
2562        switch (args[3]) {
2563        case 1:
2564            tcg_out_opc_imm(s, OPC_BEXTI, a0, a1, a2);
2565            break;
2566        case 16:
2567            tcg_debug_assert(a2 == 0);
2568            tcg_out_ext16u(s, a0, a1);
2569            break;
2570        default:
2571            g_assert_not_reached();
2572        }
2573        break;
2574
2575    case INDEX_op_sextract_i64:
2576        if (a2 + args[3] == 32) {
2577            if (a2 == 0) {
2578                tcg_out_ext32s(s, a0, a1);
2579            } else {
2580                tcg_out_opc_imm(s, OPC_SRAIW, a0, a1, a2);
2581            }
2582            break;
2583        }
2584        /* FALLTHRU */
2585    case INDEX_op_sextract_i32:
2586        if (a2 == 0 && args[3] == 8) {
2587            tcg_out_ext8s(s, TCG_TYPE_REG, a0, a1);
2588        } else if (a2 == 0 && args[3] == 16) {
2589            tcg_out_ext16s(s, TCG_TYPE_REG, a0, a1);
2590        } else {
2591            g_assert_not_reached();
2592        }
2593        break;
2594
2595    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
2596    case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb.  */
2597    case INDEX_op_goto_tb:  /* Always emitted via tcg_out_goto_tb.  */
2598    case INDEX_op_ext_i32_i64:  /* Always emitted via tcg_reg_alloc_op.  */
2599    case INDEX_op_extu_i32_i64:
2600    case INDEX_op_extrl_i64_i32:
2601    default:
2602        g_assert_not_reached();
2603    }
2604}
2605
2606static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
2607                           unsigned vecl, unsigned vece,
2608                           const TCGArg args[TCG_MAX_OP_ARGS],
2609                           const int const_args[TCG_MAX_OP_ARGS])
2610{
2611    TCGType type = vecl + TCG_TYPE_V64;
2612    TCGArg a0, a1, a2;
2613    int c2;
2614
2615    a0 = args[0];
2616    a1 = args[1];
2617    a2 = args[2];
2618    c2 = const_args[2];
2619
2620    switch (opc) {
2621    case INDEX_op_dupm_vec:
2622        tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
2623        break;
2624    case INDEX_op_ld_vec:
2625        tcg_out_ld(s, type, a0, a1, a2);
2626        break;
2627    case INDEX_op_st_vec:
2628        tcg_out_st(s, type, a0, a1, a2);
2629        break;
2630    case INDEX_op_add_vec:
2631        set_vtype_len_sew(s, type, vece);
2632        tcg_out_opc_vv_vi(s, OPC_VADD_VV, OPC_VADD_VI, a0, a1, a2, c2);
2633        break;
2634    case INDEX_op_sub_vec:
2635        set_vtype_len_sew(s, type, vece);
2636        if (const_args[1]) {
2637            tcg_out_opc_vi(s, OPC_VRSUB_VI, a0, a2, a1);
2638        } else {
2639            tcg_out_opc_vv(s, OPC_VSUB_VV, a0, a1, a2);
2640        }
2641        break;
2642    case INDEX_op_and_vec:
2643        set_vtype_len(s, type);
2644        tcg_out_opc_vv_vi(s, OPC_VAND_VV, OPC_VAND_VI, a0, a1, a2, c2);
2645        break;
2646    case INDEX_op_or_vec:
2647        set_vtype_len(s, type);
2648        tcg_out_opc_vv_vi(s, OPC_VOR_VV, OPC_VOR_VI, a0, a1, a2, c2);
2649        break;
2650    case INDEX_op_xor_vec:
2651        set_vtype_len(s, type);
2652        tcg_out_opc_vv_vi(s, OPC_VXOR_VV, OPC_VXOR_VI, a0, a1, a2, c2);
2653        break;
2654    case INDEX_op_not_vec:
2655        set_vtype_len(s, type);
2656        tcg_out_opc_vi(s, OPC_VXOR_VI, a0, a1, -1);
2657        break;
2658    case INDEX_op_neg_vec:
2659        set_vtype_len_sew(s, type, vece);
2660        tcg_out_opc_vi(s, OPC_VRSUB_VI, a0, a1, 0);
2661        break;
2662    case INDEX_op_mul_vec:
2663        set_vtype_len_sew(s, type, vece);
2664        tcg_out_opc_vv(s, OPC_VMUL_VV, a0, a1, a2);
2665        break;
2666    case INDEX_op_ssadd_vec:
2667        set_vtype_len_sew(s, type, vece);
2668        tcg_out_opc_vv_vi(s, OPC_VSADD_VV, OPC_VSADD_VI, a0, a1, a2, c2);
2669        break;
2670    case INDEX_op_sssub_vec:
2671        set_vtype_len_sew(s, type, vece);
2672        tcg_out_opc_vv_vi(s, OPC_VSSUB_VV, OPC_VSSUB_VI, a0, a1, a2, c2);
2673        break;
2674    case INDEX_op_usadd_vec:
2675        set_vtype_len_sew(s, type, vece);
2676        tcg_out_opc_vv_vi(s, OPC_VSADDU_VV, OPC_VSADDU_VI, a0, a1, a2, c2);
2677        break;
2678    case INDEX_op_ussub_vec:
2679        set_vtype_len_sew(s, type, vece);
2680        tcg_out_opc_vv_vi(s, OPC_VSSUBU_VV, OPC_VSSUBU_VI, a0, a1, a2, c2);
2681        break;
2682    case INDEX_op_smax_vec:
2683        set_vtype_len_sew(s, type, vece);
2684        tcg_out_opc_vv_vi(s, OPC_VMAX_VV, OPC_VMAX_VI, a0, a1, a2, c2);
2685        break;
2686    case INDEX_op_smin_vec:
2687        set_vtype_len_sew(s, type, vece);
2688        tcg_out_opc_vv_vi(s, OPC_VMIN_VV, OPC_VMIN_VI, a0, a1, a2, c2);
2689        break;
2690    case INDEX_op_umax_vec:
2691        set_vtype_len_sew(s, type, vece);
2692        tcg_out_opc_vv_vi(s, OPC_VMAXU_VV, OPC_VMAXU_VI, a0, a1, a2, c2);
2693        break;
2694    case INDEX_op_umin_vec:
2695        set_vtype_len_sew(s, type, vece);
2696        tcg_out_opc_vv_vi(s, OPC_VMINU_VV, OPC_VMINU_VI, a0, a1, a2, c2);
2697        break;
2698    case INDEX_op_shls_vec:
2699        set_vtype_len_sew(s, type, vece);
2700        tcg_out_opc_vx(s, OPC_VSLL_VX, a0, a1, a2);
2701        break;
2702    case INDEX_op_shrs_vec:
2703        set_vtype_len_sew(s, type, vece);
2704        tcg_out_opc_vx(s, OPC_VSRL_VX, a0, a1, a2);
2705        break;
2706    case INDEX_op_sars_vec:
2707        set_vtype_len_sew(s, type, vece);
2708        tcg_out_opc_vx(s, OPC_VSRA_VX, a0, a1, a2);
2709        break;
2710    case INDEX_op_shlv_vec:
2711        set_vtype_len_sew(s, type, vece);
2712        tcg_out_opc_vv(s, OPC_VSLL_VV, a0, a1, a2);
2713        break;
2714    case INDEX_op_shrv_vec:
2715        set_vtype_len_sew(s, type, vece);
2716        tcg_out_opc_vv(s, OPC_VSRL_VV, a0, a1, a2);
2717        break;
2718    case INDEX_op_sarv_vec:
2719        set_vtype_len_sew(s, type, vece);
2720        tcg_out_opc_vv(s, OPC_VSRA_VV, a0, a1, a2);
2721        break;
2722    case INDEX_op_shli_vec:
2723        set_vtype_len_sew(s, type, vece);
2724        tcg_out_vshifti(s, OPC_VSLL_VI, OPC_VSLL_VX, a0, a1, a2);
2725        break;
2726    case INDEX_op_shri_vec:
2727        set_vtype_len_sew(s, type, vece);
2728        tcg_out_vshifti(s, OPC_VSRL_VI, OPC_VSRL_VX, a0, a1, a2);
2729        break;
2730    case INDEX_op_sari_vec:
2731        set_vtype_len_sew(s, type, vece);
2732        tcg_out_vshifti(s, OPC_VSRA_VI, OPC_VSRA_VX, a0, a1, a2);
2733        break;
2734    case INDEX_op_rotli_vec:
2735        set_vtype_len_sew(s, type, vece);
2736        tcg_out_vshifti(s, OPC_VSLL_VI, OPC_VSLL_VX, TCG_REG_V0, a1, a2);
2737        tcg_out_vshifti(s, OPC_VSRL_VI, OPC_VSRL_VX, a0, a1,
2738                        -a2 & ((8 << vece) - 1));
2739        tcg_out_opc_vv(s, OPC_VOR_VV, a0, a0, TCG_REG_V0);
2740        break;
2741    case INDEX_op_rotls_vec:
2742        set_vtype_len_sew(s, type, vece);
2743        tcg_out_opc_vx(s, OPC_VSLL_VX, TCG_REG_V0, a1, a2);
2744        tcg_out_opc_reg(s, OPC_SUBW, TCG_REG_TMP0, TCG_REG_ZERO, a2);
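        /*
         * Vector shifts use only the low log2(SEW) bits of the scalar
         * count, so the negated count yields (SEW - a2) % SEW for the
         * complementary right shift.
         */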
2745        tcg_out_opc_vx(s, OPC_VSRL_VX, a0, a1, TCG_REG_TMP0);
2746        tcg_out_opc_vv(s, OPC_VOR_VV, a0, a0, TCG_REG_V0);
2747        break;
2748    case INDEX_op_rotlv_vec:
2749        set_vtype_len_sew(s, type, vece);
2750        tcg_out_opc_vi(s, OPC_VRSUB_VI, TCG_REG_V0, a2, 0);
2751        tcg_out_opc_vv(s, OPC_VSRL_VV, TCG_REG_V0, a1, TCG_REG_V0);
2752        tcg_out_opc_vv(s, OPC_VSLL_VV, a0, a1, a2);
2753        tcg_out_opc_vv(s, OPC_VOR_VV, a0, a0, TCG_REG_V0);
2754        break;
2755    case INDEX_op_rotrv_vec:
2756        set_vtype_len_sew(s, type, vece);
2757        tcg_out_opc_vi(s, OPC_VRSUB_VI, TCG_REG_V0, a2, 0);
2758        tcg_out_opc_vv(s, OPC_VSLL_VV, TCG_REG_V0, a1, TCG_REG_V0);
2759        tcg_out_opc_vv(s, OPC_VSRL_VV, a0, a1, a2);
2760        tcg_out_opc_vv(s, OPC_VOR_VV, a0, a0, TCG_REG_V0);
2761        break;
2762    case INDEX_op_cmp_vec:
2763        tcg_out_cmpsel(s, type, vece, args[3], a0, a1, a2, c2,
2764                       -1, true, 0, true);
2765        break;
2766    case INDEX_op_cmpsel_vec:
2767        tcg_out_cmpsel(s, type, vece, args[5], a0, a1, a2, c2,
2768                       args[3], const_args[3], args[4], const_args[4]);
2769        break;
2770    case INDEX_op_mov_vec: /* Always emitted via tcg_out_mov.  */
2771    case INDEX_op_dup_vec: /* Always emitted via tcg_out_dup_vec.  */
2772    default:
2773        g_assert_not_reached();
2774    }
2775}
2776
2777void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
2778                       TCGArg a0, ...)
2779{
2780    g_assert_not_reached();
2781}
2782
2783int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
2784{
2785    switch (opc) {
2786    case INDEX_op_add_vec:
2787    case INDEX_op_sub_vec:
2788    case INDEX_op_and_vec:
2789    case INDEX_op_or_vec:
2790    case INDEX_op_xor_vec:
2791    case INDEX_op_not_vec:
2792    case INDEX_op_neg_vec:
2793    case INDEX_op_mul_vec:
2794    case INDEX_op_ssadd_vec:
2795    case INDEX_op_sssub_vec:
2796    case INDEX_op_usadd_vec:
2797    case INDEX_op_ussub_vec:
2798    case INDEX_op_smax_vec:
2799    case INDEX_op_smin_vec:
2800    case INDEX_op_umax_vec:
2801    case INDEX_op_umin_vec:
2802    case INDEX_op_shls_vec:
2803    case INDEX_op_shrs_vec:
2804    case INDEX_op_sars_vec:
2805    case INDEX_op_shlv_vec:
2806    case INDEX_op_shrv_vec:
2807    case INDEX_op_sarv_vec:
2808    case INDEX_op_shri_vec:
2809    case INDEX_op_shli_vec:
2810    case INDEX_op_sari_vec:
2811    case INDEX_op_rotls_vec:
2812    case INDEX_op_rotlv_vec:
2813    case INDEX_op_rotrv_vec:
2814    case INDEX_op_rotli_vec:
2815    case INDEX_op_cmp_vec:
2816    case INDEX_op_cmpsel_vec:
2817        return 1;
2818    default:
2819        return 0;
2820    }
2821}
2822
2823static TCGConstraintSetIndex
2824tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
2825{
2826    switch (op) {
2827    case INDEX_op_goto_ptr:
2828        return C_O0_I1(r);
2829
2830    case INDEX_op_ld8u_i32:
2831    case INDEX_op_ld8s_i32:
2832    case INDEX_op_ld16u_i32:
2833    case INDEX_op_ld16s_i32:
2834    case INDEX_op_ld_i32:
2835    case INDEX_op_ld8u_i64:
2836    case INDEX_op_ld8s_i64:
2837    case INDEX_op_ld16u_i64:
2838    case INDEX_op_ld16s_i64:
2839    case INDEX_op_ld32s_i64:
2840    case INDEX_op_ld32u_i64:
2841    case INDEX_op_ld_i64:
2842    case INDEX_op_extu_i32_i64:
2843    case INDEX_op_extrl_i64_i32:
2844    case INDEX_op_extrh_i64_i32:
2845    case INDEX_op_ext_i32_i64:
2846    case INDEX_op_extract_i32:
2847    case INDEX_op_extract_i64:
2848    case INDEX_op_sextract_i32:
2849    case INDEX_op_sextract_i64:
2850    case INDEX_op_bswap16_i32:
2851    case INDEX_op_bswap32_i32:
2852    case INDEX_op_bswap16_i64:
2853    case INDEX_op_bswap32_i64:
2854    case INDEX_op_bswap64_i64:
2855        return C_O1_I1(r, r);
2856
2857    case INDEX_op_st8_i32:
2858    case INDEX_op_st16_i32:
2859    case INDEX_op_st_i32:
2860    case INDEX_op_st8_i64:
2861    case INDEX_op_st16_i64:
2862    case INDEX_op_st32_i64:
2863    case INDEX_op_st_i64:
2864        return C_O0_I2(rz, r);
2865
2866    case INDEX_op_brcond_i32:
2867    case INDEX_op_brcond_i64:
2868        return C_O0_I2(rz, rz);
2869
2870    case INDEX_op_movcond_i32:
2871    case INDEX_op_movcond_i64:
2872        return C_O1_I4(r, r, rI, rM, rM);
2873
2874    case INDEX_op_add2_i32:
2875    case INDEX_op_add2_i64:
2876    case INDEX_op_sub2_i32:
2877    case INDEX_op_sub2_i64:
2878        return C_O2_I4(r, r, rz, rz, rM, rM);
2879
2880    case INDEX_op_qemu_ld_i32:
2881    case INDEX_op_qemu_ld_i64:
2882        return C_O1_I1(r, r);
2883    case INDEX_op_qemu_st_i32:
2884    case INDEX_op_qemu_st_i64:
2885        return C_O0_I2(rz, r);
2886
2887    case INDEX_op_st_vec:
2888        return C_O0_I2(v, r);
2889    case INDEX_op_dup_vec:
2890    case INDEX_op_dupm_vec:
2891    case INDEX_op_ld_vec:
2892        return C_O1_I1(v, r);
2893    case INDEX_op_neg_vec:
2894    case INDEX_op_not_vec:
2895    case INDEX_op_shli_vec:
2896    case INDEX_op_shri_vec:
2897    case INDEX_op_sari_vec:
2898    case INDEX_op_rotli_vec:
2899        return C_O1_I1(v, v);
2900    case INDEX_op_add_vec:
2901    case INDEX_op_and_vec:
2902    case INDEX_op_or_vec:
2903    case INDEX_op_xor_vec:
2904    case INDEX_op_ssadd_vec:
2905    case INDEX_op_sssub_vec:
2906    case INDEX_op_usadd_vec:
2907    case INDEX_op_ussub_vec:
2908    case INDEX_op_smax_vec:
2909    case INDEX_op_smin_vec:
2910    case INDEX_op_umax_vec:
2911    case INDEX_op_umin_vec:
2912        return C_O1_I2(v, v, vK);
2913    case INDEX_op_sub_vec:
2914        return C_O1_I2(v, vK, v);
2915    case INDEX_op_mul_vec:
2916    case INDEX_op_shlv_vec:
2917    case INDEX_op_shrv_vec:
2918    case INDEX_op_sarv_vec:
2919    case INDEX_op_rotlv_vec:
2920    case INDEX_op_rotrv_vec:
2921        return C_O1_I2(v, v, v);
2922    case INDEX_op_shls_vec:
2923    case INDEX_op_shrs_vec:
2924    case INDEX_op_sars_vec:
2925    case INDEX_op_rotls_vec:
2926        return C_O1_I2(v, v, r);
2927    case INDEX_op_cmp_vec:
2928        return C_O1_I2(v, v, vL);
2929    case INDEX_op_cmpsel_vec:
2930        return C_O1_I4(v, v, vL, vK, vK);
2931    default:
2932        return C_NotImplemented;
2933    }
2934}
2935
2936static const int tcg_target_callee_save_regs[] = {
2937    TCG_REG_S0,       /* used for the global env (TCG_AREG0) */
2938    TCG_REG_S1,
2939    TCG_REG_S2,
2940    TCG_REG_S3,
2941    TCG_REG_S4,
2942    TCG_REG_S5,
2943    TCG_REG_S6,
2944    TCG_REG_S7,
2945    TCG_REG_S8,
2946    TCG_REG_S9,
2947    TCG_REG_S10,
2948    TCG_REG_S11,
2949    TCG_REG_RA,       /* should be last for ABI compliance */
2950};
2951
2952/* Stack frame parameters.  */
2953#define REG_SIZE   (TCG_TARGET_REG_BITS / 8)
2954#define SAVE_SIZE  ((int)ARRAY_SIZE(tcg_target_callee_save_regs) * REG_SIZE)
2955#define TEMP_SIZE  (CPU_TEMP_BUF_NLONGS * (int)sizeof(long))
2956#define FRAME_SIZE ((TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE + SAVE_SIZE \
2957                     + TCG_TARGET_STACK_ALIGN - 1) \
2958                    & -TCG_TARGET_STACK_ALIGN)
2959#define SAVE_OFS   (TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE)
2960
2961/* We're expecting to be able to use an immediate for frame allocation.  */
2962QEMU_BUILD_BUG_ON(FRAME_SIZE > 0x7ff);
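/*
 * 0x7ff is the largest positive value of ADDI's 12-bit signed immediate,
 * so the prologue and epilogue below can adjust SP with a single ADDI
 * each.  (With the usual TCG_STATIC_CALL_ARGS_SIZE and
 * CPU_TEMP_BUF_NLONGS values the frame is roughly 1.25 KiB.)
 */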
2963
2964/* Generate global QEMU prologue and epilogue code */
2965static void tcg_target_qemu_prologue(TCGContext *s)
2966{
2967    int i;
2968
2969    tcg_set_frame(s, TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE, TEMP_SIZE);
2970
2971    /* TB prologue */
2972    tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_SP, TCG_REG_SP, -FRAME_SIZE);
2973    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
2974        tcg_out_st(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
2975                   TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
2976    }
2977
2978    if (!tcg_use_softmmu && guest_base) {
2979        tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base);
2980        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
2981    }
2982
2983    /* Call generated code */
2984    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
2985    tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, tcg_target_call_iarg_regs[1], 0);
2986
2987    /* Return path for goto_ptr. Set return value to 0 */
2988    tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
2989    tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_A0, TCG_REG_ZERO);
2990
2991    /* TB epilogue */
2992    tb_ret_addr = tcg_splitwx_to_rx(s->code_ptr);
2993    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
2994        tcg_out_ld(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
2995                   TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
2996    }
2997
2998    tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_SP, TCG_REG_SP, FRAME_SIZE);
2999    tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, TCG_REG_RA, 0);
3000}
3001
3002static void tcg_out_tb_start(TCGContext *s)
3003{
3004    init_setting_vtype(s);
3005}
3006
3007static bool vtype_check(unsigned vtype)
3008{
3009    unsigned long tmp;
3010
3011    /* vsetvl tmp, zero, vtype */
3012    asm(".insn r 0x57, 7, 0x40, %0, zero, %1" : "=r"(tmp) : "r"(vtype));
3013    return tmp != 0;
3014}
3015
3016static void probe_frac_lmul_1(TCGType type, MemOp vsew)
3017{
3018    VsetCache *p = &riscv_vset_cache[type - TCG_TYPE_V64][vsew];
3019    unsigned avl = tcg_type_size(type) >> vsew;
3020    int lmul = type - riscv_lg2_vlenb;
3021    unsigned vtype = encode_vtype(true, true, vsew, lmul & 7);
3022    bool lmul_eq_avl = true;
3023
3024    /* Guaranteed by Zve64x. */
3025    assert(lmul < 3);
3026
3027    /*
3028     * For LMUL < -3, the host vector size is so large that TYPE
3029     * is smaller than the minimum 1/8 fraction.
3030     *
3031     * For other fractional LMUL settings, implementations must
3032     * support SEW settings between SEW_MIN and LMUL * ELEN, inclusive.
3033     * So if ELEN = 64, LMUL = 1/2, then SEW will support e8, e16, e32,
3034     * but e64 may not be supported. In other words, the hardware only
3035     * guarantees SEW_MIN <= SEW <= LMUL * ELEN.  Check.
3036     */
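    /*
     * E.g. with VLEN = 128 (riscv_lg2_vlenb = 4), TCG_TYPE_V64 gives
     * lmul = -1, i.e. LMUL = 1/2.  On a Zve64x implementation ELEN is
     * 64, so LMUL * ELEN = 32 and an e64 SEW is not guaranteed; the
     * vtype_check() probe below detects this and falls back to LMUL = 1.
     */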
3037    if (lmul < 0 && (lmul < -3 || !vtype_check(vtype))) {
3038        vtype = encode_vtype(true, true, vsew, VLMUL_M1);
3039        lmul_eq_avl = false;
3040    }
3041
3042    if (avl < 32) {
3043        p->vset_insn = encode_vseti(OPC_VSETIVLI, TCG_REG_ZERO, avl, vtype);
3044    } else if (lmul_eq_avl) {
3045        /* rd != 0 and rs1 == 0 uses vlmax */
3046        p->vset_insn = encode_vset(OPC_VSETVLI, TCG_REG_TMP0, TCG_REG_ZERO, vtype);
3047    } else {
3048        p->movi_insn = encode_i(OPC_ADDI, TCG_REG_TMP0, TCG_REG_ZERO, avl);
3049        p->vset_insn = encode_vset(OPC_VSETVLI, TCG_REG_ZERO, TCG_REG_TMP0, vtype);
3050    }
3051}
3052
3053static void probe_frac_lmul(void)
3054{
3055    /* Match riscv_lg2_vlenb to TCG_TYPE_V64. */
3056    QEMU_BUILD_BUG_ON(TCG_TYPE_V64 != 3);
3057
3058    for (TCGType t = TCG_TYPE_V64; t <= TCG_TYPE_V256; t++) {
3059        for (MemOp e = MO_8; e <= MO_64; e++) {
3060            probe_frac_lmul_1(t, e);
3061        }
3062    }
3063}
3064
3065static void tcg_target_init(TCGContext *s)
3066{
3067    tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffff;
3068    tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffff;
3069
3070    tcg_target_call_clobber_regs = -1;
3071    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S0);
3072    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S1);
3073    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S2);
3074    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S3);
3075    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S4);
3076    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S5);
3077    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S6);
3078    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S7);
3079    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S8);
3080    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S9);
3081    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S10);
3082    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S11);
3083
3084    s->reserved_regs = 0;
3085    tcg_regset_set_reg(s->reserved_regs, TCG_REG_ZERO);
3086    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP0);
3087    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1);
3088    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP2);
3089    tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP);
3090    tcg_regset_set_reg(s->reserved_regs, TCG_REG_GP);
3091    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TP);
3092
3093    if (cpuinfo & CPUINFO_ZVE64X) {
3094        switch (riscv_lg2_vlenb) {
3095        case TCG_TYPE_V64:
3096            tcg_target_available_regs[TCG_TYPE_V64] = ALL_VECTOR_REGS;
3097            tcg_target_available_regs[TCG_TYPE_V128] = ALL_DVECTOR_REG_GROUPS;
3098            tcg_target_available_regs[TCG_TYPE_V256] = ALL_QVECTOR_REG_GROUPS;
3099            s->reserved_regs |= (~ALL_QVECTOR_REG_GROUPS & ALL_VECTOR_REGS);
3100            break;
3101        case TCG_TYPE_V128:
3102            tcg_target_available_regs[TCG_TYPE_V64] = ALL_VECTOR_REGS;
3103            tcg_target_available_regs[TCG_TYPE_V128] = ALL_VECTOR_REGS;
3104            tcg_target_available_regs[TCG_TYPE_V256] = ALL_DVECTOR_REG_GROUPS;
3105            s->reserved_regs |= (~ALL_DVECTOR_REG_GROUPS & ALL_VECTOR_REGS);
3106            break;
3107        default:
3108            /* Guaranteed by Zve64x. */
3109            tcg_debug_assert(riscv_lg2_vlenb >= TCG_TYPE_V256);
3110            tcg_target_available_regs[TCG_TYPE_V64] = ALL_VECTOR_REGS;
3111            tcg_target_available_regs[TCG_TYPE_V128] = ALL_VECTOR_REGS;
3112            tcg_target_available_regs[TCG_TYPE_V256] = ALL_VECTOR_REGS;
3113            break;
3114        }
3115        tcg_regset_set_reg(s->reserved_regs, TCG_REG_V0);
3116        probe_frac_lmul();
3117    }
3118}
3119
3120typedef struct {
3121    DebugFrameHeader h;
3122    uint8_t fde_def_cfa[4];
3123    uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2];
3124} DebugFrame;
3125
3126#define ELF_HOST_MACHINE EM_RISCV
3127
3128static const DebugFrame debug_frame = {
3129    .h.cie.len = sizeof(DebugFrameCIE) - 4, /* length after .len member */
3130    .h.cie.id = -1,
3131    .h.cie.version = 1,
3132    .h.cie.code_align = 1,
3133    .h.cie.data_align = -(TCG_TARGET_REG_BITS / 8) & 0x7f, /* sleb128 */
3134    .h.cie.return_column = TCG_REG_RA,
3135
3136    /* Total FDE size does not include the "len" member.  */
3137    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),
3138
3139    .fde_def_cfa = {
3140        12, TCG_REG_SP,                 /* DW_CFA_def_cfa sp, ... */
3141        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
3142        (FRAME_SIZE >> 7)
3143    },
3144    .fde_reg_ofs = {
3145        0x80 + 9,  12,                  /* DW_CFA_offset, s1,  -96 */
3146        0x80 + 18, 11,                  /* DW_CFA_offset, s2,  -88 */
3147        0x80 + 19, 10,                  /* DW_CFA_offset, s3,  -80 */
3148        0x80 + 20, 9,                   /* DW_CFA_offset, s4,  -72 */
3149        0x80 + 21, 8,                   /* DW_CFA_offset, s5,  -64 */
3150        0x80 + 22, 7,                   /* DW_CFA_offset, s6,  -56 */
3151        0x80 + 23, 6,                   /* DW_CFA_offset, s7,  -48 */
3152        0x80 + 24, 5,                   /* DW_CFA_offset, s8,  -40 */
3153        0x80 + 25, 4,                   /* DW_CFA_offset, s9,  -32 */
3154        0x80 + 26, 3,                   /* DW_CFA_offset, s10, -24 */
3155        0x80 + 27, 2,                   /* DW_CFA_offset, s11, -16 */
3156        0x80 + 1 , 1,                   /* DW_CFA_offset, ra,  -8 */
3157    }
3158};
3159
3160void tcg_register_jit(const void *buf, size_t buf_size)
3161{
3162    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
3163}
3164