/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2018 SiFive, Inc
 * Copyright (c) 2008-2009 Arnaud Patard <arnaud.patard@rtp-net.org>
 * Copyright (c) 2009 Aurelien Jarno <aurelien@aurel32.net>
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Based on i386/tcg-target.c and mips/tcg-target.c
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "../tcg-ldst.c.inc"
#include "../tcg-pool.c.inc"

#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "zero",
    "ra",
    "sp",
    "gp",
    "tp",
    "t0",
    "t1",
    "t2",
    "s0",
    "s1",
    "a0",
    "a1",
    "a2",
    "a3",
    "a4",
    "a5",
    "a6",
    "a7",
    "s2",
    "s3",
    "s4",
    "s5",
    "s6",
    "s7",
    "s8",
    "s9",
    "s10",
    "s11",
    "t3",
    "t4",
    "t5",
    "t6"
};
#endif

static const int tcg_target_reg_alloc_order[] = {
    /* Call saved registers */
    /* TCG_REG_S0 reserved for TCG_AREG0 */
    TCG_REG_S1,
    TCG_REG_S2,
    TCG_REG_S3,
    TCG_REG_S4,
    TCG_REG_S5,
    TCG_REG_S6,
    TCG_REG_S7,
    TCG_REG_S8,
    TCG_REG_S9,
    TCG_REG_S10,
    TCG_REG_S11,

    /* Call clobbered registers */
    TCG_REG_T0,
    TCG_REG_T1,
    TCG_REG_T2,
    TCG_REG_T3,
    TCG_REG_T4,
    TCG_REG_T5,
    TCG_REG_T6,

    /* Argument registers */
    TCG_REG_A0,
    TCG_REG_A1,
    TCG_REG_A2,
    TCG_REG_A3,
    TCG_REG_A4,
    TCG_REG_A5,
    TCG_REG_A6,
    TCG_REG_A7,
};

static const int tcg_target_call_iarg_regs[] = {
    TCG_REG_A0,
    TCG_REG_A1,
    TCG_REG_A2,
    TCG_REG_A3,
    TCG_REG_A4,
    TCG_REG_A5,
    TCG_REG_A6,
    TCG_REG_A7,
};

static const int tcg_target_call_oarg_regs[] = {
    TCG_REG_A0,
    TCG_REG_A1,
};

#define TCG_CT_CONST_ZERO  0x100
#define TCG_CT_CONST_S12   0x200
#define TCG_CT_CONST_N12   0x400
#define TCG_CT_CONST_M12   0x800

#define ALL_GENERAL_REGS      MAKE_64BIT_MASK(0, 32)
/*
 * For softmmu, we need to avoid conflicts with the first 5
 * argument registers to call the helper.  Some of these are
 * also used for the tlb lookup.
 */
#ifdef CONFIG_SOFTMMU
#define SOFTMMU_RESERVE_REGS  MAKE_64BIT_MASK(TCG_REG_A0, 5)
#else
#define SOFTMMU_RESERVE_REGS  0
#endif


static inline tcg_target_long sextreg(tcg_target_long val, int pos, int len)
{
    if (TCG_TARGET_REG_BITS == 32) {
        return sextract32(val, pos, len);
    } else {
        return sextract64(val, pos, len);
    }
}
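
/*
 * For illustration: sextreg extracts a signed bitfield, so
 * sextreg(0xfff, 0, 12) == -1 while sextreg(0x7ff, 0, 12) == 0x7ff.
 * The code below uses it to ask whether a value survives a round trip
 * through an N-bit sign-extended field.
 */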

/* test if a constant matches the constraint */
static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
{
    if (ct & TCG_CT_CONST) {
        return 1;
    }
    if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return 1;
    }
    /*
     * Sign extended from 12 bits: [-0x800, 0x7ff].
     * Used for most arithmetic, as this is the isa field.
     */
    if ((ct & TCG_CT_CONST_S12) && val >= -0x800 && val <= 0x7ff) {
        return 1;
    }
    /*
     * Sign extended from 12 bits, negated: [-0x7ff, 0x800].
     * Used for subtraction, where a constant must be handled by ADDI.
     */
    if ((ct & TCG_CT_CONST_N12) && val >= -0x7ff && val <= 0x800) {
        return 1;
    }
    /*
     * Sign extended from 12 bits, +/- matching: [-0x7ff, 0x7ff].
     * Used by addsub2, which may need the negative operation,
     * and requires the modified constant to be representable.
     */
    if ((ct & TCG_CT_CONST_M12) && val >= -0x7ff && val <= 0x7ff) {
        return 1;
    }
    return 0;
}
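
/*
 * Worked example: there is no SUBI instruction, so "sub rd, rs1, C" is
 * emitted as "addi rd, rs1, -C".  N12 therefore accepts exactly the
 * constants whose negation fits the 12-bit immediate, i.e. [-0x7ff, 0x800],
 * and M12 those constants representable with either sign.
 */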

/*
 * RISC-V Base ISA opcodes (IM)
 */

typedef enum {
    OPC_ADD = 0x33,
    OPC_ADDI = 0x13,
    OPC_AND = 0x7033,
    OPC_ANDI = 0x7013,
    OPC_AUIPC = 0x17,
    OPC_BEQ = 0x63,
    OPC_BGE = 0x5063,
    OPC_BGEU = 0x7063,
    OPC_BLT = 0x4063,
    OPC_BLTU = 0x6063,
    OPC_BNE = 0x1063,
    OPC_DIV = 0x2004033,
    OPC_DIVU = 0x2005033,
    OPC_JAL = 0x6f,
    OPC_JALR = 0x67,
    OPC_LB = 0x3,
    OPC_LBU = 0x4003,
    OPC_LD = 0x3003,
    OPC_LH = 0x1003,
    OPC_LHU = 0x5003,
    OPC_LUI = 0x37,
    OPC_LW = 0x2003,
    OPC_LWU = 0x6003,
    OPC_MUL = 0x2000033,
    OPC_MULH = 0x2001033,
    OPC_MULHSU = 0x2002033,
    OPC_MULHU = 0x2003033,
    OPC_OR = 0x6033,
    OPC_ORI = 0x6013,
    OPC_REM = 0x2006033,
    OPC_REMU = 0x2007033,
    OPC_SB = 0x23,
    OPC_SD = 0x3023,
    OPC_SH = 0x1023,
    OPC_SLL = 0x1033,
    OPC_SLLI = 0x1013,
    OPC_SLT = 0x2033,
    OPC_SLTI = 0x2013,
    OPC_SLTIU = 0x3013,
    OPC_SLTU = 0x3033,
    OPC_SRA = 0x40005033,
    OPC_SRAI = 0x40005013,
    OPC_SRL = 0x5033,
    OPC_SRLI = 0x5013,
    OPC_SUB = 0x40000033,
    OPC_SW = 0x2023,
    OPC_XOR = 0x4033,
    OPC_XORI = 0x4013,

#if TCG_TARGET_REG_BITS == 64
    OPC_ADDIW = 0x1b,
    OPC_ADDW = 0x3b,
    OPC_DIVUW = 0x200503b,
    OPC_DIVW = 0x200403b,
    OPC_MULW = 0x200003b,
    OPC_REMUW = 0x200703b,
    OPC_REMW = 0x200603b,
    OPC_SLLIW = 0x101b,
    OPC_SLLW = 0x103b,
    OPC_SRAIW = 0x4000501b,
    OPC_SRAW = 0x4000503b,
    OPC_SRLIW = 0x501b,
    OPC_SRLW = 0x503b,
    OPC_SUBW = 0x4000003b,
#else
    /* Simplify code throughout by defining aliases for RV32.  */
    OPC_ADDIW = OPC_ADDI,
    OPC_ADDW = OPC_ADD,
    OPC_DIVUW = OPC_DIVU,
    OPC_DIVW = OPC_DIV,
    OPC_MULW = OPC_MUL,
    OPC_REMUW = OPC_REMU,
    OPC_REMW = OPC_REM,
    OPC_SLLIW = OPC_SLLI,
    OPC_SLLW = OPC_SLL,
    OPC_SRAIW = OPC_SRAI,
    OPC_SRAW = OPC_SRA,
    OPC_SRLIW = OPC_SRLI,
    OPC_SRLW = OPC_SRL,
    OPC_SUBW = OPC_SUB,
#endif

    OPC_FENCE = 0x0000000f,
    OPC_NOP   = OPC_ADDI,   /* nop = addi r0,r0,0 */
} RISCVInsn;
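
/*
 * Note: each enumerator above packs the fixed opcode, funct3 and funct7
 * fields of an instruction, leaving the register and immediate fields
 * zero.  For example, OPC_SRA == 0x40005033 decomposes as funct7=0x20
 * (bits 31:25), funct3=0x5 (bits 14:12) and opcode=0x33 (bits 6:0).
 */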

/*
 * RISC-V immediate and instruction encoders (excludes 16-bit RVC)
 */

/* Type-R */

static int32_t encode_r(RISCVInsn opc, TCGReg rd, TCGReg rs1, TCGReg rs2)
{
    return opc | (rd & 0x1f) << 7 | (rs1 & 0x1f) << 15 | (rs2 & 0x1f) << 20;
}

/* Type-I */

static int32_t encode_imm12(uint32_t imm)
{
    return (imm & 0xfff) << 20;
}

static int32_t encode_i(RISCVInsn opc, TCGReg rd, TCGReg rs1, uint32_t imm)
{
    return opc | (rd & 0x1f) << 7 | (rs1 & 0x1f) << 15 | encode_imm12(imm);
}

/* Type-S */

static int32_t encode_simm12(uint32_t imm)
{
    int32_t ret = 0;

    ret |= (imm & 0xFE0) << 20;
    ret |= (imm & 0x1F) << 7;

    return ret;
}

static int32_t encode_s(RISCVInsn opc, TCGReg rs1, TCGReg rs2, uint32_t imm)
{
    return opc | (rs1 & 0x1f) << 15 | (rs2 & 0x1f) << 20 | encode_simm12(imm);
}

/* Type-SB */

static int32_t encode_sbimm12(uint32_t imm)
{
    int32_t ret = 0;

    ret |= (imm & 0x1000) << 19;
    ret |= (imm & 0x7e0) << 20;
    ret |= (imm & 0x1e) << 7;
    ret |= (imm & 0x800) >> 4;

    return ret;
}

static int32_t encode_sb(RISCVInsn opc, TCGReg rs1, TCGReg rs2, uint32_t imm)
{
    return opc | (rs1 & 0x1f) << 15 | (rs2 & 0x1f) << 20 | encode_sbimm12(imm);
}
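
/*
 * For illustration: the SB (branch) immediate is scattered as
 * imm[12|10:5] into bits 31:25 and imm[4:1|11] into bits 11:7.  E.g.
 * encode_sbimm12(8) places imm[4:1] = 0b0100 at bits 11:8, yielding 0x400.
 */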

/* Type-U */

static int32_t encode_uimm20(uint32_t imm)
{
    return imm & 0xfffff000;
}

static int32_t encode_u(RISCVInsn opc, TCGReg rd, uint32_t imm)
{
    return opc | (rd & 0x1f) << 7 | encode_uimm20(imm);
}

/* Type-UJ */

static int32_t encode_ujimm20(uint32_t imm)
{
    int32_t ret = 0;

    ret |= (imm & 0x0007fe) << (21 - 1);
    ret |= (imm & 0x000800) << (20 - 11);
    ret |= (imm & 0x0ff000) << (12 - 12);
    ret |= (imm & 0x100000) << (31 - 20);

    return ret;
}

static int32_t encode_uj(RISCVInsn opc, TCGReg rd, uint32_t imm)
{
    return opc | (rd & 0x1f) << 7 | encode_ujimm20(imm);
}
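
/*
 * For illustration: the UJ (jump) immediate is laid out as
 * imm[20|10:1|11|19:12] in bits 31:12, so e.g. encode_ujimm20(0x1000)
 * places imm[19:12] = 1 at bits 19:12, yielding 0x1000.
 */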

/*
 * RISC-V instruction emitters
 */

static void tcg_out_opc_reg(TCGContext *s, RISCVInsn opc,
                            TCGReg rd, TCGReg rs1, TCGReg rs2)
{
    tcg_out32(s, encode_r(opc, rd, rs1, rs2));
}

static void tcg_out_opc_imm(TCGContext *s, RISCVInsn opc,
                            TCGReg rd, TCGReg rs1, TCGArg imm)
{
    tcg_out32(s, encode_i(opc, rd, rs1, imm));
}

static void tcg_out_opc_store(TCGContext *s, RISCVInsn opc,
                              TCGReg rs1, TCGReg rs2, uint32_t imm)
{
    tcg_out32(s, encode_s(opc, rs1, rs2, imm));
}

static void tcg_out_opc_branch(TCGContext *s, RISCVInsn opc,
                               TCGReg rs1, TCGReg rs2, uint32_t imm)
{
    tcg_out32(s, encode_sb(opc, rs1, rs2, imm));
}

static void tcg_out_opc_upper(TCGContext *s, RISCVInsn opc,
                              TCGReg rd, uint32_t imm)
{
    tcg_out32(s, encode_u(opc, rd, imm));
}

static void tcg_out_opc_jump(TCGContext *s, RISCVInsn opc,
                             TCGReg rd, uint32_t imm)
{
    tcg_out32(s, encode_uj(opc, rd, imm));
}

static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
    int i;
    for (i = 0; i < count; ++i) {
        p[i] = OPC_NOP;
    }
}

/*
 * Relocations
 */

static bool reloc_sbimm12(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    intptr_t offset = (intptr_t)target - (intptr_t)src_rx;

    tcg_debug_assert((offset & 1) == 0);
    if (offset == sextreg(offset, 0, 12)) {
        *src_rw |= encode_sbimm12(offset);
        return true;
    }

    return false;
}

static bool reloc_jimm20(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    intptr_t offset = (intptr_t)target - (intptr_t)src_rx;

    tcg_debug_assert((offset & 1) == 0);
    if (offset == sextreg(offset, 0, 20)) {
        *src_rw |= encode_ujimm20(offset);
        return true;
    }

    return false;
}

static bool reloc_call(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    intptr_t offset = (intptr_t)target - (intptr_t)src_rx;
    int32_t lo = sextreg(offset, 0, 12);
    int32_t hi = offset - lo;

    if (offset == hi + lo) {
        src_rw[0] |= encode_uimm20(hi);
        src_rw[1] |= encode_imm12(lo);
        return true;
    }

    return false;
}
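
/*
 * Worked example for the hi/lo split above: for offset 0x12345800,
 * lo = sextreg(0x800, 0, 12) = -0x800 and hi = offset - lo = 0x12346000;
 * AUIPC adds hi to pc and the following ADDI's sign-extended immediate
 * subtracts 0x800 again, reconstructing the exact offset.
 */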

static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    tcg_debug_assert(addend == 0);
    switch (type) {
    case R_RISCV_BRANCH:
        return reloc_sbimm12(code_ptr, (tcg_insn_unit *)value);
    case R_RISCV_JAL:
        return reloc_jimm20(code_ptr, (tcg_insn_unit *)value);
    case R_RISCV_CALL:
        return reloc_call(code_ptr, (tcg_insn_unit *)value);
    default:
        g_assert_not_reached();
    }
}

/*
 * TCG intrinsics
 */

static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    if (ret == arg) {
        return true;
    }
    switch (type) {
    case TCG_TYPE_I32:
    case TCG_TYPE_I64:
        tcg_out_opc_imm(s, OPC_ADDI, ret, arg, 0);
        break;
    default:
        g_assert_not_reached();
    }
    return true;
}

static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd,
                         tcg_target_long val)
{
    tcg_target_long lo, hi, tmp;
    int shift, ret;

    if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
        val = (int32_t)val;
    }

    lo = sextreg(val, 0, 12);
    if (val == lo) {
        tcg_out_opc_imm(s, OPC_ADDI, rd, TCG_REG_ZERO, lo);
        return;
    }

    hi = val - lo;
    if (TCG_TARGET_REG_BITS == 32 || val == (int32_t)val) {
        tcg_out_opc_upper(s, OPC_LUI, rd, hi);
        if (lo != 0) {
            tcg_out_opc_imm(s, OPC_ADDIW, rd, rd, lo);
        }
        return;
    }

    /* We can only be here if TCG_TARGET_REG_BITS != 32 */
    tmp = tcg_pcrel_diff(s, (void *)val);
    if (tmp == (int32_t)tmp) {
        tcg_out_opc_upper(s, OPC_AUIPC, rd, 0);
        tcg_out_opc_imm(s, OPC_ADDI, rd, rd, 0);
        ret = reloc_call(s->code_ptr - 2, (const tcg_insn_unit *)val);
        tcg_debug_assert(ret == true);
        return;
    }

    /* Look for a single 20-bit section.  */
    shift = ctz64(val);
    tmp = val >> shift;
    if (tmp == sextreg(tmp, 0, 20)) {
        tcg_out_opc_upper(s, OPC_LUI, rd, tmp << 12);
        if (shift > 12) {
            tcg_out_opc_imm(s, OPC_SLLI, rd, rd, shift - 12);
        } else {
            tcg_out_opc_imm(s, OPC_SRAI, rd, rd, 12 - shift);
        }
        return;
    }

    /* Look for a few high zero bits, with lots of bits set in the middle.  */
    shift = clz64(val);
    tmp = val << shift;
    if (tmp == sextreg(tmp, 12, 20) << 12) {
        tcg_out_opc_upper(s, OPC_LUI, rd, tmp);
        tcg_out_opc_imm(s, OPC_SRLI, rd, rd, shift);
        return;
    } else if (tmp == sextreg(tmp, 0, 12)) {
        tcg_out_opc_imm(s, OPC_ADDI, rd, TCG_REG_ZERO, tmp);
        tcg_out_opc_imm(s, OPC_SRLI, rd, rd, shift);
        return;
    }

    /* Drop into the constant pool.  */
    new_pool_label(s, val, R_RISCV_CALL, s->code_ptr, 0);
    tcg_out_opc_upper(s, OPC_AUIPC, rd, 0);
    tcg_out_opc_imm(s, OPC_LD, rd, rd, 0);
}
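
/*
 * Worked example for the "single 20-bit section" case above: for
 * val = 0x123450000, shift = ctz64(val) = 16 and tmp = 0x12345 fits in
 * 20 signed bits, so we emit "lui rd, 0x12345000" followed by
 * "slli rd, rd, 4".
 */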

static void tcg_out_ext8u(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_imm(s, OPC_ANDI, ret, arg, 0xff);
}

static void tcg_out_ext16u(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_imm(s, OPC_SLLIW, ret, arg, 16);
    tcg_out_opc_imm(s, OPC_SRLIW, ret, ret, 16);
}

static void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_imm(s, OPC_SLLI, ret, arg, 32);
    tcg_out_opc_imm(s, OPC_SRLI, ret, ret, 32);
}

static void tcg_out_ext8s(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_imm(s, OPC_SLLIW, ret, arg, 24);
    tcg_out_opc_imm(s, OPC_SRAIW, ret, ret, 24);
}

static void tcg_out_ext16s(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_imm(s, OPC_SLLIW, ret, arg, 16);
    tcg_out_opc_imm(s, OPC_SRAIW, ret, ret, 16);
}

static void tcg_out_ext32s(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_imm(s, OPC_ADDIW, ret, arg, 0);
}

static void tcg_out_ldst(TCGContext *s, RISCVInsn opc, TCGReg data,
                         TCGReg addr, intptr_t offset)
{
    intptr_t imm12 = sextreg(offset, 0, 12);

    if (offset != imm12) {
        intptr_t diff = tcg_pcrel_diff(s, (void *)offset);

        if (addr == TCG_REG_ZERO && diff == (int32_t)diff) {
            imm12 = sextreg(diff, 0, 12);
            tcg_out_opc_upper(s, OPC_AUIPC, TCG_REG_TMP2, diff - imm12);
        } else {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP2, offset - imm12);
            if (addr != TCG_REG_ZERO) {
                tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP2, TCG_REG_TMP2, addr);
            }
        }
        addr = TCG_REG_TMP2;
    }

    switch (opc) {
    case OPC_SB:
    case OPC_SH:
    case OPC_SW:
    case OPC_SD:
        tcg_out_opc_store(s, opc, addr, data, imm12);
        break;
    case OPC_LB:
    case OPC_LBU:
    case OPC_LH:
    case OPC_LHU:
    case OPC_LW:
    case OPC_LWU:
    case OPC_LD:
        tcg_out_opc_imm(s, opc, data, addr, imm12);
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg arg1, intptr_t arg2)
{
    bool is32bit = (TCG_TARGET_REG_BITS == 32 || type == TCG_TYPE_I32);
    tcg_out_ldst(s, is32bit ? OPC_LW : OPC_LD, arg, arg1, arg2);
}

static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg arg1, intptr_t arg2)
{
    bool is32bit = (TCG_TARGET_REG_BITS == 32 || type == TCG_TYPE_I32);
    tcg_out_ldst(s, is32bit ? OPC_SW : OPC_SD, arg, arg1, arg2);
}

static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                        TCGReg base, intptr_t ofs)
{
    if (val == 0) {
        tcg_out_st(s, type, TCG_REG_ZERO, base, ofs);
        return true;
    }
    return false;
}

static void tcg_out_addsub2(TCGContext *s,
                            TCGReg rl, TCGReg rh,
                            TCGReg al, TCGReg ah,
                            TCGArg bl, TCGArg bh,
                            bool cbl, bool cbh, bool is_sub, bool is32bit)
{
    const RISCVInsn opc_add = is32bit ? OPC_ADDW : OPC_ADD;
    const RISCVInsn opc_addi = is32bit ? OPC_ADDIW : OPC_ADDI;
    const RISCVInsn opc_sub = is32bit ? OPC_SUBW : OPC_SUB;
    TCGReg th = TCG_REG_TMP1;

    /* If we have a negative constant such that negating it would
       make the high part zero, we can (usually) eliminate one insn.  */
    if (cbl && cbh && bh == -1 && bl != 0) {
        bl = -bl;
        bh = 0;
        is_sub = !is_sub;
    }

    /* By operating on the high part first, we get to use the final
       carry operation to move back from the temporary.  */
    if (!cbh) {
        tcg_out_opc_reg(s, (is_sub ? opc_sub : opc_add), th, ah, bh);
    } else if (bh != 0 || ah == rl) {
        tcg_out_opc_imm(s, opc_addi, th, ah, (is_sub ? -bh : bh));
    } else {
        th = ah;
    }

    /* Note that tcg optimization should eliminate the bl == 0 case.  */
    if (is_sub) {
        if (cbl) {
            tcg_out_opc_imm(s, OPC_SLTIU, TCG_REG_TMP0, al, bl);
            tcg_out_opc_imm(s, opc_addi, rl, al, -bl);
        } else {
            tcg_out_opc_reg(s, OPC_SLTU, TCG_REG_TMP0, al, bl);
            tcg_out_opc_reg(s, opc_sub, rl, al, bl);
        }
        tcg_out_opc_reg(s, opc_sub, rh, th, TCG_REG_TMP0);
    } else {
        if (cbl) {
            tcg_out_opc_imm(s, opc_addi, rl, al, bl);
            tcg_out_opc_imm(s, OPC_SLTIU, TCG_REG_TMP0, rl, bl);
        } else if (al == bl) {
            /*
             * If the input regs overlap, this is a simple doubling
             * and carry-out is the input msb.  This special case is
             * required when the output reg overlaps the input,
             * but we might as well use it always.
             */
            tcg_out_opc_imm(s, OPC_SLTI, TCG_REG_TMP0, al, 0);
            tcg_out_opc_reg(s, opc_add, rl, al, al);
        } else {
            tcg_out_opc_reg(s, opc_add, rl, al, bl);
            tcg_out_opc_reg(s, OPC_SLTU, TCG_REG_TMP0,
                            rl, (rl == bl ? al : bl));
        }
        tcg_out_opc_reg(s, opc_add, rh, th, TCG_REG_TMP0);
    }
}
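
/*
 * Note on the carry computation above: for the register add case,
 * rl = al + bl wraps iff rl < bl (unsigned), which is exactly what
 * "sltu tmp0, rl, bl" tests; for subtraction the borrow is
 * "sltu tmp0, al, bl", evaluated before al is overwritten.
 */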

static const struct {
    RISCVInsn op;
    bool swap;
} tcg_brcond_to_riscv[] = {
    [TCG_COND_EQ] =  { OPC_BEQ,  false },
    [TCG_COND_NE] =  { OPC_BNE,  false },
    [TCG_COND_LT] =  { OPC_BLT,  false },
    [TCG_COND_GE] =  { OPC_BGE,  false },
    [TCG_COND_LE] =  { OPC_BGE,  true  },
    [TCG_COND_GT] =  { OPC_BLT,  true  },
    [TCG_COND_LTU] = { OPC_BLTU, false },
    [TCG_COND_GEU] = { OPC_BGEU, false },
    [TCG_COND_LEU] = { OPC_BGEU, true  },
    [TCG_COND_GTU] = { OPC_BLTU, true  }
};

static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1,
                           TCGReg arg2, TCGLabel *l)
{
    RISCVInsn op = tcg_brcond_to_riscv[cond].op;

    tcg_debug_assert(op != 0);

    if (tcg_brcond_to_riscv[cond].swap) {
        TCGReg t = arg1;
        arg1 = arg2;
        arg2 = t;
    }

    tcg_out_reloc(s, s->code_ptr, R_RISCV_BRANCH, l, 0);
    tcg_out_opc_branch(s, op, arg1, arg2, 0);
}

static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret,
                            TCGReg arg1, TCGReg arg2)
{
    switch (cond) {
    case TCG_COND_EQ:
        tcg_out_opc_reg(s, OPC_SUB, ret, arg1, arg2);
        tcg_out_opc_imm(s, OPC_SLTIU, ret, ret, 1);
        break;
    case TCG_COND_NE:
        tcg_out_opc_reg(s, OPC_SUB, ret, arg1, arg2);
        tcg_out_opc_reg(s, OPC_SLTU, ret, TCG_REG_ZERO, ret);
        break;
    case TCG_COND_LT:
        tcg_out_opc_reg(s, OPC_SLT, ret, arg1, arg2);
        break;
    case TCG_COND_GE:
        tcg_out_opc_reg(s, OPC_SLT, ret, arg1, arg2);
        tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1);
        break;
    case TCG_COND_LE:
        tcg_out_opc_reg(s, OPC_SLT, ret, arg2, arg1);
        tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1);
        break;
    case TCG_COND_GT:
        tcg_out_opc_reg(s, OPC_SLT, ret, arg2, arg1);
        break;
    case TCG_COND_LTU:
        tcg_out_opc_reg(s, OPC_SLTU, ret, arg1, arg2);
        break;
    case TCG_COND_GEU:
        tcg_out_opc_reg(s, OPC_SLTU, ret, arg1, arg2);
        tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1);
        break;
    case TCG_COND_LEU:
        tcg_out_opc_reg(s, OPC_SLTU, ret, arg2, arg1);
        tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1);
        break;
    case TCG_COND_GTU:
        tcg_out_opc_reg(s, OPC_SLTU, ret, arg2, arg1);
        break;
    default:
        g_assert_not_reached();
        break;
    }
}

static void tcg_out_brcond2(TCGContext *s, TCGCond cond, TCGReg al, TCGReg ah,
                            TCGReg bl, TCGReg bh, TCGLabel *l)
{
    /* todo */
    g_assert_not_reached();
}

static void tcg_out_setcond2(TCGContext *s, TCGCond cond, TCGReg ret,
                             TCGReg al, TCGReg ah, TCGReg bl, TCGReg bh)
{
    /* todo */
    g_assert_not_reached();
}

static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *arg, bool tail)
{
    TCGReg link = tail ? TCG_REG_ZERO : TCG_REG_RA;
    ptrdiff_t offset = tcg_pcrel_diff(s, arg);
    int ret;

    tcg_debug_assert((offset & 1) == 0);
    if (offset == sextreg(offset, 0, 20)) {
        /* short jump: offset fits in a signed 20-bit immediate (+-512 KiB) */
        tcg_out_opc_jump(s, OPC_JAL, link, offset);
    } else if (TCG_TARGET_REG_BITS == 32 || offset == (int32_t)offset) {
        /* long jump: offset fits in a signed 32-bit AUIPC+JALR pair */
        tcg_out_opc_upper(s, OPC_AUIPC, TCG_REG_TMP0, 0);
        tcg_out_opc_imm(s, OPC_JALR, link, TCG_REG_TMP0, 0);
        ret = reloc_call(s->code_ptr - 2, arg);
        tcg_debug_assert(ret == true);
    } else if (TCG_TARGET_REG_BITS == 64) {
        /* far jump: 64-bit */
        tcg_target_long imm = sextreg((tcg_target_long)arg, 0, 12);
        tcg_target_long base = (tcg_target_long)arg - imm;
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, base);
        tcg_out_opc_imm(s, OPC_JALR, link, TCG_REG_TMP0, imm);
    } else {
        g_assert_not_reached();
    }
}

static void tcg_out_call(TCGContext *s, const tcg_insn_unit *arg,
                         const TCGHelperInfo *info)
{
    tcg_out_call_int(s, arg, false);
}

static void tcg_out_mb(TCGContext *s, TCGArg a0)
{
    tcg_insn_unit insn = OPC_FENCE;

    if (a0 & TCG_MO_LD_LD) {
        insn |= 0x02200000;
    }
    if (a0 & TCG_MO_ST_LD) {
        insn |= 0x01200000;
    }
    if (a0 & TCG_MO_LD_ST) {
        insn |= 0x02100000;
    }
    if (a0 & TCG_MO_ST_ST) {
        insn |= 0x01100000;
    }
    tcg_out32(s, insn);
}
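
/*
 * Note on the FENCE bits above: the predecessor set lives in bits 27:24
 * and the successor set in bits 23:20, with R=2 and W=1 within each
 * nibble.  Thus TCG_MO_LD_LD ORs in 0x02200000 ("fence r,r"),
 * TCG_MO_ST_LD ORs in 0x01200000 ("fence w,r"), and TCG_MO_ST_ST ORs in
 * 0x01100000 ("fence w,w").
 */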

/*
 * Load/store and TLB
 */

#if defined(CONFIG_SOFTMMU)
/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
 *                                     MemOpIdx oi, uintptr_t ra)
 */
static void * const qemu_ld_helpers[MO_SSIZE + 1] = {
    [MO_UB] = helper_ret_ldub_mmu,
    [MO_SB] = helper_ret_ldsb_mmu,
#if HOST_BIG_ENDIAN
    [MO_UW] = helper_be_lduw_mmu,
    [MO_SW] = helper_be_ldsw_mmu,
    [MO_UL] = helper_be_ldul_mmu,
#if TCG_TARGET_REG_BITS == 64
    [MO_SL] = helper_be_ldsl_mmu,
#endif
    [MO_UQ] = helper_be_ldq_mmu,
#else
    [MO_UW] = helper_le_lduw_mmu,
    [MO_SW] = helper_le_ldsw_mmu,
    [MO_UL] = helper_le_ldul_mmu,
#if TCG_TARGET_REG_BITS == 64
    [MO_SL] = helper_le_ldsl_mmu,
#endif
    [MO_UQ] = helper_le_ldq_mmu,
#endif
};

/* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
 *                                     uintxx_t val, MemOpIdx oi,
 *                                     uintptr_t ra)
 */
static void * const qemu_st_helpers[MO_SIZE + 1] = {
    [MO_8]   = helper_ret_stb_mmu,
#if HOST_BIG_ENDIAN
    [MO_16] = helper_be_stw_mmu,
    [MO_32] = helper_be_stl_mmu,
    [MO_64] = helper_be_stq_mmu,
#else
    [MO_16] = helper_le_stw_mmu,
    [MO_32] = helper_le_stl_mmu,
    [MO_64] = helper_le_stq_mmu,
#endif
};

/* We don't support oversize guests */
QEMU_BUILD_BUG_ON(TCG_TARGET_REG_BITS < TARGET_LONG_BITS);

/* We expect to use a 12-bit negative offset from ENV.  */
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 11));

static void tcg_out_goto(TCGContext *s, const tcg_insn_unit *target)
{
    tcg_out_opc_jump(s, OPC_JAL, TCG_REG_ZERO, 0);
    bool ok = reloc_jimm20(s->code_ptr - 1, target);
    tcg_debug_assert(ok);
}

static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addrl,
                               TCGReg addrh, MemOpIdx oi,
                               tcg_insn_unit **label_ptr, bool is_load)
{
    MemOp opc = get_memop(oi);
    unsigned s_bits = opc & MO_SIZE;
    unsigned a_bits = get_alignment_bits(opc);
    tcg_target_long compare_mask;
    int mem_index = get_mmuidx(oi);
    int fast_ofs = TLB_MASK_TABLE_OFS(mem_index);
    int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask);
    int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table);
    TCGReg mask_base = TCG_AREG0, table_base = TCG_AREG0;

    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, mask_base, mask_ofs);
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, table_base, table_ofs);

    tcg_out_opc_imm(s, OPC_SRLI, TCG_REG_TMP2, addrl,
                    TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
    tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0);
    tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1);

    /* Load the tlb comparator and the addend.  */
    tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_TMP0, TCG_REG_TMP2,
               is_load ? offsetof(CPUTLBEntry, addr_read)
               : offsetof(CPUTLBEntry, addr_write));
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2,
               offsetof(CPUTLBEntry, addend));

    /* We don't support unaligned accesses. */
    if (a_bits < s_bits) {
        a_bits = s_bits;
    }
    /* Clear the non-page, non-alignment bits from the address.  */
    compare_mask = (tcg_target_long)TARGET_PAGE_MASK | ((1 << a_bits) - 1);
    if (compare_mask == sextreg(compare_mask, 0, 12)) {
        tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addrl, compare_mask);
    } else {
        tcg_out_movi(s, TCG_TYPE_TL, TCG_REG_TMP1, compare_mask);
        tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP1, TCG_REG_TMP1, addrl);
    }

    /* Compare masked address with the TLB entry. */
    label_ptr[0] = s->code_ptr;
    tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP0, TCG_REG_TMP1, 0);

    /* TLB Hit - translate address using addend.  */
    if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
        tcg_out_ext32u(s, TCG_REG_TMP0, addrl);
        addrl = TCG_REG_TMP0;
    }
    tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_REG_TMP2, addrl);
    return TCG_REG_TMP0;
}
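
/*
 * Summary of the fast path above: the address is shifted by
 * TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS so that, once ANDed with the
 * mask, it is already a byte offset into the TLB table.  TMP0 then holds
 * the tlb comparator, TMP2 the addend, and TMP1 the page-and-alignment
 * masked address; a BNE exits to the slow path, and on a hit the host
 * address is the addend plus the (possibly zero-extended) guest address.
 */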

static void add_qemu_ldst_label(TCGContext *s, int is_ld, MemOpIdx oi,
                                TCGType ext,
                                TCGReg datalo, TCGReg datahi,
                                TCGReg addrlo, TCGReg addrhi,
                                void *raddr, tcg_insn_unit **label_ptr)
{
    TCGLabelQemuLdst *label = new_ldst_label(s);

    label->is_ld = is_ld;
    label->oi = oi;
    label->type = ext;
    label->datalo_reg = datalo;
    label->datahi_reg = datahi;
    label->addrlo_reg = addrlo;
    label->addrhi_reg = addrhi;
    label->raddr = tcg_splitwx_to_rx(raddr);
    label->label_ptr[0] = label_ptr[0];
}

static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    MemOpIdx oi = l->oi;
    MemOp opc = get_memop(oi);
    TCGReg a0 = tcg_target_call_iarg_regs[0];
    TCGReg a1 = tcg_target_call_iarg_regs[1];
    TCGReg a2 = tcg_target_call_iarg_regs[2];
    TCGReg a3 = tcg_target_call_iarg_regs[3];

    /* We don't support oversize guests */
    if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
        g_assert_not_reached();
    }

    /* resolve label address */
    if (!reloc_sbimm12(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    /* call load helper */
    tcg_out_mov(s, TCG_TYPE_PTR, a0, TCG_AREG0);
    tcg_out_mov(s, TCG_TYPE_PTR, a1, l->addrlo_reg);
    tcg_out_movi(s, TCG_TYPE_PTR, a2, oi);
    tcg_out_movi(s, TCG_TYPE_PTR, a3, (tcg_target_long)l->raddr);

    tcg_out_call_int(s, qemu_ld_helpers[opc & MO_SSIZE], false);
    tcg_out_mov(s, (opc & MO_SIZE) == MO_64, l->datalo_reg, a0);

    tcg_out_goto(s, l->raddr);
    return true;
}

static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    MemOpIdx oi = l->oi;
    MemOp opc = get_memop(oi);
    MemOp s_bits = opc & MO_SIZE;
    TCGReg a0 = tcg_target_call_iarg_regs[0];
    TCGReg a1 = tcg_target_call_iarg_regs[1];
    TCGReg a2 = tcg_target_call_iarg_regs[2];
    TCGReg a3 = tcg_target_call_iarg_regs[3];
    TCGReg a4 = tcg_target_call_iarg_regs[4];

    /* We don't support oversize guests */
    if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
        g_assert_not_reached();
    }

    /* resolve label address */
    if (!reloc_sbimm12(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    /* call store helper */
    tcg_out_mov(s, TCG_TYPE_PTR, a0, TCG_AREG0);
    tcg_out_mov(s, TCG_TYPE_PTR, a1, l->addrlo_reg);
    tcg_out_mov(s, TCG_TYPE_PTR, a2, l->datalo_reg);
    switch (s_bits) {
    case MO_8:
        tcg_out_ext8u(s, a2, a2);
        break;
    case MO_16:
        tcg_out_ext16u(s, a2, a2);
        break;
    default:
        break;
    }
    tcg_out_movi(s, TCG_TYPE_PTR, a3, oi);
    tcg_out_movi(s, TCG_TYPE_PTR, a4, (tcg_target_long)l->raddr);

    tcg_out_call_int(s, qemu_st_helpers[opc & MO_SIZE], false);

    tcg_out_goto(s, l->raddr);
    return true;
}
#else

static void tcg_out_test_alignment(TCGContext *s, bool is_ld, TCGReg addr_reg,
                                   unsigned a_bits)
{
    unsigned a_mask = (1 << a_bits) - 1;
    TCGLabelQemuLdst *l = new_ldst_label(s);

    l->is_ld = is_ld;
    l->addrlo_reg = addr_reg;

    /* We are expecting a_bits to max out at 7, so we can always use andi. */
    tcg_debug_assert(a_bits < 12);
    tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addr_reg, a_mask);

    l->label_ptr[0] = s->code_ptr;
    tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP1, TCG_REG_ZERO, 0);

    l->raddr = tcg_splitwx_to_rx(s->code_ptr);
}

static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l)
{
    /* resolve label address */
    if (!reloc_sbimm12(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_A1, l->addrlo_reg);
    tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A0, TCG_AREG0);

    /* tail call, with the return address back inline. */
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RA, (uintptr_t)l->raddr);
    tcg_out_call_int(s, (const void *)(l->is_ld ? helper_unaligned_ld
                                       : helper_unaligned_st), true);
    return true;
}

static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    return tcg_out_fail_alignment(s, l);
}

static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    return tcg_out_fail_alignment(s, l);
}

#endif /* CONFIG_SOFTMMU */

static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi,
                                   TCGReg base, MemOp opc, bool is_64)
{
    /* Byte swapping is left to middle-end expansion. */
    tcg_debug_assert((opc & MO_BSWAP) == 0);

    switch (opc & (MO_SSIZE)) {
    case MO_UB:
        tcg_out_opc_imm(s, OPC_LBU, lo, base, 0);
        break;
    case MO_SB:
        tcg_out_opc_imm(s, OPC_LB, lo, base, 0);
        break;
    case MO_UW:
        tcg_out_opc_imm(s, OPC_LHU, lo, base, 0);
        break;
    case MO_SW:
        tcg_out_opc_imm(s, OPC_LH, lo, base, 0);
        break;
    case MO_UL:
        if (TCG_TARGET_REG_BITS == 64 && is_64) {
            tcg_out_opc_imm(s, OPC_LWU, lo, base, 0);
            break;
        }
        /* FALLTHRU */
    case MO_SL:
        tcg_out_opc_imm(s, OPC_LW, lo, base, 0);
        break;
    case MO_UQ:
        /* Prefer to load from offset 0 first, but allow for overlap.  */
        if (TCG_TARGET_REG_BITS == 64) {
            tcg_out_opc_imm(s, OPC_LD, lo, base, 0);
        } else if (lo != base) {
            tcg_out_opc_imm(s, OPC_LW, lo, base, 0);
            tcg_out_opc_imm(s, OPC_LW, hi, base, 4);
        } else {
            tcg_out_opc_imm(s, OPC_LW, hi, base, 4);
            tcg_out_opc_imm(s, OPC_LW, lo, base, 0);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
{
    TCGReg addr_regl, addr_regh __attribute__((unused));
    TCGReg data_regl, data_regh;
    MemOpIdx oi;
    MemOp opc;
#if defined(CONFIG_SOFTMMU)
    tcg_insn_unit *label_ptr[1];
#else
    unsigned a_bits;
#endif
    TCGReg base;

    data_regl = *args++;
    data_regh = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0);
    addr_regl = *args++;
    addr_regh = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
    oi = *args++;
    opc = get_memop(oi);

#if defined(CONFIG_SOFTMMU)
    base = tcg_out_tlb_load(s, addr_regl, addr_regh, oi, label_ptr, 1);
    tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64);
    add_qemu_ldst_label(s, 1, oi,
                        (is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32),
                        data_regl, data_regh, addr_regl, addr_regh,
                        s->code_ptr, label_ptr);
#else
    a_bits = get_alignment_bits(opc);
    if (a_bits) {
        tcg_out_test_alignment(s, true, addr_regl, a_bits);
    }
    base = addr_regl;
    if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
        tcg_out_ext32u(s, TCG_REG_TMP0, base);
        base = TCG_REG_TMP0;
    }
    if (guest_base != 0) {
        tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_GUEST_BASE_REG, base);
        base = TCG_REG_TMP0;
    }
    tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64);
#endif
}

static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg lo, TCGReg hi,
                                   TCGReg base, MemOp opc)
{
    /* Byte swapping is left to middle-end expansion. */
    tcg_debug_assert((opc & MO_BSWAP) == 0);

    switch (opc & (MO_SSIZE)) {
    case MO_8:
        tcg_out_opc_store(s, OPC_SB, base, lo, 0);
        break;
    case MO_16:
        tcg_out_opc_store(s, OPC_SH, base, lo, 0);
        break;
    case MO_32:
        tcg_out_opc_store(s, OPC_SW, base, lo, 0);
        break;
    case MO_64:
        if (TCG_TARGET_REG_BITS == 64) {
            tcg_out_opc_store(s, OPC_SD, base, lo, 0);
        } else {
            tcg_out_opc_store(s, OPC_SW, base, lo, 0);
            tcg_out_opc_store(s, OPC_SW, base, hi, 4);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
{
    TCGReg addr_regl, addr_regh __attribute__((unused));
    TCGReg data_regl, data_regh;
    MemOpIdx oi;
    MemOp opc;
#if defined(CONFIG_SOFTMMU)
    tcg_insn_unit *label_ptr[1];
#else
    unsigned a_bits;
#endif
    TCGReg base;

    data_regl = *args++;
    data_regh = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0);
    addr_regl = *args++;
    addr_regh = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
    oi = *args++;
    opc = get_memop(oi);

#if defined(CONFIG_SOFTMMU)
    base = tcg_out_tlb_load(s, addr_regl, addr_regh, oi, label_ptr, 0);
    tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc);
    add_qemu_ldst_label(s, 0, oi,
                        (is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32),
                        data_regl, data_regh, addr_regl, addr_regh,
                        s->code_ptr, label_ptr);
#else
    a_bits = get_alignment_bits(opc);
    if (a_bits) {
        tcg_out_test_alignment(s, false, addr_regl, a_bits);
    }
    base = addr_regl;
    if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
        tcg_out_ext32u(s, TCG_REG_TMP0, base);
        base = TCG_REG_TMP0;
    }
    if (guest_base != 0) {
        tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_GUEST_BASE_REG, base);
        base = TCG_REG_TMP0;
    }
    tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc);
#endif
}

static const tcg_insn_unit *tb_ret_addr;

static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
{
    /* Reuse the zeroing that exists for goto_ptr.  */
    if (a0 == 0) {
        tcg_out_call_int(s, tcg_code_gen_epilogue, true);
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A0, a0);
        tcg_out_call_int(s, tb_ret_addr, true);
    }
}

static void tcg_out_goto_tb(TCGContext *s, int which)
{
    /* Direct branch will be patched by tb_target_set_jmp_target. */
    set_jmp_insn_offset(s, which);
    tcg_out32(s, OPC_JAL);

    /* When branch is out of range, fall through to indirect. */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_REG_ZERO,
               get_jmp_target_addr(s, which));
    tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, TCG_REG_TMP0, 0);
    set_jmp_reset_offset(s, which);
}

void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
                              uintptr_t jmp_rx, uintptr_t jmp_rw)
{
    uintptr_t addr = tb->jmp_target_addr[n];
    ptrdiff_t offset = addr - jmp_rx;
    tcg_insn_unit insn;

    /* Either directly branch, or fall through to indirect branch. */
    if (offset == sextreg(offset, 0, 20)) {
        insn = encode_uj(OPC_JAL, TCG_REG_ZERO, offset);
    } else {
        insn = OPC_NOP;
    }
    qatomic_set((uint32_t *)jmp_rw, insn);
    flush_idcache_range(jmp_rx, jmp_rw, 4);
}
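
/*
 * Note: goto_tb patching rewrites a single instruction.  When the
 * displacement satisfies the signed 20-bit check above, a direct JAL is
 * installed; otherwise the slot becomes a NOP and execution falls through
 * to the indirect branch emitted by tcg_out_goto_tb.
 */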

static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS])
{
    TCGArg a0 = args[0];
    TCGArg a1 = args[1];
    TCGArg a2 = args[2];
    int c2 = const_args[2];

    switch (opc) {
    case INDEX_op_goto_ptr:
        tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, a0, 0);
        break;

    case INDEX_op_br:
        tcg_out_reloc(s, s->code_ptr, R_RISCV_JAL, arg_label(a0), 0);
        tcg_out_opc_jump(s, OPC_JAL, TCG_REG_ZERO, 0);
        break;

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
        tcg_out_ldst(s, OPC_LBU, a0, a1, a2);
        break;
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
        tcg_out_ldst(s, OPC_LB, a0, a1, a2);
        break;
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
        tcg_out_ldst(s, OPC_LHU, a0, a1, a2);
        break;
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
        tcg_out_ldst(s, OPC_LH, a0, a1, a2);
        break;
    case INDEX_op_ld32u_i64:
        tcg_out_ldst(s, OPC_LWU, a0, a1, a2);
        break;
    case INDEX_op_ld_i32:
    case INDEX_op_ld32s_i64:
        tcg_out_ldst(s, OPC_LW, a0, a1, a2);
        break;
    case INDEX_op_ld_i64:
        tcg_out_ldst(s, OPC_LD, a0, a1, a2);
        break;

    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
        tcg_out_ldst(s, OPC_SB, a0, a1, a2);
        break;
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
        tcg_out_ldst(s, OPC_SH, a0, a1, a2);
        break;
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
        tcg_out_ldst(s, OPC_SW, a0, a1, a2);
        break;
    case INDEX_op_st_i64:
        tcg_out_ldst(s, OPC_SD, a0, a1, a2);
        break;

    case INDEX_op_add_i32:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ADDIW, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_ADDW, a0, a1, a2);
        }
        break;
    case INDEX_op_add_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ADDI, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_ADD, a0, a1, a2);
        }
        break;

    case INDEX_op_sub_i32:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ADDIW, a0, a1, -a2);
        } else {
            tcg_out_opc_reg(s, OPC_SUBW, a0, a1, a2);
        }
        break;
    case INDEX_op_sub_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ADDI, a0, a1, -a2);
        } else {
            tcg_out_opc_reg(s, OPC_SUB, a0, a1, a2);
        }
        break;

    case INDEX_op_and_i32:
    case INDEX_op_and_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ANDI, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_AND, a0, a1, a2);
        }
        break;

    case INDEX_op_or_i32:
    case INDEX_op_or_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ORI, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_OR, a0, a1, a2);
        }
        break;

    case INDEX_op_xor_i32:
    case INDEX_op_xor_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_XORI, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_XOR, a0, a1, a2);
        }
        break;

    case INDEX_op_not_i32:
    case INDEX_op_not_i64:
        tcg_out_opc_imm(s, OPC_XORI, a0, a1, -1);
        break;

    case INDEX_op_neg_i32:
        tcg_out_opc_reg(s, OPC_SUBW, a0, TCG_REG_ZERO, a1);
        break;
    case INDEX_op_neg_i64:
        tcg_out_opc_reg(s, OPC_SUB, a0, TCG_REG_ZERO, a1);
        break;

    case INDEX_op_mul_i32:
        tcg_out_opc_reg(s, OPC_MULW, a0, a1, a2);
        break;
    case INDEX_op_mul_i64:
        tcg_out_opc_reg(s, OPC_MUL, a0, a1, a2);
        break;

    case INDEX_op_div_i32:
        tcg_out_opc_reg(s, OPC_DIVW, a0, a1, a2);
        break;
    case INDEX_op_div_i64:
        tcg_out_opc_reg(s, OPC_DIV, a0, a1, a2);
        break;

    case INDEX_op_divu_i32:
        tcg_out_opc_reg(s, OPC_DIVUW, a0, a1, a2);
        break;
    case INDEX_op_divu_i64:
        tcg_out_opc_reg(s, OPC_DIVU, a0, a1, a2);
        break;

    case INDEX_op_rem_i32:
        tcg_out_opc_reg(s, OPC_REMW, a0, a1, a2);
        break;
    case INDEX_op_rem_i64:
        tcg_out_opc_reg(s, OPC_REM, a0, a1, a2);
        break;

    case INDEX_op_remu_i32:
        tcg_out_opc_reg(s, OPC_REMUW, a0, a1, a2);
        break;
    case INDEX_op_remu_i64:
        tcg_out_opc_reg(s, OPC_REMU, a0, a1, a2);
        break;

    case INDEX_op_shl_i32:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SLLIW, a0, a1, a2 & 0x1f);
        } else {
            tcg_out_opc_reg(s, OPC_SLLW, a0, a1, a2);
        }
        break;
    case INDEX_op_shl_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SLLI, a0, a1, a2 & 0x3f);
        } else {
            tcg_out_opc_reg(s, OPC_SLL, a0, a1, a2);
        }
        break;

    case INDEX_op_shr_i32:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SRLIW, a0, a1, a2 & 0x1f);
        } else {
            tcg_out_opc_reg(s, OPC_SRLW, a0, a1, a2);
        }
        break;
    case INDEX_op_shr_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SRLI, a0, a1, a2 & 0x3f);
        } else {
            tcg_out_opc_reg(s, OPC_SRL, a0, a1, a2);
        }
        break;

    case INDEX_op_sar_i32:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SRAIW, a0, a1, a2 & 0x1f);
        } else {
            tcg_out_opc_reg(s, OPC_SRAW, a0, a1, a2);
        }
        break;
    case INDEX_op_sar_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SRAI, a0, a1, a2 & 0x3f);
        } else {
            tcg_out_opc_reg(s, OPC_SRA, a0, a1, a2);
        }
        break;

    case INDEX_op_add2_i32:
        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
                        const_args[4], const_args[5], false, true);
        break;
    case INDEX_op_add2_i64:
        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
                        const_args[4], const_args[5], false, false);
        break;
    case INDEX_op_sub2_i32:
        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
                        const_args[4], const_args[5], true, true);
        break;
    case INDEX_op_sub2_i64:
        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
                        const_args[4], const_args[5], true, false);
        break;

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        tcg_out_brcond(s, a2, a0, a1, arg_label(args[3]));
        break;
    case INDEX_op_brcond2_i32:
        tcg_out_brcond2(s, args[4], a0, a1, a2, args[3], arg_label(args[5]));
        break;

    case INDEX_op_setcond_i32:
    case INDEX_op_setcond_i64:
        tcg_out_setcond(s, args[3], a0, a1, a2);
        break;
    case INDEX_op_setcond2_i32:
        tcg_out_setcond2(s, args[5], a0, a1, a2, args[3], args[4]);
        break;

    case INDEX_op_qemu_ld_i32:
        tcg_out_qemu_ld(s, args, false);
        break;
    case INDEX_op_qemu_ld_i64:
        tcg_out_qemu_ld(s, args, true);
        break;
    case INDEX_op_qemu_st_i32:
        tcg_out_qemu_st(s, args, false);
        break;
    case INDEX_op_qemu_st_i64:
        tcg_out_qemu_st(s, args, true);
        break;

    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
        tcg_out_ext8u(s, a0, a1);
        break;

    case INDEX_op_ext16u_i32:
    case INDEX_op_ext16u_i64:
        tcg_out_ext16u(s, a0, a1);
        break;

    case INDEX_op_ext32u_i64:
    case INDEX_op_extu_i32_i64:
        tcg_out_ext32u(s, a0, a1);
        break;

    case INDEX_op_ext8s_i32:
    case INDEX_op_ext8s_i64:
        tcg_out_ext8s(s, a0, a1);
        break;

    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
        tcg_out_ext16s(s, a0, a1);
        break;

    case INDEX_op_ext32s_i64:
    case INDEX_op_extrl_i64_i32:
    case INDEX_op_ext_i32_i64:
        tcg_out_ext32s(s, a0, a1);
        break;

    case INDEX_op_extrh_i64_i32:
        tcg_out_opc_imm(s, OPC_SRAI, a0, a1, 32);
        break;

    case INDEX_op_mulsh_i32:
    case INDEX_op_mulsh_i64:
        tcg_out_opc_reg(s, OPC_MULH, a0, a1, a2);
        break;

    case INDEX_op_muluh_i32:
    case INDEX_op_muluh_i64:
        tcg_out_opc_reg(s, OPC_MULHU, a0, a1, a2);
        break;

    case INDEX_op_mb:
        tcg_out_mb(s, a0);
        break;

    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
    case INDEX_op_mov_i64:
    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
    case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb.  */
    case INDEX_op_goto_tb:  /* Always emitted via tcg_out_goto_tb.  */
    default:
        g_assert_not_reached();
    }
}

static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
{
    switch (op) {
    case INDEX_op_goto_ptr:
        return C_O0_I1(r);

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_not_i32:
    case INDEX_op_neg_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_not_i64:
    case INDEX_op_neg_i64:
    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
    case INDEX_op_ext16u_i32:
    case INDEX_op_ext16u_i64:
    case INDEX_op_ext32u_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_ext8s_i32:
    case INDEX_op_ext8s_i64:
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
    case INDEX_op_ext32s_i64:
    case INDEX_op_extrl_i64_i32:
    case INDEX_op_extrh_i64_i32:
    case INDEX_op_ext_i32_i64:
        return C_O1_I1(r, r);

    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
        return C_O0_I2(rZ, r);

    case INDEX_op_add_i32:
    case INDEX_op_and_i32:
    case INDEX_op_or_i32:
    case INDEX_op_xor_i32:
    case INDEX_op_add_i64:
    case INDEX_op_and_i64:
    case INDEX_op_or_i64:
    case INDEX_op_xor_i64:
        return C_O1_I2(r, r, rI);

    case INDEX_op_sub_i32:
    case INDEX_op_sub_i64:
        return C_O1_I2(r, rZ, rN);

    case INDEX_op_mul_i32:
    case INDEX_op_mulsh_i32:
    case INDEX_op_muluh_i32:
    case INDEX_op_div_i32:
    case INDEX_op_divu_i32:
    case INDEX_op_rem_i32:
    case INDEX_op_remu_i32:
    case INDEX_op_setcond_i32:
    case INDEX_op_mul_i64:
    case INDEX_op_mulsh_i64:
    case INDEX_op_muluh_i64:
    case INDEX_op_div_i64:
    case INDEX_op_divu_i64:
    case INDEX_op_rem_i64:
    case INDEX_op_remu_i64:
    case INDEX_op_setcond_i64:
        return C_O1_I2(r, rZ, rZ);

    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i64:
        return C_O1_I2(r, r, ri);

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        return C_O0_I2(rZ, rZ);

    case INDEX_op_add2_i32:
    case INDEX_op_add2_i64:
    case INDEX_op_sub2_i32:
    case INDEX_op_sub2_i64:
        return C_O2_I4(r, r, rZ, rZ, rM, rM);

    case INDEX_op_brcond2_i32:
        return C_O0_I4(rZ, rZ, rZ, rZ);

    case INDEX_op_setcond2_i32:
        return C_O1_I4(r, rZ, rZ, rZ, rZ);

    case INDEX_op_qemu_ld_i32:
        return (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
                ? C_O1_I1(r, L) : C_O1_I2(r, L, L));
    case INDEX_op_qemu_st_i32:
        return (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
                ? C_O0_I2(LZ, L) : C_O0_I3(LZ, L, L));
    case INDEX_op_qemu_ld_i64:
        return (TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, L)
               : TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? C_O2_I1(r, r, L)
               : C_O2_I2(r, r, L, L));
    case INDEX_op_qemu_st_i64:
        return (TCG_TARGET_REG_BITS == 64 ? C_O0_I2(LZ, L)
               : TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? C_O0_I3(LZ, LZ, L)
               : C_O0_I4(LZ, LZ, L, L));

    default:
        g_assert_not_reached();
    }
}

static const int tcg_target_callee_save_regs[] = {
    TCG_REG_S0,       /* used for the global env (TCG_AREG0) */
    TCG_REG_S1,
    TCG_REG_S2,
    TCG_REG_S3,
    TCG_REG_S4,
    TCG_REG_S5,
    TCG_REG_S6,
    TCG_REG_S7,
    TCG_REG_S8,
    TCG_REG_S9,
    TCG_REG_S10,
    TCG_REG_S11,
    TCG_REG_RA,       /* should be last for ABI compliance */
};

/* Stack frame parameters.  */
#define REG_SIZE   (TCG_TARGET_REG_BITS / 8)
#define SAVE_SIZE  ((int)ARRAY_SIZE(tcg_target_callee_save_regs) * REG_SIZE)
#define TEMP_SIZE  (CPU_TEMP_BUF_NLONGS * (int)sizeof(long))
#define FRAME_SIZE ((TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE + SAVE_SIZE \
                     + TCG_TARGET_STACK_ALIGN - 1) \
                    & -TCG_TARGET_STACK_ALIGN)
#define SAVE_OFS   (TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE)
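
/*
 * Sketch of the frame layout implied by the macros above
 * (low addresses first):
 *   [0, TCG_STATIC_CALL_ARGS_SIZE)          outgoing helper call arguments
 *   [TCG_STATIC_CALL_ARGS_SIZE, SAVE_OFS)   CPU_TEMP_BUF_NLONGS temporaries
 *   [SAVE_OFS, SAVE_OFS + SAVE_SIZE)        callee-saved registers, RA last
 * with the total rounded up to TCG_TARGET_STACK_ALIGN.
 */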

/* We're expecting to be able to use an immediate for frame allocation.  */
QEMU_BUILD_BUG_ON(FRAME_SIZE > 0x7ff);

/* Generate global QEMU prologue and epilogue code */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int i;

    tcg_set_frame(s, TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE, TEMP_SIZE);

    /* TB prologue */
    tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_SP, TCG_REG_SP, -FRAME_SIZE);
    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_st(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
                   TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
    }

#if !defined(CONFIG_SOFTMMU)
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base);
    tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
#endif

    /* Call generated code */
    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
    tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, tcg_target_call_iarg_regs[1], 0);

    /* Return path for goto_ptr. Set return value to 0 */
    tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
    tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_A0, TCG_REG_ZERO);

    /* TB epilogue */
    tb_ret_addr = tcg_splitwx_to_rx(s->code_ptr);
    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_ld(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
                   TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
    }

    tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_SP, TCG_REG_SP, FRAME_SIZE);
    tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, TCG_REG_RA, 0);
}

static void tcg_target_init(TCGContext *s)
{
    tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffff;
    if (TCG_TARGET_REG_BITS == 64) {
        tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffff;
    }

    tcg_target_call_clobber_regs = -1u;
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S0);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S1);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S2);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S3);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S4);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S5);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S6);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S7);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S8);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S9);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S10);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S11);

    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_ZERO);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP0);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP2);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_GP);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TP);
}

typedef struct {
    DebugFrameHeader h;
    uint8_t fde_def_cfa[4];
    uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2];
} DebugFrame;

#define ELF_HOST_MACHINE EM_RISCV

static const DebugFrame debug_frame = {
    .h.cie.len = sizeof(DebugFrameCIE) - 4, /* length after .len member */
    .h.cie.id = -1,
    .h.cie.version = 1,
    .h.cie.code_align = 1,
    .h.cie.data_align = -(TCG_TARGET_REG_BITS / 8) & 0x7f, /* sleb128 */
    .h.cie.return_column = TCG_REG_RA,

    /* Total FDE size does not include the "len" member.  */
    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),

    .fde_def_cfa = {
        12, TCG_REG_SP,                 /* DW_CFA_def_cfa sp, ... */
        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
        (FRAME_SIZE >> 7)
    },
    .fde_reg_ofs = {
        0x80 + 9,  12,                  /* DW_CFA_offset, s1,  -96 */
        0x80 + 18, 11,                  /* DW_CFA_offset, s2,  -88 */
        0x80 + 19, 10,                  /* DW_CFA_offset, s3,  -80 */
        0x80 + 20, 9,                   /* DW_CFA_offset, s4,  -72 */
        0x80 + 21, 8,                   /* DW_CFA_offset, s5,  -64 */
        0x80 + 22, 7,                   /* DW_CFA_offset, s6,  -56 */
        0x80 + 23, 6,                   /* DW_CFA_offset, s7,  -48 */
        0x80 + 24, 5,                   /* DW_CFA_offset, s8,  -40 */
        0x80 + 25, 4,                   /* DW_CFA_offset, s9,  -32 */
        0x80 + 26, 3,                   /* DW_CFA_offset, s10, -24 */
        0x80 + 27, 2,                   /* DW_CFA_offset, s11, -16 */
        0x80 + 1 , 1,                   /* DW_CFA_offset, ra,  -8 */
    }
};

void tcg_register_jit(const void *buf, size_t buf_size)
{
    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}