xref: /openbmc/qemu/tcg/riscv/tcg-target.c.inc (revision 8c6631e6)
1/*
2 * Tiny Code Generator for QEMU
3 *
4 * Copyright (c) 2018 SiFive, Inc
5 * Copyright (c) 2008-2009 Arnaud Patard <arnaud.patard@rtp-net.org>
6 * Copyright (c) 2009 Aurelien Jarno <aurelien@aurel32.net>
7 * Copyright (c) 2008 Fabrice Bellard
8 *
9 * Based on i386/tcg-target.c and mips/tcg-target.c
10 *
11 * Permission is hereby granted, free of charge, to any person obtaining a copy
12 * of this software and associated documentation files (the "Software"), to deal
13 * in the Software without restriction, including without limitation the rights
14 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
15 * copies of the Software, and to permit persons to whom the Software is
16 * furnished to do so, subject to the following conditions:
17 *
18 * The above copyright notice and this permission notice shall be included in
19 * all copies or substantial portions of the Software.
20 *
21 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
22 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
23 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
24 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
25 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
26 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
27 * THE SOFTWARE.
28 */
29
30#include "../tcg-ldst.c.inc"
31#include "../tcg-pool.c.inc"
32
#ifdef CONFIG_DEBUG_TCG
/* ABI names of the 32 integer registers, indexed by TCGReg, for debug output. */
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "zero",
    "ra",
    "sp",
    "gp",
    "tp",
    "t0",
    "t1",
    "t2",
    "s0",
    "s1",
    "a0",
    "a1",
    "a2",
    "a3",
    "a4",
    "a5",
    "a6",
    "a7",
    "s2",
    "s3",
    "s4",
    "s5",
    "s6",
    "s7",
    "s8",
    "s9",
    "s10",
    "s11",
    "t3",
    "t4",
    "t5",
    "t6"
};
#endif
69
/* Preferred register allocation order: callee-saved registers first. */
static const int tcg_target_reg_alloc_order[] = {
    /* Call saved registers */
    /* TCG_REG_S0 reserved for TCG_AREG0 */
    TCG_REG_S1,
    TCG_REG_S2,
    TCG_REG_S3,
    TCG_REG_S4,
    TCG_REG_S5,
    TCG_REG_S6,
    TCG_REG_S7,
    TCG_REG_S8,
    TCG_REG_S9,
    TCG_REG_S10,
    TCG_REG_S11,

    /* Call clobbered registers */
    TCG_REG_T0,
    TCG_REG_T1,
    TCG_REG_T2,
    TCG_REG_T3,
    TCG_REG_T4,
    TCG_REG_T5,
    TCG_REG_T6,

    /* Argument registers */
    TCG_REG_A0,
    TCG_REG_A1,
    TCG_REG_A2,
    TCG_REG_A3,
    TCG_REG_A4,
    TCG_REG_A5,
    TCG_REG_A6,
    TCG_REG_A7,
};
104
/* The eight a-registers, in order, used to pass integer call arguments. */
static const int tcg_target_call_iarg_regs[] = {
    TCG_REG_A0,
    TCG_REG_A1,
    TCG_REG_A2,
    TCG_REG_A3,
    TCG_REG_A4,
    TCG_REG_A5,
    TCG_REG_A6,
    TCG_REG_A7,
};
115
116static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
117{
118    tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
119    tcg_debug_assert(slot >= 0 && slot <= 1);
120    return TCG_REG_A0 + slot;
121}
122
/* Extra operand-constraint flags, beyond the generic TCG_CT_* values. */
#define TCG_CT_CONST_ZERO  0x100   /* the constant zero */
#define TCG_CT_CONST_S12   0x200   /* signed 12-bit: [-0x800, 0x7ff] */
#define TCG_CT_CONST_N12   0x400   /* negation fits in 12 bits: [-0x7ff, 0x800] */
#define TCG_CT_CONST_M12   0x800   /* value and negation both fit: [-0x7ff, 0x7ff] */

#define ALL_GENERAL_REGS      MAKE_64BIT_MASK(0, 32)
/*
 * For softmmu, we need to avoid conflicts with the first 5
 * argument registers to call the helper.  Some of these are
 * also used for the tlb lookup.
 */
#ifdef CONFIG_SOFTMMU
#define SOFTMMU_RESERVE_REGS  MAKE_64BIT_MASK(TCG_REG_A0, 5)
#else
#define SOFTMMU_RESERVE_REGS  0
#endif
139
140
141static inline tcg_target_long sextreg(tcg_target_long val, int pos, int len)
142{
143    if (TCG_TARGET_REG_BITS == 32) {
144        return sextract32(val, pos, len);
145    } else {
146        return sextract64(val, pos, len);
147    }
148}
149
150/* test if a constant matches the constraint */
151static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
152{
153    if (ct & TCG_CT_CONST) {
154        return 1;
155    }
156    if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
157        return 1;
158    }
159    /*
160     * Sign extended from 12 bits: [-0x800, 0x7ff].
161     * Used for most arithmetic, as this is the isa field.
162     */
163    if ((ct & TCG_CT_CONST_S12) && val >= -0x800 && val <= 0x7ff) {
164        return 1;
165    }
166    /*
167     * Sign extended from 12 bits, negated: [-0x7ff, 0x800].
168     * Used for subtraction, where a constant must be handled by ADDI.
169     */
170    if ((ct & TCG_CT_CONST_N12) && val >= -0x7ff && val <= 0x800) {
171        return 1;
172    }
173    /*
174     * Sign extended from 12 bits, +/- matching: [-0x7ff, 0x7ff].
175     * Used by addsub2, which may need the negative operation,
176     * and requires the modified constant to be representable.
177     */
178    if ((ct & TCG_CT_CONST_M12) && val >= -0x7ff && val <= 0x7ff) {
179        return 1;
180    }
181    return 0;
182}
183
/*
 * RISC-V Base ISA opcodes (IM)
 *
 * Each value carries the opcode plus any funct3/funct7 bits pre-merged;
 * the encoders below OR the register and immediate fields into place.
 */

typedef enum {
    OPC_ADD = 0x33,
    OPC_ADDI = 0x13,
    OPC_AND = 0x7033,
    OPC_ANDI = 0x7013,
    OPC_AUIPC = 0x17,
    OPC_BEQ = 0x63,
    OPC_BGE = 0x5063,
    OPC_BGEU = 0x7063,
    OPC_BLT = 0x4063,
    OPC_BLTU = 0x6063,
    OPC_BNE = 0x1063,
    OPC_DIV = 0x2004033,
    OPC_DIVU = 0x2005033,
    OPC_JAL = 0x6f,
    OPC_JALR = 0x67,
    OPC_LB = 0x3,
    OPC_LBU = 0x4003,
    OPC_LD = 0x3003,
    OPC_LH = 0x1003,
    OPC_LHU = 0x5003,
    OPC_LUI = 0x37,
    OPC_LW = 0x2003,
    OPC_LWU = 0x6003,
    OPC_MUL = 0x2000033,
    OPC_MULH = 0x2001033,
    OPC_MULHSU = 0x2002033,
    OPC_MULHU = 0x2003033,
    OPC_OR = 0x6033,
    OPC_ORI = 0x6013,
    OPC_REM = 0x2006033,
    OPC_REMU = 0x2007033,
    OPC_SB = 0x23,
    OPC_SD = 0x3023,
    OPC_SH = 0x1023,
    OPC_SLL = 0x1033,
    OPC_SLLI = 0x1013,
    OPC_SLT = 0x2033,
    OPC_SLTI = 0x2013,
    OPC_SLTIU = 0x3013,
    OPC_SLTU = 0x3033,
    OPC_SRA = 0x40005033,
    OPC_SRAI = 0x40005013,
    OPC_SRL = 0x5033,
    OPC_SRLI = 0x5013,
    OPC_SUB = 0x40000033,
    OPC_SW = 0x2023,
    OPC_XOR = 0x4033,
    OPC_XORI = 0x4013,

#if TCG_TARGET_REG_BITS == 64
    /* RV64-only 32-bit ("W") operations. */
    OPC_ADDIW = 0x1b,
    OPC_ADDW = 0x3b,
    OPC_DIVUW = 0x200503b,
    OPC_DIVW = 0x200403b,
    OPC_MULW = 0x200003b,
    OPC_REMUW = 0x200703b,
    OPC_REMW = 0x200603b,
    OPC_SLLIW = 0x101b,
    OPC_SLLW = 0x103b,
    OPC_SRAIW = 0x4000501b,
    OPC_SRAW = 0x4000503b,
    OPC_SRLIW = 0x501b,
    OPC_SRLW = 0x503b,
    OPC_SUBW = 0x4000003b,
#else
    /* Simplify code throughout by defining aliases for RV32.  */
    OPC_ADDIW = OPC_ADDI,
    OPC_ADDW = OPC_ADD,
    OPC_DIVUW = OPC_DIVU,
    OPC_DIVW = OPC_DIV,
    OPC_MULW = OPC_MUL,
    OPC_REMUW = OPC_REMU,
    OPC_REMW = OPC_REM,
    OPC_SLLIW = OPC_SLLI,
    OPC_SLLW = OPC_SLL,
    OPC_SRAIW = OPC_SRAI,
    OPC_SRAW = OPC_SRA,
    OPC_SRLIW = OPC_SRLI,
    OPC_SRLW = OPC_SRL,
    OPC_SUBW = OPC_SUB,
#endif

    OPC_FENCE = 0x0000000f,
    OPC_NOP   = OPC_ADDI,   /* nop = addi r0,r0,0 */
} RISCVInsn;
274
275/*
276 * RISC-V immediate and instruction encoders (excludes 16-bit RVC)
277 */
278
279/* Type-R */
280
281static int32_t encode_r(RISCVInsn opc, TCGReg rd, TCGReg rs1, TCGReg rs2)
282{
283    return opc | (rd & 0x1f) << 7 | (rs1 & 0x1f) << 15 | (rs2 & 0x1f) << 20;
284}
285
286/* Type-I */
287
/* I-type immediate: value bits [11:0] land in instruction bits [31:20]. */
static int32_t encode_imm12(uint32_t imm)
{
    uint32_t field = imm & 0xfff;

    return field << 20;
}
292
293static int32_t encode_i(RISCVInsn opc, TCGReg rd, TCGReg rs1, uint32_t imm)
294{
295    return opc | (rd & 0x1f) << 7 | (rs1 & 0x1f) << 15 | encode_imm12(imm);
296}
297
298/* Type-S */
299
/* S-type immediate: imm[11:5] -> bits [31:25], imm[4:0] -> bits [11:7]. */
static int32_t encode_simm12(uint32_t imm)
{
    uint32_t hi = (imm >> 5) & 0x7f;
    uint32_t lo = imm & 0x1f;

    return (int32_t)((hi << 25) | (lo << 7));
}
309
310static int32_t encode_s(RISCVInsn opc, TCGReg rs1, TCGReg rs2, uint32_t imm)
311{
312    return opc | (rs1 & 0x1f) << 15 | (rs2 & 0x1f) << 20 | encode_simm12(imm);
313}
314
315/* Type-SB */
316
/*
 * SB-type (branch) immediate: imm[12] -> bit 31, imm[10:5] -> bits [30:25],
 * imm[4:1] -> bits [11:8], imm[11] -> bit 7.  Bit 0 is implicitly zero.
 */
static int32_t encode_sbimm12(uint32_t imm)
{
    uint32_t enc = 0;

    enc |= ((imm >> 12) & 0x1) << 31;
    enc |= ((imm >> 5) & 0x3f) << 25;
    enc |= ((imm >> 1) & 0xf) << 8;
    enc |= ((imm >> 11) & 0x1) << 7;
    return (int32_t)enc;
}
328
329static int32_t encode_sb(RISCVInsn opc, TCGReg rs1, TCGReg rs2, uint32_t imm)
330{
331    return opc | (rs1 & 0x1f) << 15 | (rs2 & 0x1f) << 20 | encode_sbimm12(imm);
332}
333
334/* Type-U */
335
/* U-type immediate: keep bits [31:12] of the value, in place. */
static int32_t encode_uimm20(uint32_t imm)
{
    return imm & ~0xfffu;
}
340
341static int32_t encode_u(RISCVInsn opc, TCGReg rd, uint32_t imm)
342{
343    return opc | (rd & 0x1f) << 7 | encode_uimm20(imm);
344}
345
346/* Type-UJ */
347
/*
 * UJ-type (JAL) immediate: imm[10:1] -> bits [30:21], imm[11] -> bit 20,
 * imm[19:12] stays in place, imm[20] -> bit 31.  Bit 0 is implicitly zero.
 */
static int32_t encode_ujimm20(uint32_t imm)
{
    uint32_t enc = 0;

    enc |= ((imm >> 1) & 0x3ff) << 21;
    enc |= ((imm >> 11) & 0x1) << 20;
    enc |= imm & 0x0ff000;
    enc |= ((imm >> 20) & 0x1) << 31;
    return (int32_t)enc;
}
359
360static int32_t encode_uj(RISCVInsn opc, TCGReg rd, uint32_t imm)
361{
362    return opc | (rd & 0x1f) << 7 | encode_ujimm20(imm);
363}
364
365/*
366 * RISC-V instruction emitters
367 */
368
/* Emit one R-type (register-register) instruction. */
static void tcg_out_opc_reg(TCGContext *s, RISCVInsn opc,
                            TCGReg rd, TCGReg rs1, TCGReg rs2)
{
    tcg_out32(s, encode_r(opc, rd, rs1, rs2));
}
374
/* Emit one I-type (register-immediate or load) instruction. */
static void tcg_out_opc_imm(TCGContext *s, RISCVInsn opc,
                            TCGReg rd, TCGReg rs1, TCGArg imm)
{
    tcg_out32(s, encode_i(opc, rd, rs1, imm));
}
380
/* Emit one S-type (store) instruction: store rs2 at rs1 + imm. */
static void tcg_out_opc_store(TCGContext *s, RISCVInsn opc,
                              TCGReg rs1, TCGReg rs2, uint32_t imm)
{
    tcg_out32(s, encode_s(opc, rs1, rs2, imm));
}
386
/* Emit one SB-type (conditional branch) instruction with displacement imm. */
static void tcg_out_opc_branch(TCGContext *s, RISCVInsn opc,
                               TCGReg rs1, TCGReg rs2, uint32_t imm)
{
    tcg_out32(s, encode_sb(opc, rs1, rs2, imm));
}
392
/* Emit one U-type instruction (LUI/AUIPC) with a 20-bit upper immediate. */
static void tcg_out_opc_upper(TCGContext *s, RISCVInsn opc,
                              TCGReg rd, uint32_t imm)
{
    tcg_out32(s, encode_u(opc, rd, imm));
}
398
/* Emit one UJ-type instruction (JAL) with a 20-bit jump displacement. */
static void tcg_out_opc_jump(TCGContext *s, RISCVInsn opc,
                             TCGReg rd, uint32_t imm)
{
    tcg_out32(s, encode_uj(opc, rd, imm));
}
404
405static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
406{
407    int i;
408    for (i = 0; i < count; ++i) {
409        p[i] = OPC_NOP;
410    }
411}
412
413/*
414 * Relocations
415 */
416
/*
 * Patch the SB-type branch at src_rw to target 'target'.
 * Returns false when the displacement cannot be encoded.
 * NOTE(review): the check accepts +/-2 KiB (12 signed bits), half the
 * architectural +/-4 KiB branch range -- conservative; confirm intent.
 */
static bool reloc_sbimm12(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    /* Displacements are relative to the executable mapping of the insn. */
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    intptr_t offset = (intptr_t)target - (intptr_t)src_rx;

    tcg_debug_assert((offset & 1) == 0);   /* targets are 2-byte aligned */
    if (offset == sextreg(offset, 0, 12)) {
        *src_rw |= encode_sbimm12(offset);
        return true;
    }

    return false;
}
430
/*
 * Patch the UJ-type jump (JAL) at src_rw to target 'target'.
 * Returns false when the displacement cannot be encoded.
 * NOTE(review): the check accepts +/-512 KiB (20 signed bits), half the
 * architectural +/-1 MiB JAL range -- conservative; confirm intent.
 */
static bool reloc_jimm20(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    /* Displacements are relative to the executable mapping of the insn. */
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    intptr_t offset = (intptr_t)target - (intptr_t)src_rx;

    tcg_debug_assert((offset & 1) == 0);   /* targets are 2-byte aligned */
    if (offset == sextreg(offset, 0, 20)) {
        *src_rw |= encode_ujimm20(offset);
        return true;
    }

    return false;
}
444
/*
 * Patch an AUIPC (src_rw[0]) + 12-bit-immediate (src_rw[1]) pair so that
 * together they produce the pc-relative address of 'target':
 * AUIPC adds 'hi' and the second insn adds back the signed low part 'lo'.
 * Returns false when the displacement does not fit in 32 bits.
 */
static bool reloc_call(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    intptr_t offset = (intptr_t)target - (intptr_t)src_rx;
    int32_t lo = sextreg(offset, 0, 12);
    int32_t hi = offset - lo;

    /*
     * 'hi' is deliberately int32_t: on a 64-bit host the subtraction is
     * truncated, so this equality fails exactly when the full offset does
     * not fit in the 32-bit auipc+addi reach.
     */
    if (offset == hi + lo) {
        src_rw[0] |= encode_uimm20(hi);
        src_rw[1] |= encode_imm12(lo);
        return true;
    }

    return false;
}
460
461static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
462                        intptr_t value, intptr_t addend)
463{
464    tcg_debug_assert(addend == 0);
465    switch (type) {
466    case R_RISCV_BRANCH:
467        return reloc_sbimm12(code_ptr, (tcg_insn_unit *)value);
468    case R_RISCV_JAL:
469        return reloc_jimm20(code_ptr, (tcg_insn_unit *)value);
470    case R_RISCV_CALL:
471        return reloc_call(code_ptr, (tcg_insn_unit *)value);
472    default:
473        g_assert_not_reached();
474    }
475}
476
477/*
478 * TCG intrinsics
479 */
480
481static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
482{
483    if (ret == arg) {
484        return true;
485    }
486    switch (type) {
487    case TCG_TYPE_I32:
488    case TCG_TYPE_I64:
489        tcg_out_opc_imm(s, OPC_ADDI, ret, arg, 0);
490        break;
491    default:
492        g_assert_not_reached();
493    }
494    return true;
495}
496
/*
 * Load the constant 'val' into rd, choosing the cheapest sequence:
 * single ADDI, LUI+ADDIW, patched AUIPC+ADDI (pc-relative), LUI plus one
 * shift, ADDI/LUI plus SRLI, or finally a constant-pool load.
 */
static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd,
                         tcg_target_long val)
{
    tcg_target_long lo, hi, tmp;
    int shift, ret;

    /* For 32-bit values on a 64-bit host, work on the sign-extended form. */
    if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
        val = (int32_t)val;
    }

    /* Signed 12-bit constants: one addi from the zero register. */
    lo = sextreg(val, 0, 12);
    if (val == lo) {
        tcg_out_opc_imm(s, OPC_ADDI, rd, TCG_REG_ZERO, lo);
        return;
    }

    /* 32-bit constants: lui for the upper 20 bits, addiw for the rest. */
    hi = val - lo;
    if (TCG_TARGET_REG_BITS == 32 || val == (int32_t)val) {
        tcg_out_opc_upper(s, OPC_LUI, rd, hi);
        if (lo != 0) {
            tcg_out_opc_imm(s, OPC_ADDIW, rd, rd, lo);
        }
        return;
    }

    /* We can only be here if TCG_TARGET_REG_BITS != 32 */
    /* Within +/-2 GiB of the pc: auipc+addi, patched as a pair. */
    tmp = tcg_pcrel_diff(s, (void *)val);
    if (tmp == (int32_t)tmp) {
        tcg_out_opc_upper(s, OPC_AUIPC, rd, 0);
        tcg_out_opc_imm(s, OPC_ADDI, rd, rd, 0);
        ret = reloc_call(s->code_ptr - 2, (const tcg_insn_unit *)val);
        tcg_debug_assert(ret == true);
        return;
    }

    /* Look for a single 20-bit section.  */
    shift = ctz64(val);
    tmp = val >> shift;
    if (tmp == sextreg(tmp, 0, 20)) {
        tcg_out_opc_upper(s, OPC_LUI, rd, tmp << 12);
        if (shift > 12) {
            tcg_out_opc_imm(s, OPC_SLLI, rd, rd, shift - 12);
        } else {
            tcg_out_opc_imm(s, OPC_SRAI, rd, rd, 12 - shift);
        }
        return;
    }

    /* Look for a few high zero bits, with lots of bits set in the middle.  */
    shift = clz64(val);
    tmp = val << shift;
    if (tmp == sextreg(tmp, 12, 20) << 12) {
        tcg_out_opc_upper(s, OPC_LUI, rd, tmp);
        tcg_out_opc_imm(s, OPC_SRLI, rd, rd, shift);
        return;
    } else if (tmp == sextreg(tmp, 0, 12)) {
        tcg_out_opc_imm(s, OPC_ADDI, rd, TCG_REG_ZERO, tmp);
        tcg_out_opc_imm(s, OPC_SRLI, rd, rd, shift);
        return;
    }

    /* Drop into the constant pool: auipc+ld patched via R_RISCV_CALL. */
    new_pool_label(s, val, R_RISCV_CALL, s->code_ptr, 0);
    tcg_out_opc_upper(s, OPC_AUIPC, rd, 0);
    tcg_out_opc_imm(s, OPC_LD, rd, rd, 0);
}
563
/*
 * Add a constant to a pointer register.  Required only when structs are
 * passed by reference to helpers, which this backend does not support.
 */
static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                             tcg_target_long imm)
{
    /* This function is only used for passing structs by reference. */
    g_assert_not_reached();
}
570
/* Zero-extend the low 8 bits of arg into ret (andi with 0xff). */
static void tcg_out_ext8u(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_imm(s, OPC_ANDI, ret, arg, 0xff);
}
575
/* Zero-extend the low 16 bits: shift left then logical-shift right by 16
 * (W-form shifts, which also sign-extend the 32-bit result on RV64). */
static void tcg_out_ext16u(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_imm(s, OPC_SLLIW, ret, arg, 16);
    tcg_out_opc_imm(s, OPC_SRLIW, ret, ret, 16);
}
581
/* Zero-extend the low 32 bits: shift left then logical-shift right by 32. */
static void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_imm(s, OPC_SLLI, ret, arg, 32);
    tcg_out_opc_imm(s, OPC_SRLI, ret, ret, 32);
}
587
/* Sign-extend the low 8 bits: shift left then arithmetic-shift right by 24. */
static void tcg_out_ext8s(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_imm(s, OPC_SLLIW, ret, arg, 24);
    tcg_out_opc_imm(s, OPC_SRAIW, ret, ret, 24);
}
593
/* Sign-extend the low 16 bits: shift left then arithmetic-shift right by 16. */
static void tcg_out_ext16s(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_imm(s, OPC_SLLIW, ret, arg, 16);
    tcg_out_opc_imm(s, OPC_SRAIW, ret, ret, 16);
}
599
/* Sign-extend the low 32 bits: the canonical "sext.w" (addiw ret, arg, 0). */
static void tcg_out_ext32s(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_imm(s, OPC_ADDIW, ret, arg, 0);
}
604
/*
 * Emit a load or store of 'data' at addr + offset.  Offsets outside the
 * signed 12-bit range are first materialized into TCG_REG_TMP2, either
 * pc-relatively (auipc, for absolute addresses reachable from the pc)
 * or via movi + add.
 */
static void tcg_out_ldst(TCGContext *s, RISCVInsn opc, TCGReg data,
                         TCGReg addr, intptr_t offset)
{
    intptr_t imm12 = sextreg(offset, 0, 12);

    if (offset != imm12) {
        intptr_t diff = tcg_pcrel_diff(s, (void *)offset);

        if (addr == TCG_REG_ZERO && diff == (int32_t)diff) {
            /* Absolute address within auipc range of the pc. */
            imm12 = sextreg(diff, 0, 12);
            tcg_out_opc_upper(s, OPC_AUIPC, TCG_REG_TMP2, diff - imm12);
        } else {
            /* Build the out-of-range part of the offset in TMP2. */
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP2, offset - imm12);
            if (addr != TCG_REG_ZERO) {
                tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP2, TCG_REG_TMP2, addr);
            }
        }
        addr = TCG_REG_TMP2;
    }

    switch (opc) {
    case OPC_SB:
    case OPC_SH:
    case OPC_SW:
    case OPC_SD:
        tcg_out_opc_store(s, opc, addr, data, imm12);
        break;
    case OPC_LB:
    case OPC_LBU:
    case OPC_LH:
    case OPC_LHU:
    case OPC_LW:
    case OPC_LWU:
    case OPC_LD:
        tcg_out_opc_imm(s, opc, data, addr, imm12);
        break;
    default:
        g_assert_not_reached();
    }
}
645
646static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
647                       TCGReg arg1, intptr_t arg2)
648{
649    bool is32bit = (TCG_TARGET_REG_BITS == 32 || type == TCG_TYPE_I32);
650    tcg_out_ldst(s, is32bit ? OPC_LW : OPC_LD, arg, arg1, arg2);
651}
652
653static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
654                       TCGReg arg1, intptr_t arg2)
655{
656    bool is32bit = (TCG_TARGET_REG_BITS == 32 || type == TCG_TYPE_I32);
657    tcg_out_ldst(s, is32bit ? OPC_SW : OPC_SD, arg, arg1, arg2);
658}
659
660static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
661                        TCGReg base, intptr_t ofs)
662{
663    if (val == 0) {
664        tcg_out_st(s, type, TCG_REG_ZERO, base, ofs);
665        return true;
666    }
667    return false;
668}
669
/*
 * Emit a double-word add/sub: (rh:rl) = (ah:al) +/- (bh:bl).
 * cbl/cbh mark bl/bh as constants.  TMP0 carries the borrow/carry and
 * TMP1 holds the intermediate high part, so inputs may overlap outputs.
 */
static void tcg_out_addsub2(TCGContext *s,
                            TCGReg rl, TCGReg rh,
                            TCGReg al, TCGReg ah,
                            TCGArg bl, TCGArg bh,
                            bool cbl, bool cbh, bool is_sub, bool is32bit)
{
    const RISCVInsn opc_add = is32bit ? OPC_ADDW : OPC_ADD;
    const RISCVInsn opc_addi = is32bit ? OPC_ADDIW : OPC_ADDI;
    const RISCVInsn opc_sub = is32bit ? OPC_SUBW : OPC_SUB;
    TCGReg th = TCG_REG_TMP1;

    /* If we have a negative constant such that negating it would
       make the high part zero, we can (usually) eliminate one insn.  */
    if (cbl && cbh && bh == -1 && bl != 0) {
        bl = -bl;
        bh = 0;
        is_sub = !is_sub;
    }

    /* By operating on the high part first, we get to use the final
       carry operation to move back from the temporary.  */
    if (!cbh) {
        tcg_out_opc_reg(s, (is_sub ? opc_sub : opc_add), th, ah, bh);
    } else if (bh != 0 || ah == rl) {
        tcg_out_opc_imm(s, opc_addi, th, ah, (is_sub ? -bh : bh));
    } else {
        /* High constant is zero and ah won't be clobbered: use it directly. */
        th = ah;
    }

    /* Note that tcg optimization should eliminate the bl == 0 case.  */
    if (is_sub) {
        if (cbl) {
            /* Borrow-out is (al < bl), computed before al is overwritten. */
            tcg_out_opc_imm(s, OPC_SLTIU, TCG_REG_TMP0, al, bl);
            tcg_out_opc_imm(s, opc_addi, rl, al, -bl);
        } else {
            tcg_out_opc_reg(s, OPC_SLTU, TCG_REG_TMP0, al, bl);
            tcg_out_opc_reg(s, opc_sub, rl, al, bl);
        }
        tcg_out_opc_reg(s, opc_sub, rh, th, TCG_REG_TMP0);
    } else {
        if (cbl) {
            /* Carry-out is (rl < bl) after the addition. */
            tcg_out_opc_imm(s, opc_addi, rl, al, bl);
            tcg_out_opc_imm(s, OPC_SLTIU, TCG_REG_TMP0, rl, bl);
        } else if (al == bl) {
            /*
             * If the input regs overlap, this is a simple doubling
             * and carry-out is the input msb.  This special case is
             * required when the output reg overlaps the input,
             * but we might as well use it always.
             */
            tcg_out_opc_imm(s, OPC_SLTI, TCG_REG_TMP0, al, 0);
            tcg_out_opc_reg(s, opc_add, rl, al, al);
        } else {
            tcg_out_opc_reg(s, opc_add, rl, al, bl);
            tcg_out_opc_reg(s, OPC_SLTU, TCG_REG_TMP0,
                            rl, (rl == bl ? al : bl));
        }
        tcg_out_opc_reg(s, opc_add, rh, th, TCG_REG_TMP0);
    }
}
730
/*
 * Map each TCG comparison to a RISC-V branch opcode; 'swap' marks the
 * conditions that are implemented by exchanging the two operands.
 */
static const struct {
    RISCVInsn op;
    bool swap;
} tcg_brcond_to_riscv[] = {
    [TCG_COND_EQ] =  { OPC_BEQ,  false },
    [TCG_COND_NE] =  { OPC_BNE,  false },
    [TCG_COND_LT] =  { OPC_BLT,  false },
    [TCG_COND_GE] =  { OPC_BGE,  false },
    [TCG_COND_LE] =  { OPC_BGE,  true  },
    [TCG_COND_GT] =  { OPC_BLT,  true  },
    [TCG_COND_LTU] = { OPC_BLTU, false },
    [TCG_COND_GEU] = { OPC_BGEU, false },
    [TCG_COND_LEU] = { OPC_BGEU, true  },
    [TCG_COND_GTU] = { OPC_BLTU, true  }
};
746
747static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1,
748                           TCGReg arg2, TCGLabel *l)
749{
750    RISCVInsn op = tcg_brcond_to_riscv[cond].op;
751
752    tcg_debug_assert(op != 0);
753
754    if (tcg_brcond_to_riscv[cond].swap) {
755        TCGReg t = arg1;
756        arg1 = arg2;
757        arg2 = t;
758    }
759
760    tcg_out_reloc(s, s->code_ptr, R_RISCV_BRANCH, l, 0);
761    tcg_out_opc_branch(s, op, arg1, arg2, 0);
762}
763
764static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret,
765                            TCGReg arg1, TCGReg arg2)
766{
767    switch (cond) {
768    case TCG_COND_EQ:
769        tcg_out_opc_reg(s, OPC_SUB, ret, arg1, arg2);
770        tcg_out_opc_imm(s, OPC_SLTIU, ret, ret, 1);
771        break;
772    case TCG_COND_NE:
773        tcg_out_opc_reg(s, OPC_SUB, ret, arg1, arg2);
774        tcg_out_opc_reg(s, OPC_SLTU, ret, TCG_REG_ZERO, ret);
775        break;
776    case TCG_COND_LT:
777        tcg_out_opc_reg(s, OPC_SLT, ret, arg1, arg2);
778        break;
779    case TCG_COND_GE:
780        tcg_out_opc_reg(s, OPC_SLT, ret, arg1, arg2);
781        tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1);
782        break;
783    case TCG_COND_LE:
784        tcg_out_opc_reg(s, OPC_SLT, ret, arg2, arg1);
785        tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1);
786        break;
787    case TCG_COND_GT:
788        tcg_out_opc_reg(s, OPC_SLT, ret, arg2, arg1);
789        break;
790    case TCG_COND_LTU:
791        tcg_out_opc_reg(s, OPC_SLTU, ret, arg1, arg2);
792        break;
793    case TCG_COND_GEU:
794        tcg_out_opc_reg(s, OPC_SLTU, ret, arg1, arg2);
795        tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1);
796        break;
797    case TCG_COND_LEU:
798        tcg_out_opc_reg(s, OPC_SLTU, ret, arg2, arg1);
799        tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1);
800        break;
801    case TCG_COND_GTU:
802        tcg_out_opc_reg(s, OPC_SLTU, ret, arg2, arg1);
803        break;
804    default:
805         g_assert_not_reached();
806         break;
807     }
808}
809
/* Double-word conditional branch (32-bit host): not implemented. */
static void tcg_out_brcond2(TCGContext *s, TCGCond cond, TCGReg al, TCGReg ah,
                            TCGReg bl, TCGReg bh, TCGLabel *l)
{
    /* todo */
    g_assert_not_reached();
}
816
/* Double-word setcond (32-bit host): not implemented. */
static void tcg_out_setcond2(TCGContext *s, TCGCond cond, TCGReg ret,
                             TCGReg al, TCGReg ah, TCGReg bl, TCGReg bh)
{
    /* todo */
    g_assert_not_reached();
}
823
/*
 * Emit a call (or, when 'tail' is set, a jump that links to the zero
 * register) to the absolute address 'arg', choosing among a direct jal,
 * a patched auipc+jalr pair, and a materialized 64-bit address + jalr.
 */
static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *arg, bool tail)
{
    TCGReg link = tail ? TCG_REG_ZERO : TCG_REG_RA;
    ptrdiff_t offset = tcg_pcrel_diff(s, arg);
    int ret;

    tcg_debug_assert((offset & 1) == 0);   /* targets are 2-byte aligned */
    if (offset == sextreg(offset, 0, 20)) {
        /* short jump: -2097150 to 2097152 */
        /* NOTE(review): the test above accepts +/-512 KiB (20 signed
           bits); the range stated in the comment looks wrong -- confirm. */
        tcg_out_opc_jump(s, OPC_JAL, link, offset);
    } else if (TCG_TARGET_REG_BITS == 32 || offset == (int32_t)offset) {
        /* long jump: -2147483646 to 2147483648 */
        tcg_out_opc_upper(s, OPC_AUIPC, TCG_REG_TMP0, 0);
        tcg_out_opc_imm(s, OPC_JALR, link, TCG_REG_TMP0, 0);
        ret = reloc_call(s->code_ptr - 2, arg);
        tcg_debug_assert(ret == true);
    } else if (TCG_TARGET_REG_BITS == 64) {
        /* far jump: 64-bit */
        /* Materialize all but the low 12 bits, then jalr with the rest. */
        tcg_target_long imm = sextreg((tcg_target_long)arg, 0, 12);
        tcg_target_long base = (tcg_target_long)arg - imm;
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, base);
        tcg_out_opc_imm(s, OPC_JALR, link, TCG_REG_TMP0, imm);
    } else {
        g_assert_not_reached();
    }
}
850
/* Emit a plain (non-tail) call; 'info' is unused by this backend. */
static void tcg_out_call(TCGContext *s, const tcg_insn_unit *arg,
                         const TCGHelperInfo *info)
{
    tcg_out_call_int(s, arg, false);
}
856
857static void tcg_out_mb(TCGContext *s, TCGArg a0)
858{
859    tcg_insn_unit insn = OPC_FENCE;
860
861    if (a0 & TCG_MO_LD_LD) {
862        insn |= 0x02200000;
863    }
864    if (a0 & TCG_MO_ST_LD) {
865        insn |= 0x01200000;
866    }
867    if (a0 & TCG_MO_LD_ST) {
868        insn |= 0x02100000;
869    }
870    if (a0 & TCG_MO_ST_ST) {
871        insn |= 0x02200000;
872    }
873    tcg_out32(s, insn);
874}
875
876/*
877 * Load/store and TLB
878 */
879
880#if defined(CONFIG_SOFTMMU)
/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
 *                                     MemOpIdx oi, uintptr_t ra)
 *
 * Slow-path load helpers, indexed by MemOp size|sign (MO_SSIZE).
 */
static void * const qemu_ld_helpers[MO_SSIZE + 1] = {
    [MO_UB] = helper_ret_ldub_mmu,
    [MO_SB] = helper_ret_ldsb_mmu,
#if HOST_BIG_ENDIAN
    [MO_UW] = helper_be_lduw_mmu,
    [MO_SW] = helper_be_ldsw_mmu,
    [MO_UL] = helper_be_ldul_mmu,
#if TCG_TARGET_REG_BITS == 64
    [MO_SL] = helper_be_ldsl_mmu,
#endif
    [MO_UQ] = helper_be_ldq_mmu,
#else
    [MO_UW] = helper_le_lduw_mmu,
    [MO_SW] = helper_le_ldsw_mmu,
    [MO_UL] = helper_le_ldul_mmu,
#if TCG_TARGET_REG_BITS == 64
    [MO_SL] = helper_le_ldsl_mmu,
#endif
    [MO_UQ] = helper_le_ldq_mmu,
#endif
};
905
/* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
 *                                     uintxx_t val, MemOpIdx oi,
 *                                     uintptr_t ra)
 *
 * Slow-path store helpers, indexed by MemOp size (MO_SIZE).
 */
static void * const qemu_st_helpers[MO_SIZE + 1] = {
    [MO_8]   = helper_ret_stb_mmu,
#if HOST_BIG_ENDIAN
    [MO_16] = helper_be_stw_mmu,
    [MO_32] = helper_be_stl_mmu,
    [MO_64] = helper_be_stq_mmu,
#else
    [MO_16] = helper_le_stw_mmu,
    [MO_32] = helper_le_stl_mmu,
    [MO_64] = helper_le_stq_mmu,
#endif
};
922
/* We don't support oversize guests */
QEMU_BUILD_BUG_ON(TCG_TARGET_REG_BITS < TARGET_LONG_BITS);

/* We expect to use a 12-bit negative offset from ENV (so that the TLB
 * fast path below can reach mask/table with plain load offsets).  */
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 11));
929
/* Emit an unconditional jump (jal zero) to 'target', patched immediately. */
static void tcg_out_goto(TCGContext *s, const tcg_insn_unit *target)
{
    tcg_out_opc_jump(s, OPC_JAL, TCG_REG_ZERO, 0);
    bool ok = reloc_jimm20(s->code_ptr - 1, target);
    tcg_debug_assert(ok);
}
936
/*
 * Emit the fast-path TLB lookup for a guest access at (addrh:)addrl.
 * Computes the TLB entry address, loads the comparator and addend,
 * compares the masked guest address, and records the miss branch in
 * label_ptr[0] for later patching by the slow path.  Returns the
 * register holding the translated host address.  Clobbers TMP0-TMP2.
 */
static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addrl,
                               TCGReg addrh, MemOpIdx oi,
                               tcg_insn_unit **label_ptr, bool is_load)
{
    MemOp opc = get_memop(oi);
    unsigned s_bits = opc & MO_SIZE;
    unsigned a_bits = get_alignment_bits(opc);
    tcg_target_long compare_mask;
    int mem_index = get_mmuidx(oi);
    int fast_ofs = TLB_MASK_TABLE_OFS(mem_index);
    int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask);
    int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table);
    TCGReg mask_base = TCG_AREG0, table_base = TCG_AREG0;

    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, mask_base, mask_ofs);
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, table_base, table_ofs);

    /* Index into the TLB: (addr >> (page bits - entry bits)) & mask. */
    tcg_out_opc_imm(s, OPC_SRLI, TCG_REG_TMP2, addrl,
                    TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
    tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0);
    tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1);

    /* Load the tlb comparator and the addend.  */
    tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_TMP0, TCG_REG_TMP2,
               is_load ? offsetof(CPUTLBEntry, addr_read)
               : offsetof(CPUTLBEntry, addr_write));
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2,
               offsetof(CPUTLBEntry, addend));

    /* We don't support unaligned accesses. */
    if (a_bits < s_bits) {
        a_bits = s_bits;
    }
    /* Clear the non-page, non-alignment bits from the address.  */
    compare_mask = (tcg_target_long)TARGET_PAGE_MASK | ((1 << a_bits) - 1);
    if (compare_mask == sextreg(compare_mask, 0, 12)) {
        tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addrl, compare_mask);
    } else {
        tcg_out_movi(s, TCG_TYPE_TL, TCG_REG_TMP1, compare_mask);
        tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP1, TCG_REG_TMP1, addrl);
    }

    /* Compare masked address with the TLB entry. */
    label_ptr[0] = s->code_ptr;
    tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP0, TCG_REG_TMP1, 0);

    /* TLB Hit - translate address using addend.  */
    if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
        /* Drop any garbage above the 32-bit guest address. */
        tcg_out_ext32u(s, TCG_REG_TMP0, addrl);
        addrl = TCG_REG_TMP0;
    }
    tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_REG_TMP2, addrl);
    return TCG_REG_TMP0;
}
991
992static void add_qemu_ldst_label(TCGContext *s, int is_ld, MemOpIdx oi,
993                                TCGType ext,
994                                TCGReg datalo, TCGReg datahi,
995                                TCGReg addrlo, TCGReg addrhi,
996                                void *raddr, tcg_insn_unit **label_ptr)
997{
998    TCGLabelQemuLdst *label = new_ldst_label(s);
999
1000    label->is_ld = is_ld;
1001    label->oi = oi;
1002    label->type = ext;
1003    label->datalo_reg = datalo;
1004    label->datahi_reg = datahi;
1005    label->addrlo_reg = addrlo;
1006    label->addrhi_reg = addrhi;
1007    label->raddr = tcg_splitwx_to_rx(raddr);
1008    label->label_ptr[0] = label_ptr[0];
1009}
1010
/*
 * Slow path for a guest load, entered when the fast-path TLB compare
 * fails: patch the miss branch to land here, call the load helper, and
 * move the result into the destination register before jumping back.
 */
static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    MemOpIdx oi = l->oi;
    MemOp opc = get_memop(oi);
    TCGReg a0 = tcg_target_call_iarg_regs[0];
    TCGReg a1 = tcg_target_call_iarg_regs[1];
    TCGReg a2 = tcg_target_call_iarg_regs[2];
    TCGReg a3 = tcg_target_call_iarg_regs[3];

    /* We don't support oversize guests */
    if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
        g_assert_not_reached();
    }

    /* resolve label address */
    if (!reloc_sbimm12(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    /* call load helper: (env, addr, oi, retaddr) */
    tcg_out_mov(s, TCG_TYPE_PTR, a0, TCG_AREG0);
    tcg_out_mov(s, TCG_TYPE_PTR, a1, l->addrlo_reg);
    tcg_out_movi(s, TCG_TYPE_PTR, a2, oi);
    tcg_out_movi(s, TCG_TYPE_PTR, a3, (tcg_target_long)l->raddr);

    tcg_out_call_int(s, qemu_ld_helpers[opc & MO_SSIZE], false);
    /* NOTE(review): a bool is passed where a TCGType is expected; this
       relies on TCG_TYPE_I32 == 0 and TCG_TYPE_I64 == 1 -- confirm. */
    tcg_out_mov(s, (opc & MO_SIZE) == MO_64, l->datalo_reg, a0);

    /* Resume execution at the recorded return address. */
    tcg_out_goto(s, l->raddr);
    return true;
}
1042
1043static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
1044{
1045    MemOpIdx oi = l->oi;
1046    MemOp opc = get_memop(oi);
1047    MemOp s_bits = opc & MO_SIZE;
1048    TCGReg a0 = tcg_target_call_iarg_regs[0];
1049    TCGReg a1 = tcg_target_call_iarg_regs[1];
1050    TCGReg a2 = tcg_target_call_iarg_regs[2];
1051    TCGReg a3 = tcg_target_call_iarg_regs[3];
1052    TCGReg a4 = tcg_target_call_iarg_regs[4];
1053
1054    /* We don't support oversize guests */
1055    if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
1056        g_assert_not_reached();
1057    }
1058
1059    /* resolve label address */
1060    if (!reloc_sbimm12(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
1061        return false;
1062    }
1063
1064    /* call store helper */
1065    tcg_out_mov(s, TCG_TYPE_PTR, a0, TCG_AREG0);
1066    tcg_out_mov(s, TCG_TYPE_PTR, a1, l->addrlo_reg);
1067    tcg_out_mov(s, TCG_TYPE_PTR, a2, l->datalo_reg);
1068    switch (s_bits) {
1069    case MO_8:
1070        tcg_out_ext8u(s, a2, a2);
1071        break;
1072    case MO_16:
1073        tcg_out_ext16u(s, a2, a2);
1074        break;
1075    default:
1076        break;
1077    }
1078    tcg_out_movi(s, TCG_TYPE_PTR, a3, oi);
1079    tcg_out_movi(s, TCG_TYPE_PTR, a4, (tcg_target_long)l->raddr);
1080
1081    tcg_out_call_int(s, qemu_st_helpers[opc & MO_SIZE], false);
1082
1083    tcg_out_goto(s, l->raddr);
1084    return true;
1085}
1086#else
1087
1088static void tcg_out_test_alignment(TCGContext *s, bool is_ld, TCGReg addr_reg,
1089                                   unsigned a_bits)
1090{
1091    unsigned a_mask = (1 << a_bits) - 1;
1092    TCGLabelQemuLdst *l = new_ldst_label(s);
1093
1094    l->is_ld = is_ld;
1095    l->addrlo_reg = addr_reg;
1096
1097    /* We are expecting a_bits to max out at 7, so we can always use andi. */
1098    tcg_debug_assert(a_bits < 12);
1099    tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addr_reg, a_mask);
1100
1101    l->label_ptr[0] = s->code_ptr;
1102    tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP1, TCG_REG_ZERO, 0);
1103
1104    l->raddr = tcg_splitwx_to_rx(s->code_ptr);
1105}
1106
1107static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l)
1108{
1109    /* resolve label address */
1110    if (!reloc_sbimm12(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
1111        return false;
1112    }
1113
1114    tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_A1, l->addrlo_reg);
1115    tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A0, TCG_AREG0);
1116
1117    /* tail call, with the return address back inline. */
1118    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RA, (uintptr_t)l->raddr);
1119    tcg_out_call_int(s, (const void *)(l->is_ld ? helper_unaligned_ld
1120                                       : helper_unaligned_st), true);
1121    return true;
1122}
1123
/* For user-only, any queued slow path is a failed alignment check. */
static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    return tcg_out_fail_alignment(s, l);
}
1128
/* For user-only, any queued slow path is a failed alignment check. */
static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    return tcg_out_fail_alignment(s, l);
}
1133
1134#endif /* CONFIG_SOFTMMU */
1135
1136static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi,
1137                                   TCGReg base, MemOp opc, bool is_64)
1138{
1139    /* Byte swapping is left to middle-end expansion. */
1140    tcg_debug_assert((opc & MO_BSWAP) == 0);
1141
1142    switch (opc & (MO_SSIZE)) {
1143    case MO_UB:
1144        tcg_out_opc_imm(s, OPC_LBU, lo, base, 0);
1145        break;
1146    case MO_SB:
1147        tcg_out_opc_imm(s, OPC_LB, lo, base, 0);
1148        break;
1149    case MO_UW:
1150        tcg_out_opc_imm(s, OPC_LHU, lo, base, 0);
1151        break;
1152    case MO_SW:
1153        tcg_out_opc_imm(s, OPC_LH, lo, base, 0);
1154        break;
1155    case MO_UL:
1156        if (TCG_TARGET_REG_BITS == 64 && is_64) {
1157            tcg_out_opc_imm(s, OPC_LWU, lo, base, 0);
1158            break;
1159        }
1160        /* FALLTHRU */
1161    case MO_SL:
1162        tcg_out_opc_imm(s, OPC_LW, lo, base, 0);
1163        break;
1164    case MO_UQ:
1165        /* Prefer to load from offset 0 first, but allow for overlap.  */
1166        if (TCG_TARGET_REG_BITS == 64) {
1167            tcg_out_opc_imm(s, OPC_LD, lo, base, 0);
1168        } else if (lo != base) {
1169            tcg_out_opc_imm(s, OPC_LW, lo, base, 0);
1170            tcg_out_opc_imm(s, OPC_LW, hi, base, 4);
1171        } else {
1172            tcg_out_opc_imm(s, OPC_LW, hi, base, 4);
1173            tcg_out_opc_imm(s, OPC_LW, lo, base, 0);
1174        }
1175        break;
1176    default:
1177        g_assert_not_reached();
1178    }
1179}
1180
/*
 * Emit a guest memory load.  *args holds, in order: the data
 * register(s) (low, plus high for 64-bit data on a 32-bit host),
 * the address register(s), then the MemOpIdx.
 *
 * With SOFTMMU, a TLB lookup is emitted inline and a slow-path stub
 * is queued for the miss case.  For user-only, an optional alignment
 * test is emitted and guest_base is applied.
 */
static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
{
    TCGReg addr_regl, addr_regh __attribute__((unused));
    TCGReg data_regl, data_regh;
    MemOpIdx oi;
    MemOp opc;
#if defined(CONFIG_SOFTMMU)
    tcg_insn_unit *label_ptr[1];
#else
    unsigned a_bits;
#endif
    TCGReg base;

    data_regl = *args++;
    data_regh = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0);
    addr_regl = *args++;
    addr_regh = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
    oi = *args++;
    opc = get_memop(oi);

#if defined(CONFIG_SOFTMMU)
    /* TLB misses branch out; the branch is queued via the ldst label. */
    base = tcg_out_tlb_load(s, addr_regl, addr_regh, oi, label_ptr, 1);
    tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64);
    add_qemu_ldst_label(s, 1, oi,
                        (is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32),
                        data_regl, data_regh, addr_regl, addr_regh,
                        s->code_ptr, label_ptr);
#else
    a_bits = get_alignment_bits(opc);
    if (a_bits) {
        tcg_out_test_alignment(s, true, addr_regl, a_bits);
    }
    base = addr_regl;
    if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
        /* Zero-extend a 32-bit guest address on a 64-bit host. */
        tcg_out_ext32u(s, TCG_REG_TMP0, base);
        base = TCG_REG_TMP0;
    }
    if (guest_base != 0) {
        tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_GUEST_BASE_REG, base);
        base = TCG_REG_TMP0;
    }
    tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64);
#endif
}
1225
1226static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg lo, TCGReg hi,
1227                                   TCGReg base, MemOp opc)
1228{
1229    /* Byte swapping is left to middle-end expansion. */
1230    tcg_debug_assert((opc & MO_BSWAP) == 0);
1231
1232    switch (opc & (MO_SSIZE)) {
1233    case MO_8:
1234        tcg_out_opc_store(s, OPC_SB, base, lo, 0);
1235        break;
1236    case MO_16:
1237        tcg_out_opc_store(s, OPC_SH, base, lo, 0);
1238        break;
1239    case MO_32:
1240        tcg_out_opc_store(s, OPC_SW, base, lo, 0);
1241        break;
1242    case MO_64:
1243        if (TCG_TARGET_REG_BITS == 64) {
1244            tcg_out_opc_store(s, OPC_SD, base, lo, 0);
1245        } else {
1246            tcg_out_opc_store(s, OPC_SW, base, lo, 0);
1247            tcg_out_opc_store(s, OPC_SW, base, hi, 4);
1248        }
1249        break;
1250    default:
1251        g_assert_not_reached();
1252    }
1253}
1254
/*
 * Emit a guest memory store.  *args holds, in order: the data
 * register(s) (low, plus high for 64-bit data on a 32-bit host),
 * the address register(s), then the MemOpIdx.
 *
 * With SOFTMMU, a TLB lookup is emitted inline and a slow-path stub
 * is queued for the miss case.  For user-only, an optional alignment
 * test is emitted and guest_base is applied.
 */
static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
{
    TCGReg addr_regl, addr_regh __attribute__((unused));
    TCGReg data_regl, data_regh;
    MemOpIdx oi;
    MemOp opc;
#if defined(CONFIG_SOFTMMU)
    tcg_insn_unit *label_ptr[1];
#else
    unsigned a_bits;
#endif
    TCGReg base;

    data_regl = *args++;
    data_regh = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0);
    addr_regl = *args++;
    addr_regh = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
    oi = *args++;
    opc = get_memop(oi);

#if defined(CONFIG_SOFTMMU)
    /* TLB misses branch out; the branch is queued via the ldst label. */
    base = tcg_out_tlb_load(s, addr_regl, addr_regh, oi, label_ptr, 0);
    tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc);
    add_qemu_ldst_label(s, 0, oi,
                        (is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32),
                        data_regl, data_regh, addr_regl, addr_regh,
                        s->code_ptr, label_ptr);
#else
    a_bits = get_alignment_bits(opc);
    if (a_bits) {
        tcg_out_test_alignment(s, false, addr_regl, a_bits);
    }
    base = addr_regl;
    if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
        /* Zero-extend a 32-bit guest address on a 64-bit host. */
        tcg_out_ext32u(s, TCG_REG_TMP0, base);
        base = TCG_REG_TMP0;
    }
    if (guest_base != 0) {
        tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_GUEST_BASE_REG, base);
        base = TCG_REG_TMP0;
    }
    tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc);
#endif
}
1299
/* Epilogue entry used by exit_tb; set by tcg_target_qemu_prologue. */
static const tcg_insn_unit *tb_ret_addr;
1301
1302static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
1303{
1304    /* Reuse the zeroing that exists for goto_ptr.  */
1305    if (a0 == 0) {
1306        tcg_out_call_int(s, tcg_code_gen_epilogue, true);
1307    } else {
1308        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A0, a0);
1309        tcg_out_call_int(s, tb_ret_addr, true);
1310    }
1311}
1312
/*
 * Emit the goto_tb sequence: a JAL slot that tb_target_set_jmp_target
 * later patches (or turns into a NOP), followed by an indirect jump
 * through the stored target address for the out-of-range case.
 */
static void tcg_out_goto_tb(TCGContext *s, int which)
{
    /* Direct branch will be patched by tb_target_set_jmp_target. */
    set_jmp_insn_offset(s, which);
    tcg_out32(s, OPC_JAL);

    /* When branch is out of range, fall through to indirect. */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_REG_ZERO,
               get_jmp_target_addr(s, which));
    tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, TCG_REG_TMP0, 0);
    set_jmp_reset_offset(s, which);
}
1325
1326void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
1327                              uintptr_t jmp_rx, uintptr_t jmp_rw)
1328{
1329    uintptr_t addr = tb->jmp_target_addr[n];
1330    ptrdiff_t offset = addr - jmp_rx;
1331    tcg_insn_unit insn;
1332
1333    /* Either directly branch, or fall through to indirect branch. */
1334    if (offset == sextreg(offset, 0, 20)) {
1335        insn = encode_uj(OPC_JAL, TCG_REG_ZERO, offset);
1336    } else {
1337        insn = OPC_NOP;
1338    }
1339    qatomic_set((uint32_t *)jmp_rw, insn);
1340    flush_idcache_range(jmp_rx, jmp_rw, 4);
1341}
1342
/*
 * Emit host code for a single TCG opcode.  Opcodes that are always
 * expanded through dedicated emitters (mov, call, exit_tb, goto_tb)
 * must never reach this switch and abort if they do.
 */
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS])
{
    TCGArg a0 = args[0];
    TCGArg a1 = args[1];
    TCGArg a2 = args[2];
    int c2 = const_args[2];    /* nonzero when a2 is a constant */

    switch (opc) {
    case INDEX_op_goto_ptr:
        tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, a0, 0);
        break;

    case INDEX_op_br:
        tcg_out_reloc(s, s->code_ptr, R_RISCV_JAL, arg_label(a0), 0);
        tcg_out_opc_jump(s, OPC_JAL, TCG_REG_ZERO, 0);
        break;

    /* Host-memory loads. */
    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
        tcg_out_ldst(s, OPC_LBU, a0, a1, a2);
        break;
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
        tcg_out_ldst(s, OPC_LB, a0, a1, a2);
        break;
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
        tcg_out_ldst(s, OPC_LHU, a0, a1, a2);
        break;
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
        tcg_out_ldst(s, OPC_LH, a0, a1, a2);
        break;
    case INDEX_op_ld32u_i64:
        tcg_out_ldst(s, OPC_LWU, a0, a1, a2);
        break;
    case INDEX_op_ld_i32:
    case INDEX_op_ld32s_i64:
        tcg_out_ldst(s, OPC_LW, a0, a1, a2);
        break;
    case INDEX_op_ld_i64:
        tcg_out_ldst(s, OPC_LD, a0, a1, a2);
        break;

    /* Host-memory stores. */
    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
        tcg_out_ldst(s, OPC_SB, a0, a1, a2);
        break;
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
        tcg_out_ldst(s, OPC_SH, a0, a1, a2);
        break;
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
        tcg_out_ldst(s, OPC_SW, a0, a1, a2);
        break;
    case INDEX_op_st_i64:
        tcg_out_ldst(s, OPC_SD, a0, a1, a2);
        break;

    /*
     * Arithmetic: the *W forms produce sign-extended 32-bit results
     * on RV64, matching TCG's i32 register representation.
     */
    case INDEX_op_add_i32:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ADDIW, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_ADDW, a0, a1, a2);
        }
        break;
    case INDEX_op_add_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ADDI, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_ADD, a0, a1, a2);
        }
        break;

    case INDEX_op_sub_i32:
        if (c2) {
            /* There is no SUBI; add the negated constant instead. */
            tcg_out_opc_imm(s, OPC_ADDIW, a0, a1, -a2);
        } else {
            tcg_out_opc_reg(s, OPC_SUBW, a0, a1, a2);
        }
        break;
    case INDEX_op_sub_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ADDI, a0, a1, -a2);
        } else {
            tcg_out_opc_reg(s, OPC_SUB, a0, a1, a2);
        }
        break;

    case INDEX_op_and_i32:
    case INDEX_op_and_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ANDI, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_AND, a0, a1, a2);
        }
        break;

    case INDEX_op_or_i32:
    case INDEX_op_or_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ORI, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_OR, a0, a1, a2);
        }
        break;

    case INDEX_op_xor_i32:
    case INDEX_op_xor_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_XORI, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_XOR, a0, a1, a2);
        }
        break;

    case INDEX_op_not_i32:
    case INDEX_op_not_i64:
        /* not == xor with all-ones. */
        tcg_out_opc_imm(s, OPC_XORI, a0, a1, -1);
        break;

    case INDEX_op_neg_i32:
        /* neg == subtract from zero. */
        tcg_out_opc_reg(s, OPC_SUBW, a0, TCG_REG_ZERO, a1);
        break;
    case INDEX_op_neg_i64:
        tcg_out_opc_reg(s, OPC_SUB, a0, TCG_REG_ZERO, a1);
        break;

    case INDEX_op_mul_i32:
        tcg_out_opc_reg(s, OPC_MULW, a0, a1, a2);
        break;
    case INDEX_op_mul_i64:
        tcg_out_opc_reg(s, OPC_MUL, a0, a1, a2);
        break;

    case INDEX_op_div_i32:
        tcg_out_opc_reg(s, OPC_DIVW, a0, a1, a2);
        break;
    case INDEX_op_div_i64:
        tcg_out_opc_reg(s, OPC_DIV, a0, a1, a2);
        break;

    case INDEX_op_divu_i32:
        tcg_out_opc_reg(s, OPC_DIVUW, a0, a1, a2);
        break;
    case INDEX_op_divu_i64:
        tcg_out_opc_reg(s, OPC_DIVU, a0, a1, a2);
        break;

    case INDEX_op_rem_i32:
        tcg_out_opc_reg(s, OPC_REMW, a0, a1, a2);
        break;
    case INDEX_op_rem_i64:
        tcg_out_opc_reg(s, OPC_REM, a0, a1, a2);
        break;

    case INDEX_op_remu_i32:
        tcg_out_opc_reg(s, OPC_REMUW, a0, a1, a2);
        break;
    case INDEX_op_remu_i64:
        tcg_out_opc_reg(s, OPC_REMU, a0, a1, a2);
        break;

    /* Shifts: constant shift amounts are masked to the operand width. */
    case INDEX_op_shl_i32:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SLLIW, a0, a1, a2 & 0x1f);
        } else {
            tcg_out_opc_reg(s, OPC_SLLW, a0, a1, a2);
        }
        break;
    case INDEX_op_shl_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SLLI, a0, a1, a2 & 0x3f);
        } else {
            tcg_out_opc_reg(s, OPC_SLL, a0, a1, a2);
        }
        break;

    case INDEX_op_shr_i32:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SRLIW, a0, a1, a2 & 0x1f);
        } else {
            tcg_out_opc_reg(s, OPC_SRLW, a0, a1, a2);
        }
        break;
    case INDEX_op_shr_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SRLI, a0, a1, a2 & 0x3f);
        } else {
            tcg_out_opc_reg(s, OPC_SRL, a0, a1, a2);
        }
        break;

    case INDEX_op_sar_i32:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SRAIW, a0, a1, a2 & 0x1f);
        } else {
            tcg_out_opc_reg(s, OPC_SRAW, a0, a1, a2);
        }
        break;
    case INDEX_op_sar_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SRAI, a0, a1, a2 & 0x3f);
        } else {
            tcg_out_opc_reg(s, OPC_SRA, a0, a1, a2);
        }
        break;

    /* Double-word add/sub with carry. */
    case INDEX_op_add2_i32:
        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
                        const_args[4], const_args[5], false, true);
        break;
    case INDEX_op_add2_i64:
        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
                        const_args[4], const_args[5], false, false);
        break;
    case INDEX_op_sub2_i32:
        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
                        const_args[4], const_args[5], true, true);
        break;
    case INDEX_op_sub2_i64:
        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
                        const_args[4], const_args[5], true, false);
        break;

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        tcg_out_brcond(s, a2, a0, a1, arg_label(args[3]));
        break;
    case INDEX_op_brcond2_i32:
        tcg_out_brcond2(s, args[4], a0, a1, a2, args[3], arg_label(args[5]));
        break;

    case INDEX_op_setcond_i32:
    case INDEX_op_setcond_i64:
        tcg_out_setcond(s, args[3], a0, a1, a2);
        break;
    case INDEX_op_setcond2_i32:
        tcg_out_setcond2(s, args[5], a0, a1, a2, args[3], args[4]);
        break;

    /* Guest memory accesses. */
    case INDEX_op_qemu_ld_i32:
        tcg_out_qemu_ld(s, args, false);
        break;
    case INDEX_op_qemu_ld_i64:
        tcg_out_qemu_ld(s, args, true);
        break;
    case INDEX_op_qemu_st_i32:
        tcg_out_qemu_st(s, args, false);
        break;
    case INDEX_op_qemu_st_i64:
        tcg_out_qemu_st(s, args, true);
        break;

    /* Zero/sign extensions. */
    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
        tcg_out_ext8u(s, a0, a1);
        break;

    case INDEX_op_ext16u_i32:
    case INDEX_op_ext16u_i64:
        tcg_out_ext16u(s, a0, a1);
        break;

    case INDEX_op_ext32u_i64:
    case INDEX_op_extu_i32_i64:
        tcg_out_ext32u(s, a0, a1);
        break;

    case INDEX_op_ext8s_i32:
    case INDEX_op_ext8s_i64:
        tcg_out_ext8s(s, a0, a1);
        break;

    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
        tcg_out_ext16s(s, a0, a1);
        break;

    case INDEX_op_ext32s_i64:
    case INDEX_op_extrl_i64_i32:
    case INDEX_op_ext_i32_i64:
        tcg_out_ext32s(s, a0, a1);
        break;

    case INDEX_op_extrh_i64_i32:
        /* Extract high half == arithmetic shift right by 32. */
        tcg_out_opc_imm(s, OPC_SRAI, a0, a1, 32);
        break;

    case INDEX_op_mulsh_i32:
    case INDEX_op_mulsh_i64:
        tcg_out_opc_reg(s, OPC_MULH, a0, a1, a2);
        break;

    case INDEX_op_muluh_i32:
    case INDEX_op_muluh_i64:
        tcg_out_opc_reg(s, OPC_MULHU, a0, a1, a2);
        break;

    case INDEX_op_mb:
        tcg_out_mb(s, a0);
        break;

    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
    case INDEX_op_mov_i64:
    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
    case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb.  */
    case INDEX_op_goto_tb:  /* Always emitted via tcg_out_goto_tb.  */
    default:
        g_assert_not_reached();
    }
}
1658
/*
 * Return the register-constraint set index for a TCG opcode.
 * NOTE(review): constraint letters ('r', 'L', 'Z', 'I', 'N', 'M', 'i')
 * are defined by this backend's constraint tables elsewhere in the
 * file; see those definitions for their exact meanings.
 */
static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
{
    switch (op) {
    case INDEX_op_goto_ptr:
        return C_O0_I1(r);

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_not_i32:
    case INDEX_op_neg_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_not_i64:
    case INDEX_op_neg_i64:
    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
    case INDEX_op_ext16u_i32:
    case INDEX_op_ext16u_i64:
    case INDEX_op_ext32u_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_ext8s_i32:
    case INDEX_op_ext8s_i64:
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
    case INDEX_op_ext32s_i64:
    case INDEX_op_extrl_i64_i32:
    case INDEX_op_extrh_i64_i32:
    case INDEX_op_ext_i32_i64:
        return C_O1_I1(r, r);

    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
        return C_O0_I2(rZ, r);

    case INDEX_op_add_i32:
    case INDEX_op_and_i32:
    case INDEX_op_or_i32:
    case INDEX_op_xor_i32:
    case INDEX_op_add_i64:
    case INDEX_op_and_i64:
    case INDEX_op_or_i64:
    case INDEX_op_xor_i64:
        return C_O1_I2(r, r, rI);

    case INDEX_op_sub_i32:
    case INDEX_op_sub_i64:
        return C_O1_I2(r, rZ, rN);

    case INDEX_op_mul_i32:
    case INDEX_op_mulsh_i32:
    case INDEX_op_muluh_i32:
    case INDEX_op_div_i32:
    case INDEX_op_divu_i32:
    case INDEX_op_rem_i32:
    case INDEX_op_remu_i32:
    case INDEX_op_setcond_i32:
    case INDEX_op_mul_i64:
    case INDEX_op_mulsh_i64:
    case INDEX_op_muluh_i64:
    case INDEX_op_div_i64:
    case INDEX_op_divu_i64:
    case INDEX_op_rem_i64:
    case INDEX_op_remu_i64:
    case INDEX_op_setcond_i64:
        return C_O1_I2(r, rZ, rZ);

    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i64:
        return C_O1_I2(r, r, ri);

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        return C_O0_I2(rZ, rZ);

    case INDEX_op_add2_i32:
    case INDEX_op_add2_i64:
    case INDEX_op_sub2_i32:
    case INDEX_op_sub2_i64:
        return C_O2_I4(r, r, rZ, rZ, rM, rM);

    case INDEX_op_brcond2_i32:
        return C_O0_I4(rZ, rZ, rZ, rZ);

    case INDEX_op_setcond2_i32:
        return C_O1_I4(r, rZ, rZ, rZ, rZ);

    /* Constraint shape depends on host/guest word sizes. */
    case INDEX_op_qemu_ld_i32:
        return (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
                ? C_O1_I1(r, L) : C_O1_I2(r, L, L));
    case INDEX_op_qemu_st_i32:
        return (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
                ? C_O0_I2(LZ, L) : C_O0_I3(LZ, L, L));
    case INDEX_op_qemu_ld_i64:
        return (TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, L)
               : TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? C_O2_I1(r, r, L)
               : C_O2_I2(r, r, L, L));
    case INDEX_op_qemu_st_i64:
        return (TCG_TARGET_REG_BITS == 64 ? C_O0_I2(LZ, L)
               : TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? C_O0_I3(LZ, LZ, L)
               : C_O0_I4(LZ, LZ, L, L));

    default:
        g_assert_not_reached();
    }
}
1781
/*
 * Registers the prologue saves and the epilogue restores; their slots
 * in the frame are laid out in array order starting at SAVE_OFS.
 */
static const int tcg_target_callee_save_regs[] = {
    TCG_REG_S0,       /* used for the global env (TCG_AREG0) */
    TCG_REG_S1,
    TCG_REG_S2,
    TCG_REG_S3,
    TCG_REG_S4,
    TCG_REG_S5,
    TCG_REG_S6,
    TCG_REG_S7,
    TCG_REG_S8,
    TCG_REG_S9,
    TCG_REG_S10,
    TCG_REG_S11,
    TCG_REG_RA,       /* should be last for ABI compliance */
};

/* Stack frame parameters.  */
#define REG_SIZE   (TCG_TARGET_REG_BITS / 8)
#define SAVE_SIZE  ((int)ARRAY_SIZE(tcg_target_callee_save_regs) * REG_SIZE)
#define TEMP_SIZE  (CPU_TEMP_BUF_NLONGS * (int)sizeof(long))
#define FRAME_SIZE ((TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE + SAVE_SIZE \
                     + TCG_TARGET_STACK_ALIGN - 1) \
                    & -TCG_TARGET_STACK_ALIGN)
#define SAVE_OFS   (TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE)

/* We're expecting to be able to use an immediate for frame allocation.  */
QEMU_BUILD_BUG_ON(FRAME_SIZE > 0x7ff);
1809
/* Generate global QEMU prologue and epilogue code */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int i;

    tcg_set_frame(s, TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE, TEMP_SIZE);

    /* TB prologue: allocate the frame and save callee-saved registers. */
    tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_SP, TCG_REG_SP, -FRAME_SIZE);
    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_st(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
                   TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
    }

#if !defined(CONFIG_SOFTMMU)
    /* Pin guest_base into a dedicated, reserved register. */
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base);
    tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
#endif

    /* Call generated code: arg0 becomes env, arg1 is the entry point. */
    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
    tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, tcg_target_call_iarg_regs[1], 0);

    /* Return path for goto_ptr. Set return value to 0 */
    tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
    tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_A0, TCG_REG_ZERO);

    /* TB epilogue: restore callee-saved registers and return. */
    tb_ret_addr = tcg_splitwx_to_rx(s->code_ptr);
    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_ld(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
                   TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
    }

    tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_SP, TCG_REG_SP, FRAME_SIZE);
    tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, TCG_REG_RA, 0);
}
1847
1848static void tcg_target_init(TCGContext *s)
1849{
1850    tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffff;
1851    if (TCG_TARGET_REG_BITS == 64) {
1852        tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffff;
1853    }
1854
1855    tcg_target_call_clobber_regs = -1u;
1856    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S0);
1857    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S1);
1858    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S2);
1859    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S3);
1860    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S4);
1861    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S5);
1862    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S6);
1863    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S7);
1864    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S8);
1865    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S9);
1866    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S10);
1867    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S11);
1868
1869    s->reserved_regs = 0;
1870    tcg_regset_set_reg(s->reserved_regs, TCG_REG_ZERO);
1871    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP0);
1872    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1);
1873    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP2);
1874    tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP);
1875    tcg_regset_set_reg(s->reserved_regs, TCG_REG_GP);
1876    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TP);
1877}
1878
/* Layout of the DWARF unwind blob registered with the JIT interface. */
typedef struct {
    DebugFrameHeader h;
    uint8_t fde_def_cfa[4];
    uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2];
} DebugFrame;

#define ELF_HOST_MACHINE EM_RISCV
1886
/*
 * DWARF CFI describing the frame built by tcg_target_qemu_prologue,
 * used by debuggers to unwind through generated code.
 */
static const DebugFrame debug_frame = {
    .h.cie.len = sizeof(DebugFrameCIE) - 4, /* length after .len member */
    .h.cie.id = -1,
    .h.cie.version = 1,
    .h.cie.code_align = 1,
    .h.cie.data_align = -(TCG_TARGET_REG_BITS / 8) & 0x7f, /* sleb128 */
    .h.cie.return_column = TCG_REG_RA,

    /* Total FDE size does not include the "len" member.  */
    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),

    .fde_def_cfa = {
        12, TCG_REG_SP,                 /* DW_CFA_def_cfa sp, ... */
        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
        (FRAME_SIZE >> 7)
    },
    .fde_reg_ofs = {
        0x80 + 9,  12,                  /* DW_CFA_offset, s1,  -96 */
        0x80 + 18, 11,                  /* DW_CFA_offset, s2,  -88 */
        0x80 + 19, 10,                  /* DW_CFA_offset, s3,  -80 */
        0x80 + 20, 9,                   /* DW_CFA_offset, s4,  -72 */
        0x80 + 21, 8,                   /* DW_CFA_offset, s5,  -64 */
        0x80 + 22, 7,                   /* DW_CFA_offset, s6,  -56 */
        0x80 + 23, 6,                   /* DW_CFA_offset, s7,  -48 */
        0x80 + 24, 5,                   /* DW_CFA_offset, s8,  -40 */
        0x80 + 25, 4,                   /* DW_CFA_offset, s9,  -32 */
        0x80 + 26, 3,                   /* DW_CFA_offset, s10, -24 */
        0x80 + 27, 2,                   /* DW_CFA_offset, s11, -16 */
        0x80 + 1 , 1,                   /* DW_CFA_offset, ra,  -8 */
    }
};
1918
/* Register the code buffer and its unwind info with the debugger. */
void tcg_register_jit(const void *buf, size_t buf_size)
{
    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}
1923