/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2018 SiFive, Inc
 * Copyright (c) 2008-2009 Arnaud Patard <arnaud.patard@rtp-net.org>
 * Copyright (c) 2009 Aurelien Jarno <aurelien@aurel32.net>
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Based on i386/tcg-target.c and mips/tcg-target.c
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "../tcg-ldst.c.inc"
#include "../tcg-pool.c.inc"

#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "zero",
    "ra",
    "sp",
    "gp",
    "tp",
    "t0",
    "t1",
    "t2",
    "s0",
    "s1",
    "a0",
    "a1",
    "a2",
    "a3",
    "a4",
    "a5",
    "a6",
    "a7",
    "s2",
    "s3",
    "s4",
    "s5",
    "s6",
    "s7",
    "s8",
    "s9",
    "s10",
    "s11",
    "t3",
    "t4",
    "t5",
    "t6"
};
#endif

static const int tcg_target_reg_alloc_order[] = {
    /* Call saved registers */
    /* TCG_REG_S0 reserved for TCG_AREG0 */
    TCG_REG_S1,
    TCG_REG_S2,
    TCG_REG_S3,
    TCG_REG_S4,
    TCG_REG_S5,
    TCG_REG_S6,
    TCG_REG_S7,
    TCG_REG_S8,
    TCG_REG_S9,
    TCG_REG_S10,
    TCG_REG_S11,

    /* Call clobbered registers */
    TCG_REG_T0,
    TCG_REG_T1,
    TCG_REG_T2,
    TCG_REG_T3,
    TCG_REG_T4,
    TCG_REG_T5,
    TCG_REG_T6,

    /* Argument registers */
    TCG_REG_A0,
    TCG_REG_A1,
    TCG_REG_A2,
    TCG_REG_A3,
    TCG_REG_A4,
    TCG_REG_A5,
    TCG_REG_A6,
    TCG_REG_A7,
};

static const int tcg_target_call_iarg_regs[] = {
    TCG_REG_A0,
    TCG_REG_A1,
    TCG_REG_A2,
    TCG_REG_A3,
    TCG_REG_A4,
    TCG_REG_A5,
    TCG_REG_A6,
    TCG_REG_A7,
};

static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
{
    tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
    tcg_debug_assert(slot >= 0 && slot <= 1);
    return TCG_REG_A0 + slot;
}

#define TCG_CT_CONST_ZERO  0x100
#define TCG_CT_CONST_S12   0x200
#define TCG_CT_CONST_N12   0x400
#define TCG_CT_CONST_M12   0x800

#define ALL_GENERAL_REGS   MAKE_64BIT_MASK(0, 32)

#define sextreg  sextract64

/* test if a constant matches the constraint */
static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
{
    if (ct & TCG_CT_CONST) {
        return 1;
    }
    if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return 1;
    }
    /*
     * Sign extended from 12 bits: [-0x800, 0x7ff].
     * Used for most arithmetic, as this is the size of the ISA's
     * sign-extended 12-bit immediate field.
     */
    if ((ct & TCG_CT_CONST_S12) && val >= -0x800 && val <= 0x7ff) {
        return 1;
    }
    /*
     * Sign extended from 12 bits, negated: [-0x7ff, 0x800].
     * Used for subtraction, where a constant must be handled by ADDI.
     */
    if ((ct & TCG_CT_CONST_N12) && val >= -0x7ff && val <= 0x800) {
        return 1;
    }
    /*
     * Sign extended from 12 bits, +/- matching: [-0x7ff, 0x7ff].
     * Used by addsub2, which may need the negative operation,
     * and requires the modified constant to be representable.
     */
    if ((ct & TCG_CT_CONST_M12) && val >= -0x7ff && val <= 0x7ff) {
        return 1;
    }
    return 0;
}

/*
 * RISC-V Base ISA opcodes (IM)
 */

typedef enum {
    OPC_ADD = 0x33,
    OPC_ADDI = 0x13,
    OPC_AND = 0x7033,
    OPC_ANDI = 0x7013,
    OPC_AUIPC = 0x17,
    OPC_BEQ = 0x63,
    OPC_BGE = 0x5063,
    OPC_BGEU = 0x7063,
    OPC_BLT = 0x4063,
    OPC_BLTU = 0x6063,
    OPC_BNE = 0x1063,
    OPC_DIV = 0x2004033,
    OPC_DIVU = 0x2005033,
    OPC_JAL = 0x6f,
    OPC_JALR = 0x67,
    OPC_LB = 0x3,
    OPC_LBU = 0x4003,
    OPC_LD = 0x3003,
    OPC_LH = 0x1003,
    OPC_LHU = 0x5003,
    OPC_LUI = 0x37,
    OPC_LW = 0x2003,
    OPC_LWU = 0x6003,
    OPC_MUL = 0x2000033,
    OPC_MULH = 0x2001033,
    OPC_MULHSU = 0x2002033,
    OPC_MULHU = 0x2003033,
    OPC_OR = 0x6033,
    OPC_ORI = 0x6013,
    OPC_REM = 0x2006033,
    OPC_REMU = 0x2007033,
    OPC_SB = 0x23,
    OPC_SD = 0x3023,
    OPC_SH = 0x1023,
    OPC_SLL = 0x1033,
    OPC_SLLI = 0x1013,
    OPC_SLT = 0x2033,
    OPC_SLTI = 0x2013,
    OPC_SLTIU = 0x3013,
    OPC_SLTU = 0x3033,
    OPC_SRA = 0x40005033,
    OPC_SRAI = 0x40005013,
    OPC_SRL = 0x5033,
    OPC_SRLI = 0x5013,
    OPC_SUB = 0x40000033,
    OPC_SW = 0x2023,
    OPC_XOR = 0x4033,
    OPC_XORI = 0x4013,

    OPC_ADDIW = 0x1b,
    OPC_ADDW = 0x3b,
    OPC_DIVUW = 0x200503b,
    OPC_DIVW = 0x200403b,
    OPC_MULW = 0x200003b,
    OPC_REMUW = 0x200703b,
    OPC_REMW = 0x200603b,
    OPC_SLLIW = 0x101b,
    OPC_SLLW = 0x103b,
    OPC_SRAIW = 0x4000501b,
    OPC_SRAW = 0x4000503b,
    OPC_SRLIW = 0x501b,
    OPC_SRLW = 0x503b,
    OPC_SUBW = 0x4000003b,

    OPC_FENCE = 0x0000000f,
    OPC_NOP   = OPC_ADDI,   /* nop = addi x0,x0,0 */
} RISCVInsn;

/*
 * RISC-V immediate and instruction encoders (excludes 16-bit RVC)
 */

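/*
 * Each encoder ORs register and immediate fields into the fixed
 * opcode/funct bits carried by the RISCVInsn value.  Worked example
 * (hand-checked, for illustration only): encode_r(OPC_ADD, TCG_REG_A0,
 * TCG_REG_A1, TCG_REG_A2) = 0x33 | (10 << 7) | (11 << 15) | (12 << 20)
 * = 0x00c58533, i.e. "add a0, a1, a2".
 */
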
/* Type-R */

static int32_t encode_r(RISCVInsn opc, TCGReg rd, TCGReg rs1, TCGReg rs2)
{
    return opc | (rd & 0x1f) << 7 | (rs1 & 0x1f) << 15 | (rs2 & 0x1f) << 20;
}

/* Type-I */

static int32_t encode_imm12(uint32_t imm)
{
    return (imm & 0xfff) << 20;
}

static int32_t encode_i(RISCVInsn opc, TCGReg rd, TCGReg rs1, uint32_t imm)
{
    return opc | (rd & 0x1f) << 7 | (rs1 & 0x1f) << 15 | encode_imm12(imm);
}

/* Type-S */

static int32_t encode_simm12(uint32_t imm)
{
    int32_t ret = 0;

    ret |= (imm & 0xFE0) << 20;
    ret |= (imm & 0x1F) << 7;

    return ret;
}
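
/*
 * S-type immediates are split by the ISA: imm[11:5] goes to inst[31:25]
 * and imm[4:0] to inst[11:7], which is what the two shifts above do.
 */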

static int32_t encode_s(RISCVInsn opc, TCGReg rs1, TCGReg rs2, uint32_t imm)
{
    return opc | (rs1 & 0x1f) << 15 | (rs2 & 0x1f) << 20 | encode_simm12(imm);
}

/* Type-SB */

static int32_t encode_sbimm12(uint32_t imm)
{
    int32_t ret = 0;

    ret |= (imm & 0x1000) << 19;
    ret |= (imm & 0x7e0) << 20;
    ret |= (imm & 0x1e) << 7;
    ret |= (imm & 0x800) >> 4;

    return ret;
}
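
/*
 * SB-type (branch) immediates are scattered: imm[12] -> inst[31],
 * imm[10:5] -> inst[30:25], imm[4:1] -> inst[11:8], imm[11] -> inst[7].
 * Bit 0 of a branch offset is always zero and is never encoded.
 */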

static int32_t encode_sb(RISCVInsn opc, TCGReg rs1, TCGReg rs2, uint32_t imm)
{
    return opc | (rs1 & 0x1f) << 15 | (rs2 & 0x1f) << 20 | encode_sbimm12(imm);
}

/* Type-U */

static int32_t encode_uimm20(uint32_t imm)
{
    return imm & 0xfffff000;
}

static int32_t encode_u(RISCVInsn opc, TCGReg rd, uint32_t imm)
{
    return opc | (rd & 0x1f) << 7 | encode_uimm20(imm);
}

/* Type-UJ */

static int32_t encode_ujimm20(uint32_t imm)
{
    int32_t ret = 0;

    ret |= (imm & 0x0007fe) << (21 - 1);
    ret |= (imm & 0x000800) << (20 - 11);
    ret |= (imm & 0x0ff000) << (12 - 12);
    ret |= (imm & 0x100000) << (31 - 20);

    return ret;
}
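
/*
 * UJ-type (jump) immediates: imm[20] -> inst[31], imm[10:1] -> inst[30:21],
 * imm[11] -> inst[20], imm[19:12] -> inst[19:12]; bit 0 is again implicit.
 */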

static int32_t encode_uj(RISCVInsn opc, TCGReg rd, uint32_t imm)
{
    return opc | (rd & 0x1f) << 7 | encode_ujimm20(imm);
}

/*
 * RISC-V instruction emitters
 */

static void tcg_out_opc_reg(TCGContext *s, RISCVInsn opc,
                            TCGReg rd, TCGReg rs1, TCGReg rs2)
{
    tcg_out32(s, encode_r(opc, rd, rs1, rs2));
}

static void tcg_out_opc_imm(TCGContext *s, RISCVInsn opc,
                            TCGReg rd, TCGReg rs1, TCGArg imm)
{
    tcg_out32(s, encode_i(opc, rd, rs1, imm));
}

static void tcg_out_opc_store(TCGContext *s, RISCVInsn opc,
                              TCGReg rs1, TCGReg rs2, uint32_t imm)
{
    tcg_out32(s, encode_s(opc, rs1, rs2, imm));
}

static void tcg_out_opc_branch(TCGContext *s, RISCVInsn opc,
                               TCGReg rs1, TCGReg rs2, uint32_t imm)
{
    tcg_out32(s, encode_sb(opc, rs1, rs2, imm));
}

static void tcg_out_opc_upper(TCGContext *s, RISCVInsn opc,
                              TCGReg rd, uint32_t imm)
{
    tcg_out32(s, encode_u(opc, rd, imm));
}

static void tcg_out_opc_jump(TCGContext *s, RISCVInsn opc,
                             TCGReg rd, uint32_t imm)
{
    tcg_out32(s, encode_uj(opc, rd, imm));
}

static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
    int i;
    for (i = 0; i < count; ++i) {
        p[i] = OPC_NOP;
    }
}

/*
 * Relocations
 */

static bool reloc_sbimm12(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    intptr_t offset = (intptr_t)target - (intptr_t)src_rx;

    tcg_debug_assert((offset & 1) == 0);
    if (offset == sextreg(offset, 0, 12)) {
        *src_rw |= encode_sbimm12(offset);
        return true;
    }

    return false;
}

static bool reloc_jimm20(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    intptr_t offset = (intptr_t)target - (intptr_t)src_rx;

    tcg_debug_assert((offset & 1) == 0);
    if (offset == sextreg(offset, 0, 20)) {
        *src_rw |= encode_ujimm20(offset);
        return true;
    }

    return false;
}

static bool reloc_call(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    intptr_t offset = (intptr_t)target - (intptr_t)src_rx;
    int32_t lo = sextreg(offset, 0, 12);
    int32_t hi = offset - lo;

    if (offset == hi + lo) {
        src_rw[0] |= encode_uimm20(hi);
        src_rw[1] |= encode_imm12(lo);
        return true;
    }

    return false;
}
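
/*
 * The AUIPC+ADDI split above relies on ADDI sign-extending its 12-bit
 * immediate: lo is in [-0x800, 0x7ff] and hi = offset - lo absorbs the
 * difference.  Worked example (illustration only): offset = 0x12945
 * gives lo = -0x6bb and hi = 0x13000, and 0x13000 - 0x6bb = 0x12945.
 */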

static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    tcg_debug_assert(addend == 0);
    switch (type) {
    case R_RISCV_BRANCH:
        return reloc_sbimm12(code_ptr, (tcg_insn_unit *)value);
    case R_RISCV_JAL:
        return reloc_jimm20(code_ptr, (tcg_insn_unit *)value);
    case R_RISCV_CALL:
        return reloc_call(code_ptr, (tcg_insn_unit *)value);
    default:
        g_assert_not_reached();
    }
}

/*
 * TCG intrinsics
 */

static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    if (ret == arg) {
        return true;
    }
    switch (type) {
    case TCG_TYPE_I32:
    case TCG_TYPE_I64:
        tcg_out_opc_imm(s, OPC_ADDI, ret, arg, 0);
        break;
    default:
        g_assert_not_reached();
    }
    return true;
}

static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd,
                         tcg_target_long val)
{
    tcg_target_long lo, hi, tmp;
    int shift, ret;

    if (type == TCG_TYPE_I32) {
        val = (int32_t)val;
    }

    lo = sextreg(val, 0, 12);
    if (val == lo) {
        tcg_out_opc_imm(s, OPC_ADDI, rd, TCG_REG_ZERO, lo);
        return;
    }

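    /*
     * Values that fit in 32 bits decompose as LUI (upper 20 bits) plus
     * ADDIW (sign-extended low 12 bits).  E.g. (illustration only)
     * 0x12345fff: lo = -1, hi = 0x12346000, hence
     * "lui rd, 0x12346" + "addiw rd, rd, -1".
     */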
    hi = val - lo;
    if (val == (int32_t)val) {
        tcg_out_opc_upper(s, OPC_LUI, rd, hi);
        if (lo != 0) {
            tcg_out_opc_imm(s, OPC_ADDIW, rd, rd, lo);
        }
        return;
    }

    tmp = tcg_pcrel_diff(s, (void *)val);
    if (tmp == (int32_t)tmp) {
        tcg_out_opc_upper(s, OPC_AUIPC, rd, 0);
        tcg_out_opc_imm(s, OPC_ADDI, rd, rd, 0);
        ret = reloc_call(s->code_ptr - 2, (const tcg_insn_unit *)val);
        tcg_debug_assert(ret == true);
        return;
    }

    /* Look for a single 20-bit section.  */
    shift = ctz64(val);
    tmp = val >> shift;
    if (tmp == sextreg(tmp, 0, 20)) {
        tcg_out_opc_upper(s, OPC_LUI, rd, tmp << 12);
        if (shift > 12) {
            tcg_out_opc_imm(s, OPC_SLLI, rd, rd, shift - 12);
        } else {
            tcg_out_opc_imm(s, OPC_SRAI, rd, rd, 12 - shift);
        }
        return;
    }

    /* Look for a few high zero bits, with lots of bits set in the middle.  */
    shift = clz64(val);
    tmp = val << shift;
    if (tmp == sextreg(tmp, 12, 20) << 12) {
        tcg_out_opc_upper(s, OPC_LUI, rd, tmp);
        tcg_out_opc_imm(s, OPC_SRLI, rd, rd, shift);
        return;
    } else if (tmp == sextreg(tmp, 0, 12)) {
        tcg_out_opc_imm(s, OPC_ADDI, rd, TCG_REG_ZERO, tmp);
        tcg_out_opc_imm(s, OPC_SRLI, rd, rd, shift);
        return;
    }

    /* Drop into the constant pool.  */
    new_pool_label(s, val, R_RISCV_CALL, s->code_ptr, 0);
    tcg_out_opc_upper(s, OPC_AUIPC, rd, 0);
    tcg_out_opc_imm(s, OPC_LD, rd, rd, 0);
}
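
/*
 * Worked example for the "single 20-bit section" case above
 * (illustration only): val = 0x123450000000 has ctz = 28 and
 * 0x12345 fits in 20 signed bits, so it is built as
 * "lui rd, 0x12345" followed by "slli rd, rd, 16" (28 - 12).
 */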

static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
{
    return false;
}

static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                             tcg_target_long imm)
{
    /* This function is only used for passing structs by reference. */
    g_assert_not_reached();
}

static void tcg_out_ext8u(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_imm(s, OPC_ANDI, ret, arg, 0xff);
}

static void tcg_out_ext16u(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_imm(s, OPC_SLLIW, ret, arg, 16);
    tcg_out_opc_imm(s, OPC_SRLIW, ret, ret, 16);
}

static void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_imm(s, OPC_SLLI, ret, arg, 32);
    tcg_out_opc_imm(s, OPC_SRLI, ret, ret, 32);
}

static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_imm(s, OPC_SLLIW, ret, arg, 24);
    tcg_out_opc_imm(s, OPC_SRAIW, ret, ret, 24);
}

static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_imm(s, OPC_SLLIW, ret, arg, 16);
    tcg_out_opc_imm(s, OPC_SRAIW, ret, ret, 16);
}

static void tcg_out_ext32s(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_imm(s, OPC_ADDIW, ret, arg, 0);
}

static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg)
{
    if (ret != arg) {
        tcg_out_ext32s(s, ret, arg);
    }
}

static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_ext32u(s, ret, arg);
}

static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_ext32s(s, ret, arg);
}

static void tcg_out_ldst(TCGContext *s, RISCVInsn opc, TCGReg data,
                         TCGReg addr, intptr_t offset)
{
    intptr_t imm12 = sextreg(offset, 0, 12);

    if (offset != imm12) {
        intptr_t diff = tcg_pcrel_diff(s, (void *)offset);

        if (addr == TCG_REG_ZERO && diff == (int32_t)diff) {
            imm12 = sextreg(diff, 0, 12);
            tcg_out_opc_upper(s, OPC_AUIPC, TCG_REG_TMP2, diff - imm12);
        } else {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP2, offset - imm12);
            if (addr != TCG_REG_ZERO) {
                tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP2, TCG_REG_TMP2, addr);
            }
        }
        addr = TCG_REG_TMP2;
    }

    switch (opc) {
    case OPC_SB:
    case OPC_SH:
    case OPC_SW:
    case OPC_SD:
        tcg_out_opc_store(s, opc, addr, data, imm12);
        break;
    case OPC_LB:
    case OPC_LBU:
    case OPC_LH:
    case OPC_LHU:
    case OPC_LW:
    case OPC_LWU:
    case OPC_LD:
        tcg_out_opc_imm(s, opc, data, addr, imm12);
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg arg1, intptr_t arg2)
{
    RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_LW : OPC_LD;
    tcg_out_ldst(s, insn, arg, arg1, arg2);
}

static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg arg1, intptr_t arg2)
{
    RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_SW : OPC_SD;
    tcg_out_ldst(s, insn, arg, arg1, arg2);
}

static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                        TCGReg base, intptr_t ofs)
{
    if (val == 0) {
        tcg_out_st(s, type, TCG_REG_ZERO, base, ofs);
        return true;
    }
    return false;
}

static void tcg_out_addsub2(TCGContext *s,
                            TCGReg rl, TCGReg rh,
                            TCGReg al, TCGReg ah,
                            TCGArg bl, TCGArg bh,
                            bool cbl, bool cbh, bool is_sub, bool is32bit)
{
    const RISCVInsn opc_add = is32bit ? OPC_ADDW : OPC_ADD;
    const RISCVInsn opc_addi = is32bit ? OPC_ADDIW : OPC_ADDI;
    const RISCVInsn opc_sub = is32bit ? OPC_SUBW : OPC_SUB;
    TCGReg th = TCG_REG_TMP1;

    /* If we have a negative constant such that negating it would
       make the high part zero, we can (usually) eliminate one insn.  */
    if (cbl && cbh && bh == -1 && bl != 0) {
        bl = -bl;
        bh = 0;
        is_sub = !is_sub;
    }

    /* By operating on the high part first, we get to use the final
       carry operation to move back from the temporary.  */
    if (!cbh) {
        tcg_out_opc_reg(s, (is_sub ? opc_sub : opc_add), th, ah, bh);
    } else if (bh != 0 || ah == rl) {
        tcg_out_opc_imm(s, opc_addi, th, ah, (is_sub ? -bh : bh));
    } else {
        th = ah;
    }

    /* Note that tcg optimization should eliminate the bl == 0 case.  */
    if (is_sub) {
        if (cbl) {
            tcg_out_opc_imm(s, OPC_SLTIU, TCG_REG_TMP0, al, bl);
            tcg_out_opc_imm(s, opc_addi, rl, al, -bl);
        } else {
            tcg_out_opc_reg(s, OPC_SLTU, TCG_REG_TMP0, al, bl);
            tcg_out_opc_reg(s, opc_sub, rl, al, bl);
        }
        tcg_out_opc_reg(s, opc_sub, rh, th, TCG_REG_TMP0);
    } else {
        if (cbl) {
            tcg_out_opc_imm(s, opc_addi, rl, al, bl);
            tcg_out_opc_imm(s, OPC_SLTIU, TCG_REG_TMP0, rl, bl);
        } else if (al == bl) {
            /*
             * If the input regs overlap, this is a simple doubling
             * and carry-out is the input msb.  This special case is
             * required when the output reg overlaps the input,
             * but we might as well use it always.
             */
            tcg_out_opc_imm(s, OPC_SLTI, TCG_REG_TMP0, al, 0);
            tcg_out_opc_reg(s, opc_add, rl, al, al);
        } else {
            tcg_out_opc_reg(s, opc_add, rl, al, bl);
            tcg_out_opc_reg(s, OPC_SLTU, TCG_REG_TMP0,
                            rl, (rl == bl ? al : bl));
        }
        tcg_out_opc_reg(s, opc_add, rh, th, TCG_REG_TMP0);
    }
}
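
/*
 * Carry/borrow above is recovered with set-less-than: for addition the
 * carry-out is (unsigned) result < addend; for subtraction the borrow
 * is al < bl; the SLTI form covers the register-doubling case, where
 * the carry-out is simply the msb of the input.
 */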

static const struct {
    RISCVInsn op;
    bool swap;
} tcg_brcond_to_riscv[] = {
    [TCG_COND_EQ] =  { OPC_BEQ,  false },
    [TCG_COND_NE] =  { OPC_BNE,  false },
    [TCG_COND_LT] =  { OPC_BLT,  false },
    [TCG_COND_GE] =  { OPC_BGE,  false },
    [TCG_COND_LE] =  { OPC_BGE,  true  },
    [TCG_COND_GT] =  { OPC_BLT,  true  },
    [TCG_COND_LTU] = { OPC_BLTU, false },
    [TCG_COND_GEU] = { OPC_BGEU, false },
    [TCG_COND_LEU] = { OPC_BGEU, true  },
    [TCG_COND_GTU] = { OPC_BLTU, true  }
};
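
/*
 * LE/GT and LEU/GTU have no direct branch encodings; they are emitted
 * as BGE/BLT (or BGEU/BLTU) with the operands swapped, per the flags
 * above.
 */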

static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1,
                           TCGReg arg2, TCGLabel *l)
{
    RISCVInsn op = tcg_brcond_to_riscv[cond].op;

    tcg_debug_assert(op != 0);

    if (tcg_brcond_to_riscv[cond].swap) {
        TCGReg t = arg1;
        arg1 = arg2;
        arg2 = t;
    }

    tcg_out_reloc(s, s->code_ptr, R_RISCV_BRANCH, l, 0);
    tcg_out_opc_branch(s, op, arg1, arg2, 0);
}

static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret,
                            TCGReg arg1, TCGReg arg2)
{
    switch (cond) {
    case TCG_COND_EQ:
        tcg_out_opc_reg(s, OPC_SUB, ret, arg1, arg2);
        tcg_out_opc_imm(s, OPC_SLTIU, ret, ret, 1);
        break;
    case TCG_COND_NE:
        tcg_out_opc_reg(s, OPC_SUB, ret, arg1, arg2);
        tcg_out_opc_reg(s, OPC_SLTU, ret, TCG_REG_ZERO, ret);
        break;
    case TCG_COND_LT:
        tcg_out_opc_reg(s, OPC_SLT, ret, arg1, arg2);
        break;
    case TCG_COND_GE:
        tcg_out_opc_reg(s, OPC_SLT, ret, arg1, arg2);
        tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1);
        break;
    case TCG_COND_LE:
        tcg_out_opc_reg(s, OPC_SLT, ret, arg2, arg1);
        tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1);
        break;
    case TCG_COND_GT:
        tcg_out_opc_reg(s, OPC_SLT, ret, arg2, arg1);
        break;
    case TCG_COND_LTU:
        tcg_out_opc_reg(s, OPC_SLTU, ret, arg1, arg2);
        break;
    case TCG_COND_GEU:
        tcg_out_opc_reg(s, OPC_SLTU, ret, arg1, arg2);
        tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1);
        break;
    case TCG_COND_LEU:
        tcg_out_opc_reg(s, OPC_SLTU, ret, arg2, arg1);
        tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1);
        break;
    case TCG_COND_GTU:
        tcg_out_opc_reg(s, OPC_SLTU, ret, arg2, arg1);
        break;
    default:
        g_assert_not_reached();
        break;
    }
}

static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *arg, bool tail)
{
    TCGReg link = tail ? TCG_REG_ZERO : TCG_REG_RA;
    ptrdiff_t offset = tcg_pcrel_diff(s, arg);
    int ret;

    tcg_debug_assert((offset & 1) == 0);
    if (offset == sextreg(offset, 0, 20)) {
        /* short jump: offset fits in signed 20 bits (+-512 KiB) */
        tcg_out_opc_jump(s, OPC_JAL, link, offset);
    } else if (offset == (int32_t)offset) {
        /* long jump: offset fits in signed 32 bits */
        tcg_out_opc_upper(s, OPC_AUIPC, TCG_REG_TMP0, 0);
        tcg_out_opc_imm(s, OPC_JALR, link, TCG_REG_TMP0, 0);
        ret = reloc_call(s->code_ptr - 2, arg);
        tcg_debug_assert(ret == true);
    } else {
        /* far jump: 64-bit */
        tcg_target_long imm = sextreg((tcg_target_long)arg, 0, 12);
        tcg_target_long base = (tcg_target_long)arg - imm;
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, base);
        tcg_out_opc_imm(s, OPC_JALR, link, TCG_REG_TMP0, imm);
    }
}

static void tcg_out_call(TCGContext *s, const tcg_insn_unit *arg,
                         const TCGHelperInfo *info)
{
    tcg_out_call_int(s, arg, false);
}

static void tcg_out_mb(TCGContext *s, TCGArg a0)
{
    tcg_insn_unit insn = OPC_FENCE;

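    /*
     * FENCE encodes its predecessor set in inst[27:24] (PI,PO,PR,PW) and
     * its successor set in inst[23:20] (SI,SO,SR,SW).  E.g. the LD_ST
     * constant below, 0x02100000, sets pred-R and succ-W: earlier loads
     * complete before later stores ("fence r,w").
     */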
    if (a0 & TCG_MO_LD_LD) {
        insn |= 0x02200000;
    }
    if (a0 & TCG_MO_ST_LD) {
        insn |= 0x01200000;
    }
    if (a0 & TCG_MO_LD_ST) {
        insn |= 0x02100000;
    }
    if (a0 & TCG_MO_ST_ST) {
        insn |= 0x01100000;
    }
    tcg_out32(s, insn);
}

/*
 * Load/store and TLB
 */

static void tcg_out_goto(TCGContext *s, const tcg_insn_unit *target)
{
    tcg_out_opc_jump(s, OPC_JAL, TCG_REG_ZERO, 0);
    bool ok = reloc_jimm20(s->code_ptr - 1, target);
    tcg_debug_assert(ok);
}

bool tcg_target_has_memory_bswap(MemOp memop)
{
    return false;
}

/* We have three temps; we might as well expose them. */
static const TCGLdstHelperParam ldst_helper_param = {
    .ntmp = 3, .tmp = { TCG_REG_TMP0, TCG_REG_TMP1, TCG_REG_TMP2 }
};

static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    MemOp opc = get_memop(l->oi);

    /* resolve label address */
    if (!reloc_sbimm12(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    /* call load helper */
    tcg_out_ld_helper_args(s, l, &ldst_helper_param);
    tcg_out_call_int(s, qemu_ld_helpers[opc & MO_SSIZE], false);
    tcg_out_ld_helper_ret(s, l, true, &ldst_helper_param);

    tcg_out_goto(s, l->raddr);
    return true;
}

static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    MemOp opc = get_memop(l->oi);

    /* resolve label address */
    if (!reloc_sbimm12(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    /* call store helper */
    tcg_out_st_helper_args(s, l, &ldst_helper_param);
    tcg_out_call_int(s, qemu_st_helpers[opc & MO_SIZE], false);

    tcg_out_goto(s, l->raddr);
    return true;
}

/*
 * For softmmu, perform the TLB load and compare.
 * For useronly, perform any required alignment tests.
 * In both cases, return a TCGLabelQemuLdst structure if the slow path
 * is required and fill in @h with the host address for the fast path.
 */
static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, TCGReg *pbase,
                                           TCGReg addr_reg, MemOpIdx oi,
                                           bool is_ld)
{
    TCGLabelQemuLdst *ldst = NULL;
    MemOp opc = get_memop(oi);
    TCGAtomAlign aa;
    unsigned a_mask;

    aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, false);
    a_mask = (1u << aa.align) - 1;

#ifdef CONFIG_SOFTMMU
    unsigned s_bits = opc & MO_SIZE;
    unsigned s_mask = (1u << s_bits) - 1;
    int mem_index = get_mmuidx(oi);
    int fast_ofs = TLB_MASK_TABLE_OFS(mem_index);
    int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask);
    int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table);
    int compare_mask;
    TCGReg addr_adj;

    ldst = new_ldst_label(s);
    ldst->is_ld = is_ld;
    ldst->oi = oi;
    ldst->addrlo_reg = addr_reg;

    QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
    QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 11));
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_AREG0, mask_ofs);
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, table_ofs);

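    /*
     * Index the TLB: the entry's byte offset is
     * (addr >> (page_bits - CPU_TLB_ENTRY_BITS)) & mask, where mask is
     * pre-scaled by sizeof(CPUTLBEntry); add it to the table base.
     */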
    tcg_out_opc_imm(s, OPC_SRLI, TCG_REG_TMP2, addr_reg,
                    s->page_bits - CPU_TLB_ENTRY_BITS);
    tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0);
    tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1);

    /*
     * For aligned accesses, we check the first byte and include the alignment
     * bits within the address.  For unaligned access, we check that we don't
     * cross pages using the address of the last byte of the access.
     */
    addr_adj = addr_reg;
    if (a_mask < s_mask) {
        addr_adj = TCG_REG_TMP0;
        tcg_out_opc_imm(s, TARGET_LONG_BITS == 32 ? OPC_ADDIW : OPC_ADDI,
                        addr_adj, addr_reg, s_mask - a_mask);
    }
    compare_mask = s->page_mask | a_mask;
    if (compare_mask == sextreg(compare_mask, 0, 12)) {
        tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addr_adj, compare_mask);
    } else {
        tcg_out_movi(s, TCG_TYPE_TL, TCG_REG_TMP1, compare_mask);
        tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP1, TCG_REG_TMP1, addr_adj);
    }

    /* Load the tlb comparator and the addend.  */
    tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_TMP0, TCG_REG_TMP2,
               is_ld ? offsetof(CPUTLBEntry, addr_read)
                     : offsetof(CPUTLBEntry, addr_write));
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2,
               offsetof(CPUTLBEntry, addend));

    /* Compare masked address with the TLB entry. */
    ldst->label_ptr[0] = s->code_ptr;
    tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP0, TCG_REG_TMP1, 0);

    /* TLB Hit - translate address using addend.  */
    addr_adj = addr_reg;
    if (TARGET_LONG_BITS == 32) {
        addr_adj = TCG_REG_TMP0;
        tcg_out_ext32u(s, addr_adj, addr_reg);
    }
    tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_REG_TMP2, addr_adj);
    *pbase = TCG_REG_TMP0;
#else
    if (a_mask) {
        ldst = new_ldst_label(s);
        ldst->is_ld = is_ld;
        ldst->oi = oi;
        ldst->addrlo_reg = addr_reg;

        /* We are expecting alignment max 7, so we can always use andi. */
        tcg_debug_assert(a_mask == sextreg(a_mask, 0, 12));
        tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addr_reg, a_mask);

        ldst->label_ptr[0] = s->code_ptr;
        tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP1, TCG_REG_ZERO, 0);
    }

    TCGReg base = addr_reg;
    if (TARGET_LONG_BITS == 32) {
        tcg_out_ext32u(s, TCG_REG_TMP0, base);
        base = TCG_REG_TMP0;
    }
    if (guest_base != 0) {
        tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_GUEST_BASE_REG, base);
        base = TCG_REG_TMP0;
    }
    *pbase = base;
#endif

    return ldst;
}

static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg val,
                                   TCGReg base, MemOp opc, TCGType type)
{
    /* Byte swapping is left to middle-end expansion. */
    tcg_debug_assert((opc & MO_BSWAP) == 0);

    switch (opc & (MO_SSIZE)) {
    case MO_UB:
        tcg_out_opc_imm(s, OPC_LBU, val, base, 0);
        break;
    case MO_SB:
        tcg_out_opc_imm(s, OPC_LB, val, base, 0);
        break;
    case MO_UW:
        tcg_out_opc_imm(s, OPC_LHU, val, base, 0);
        break;
    case MO_SW:
        tcg_out_opc_imm(s, OPC_LH, val, base, 0);
        break;
    case MO_UL:
        if (type == TCG_TYPE_I64) {
            tcg_out_opc_imm(s, OPC_LWU, val, base, 0);
            break;
        }
        /* FALLTHRU */
    case MO_SL:
        tcg_out_opc_imm(s, OPC_LW, val, base, 0);
        break;
    case MO_UQ:
        tcg_out_opc_imm(s, OPC_LD, val, base, 0);
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
                            MemOpIdx oi, TCGType data_type)
{
    TCGLabelQemuLdst *ldst;
    TCGReg base;

    ldst = prepare_host_addr(s, &base, addr_reg, oi, true);
    tcg_out_qemu_ld_direct(s, data_reg, base, get_memop(oi), data_type);

    if (ldst) {
        ldst->type = data_type;
        ldst->datalo_reg = data_reg;
        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
    }
}

static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg val,
                                   TCGReg base, MemOp opc)
{
    /* Byte swapping is left to middle-end expansion. */
    tcg_debug_assert((opc & MO_BSWAP) == 0);

    switch (opc & (MO_SSIZE)) {
    case MO_8:
        tcg_out_opc_store(s, OPC_SB, base, val, 0);
        break;
    case MO_16:
        tcg_out_opc_store(s, OPC_SH, base, val, 0);
        break;
    case MO_32:
        tcg_out_opc_store(s, OPC_SW, base, val, 0);
        break;
    case MO_64:
        tcg_out_opc_store(s, OPC_SD, base, val, 0);
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
                            MemOpIdx oi, TCGType data_type)
{
    TCGLabelQemuLdst *ldst;
    TCGReg base;

    ldst = prepare_host_addr(s, &base, addr_reg, oi, false);
    tcg_out_qemu_st_direct(s, data_reg, base, get_memop(oi));

    if (ldst) {
        ldst->type = data_type;
        ldst->datalo_reg = data_reg;
        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
    }
}

static const tcg_insn_unit *tb_ret_addr;

static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
{
    /* Reuse the zeroing that exists for goto_ptr.  */
    if (a0 == 0) {
        tcg_out_call_int(s, tcg_code_gen_epilogue, true);
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A0, a0);
        tcg_out_call_int(s, tb_ret_addr, true);
    }
}

static void tcg_out_goto_tb(TCGContext *s, int which)
{
    /* Direct branch will be patched by tb_target_set_jmp_target. */
    set_jmp_insn_offset(s, which);
    tcg_out32(s, OPC_JAL);

    /* When branch is out of range, fall through to indirect. */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_REG_ZERO,
               get_jmp_target_addr(s, which));
    tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, TCG_REG_TMP0, 0);
    set_jmp_reset_offset(s, which);
}

void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
                              uintptr_t jmp_rx, uintptr_t jmp_rw)
{
    uintptr_t addr = tb->jmp_target_addr[n];
    ptrdiff_t offset = addr - jmp_rx;
    tcg_insn_unit insn;

    /* Either directly branch, or fall through to indirect branch. */
    if (offset == sextreg(offset, 0, 20)) {
        insn = encode_uj(OPC_JAL, TCG_REG_ZERO, offset);
    } else {
        insn = OPC_NOP;
    }
    qatomic_set((uint32_t *)jmp_rw, insn);
    flush_idcache_range(jmp_rx, jmp_rw, 4);
}
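
/*
 * Note the patch above is a single aligned 32-bit store: the JAL
 * placeholder emitted by tcg_out_goto_tb either becomes a direct branch
 * (when the displacement fits its immediate) or a NOP that falls
 * through to the indirect jump.
 */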

static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS])
{
    TCGArg a0 = args[0];
    TCGArg a1 = args[1];
    TCGArg a2 = args[2];
    int c2 = const_args[2];

    switch (opc) {
    case INDEX_op_goto_ptr:
        tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, a0, 0);
        break;

    case INDEX_op_br:
        tcg_out_reloc(s, s->code_ptr, R_RISCV_JAL, arg_label(a0), 0);
        tcg_out_opc_jump(s, OPC_JAL, TCG_REG_ZERO, 0);
        break;

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
        tcg_out_ldst(s, OPC_LBU, a0, a1, a2);
        break;
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
        tcg_out_ldst(s, OPC_LB, a0, a1, a2);
        break;
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
        tcg_out_ldst(s, OPC_LHU, a0, a1, a2);
        break;
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
        tcg_out_ldst(s, OPC_LH, a0, a1, a2);
        break;
    case INDEX_op_ld32u_i64:
        tcg_out_ldst(s, OPC_LWU, a0, a1, a2);
        break;
    case INDEX_op_ld_i32:
    case INDEX_op_ld32s_i64:
        tcg_out_ldst(s, OPC_LW, a0, a1, a2);
        break;
    case INDEX_op_ld_i64:
        tcg_out_ldst(s, OPC_LD, a0, a1, a2);
        break;

    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
        tcg_out_ldst(s, OPC_SB, a0, a1, a2);
        break;
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
        tcg_out_ldst(s, OPC_SH, a0, a1, a2);
        break;
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
        tcg_out_ldst(s, OPC_SW, a0, a1, a2);
        break;
    case INDEX_op_st_i64:
        tcg_out_ldst(s, OPC_SD, a0, a1, a2);
        break;

    case INDEX_op_add_i32:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ADDIW, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_ADDW, a0, a1, a2);
        }
        break;
    case INDEX_op_add_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ADDI, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_ADD, a0, a1, a2);
        }
        break;

    case INDEX_op_sub_i32:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ADDIW, a0, a1, -a2);
        } else {
            tcg_out_opc_reg(s, OPC_SUBW, a0, a1, a2);
        }
        break;
    case INDEX_op_sub_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ADDI, a0, a1, -a2);
        } else {
            tcg_out_opc_reg(s, OPC_SUB, a0, a1, a2);
        }
        break;

    case INDEX_op_and_i32:
    case INDEX_op_and_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ANDI, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_AND, a0, a1, a2);
        }
        break;

    case INDEX_op_or_i32:
    case INDEX_op_or_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ORI, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_OR, a0, a1, a2);
        }
        break;

    case INDEX_op_xor_i32:
    case INDEX_op_xor_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_XORI, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_XOR, a0, a1, a2);
        }
        break;

    case INDEX_op_not_i32:
    case INDEX_op_not_i64:
        tcg_out_opc_imm(s, OPC_XORI, a0, a1, -1);
        break;

    case INDEX_op_neg_i32:
        tcg_out_opc_reg(s, OPC_SUBW, a0, TCG_REG_ZERO, a1);
        break;
    case INDEX_op_neg_i64:
        tcg_out_opc_reg(s, OPC_SUB, a0, TCG_REG_ZERO, a1);
        break;

    case INDEX_op_mul_i32:
        tcg_out_opc_reg(s, OPC_MULW, a0, a1, a2);
        break;
    case INDEX_op_mul_i64:
        tcg_out_opc_reg(s, OPC_MUL, a0, a1, a2);
        break;

    case INDEX_op_div_i32:
        tcg_out_opc_reg(s, OPC_DIVW, a0, a1, a2);
        break;
    case INDEX_op_div_i64:
        tcg_out_opc_reg(s, OPC_DIV, a0, a1, a2);
        break;

    case INDEX_op_divu_i32:
        tcg_out_opc_reg(s, OPC_DIVUW, a0, a1, a2);
        break;
    case INDEX_op_divu_i64:
        tcg_out_opc_reg(s, OPC_DIVU, a0, a1, a2);
        break;

    case INDEX_op_rem_i32:
        tcg_out_opc_reg(s, OPC_REMW, a0, a1, a2);
        break;
    case INDEX_op_rem_i64:
        tcg_out_opc_reg(s, OPC_REM, a0, a1, a2);
        break;

    case INDEX_op_remu_i32:
        tcg_out_opc_reg(s, OPC_REMUW, a0, a1, a2);
        break;
    case INDEX_op_remu_i64:
        tcg_out_opc_reg(s, OPC_REMU, a0, a1, a2);
        break;

    case INDEX_op_shl_i32:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SLLIW, a0, a1, a2 & 0x1f);
        } else {
            tcg_out_opc_reg(s, OPC_SLLW, a0, a1, a2);
        }
        break;
    case INDEX_op_shl_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SLLI, a0, a1, a2 & 0x3f);
        } else {
            tcg_out_opc_reg(s, OPC_SLL, a0, a1, a2);
        }
        break;

    case INDEX_op_shr_i32:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SRLIW, a0, a1, a2 & 0x1f);
        } else {
            tcg_out_opc_reg(s, OPC_SRLW, a0, a1, a2);
        }
        break;
    case INDEX_op_shr_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SRLI, a0, a1, a2 & 0x3f);
        } else {
            tcg_out_opc_reg(s, OPC_SRL, a0, a1, a2);
        }
        break;

    case INDEX_op_sar_i32:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SRAIW, a0, a1, a2 & 0x1f);
        } else {
            tcg_out_opc_reg(s, OPC_SRAW, a0, a1, a2);
        }
        break;
    case INDEX_op_sar_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SRAI, a0, a1, a2 & 0x3f);
        } else {
            tcg_out_opc_reg(s, OPC_SRA, a0, a1, a2);
        }
        break;

    case INDEX_op_add2_i32:
        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
                        const_args[4], const_args[5], false, true);
        break;
    case INDEX_op_add2_i64:
        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
                        const_args[4], const_args[5], false, false);
        break;
    case INDEX_op_sub2_i32:
        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
                        const_args[4], const_args[5], true, true);
        break;
    case INDEX_op_sub2_i64:
        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
                        const_args[4], const_args[5], true, false);
        break;

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        tcg_out_brcond(s, a2, a0, a1, arg_label(args[3]));
        break;

    case INDEX_op_setcond_i32:
    case INDEX_op_setcond_i64:
        tcg_out_setcond(s, args[3], a0, a1, a2);
        break;

    case INDEX_op_qemu_ld_a32_i32:
    case INDEX_op_qemu_ld_a64_i32:
        tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32);
        break;
    case INDEX_op_qemu_ld_a32_i64:
    case INDEX_op_qemu_ld_a64_i64:
        tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64);
        break;
    case INDEX_op_qemu_st_a32_i32:
    case INDEX_op_qemu_st_a64_i32:
        tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32);
        break;
    case INDEX_op_qemu_st_a32_i64:
    case INDEX_op_qemu_st_a64_i64:
        tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64);
        break;

    case INDEX_op_extrh_i64_i32:
        tcg_out_opc_imm(s, OPC_SRAI, a0, a1, 32);
        break;

    case INDEX_op_mulsh_i32:
    case INDEX_op_mulsh_i64:
        tcg_out_opc_reg(s, OPC_MULH, a0, a1, a2);
        break;

    case INDEX_op_muluh_i32:
    case INDEX_op_muluh_i64:
        tcg_out_opc_reg(s, OPC_MULHU, a0, a1, a2);
        break;

    case INDEX_op_mb:
        tcg_out_mb(s, a0);
        break;

    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
    case INDEX_op_mov_i64:
    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
    case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb.  */
    case INDEX_op_goto_tb:  /* Always emitted via tcg_out_goto_tb.  */
    case INDEX_op_ext8s_i32:  /* Always emitted via tcg_reg_alloc_op.  */
    case INDEX_op_ext8s_i64:
    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
    case INDEX_op_ext16u_i32:
    case INDEX_op_ext16u_i64:
    case INDEX_op_ext32s_i64:
    case INDEX_op_ext32u_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_extrl_i64_i32:
    default:
        g_assert_not_reached();
    }
}

static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
{
    switch (op) {
    case INDEX_op_goto_ptr:
        return C_O0_I1(r);

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_not_i32:
    case INDEX_op_neg_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_not_i64:
    case INDEX_op_neg_i64:
    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
    case INDEX_op_ext16u_i32:
    case INDEX_op_ext16u_i64:
    case INDEX_op_ext32u_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_ext8s_i32:
    case INDEX_op_ext8s_i64:
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
    case INDEX_op_ext32s_i64:
    case INDEX_op_extrl_i64_i32:
    case INDEX_op_extrh_i64_i32:
    case INDEX_op_ext_i32_i64:
        return C_O1_I1(r, r);

    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
        return C_O0_I2(rZ, r);

    case INDEX_op_add_i32:
    case INDEX_op_and_i32:
    case INDEX_op_or_i32:
    case INDEX_op_xor_i32:
    case INDEX_op_add_i64:
    case INDEX_op_and_i64:
    case INDEX_op_or_i64:
    case INDEX_op_xor_i64:
        return C_O1_I2(r, r, rI);

    case INDEX_op_sub_i32:
    case INDEX_op_sub_i64:
        return C_O1_I2(r, rZ, rN);

    case INDEX_op_mul_i32:
    case INDEX_op_mulsh_i32:
    case INDEX_op_muluh_i32:
    case INDEX_op_div_i32:
    case INDEX_op_divu_i32:
    case INDEX_op_rem_i32:
    case INDEX_op_remu_i32:
    case INDEX_op_setcond_i32:
    case INDEX_op_mul_i64:
    case INDEX_op_mulsh_i64:
    case INDEX_op_muluh_i64:
    case INDEX_op_div_i64:
    case INDEX_op_divu_i64:
    case INDEX_op_rem_i64:
    case INDEX_op_remu_i64:
    case INDEX_op_setcond_i64:
        return C_O1_I2(r, rZ, rZ);

    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i64:
        return C_O1_I2(r, r, ri);

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        return C_O0_I2(rZ, rZ);

    case INDEX_op_add2_i32:
    case INDEX_op_add2_i64:
    case INDEX_op_sub2_i32:
    case INDEX_op_sub2_i64:
        return C_O2_I4(r, r, rZ, rZ, rM, rM);

    case INDEX_op_qemu_ld_a32_i32:
    case INDEX_op_qemu_ld_a64_i32:
    case INDEX_op_qemu_ld_a32_i64:
    case INDEX_op_qemu_ld_a64_i64:
        return C_O1_I1(r, r);
    case INDEX_op_qemu_st_a32_i32:
    case INDEX_op_qemu_st_a64_i32:
    case INDEX_op_qemu_st_a32_i64:
    case INDEX_op_qemu_st_a64_i64:
        return C_O0_I2(rZ, r);

    default:
        g_assert_not_reached();
    }
}

static const int tcg_target_callee_save_regs[] = {
    TCG_REG_S0,       /* used for the global env (TCG_AREG0) */
    TCG_REG_S1,
    TCG_REG_S2,
    TCG_REG_S3,
    TCG_REG_S4,
    TCG_REG_S5,
    TCG_REG_S6,
    TCG_REG_S7,
    TCG_REG_S8,
    TCG_REG_S9,
    TCG_REG_S10,
    TCG_REG_S11,
    TCG_REG_RA,       /* should be last for ABI compliance */
};

/* Stack frame parameters.  */
#define REG_SIZE   (TCG_TARGET_REG_BITS / 8)
#define SAVE_SIZE  ((int)ARRAY_SIZE(tcg_target_callee_save_regs) * REG_SIZE)
#define TEMP_SIZE  (CPU_TEMP_BUF_NLONGS * (int)sizeof(long))
#define FRAME_SIZE ((TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE + SAVE_SIZE \
                     + TCG_TARGET_STACK_ALIGN - 1) \
                    & -TCG_TARGET_STACK_ALIGN)
#define SAVE_OFS   (TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE)

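/*
 * Frame layout, from low to high address: TCG_STATIC_CALL_ARGS_SIZE
 * bytes of outgoing-call scratch at sp, TEMP_SIZE bytes of TCG spill
 * area, then the callee-saved registers at SAVE_OFS; the total is
 * rounded up to TCG_TARGET_STACK_ALIGN.
 */
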
/* We're expecting to be able to use an immediate for frame allocation.  */
QEMU_BUILD_BUG_ON(FRAME_SIZE > 0x7ff);

/* Generate global QEMU prologue and epilogue code */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int i;

    tcg_set_frame(s, TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE, TEMP_SIZE);

    /* TB prologue */
    tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_SP, TCG_REG_SP, -FRAME_SIZE);
    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_st(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
                   TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
    }

#if !defined(CONFIG_SOFTMMU)
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base);
    tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
#endif

    /* Call generated code */
    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
    tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, tcg_target_call_iarg_regs[1], 0);

    /* Return path for goto_ptr. Set return value to 0 */
    tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
    tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_A0, TCG_REG_ZERO);

    /* TB epilogue */
    tb_ret_addr = tcg_splitwx_to_rx(s->code_ptr);
    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_ld(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
                   TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
    }

    tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_SP, TCG_REG_SP, FRAME_SIZE);
    tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, TCG_REG_RA, 0);
}

static void tcg_target_init(TCGContext *s)
{
    tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffff;
    tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffff;

    tcg_target_call_clobber_regs = -1u;
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S0);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S1);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S2);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S3);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S4);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S5);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S6);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S7);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S8);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S9);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S10);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S11);

    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_ZERO);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP0);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP2);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_GP);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TP);
}

typedef struct {
    DebugFrameHeader h;
    uint8_t fde_def_cfa[4];
    uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2];
} DebugFrame;

#define ELF_HOST_MACHINE EM_RISCV

static const DebugFrame debug_frame = {
    .h.cie.len = sizeof(DebugFrameCIE) - 4, /* length after .len member */
    .h.cie.id = -1,
    .h.cie.version = 1,
    .h.cie.code_align = 1,
    .h.cie.data_align = -(TCG_TARGET_REG_BITS / 8) & 0x7f, /* sleb128 */
    .h.cie.return_column = TCG_REG_RA,

    /* Total FDE size does not include the "len" member.  */
    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),

    .fde_def_cfa = {
        12, TCG_REG_SP,                 /* DW_CFA_def_cfa sp, ... */
        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
        (FRAME_SIZE >> 7)
    },
    .fde_reg_ofs = {
        0x80 + 9,  12,                  /* DW_CFA_offset, s1,  -96 */
        0x80 + 18, 11,                  /* DW_CFA_offset, s2,  -88 */
        0x80 + 19, 10,                  /* DW_CFA_offset, s3,  -80 */
        0x80 + 20, 9,                   /* DW_CFA_offset, s4,  -72 */
        0x80 + 21, 8,                   /* DW_CFA_offset, s5,  -64 */
        0x80 + 22, 7,                   /* DW_CFA_offset, s6,  -56 */
        0x80 + 23, 6,                   /* DW_CFA_offset, s7,  -48 */
        0x80 + 24, 5,                   /* DW_CFA_offset, s8,  -40 */
        0x80 + 25, 4,                   /* DW_CFA_offset, s9,  -32 */
        0x80 + 26, 3,                   /* DW_CFA_offset, s10, -24 */
        0x80 + 27, 2,                   /* DW_CFA_offset, s11, -16 */
        0x80 + 1 , 1,                   /* DW_CFA_offset, ra,  -8 */
    }
};

void tcg_register_jit(const void *buf, size_t buf_size)
{
    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}