/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2018 SiFive, Inc
 * Copyright (c) 2008-2009 Arnaud Patard <arnaud.patard@rtp-net.org>
 * Copyright (c) 2009 Aurelien Jarno <aurelien@aurel32.net>
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Based on i386/tcg-target.c and mips/tcg-target.c
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "../tcg-ldst.c.inc"
#include "../tcg-pool.c.inc"

#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "zero",
    "ra",
    "sp",
    "gp",
    "tp",
    "t0",
    "t1",
    "t2",
    "s0",
    "s1",
    "a0",
    "a1",
    "a2",
    "a3",
    "a4",
    "a5",
    "a6",
    "a7",
    "s2",
    "s3",
    "s4",
    "s5",
    "s6",
    "s7",
    "s8",
    "s9",
    "s10",
    "s11",
    "t3",
    "t4",
    "t5",
    "t6"
};
#endif

static const int tcg_target_reg_alloc_order[] = {
    /* Call saved registers */
    /* TCG_REG_S0 reserved for TCG_AREG0 */
    TCG_REG_S1,
    TCG_REG_S2,
    TCG_REG_S3,
    TCG_REG_S4,
    TCG_REG_S5,
    TCG_REG_S6,
    TCG_REG_S7,
    TCG_REG_S8,
    TCG_REG_S9,
    TCG_REG_S10,
    TCG_REG_S11,

    /* Call clobbered registers */
    TCG_REG_T0,
    TCG_REG_T1,
    TCG_REG_T2,
    TCG_REG_T3,
    TCG_REG_T4,
    TCG_REG_T5,
    TCG_REG_T6,

    /* Argument registers */
    TCG_REG_A0,
    TCG_REG_A1,
    TCG_REG_A2,
    TCG_REG_A3,
    TCG_REG_A4,
    TCG_REG_A5,
    TCG_REG_A6,
    TCG_REG_A7,
};

static const int tcg_target_call_iarg_regs[] = {
    TCG_REG_A0,
    TCG_REG_A1,
    TCG_REG_A2,
    TCG_REG_A3,
    TCG_REG_A4,
    TCG_REG_A5,
    TCG_REG_A6,
    TCG_REG_A7,
};

#ifndef have_zbb
bool have_zbb;
#endif
#if defined(__riscv_arch_test) && defined(__riscv_zba)
# define have_zba true
#else
static bool have_zba;
#endif
#if defined(__riscv_arch_test) && defined(__riscv_zicond)
# define have_zicond true
#else
static bool have_zicond;
#endif

static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
{
    tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
    tcg_debug_assert(slot >= 0 && slot <= 1);
    return TCG_REG_A0 + slot;
}

#define TCG_CT_CONST_ZERO  0x100
#define TCG_CT_CONST_S12   0x200
#define TCG_CT_CONST_N12   0x400
#define TCG_CT_CONST_M12   0x800
#define TCG_CT_CONST_J12  0x1000

#define ALL_GENERAL_REGS   MAKE_64BIT_MASK(0, 32)

#define sextreg  sextract64

/* test if a constant matches the constraint */
static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
{
    if (ct & TCG_CT_CONST) {
        return 1;
    }
    if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return 1;
    }
    /*
     * Sign extended from 12 bits: [-0x800, 0x7ff].
     * Used for most arithmetic, as this is the 12-bit immediate
     * field defined by the ISA.
     */
    if ((ct & TCG_CT_CONST_S12) && val >= -0x800 && val <= 0x7ff) {
        return 1;
    }
    /*
     * Sign extended from 12 bits, negated: [-0x7ff, 0x800].
     * Used for subtraction, where a constant must be handled by ADDI.
     */
    if ((ct & TCG_CT_CONST_N12) && val >= -0x7ff && val <= 0x800) {
        return 1;
    }
    /*
     * Sign extended from 12 bits, +/- matching: [-0x7ff, 0x7ff].
     * Used by addsub2 and movcond, which may need the negative value,
     * and requires the modified constant to be representable.
     */
    if ((ct & TCG_CT_CONST_M12) && val >= -0x7ff && val <= 0x7ff) {
        return 1;
    }
    /*
     * Inverse of sign extended from 12 bits: ~[-0x800, 0x7ff].
     * Used to map ANDN back to ANDI, etc.
     */
    if ((ct & TCG_CT_CONST_J12) && ~val >= -0x800 && ~val <= 0x7ff) {
        return 1;
    }
    return 0;
}
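
/*
 * Illustrative examples: val = 0x7ff satisfies S12 and M12; val = 0x800
 * satisfies only N12, since it can be negated into the ADDI immediate;
 * val = ~0x7ff (i.e. -0x800) satisfies J12, so an andc by that constant
 * can be emitted as ANDI with 0x7ff instead of requiring Zbb ANDN.
 */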

/*
 * RISC-V Base ISA opcodes (IM)
 */

typedef enum {
    OPC_ADD = 0x33,
    OPC_ADDI = 0x13,
    OPC_AND = 0x7033,
    OPC_ANDI = 0x7013,
    OPC_AUIPC = 0x17,
    OPC_BEQ = 0x63,
    OPC_BGE = 0x5063,
    OPC_BGEU = 0x7063,
    OPC_BLT = 0x4063,
    OPC_BLTU = 0x6063,
    OPC_BNE = 0x1063,
    OPC_DIV = 0x2004033,
    OPC_DIVU = 0x2005033,
    OPC_JAL = 0x6f,
    OPC_JALR = 0x67,
    OPC_LB = 0x3,
    OPC_LBU = 0x4003,
    OPC_LD = 0x3003,
    OPC_LH = 0x1003,
    OPC_LHU = 0x5003,
    OPC_LUI = 0x37,
    OPC_LW = 0x2003,
    OPC_LWU = 0x6003,
    OPC_MUL = 0x2000033,
    OPC_MULH = 0x2001033,
    OPC_MULHSU = 0x2002033,
    OPC_MULHU = 0x2003033,
    OPC_OR = 0x6033,
    OPC_ORI = 0x6013,
    OPC_REM = 0x2006033,
    OPC_REMU = 0x2007033,
    OPC_SB = 0x23,
    OPC_SD = 0x3023,
    OPC_SH = 0x1023,
    OPC_SLL = 0x1033,
    OPC_SLLI = 0x1013,
    OPC_SLT = 0x2033,
    OPC_SLTI = 0x2013,
    OPC_SLTIU = 0x3013,
    OPC_SLTU = 0x3033,
    OPC_SRA = 0x40005033,
    OPC_SRAI = 0x40005013,
    OPC_SRL = 0x5033,
    OPC_SRLI = 0x5013,
    OPC_SUB = 0x40000033,
    OPC_SW = 0x2023,
    OPC_XOR = 0x4033,
    OPC_XORI = 0x4013,

    OPC_ADDIW = 0x1b,
    OPC_ADDW = 0x3b,
    OPC_DIVUW = 0x200503b,
    OPC_DIVW = 0x200403b,
    OPC_MULW = 0x200003b,
    OPC_REMUW = 0x200703b,
    OPC_REMW = 0x200603b,
    OPC_SLLIW = 0x101b,
    OPC_SLLW = 0x103b,
    OPC_SRAIW = 0x4000501b,
    OPC_SRAW = 0x4000503b,
    OPC_SRLIW = 0x501b,
    OPC_SRLW = 0x503b,
    OPC_SUBW = 0x4000003b,

    OPC_FENCE = 0x0000000f,
    OPC_NOP   = OPC_ADDI,   /* nop = addi x0, x0, 0 */

    /* Zba: Bit manipulation extension, address generation */
    OPC_ADD_UW = 0x0800003b,

    /* Zbb: Bit manipulation extension, basic bit manipulation */
    OPC_ANDN   = 0x40007033,
    OPC_CLZ    = 0x60001013,
    OPC_CLZW   = 0x6000101b,
    OPC_CPOP   = 0x60201013,
    OPC_CPOPW  = 0x6020101b,
    OPC_CTZ    = 0x60101013,
    OPC_CTZW   = 0x6010101b,
    OPC_ORN    = 0x40006033,
    OPC_REV8   = 0x6b805013,
    OPC_ROL    = 0x60001033,
    OPC_ROLW   = 0x6000103b,
    OPC_ROR    = 0x60005033,
    OPC_RORW   = 0x6000503b,
    OPC_RORI   = 0x60005013,
    OPC_RORIW  = 0x6000501b,
    OPC_SEXT_B = 0x60401013,
    OPC_SEXT_H = 0x60501013,
    OPC_XNOR   = 0x40004033,
    OPC_ZEXT_H = 0x0800403b,

    /* Zicond: integer conditional operations */
    OPC_CZERO_EQZ = 0x0e005033,
    OPC_CZERO_NEZ = 0x0e007033,
} RISCVInsn;
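
/*
 * The constants above pre-pack the fixed opcode/funct3/funct7 fields;
 * e.g. OPC_SRA = 0x40005033 decomposes as funct7 = 0x20 (bits 31:25),
 * funct3 = 5 (bits 14:12) and opcode = 0x33 (bits 6:0), leaving the
 * rd/rs1/rs2 fields zero for the encoders below to fill in.
 */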

/*
 * RISC-V immediate and instruction encoders (excludes 16-bit RVC)
 */

/* Type-R */

static int32_t encode_r(RISCVInsn opc, TCGReg rd, TCGReg rs1, TCGReg rs2)
{
    return opc | (rd & 0x1f) << 7 | (rs1 & 0x1f) << 15 | (rs2 & 0x1f) << 20;
}
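
/*
 * Worked example: encode_r(OPC_ADD, TCG_REG_A0, TCG_REG_A1, TCG_REG_A2)
 * yields 0x33 | (10 << 7) | (11 << 15) | (12 << 20) = 0x00c58533,
 * i.e. "add a0, a1, a2" (a0 = x10, a1 = x11, a2 = x12).
 */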

/* Type-I */

static int32_t encode_imm12(uint32_t imm)
{
    return (imm & 0xfff) << 20;
}

static int32_t encode_i(RISCVInsn opc, TCGReg rd, TCGReg rs1, uint32_t imm)
{
    return opc | (rd & 0x1f) << 7 | (rs1 & 0x1f) << 15 | encode_imm12(imm);
}

/* Type-S */

static int32_t encode_simm12(uint32_t imm)
{
    int32_t ret = 0;

    ret |= (imm & 0xFE0) << 20;
    ret |= (imm & 0x1F) << 7;

    return ret;
}

static int32_t encode_s(RISCVInsn opc, TCGReg rs1, TCGReg rs2, uint32_t imm)
{
    return opc | (rs1 & 0x1f) << 15 | (rs2 & 0x1f) << 20 | encode_simm12(imm);
}
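
/*
 * Worked example: encode_s(OPC_SD, TCG_REG_A1, TCG_REG_A2, 8) splits the
 * immediate into imm[11:5] = 0 (bits 31:25) and imm[4:0] = 8 (bits 11:7),
 * yielding 0x3023 | (11 << 15) | (12 << 20) | (8 << 7) = 0x00c5b423,
 * i.e. "sd a2, 8(a1)".
 */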

/* Type-SB */

static int32_t encode_sbimm12(uint32_t imm)
{
    int32_t ret = 0;

    ret |= (imm & 0x1000) << 19;
    ret |= (imm & 0x7e0) << 20;
    ret |= (imm & 0x1e) << 7;
    ret |= (imm & 0x800) >> 4;

    return ret;
}
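
/*
 * The branch immediate is scattered per the B-type layout:
 * imm[12] -> bit 31, imm[10:5] -> bits 30:25, imm[4:1] -> bits 11:8,
 * imm[11] -> bit 7.  Bit 0 of the offset is implicitly zero, so only
 * even (2-byte aligned) displacements are encodable.
 */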

static int32_t encode_sb(RISCVInsn opc, TCGReg rs1, TCGReg rs2, uint32_t imm)
{
    return opc | (rs1 & 0x1f) << 15 | (rs2 & 0x1f) << 20 | encode_sbimm12(imm);
}

/* Type-U */

static int32_t encode_uimm20(uint32_t imm)
{
    return imm & 0xfffff000;
}

static int32_t encode_u(RISCVInsn opc, TCGReg rd, uint32_t imm)
{
    return opc | (rd & 0x1f) << 7 | encode_uimm20(imm);
}

/* Type-UJ */

static int32_t encode_ujimm20(uint32_t imm)
{
    int32_t ret = 0;

    ret |= (imm & 0x0007fe) << (21 - 1);
    ret |= (imm & 0x000800) << (20 - 11);
    ret |= (imm & 0x0ff000) << (12 - 12);
    ret |= (imm & 0x100000) << (31 - 20);

    return ret;
}
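
/*
 * J-type layout: imm[10:1] -> bits 30:21, imm[11] -> bit 20,
 * imm[19:12] -> bits 19:12, imm[20] -> bit 31; as with branches,
 * bit 0 of the offset is implicitly zero.
 */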

static int32_t encode_uj(RISCVInsn opc, TCGReg rd, uint32_t imm)
{
    return opc | (rd & 0x1f) << 7 | encode_ujimm20(imm);
}

/*
 * RISC-V instruction emitters
 */

static void tcg_out_opc_reg(TCGContext *s, RISCVInsn opc,
                            TCGReg rd, TCGReg rs1, TCGReg rs2)
{
    tcg_out32(s, encode_r(opc, rd, rs1, rs2));
}

static void tcg_out_opc_imm(TCGContext *s, RISCVInsn opc,
                            TCGReg rd, TCGReg rs1, TCGArg imm)
{
    tcg_out32(s, encode_i(opc, rd, rs1, imm));
}

static void tcg_out_opc_store(TCGContext *s, RISCVInsn opc,
                              TCGReg rs1, TCGReg rs2, uint32_t imm)
{
    tcg_out32(s, encode_s(opc, rs1, rs2, imm));
}

static void tcg_out_opc_branch(TCGContext *s, RISCVInsn opc,
                               TCGReg rs1, TCGReg rs2, uint32_t imm)
{
    tcg_out32(s, encode_sb(opc, rs1, rs2, imm));
}

static void tcg_out_opc_upper(TCGContext *s, RISCVInsn opc,
                              TCGReg rd, uint32_t imm)
{
    tcg_out32(s, encode_u(opc, rd, imm));
}

static void tcg_out_opc_jump(TCGContext *s, RISCVInsn opc,
                             TCGReg rd, uint32_t imm)
{
    tcg_out32(s, encode_uj(opc, rd, imm));
}

static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
    int i;
    for (i = 0; i < count; ++i) {
        p[i] = OPC_NOP;
    }
}

/*
 * Relocations
 */

static bool reloc_sbimm12(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    intptr_t offset = (intptr_t)target - (intptr_t)src_rx;

    tcg_debug_assert((offset & 1) == 0);
    if (offset == sextreg(offset, 0, 12)) {
        *src_rw |= encode_sbimm12(offset);
        return true;
    }

    return false;
}

static bool reloc_jimm20(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    intptr_t offset = (intptr_t)target - (intptr_t)src_rx;

    tcg_debug_assert((offset & 1) == 0);
    if (offset == sextreg(offset, 0, 20)) {
        *src_rw |= encode_ujimm20(offset);
        return true;
    }

    return false;
}

static bool reloc_call(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    intptr_t offset = (intptr_t)target - (intptr_t)src_rx;
    int32_t lo = sextreg(offset, 0, 12);
    int32_t hi = offset - lo;

    if (offset == hi + lo) {
        src_rw[0] |= encode_uimm20(hi);
        src_rw[1] |= encode_imm12(lo);
        return true;
    }

    return false;
}
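
/*
 * Worked example: for offset 0x12345678, lo = sextreg(0x678, 0, 12) = 0x678
 * and hi = 0x12345000, so AUIPC supplies hi and the paired ADDI/JALR/load
 * supplies lo.  When bit 11 of the offset is set, e.g. offset 0x1800,
 * lo becomes -0x800 and hi rounds up to 0x2000 to compensate.
 */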

static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    tcg_debug_assert(addend == 0);
    switch (type) {
    case R_RISCV_BRANCH:
        return reloc_sbimm12(code_ptr, (tcg_insn_unit *)value);
    case R_RISCV_JAL:
        return reloc_jimm20(code_ptr, (tcg_insn_unit *)value);
    case R_RISCV_CALL:
        return reloc_call(code_ptr, (tcg_insn_unit *)value);
    default:
        g_assert_not_reached();
    }
}

/*
 * TCG intrinsics
 */

static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    if (ret == arg) {
        return true;
    }
    switch (type) {
    case TCG_TYPE_I32:
    case TCG_TYPE_I64:
        tcg_out_opc_imm(s, OPC_ADDI, ret, arg, 0);
        break;
    default:
        g_assert_not_reached();
    }
    return true;
}

static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd,
                         tcg_target_long val)
{
    tcg_target_long lo, hi, tmp;
    int shift, ret;

    if (type == TCG_TYPE_I32) {
        val = (int32_t)val;
    }

    lo = sextreg(val, 0, 12);
    if (val == lo) {
        tcg_out_opc_imm(s, OPC_ADDI, rd, TCG_REG_ZERO, lo);
        return;
    }

    hi = val - lo;
    if (val == (int32_t)val) {
        tcg_out_opc_upper(s, OPC_LUI, rd, hi);
        if (lo != 0) {
            tcg_out_opc_imm(s, OPC_ADDIW, rd, rd, lo);
        }
        return;
    }

    tmp = tcg_pcrel_diff(s, (void *)val);
    if (tmp == (int32_t)tmp) {
        tcg_out_opc_upper(s, OPC_AUIPC, rd, 0);
        tcg_out_opc_imm(s, OPC_ADDI, rd, rd, 0);
        ret = reloc_call(s->code_ptr - 2, (const tcg_insn_unit *)val);
        tcg_debug_assert(ret == true);
        return;
    }

    /* Look for a single 20-bit section.  */
    shift = ctz64(val);
    tmp = val >> shift;
    if (tmp == sextreg(tmp, 0, 20)) {
        tcg_out_opc_upper(s, OPC_LUI, rd, tmp << 12);
        if (shift > 12) {
            tcg_out_opc_imm(s, OPC_SLLI, rd, rd, shift - 12);
        } else {
            tcg_out_opc_imm(s, OPC_SRAI, rd, rd, 12 - shift);
        }
        return;
    }

    /* Look for a few high zero bits, with lots of bits set in the middle.  */
    shift = clz64(val);
    tmp = val << shift;
    if (tmp == sextreg(tmp, 12, 20) << 12) {
        tcg_out_opc_upper(s, OPC_LUI, rd, tmp);
        tcg_out_opc_imm(s, OPC_SRLI, rd, rd, shift);
        return;
    } else if (tmp == sextreg(tmp, 0, 12)) {
        tcg_out_opc_imm(s, OPC_ADDI, rd, TCG_REG_ZERO, tmp);
        tcg_out_opc_imm(s, OPC_SRLI, rd, rd, shift);
        return;
    }

    /* Drop into the constant pool.  */
    new_pool_label(s, val, R_RISCV_CALL, s->code_ptr, 0);
    tcg_out_opc_upper(s, OPC_AUIPC, rd, 0);
    tcg_out_opc_imm(s, OPC_LD, rd, rd, 0);
}
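
/*
 * Worked example: tcg_out_movi(s, TCG_TYPE_I64, rd, 0x12345678) takes the
 * 32-bit path above: LUI rd, 0x12345000 then ADDIW rd, rd, 0x678.  A value
 * such as 0x7fff0000000 (assuming the pc-relative case does not apply)
 * has a single 20-bit section: LUI rd, 0x7fff000 then SLLI rd, rd, 16.
 */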

static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
{
    return false;
}

static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                             tcg_target_long imm)
{
    /* This function is only used for passing structs by reference. */
    g_assert_not_reached();
}

static void tcg_out_ext8u(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_imm(s, OPC_ANDI, ret, arg, 0xff);
}

static void tcg_out_ext16u(TCGContext *s, TCGReg ret, TCGReg arg)
{
    if (have_zbb) {
        tcg_out_opc_reg(s, OPC_ZEXT_H, ret, arg, TCG_REG_ZERO);
    } else {
        tcg_out_opc_imm(s, OPC_SLLIW, ret, arg, 16);
        tcg_out_opc_imm(s, OPC_SRLIW, ret, ret, 16);
    }
}

static void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg)
{
    if (have_zba) {
        tcg_out_opc_reg(s, OPC_ADD_UW, ret, arg, TCG_REG_ZERO);
    } else {
        tcg_out_opc_imm(s, OPC_SLLI, ret, arg, 32);
        tcg_out_opc_imm(s, OPC_SRLI, ret, ret, 32);
    }
}

static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    if (have_zbb) {
        tcg_out_opc_imm(s, OPC_SEXT_B, ret, arg, 0);
    } else {
        tcg_out_opc_imm(s, OPC_SLLIW, ret, arg, 24);
        tcg_out_opc_imm(s, OPC_SRAIW, ret, ret, 24);
    }
}

static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    if (have_zbb) {
        tcg_out_opc_imm(s, OPC_SEXT_H, ret, arg, 0);
    } else {
        tcg_out_opc_imm(s, OPC_SLLIW, ret, arg, 16);
        tcg_out_opc_imm(s, OPC_SRAIW, ret, ret, 16);
    }
}

static void tcg_out_ext32s(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_imm(s, OPC_ADDIW, ret, arg, 0);
}

static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg)
{
    if (ret != arg) {
        tcg_out_ext32s(s, ret, arg);
    }
}

static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_ext32u(s, ret, arg);
}

static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_ext32s(s, ret, arg);
}

static void tcg_out_ldst(TCGContext *s, RISCVInsn opc, TCGReg data,
                         TCGReg addr, intptr_t offset)
{
    intptr_t imm12 = sextreg(offset, 0, 12);

    if (offset != imm12) {
        intptr_t diff = tcg_pcrel_diff(s, (void *)offset);

        if (addr == TCG_REG_ZERO && diff == (int32_t)diff) {
            imm12 = sextreg(diff, 0, 12);
            tcg_out_opc_upper(s, OPC_AUIPC, TCG_REG_TMP2, diff - imm12);
        } else {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP2, offset - imm12);
            if (addr != TCG_REG_ZERO) {
                tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP2, TCG_REG_TMP2, addr);
            }
        }
        addr = TCG_REG_TMP2;
    }

    switch (opc) {
    case OPC_SB:
    case OPC_SH:
    case OPC_SW:
    case OPC_SD:
        tcg_out_opc_store(s, opc, addr, data, imm12);
        break;
    case OPC_LB:
    case OPC_LBU:
    case OPC_LH:
    case OPC_LHU:
    case OPC_LW:
    case OPC_LWU:
    case OPC_LD:
        tcg_out_opc_imm(s, opc, data, addr, imm12);
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg arg1, intptr_t arg2)
{
    RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_LW : OPC_LD;
    tcg_out_ldst(s, insn, arg, arg1, arg2);
}

static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg arg1, intptr_t arg2)
{
    RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_SW : OPC_SD;
    tcg_out_ldst(s, insn, arg, arg1, arg2);
}

static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                        TCGReg base, intptr_t ofs)
{
    if (val == 0) {
        tcg_out_st(s, type, TCG_REG_ZERO, base, ofs);
        return true;
    }
    return false;
}

static void tcg_out_addsub2(TCGContext *s,
                            TCGReg rl, TCGReg rh,
                            TCGReg al, TCGReg ah,
                            TCGArg bl, TCGArg bh,
                            bool cbl, bool cbh, bool is_sub, bool is32bit)
{
    const RISCVInsn opc_add = is32bit ? OPC_ADDW : OPC_ADD;
    const RISCVInsn opc_addi = is32bit ? OPC_ADDIW : OPC_ADDI;
    const RISCVInsn opc_sub = is32bit ? OPC_SUBW : OPC_SUB;
    TCGReg th = TCG_REG_TMP1;

    /* If we have a negative constant such that negating it would
       make the high part zero, we can (usually) eliminate one insn.  */
    if (cbl && cbh && bh == -1 && bl != 0) {
        bl = -bl;
        bh = 0;
        is_sub = !is_sub;
    }

    /* By operating on the high part first, we get to use the final
       carry operation to move back from the temporary.  */
    if (!cbh) {
        tcg_out_opc_reg(s, (is_sub ? opc_sub : opc_add), th, ah, bh);
    } else if (bh != 0 || ah == rl) {
        tcg_out_opc_imm(s, opc_addi, th, ah, (is_sub ? -bh : bh));
    } else {
        th = ah;
    }

    /* Note that tcg optimization should eliminate the bl == 0 case.  */
    if (is_sub) {
        if (cbl) {
            tcg_out_opc_imm(s, OPC_SLTIU, TCG_REG_TMP0, al, bl);
            tcg_out_opc_imm(s, opc_addi, rl, al, -bl);
        } else {
            tcg_out_opc_reg(s, OPC_SLTU, TCG_REG_TMP0, al, bl);
            tcg_out_opc_reg(s, opc_sub, rl, al, bl);
        }
        tcg_out_opc_reg(s, opc_sub, rh, th, TCG_REG_TMP0);
    } else {
        if (cbl) {
            tcg_out_opc_imm(s, opc_addi, rl, al, bl);
            tcg_out_opc_imm(s, OPC_SLTIU, TCG_REG_TMP0, rl, bl);
        } else if (al == bl) {
            /*
             * If the input regs overlap, this is a simple doubling
             * and carry-out is the input msb.  This special case is
             * required when the output reg overlaps the input,
             * but we might as well use it always.
             */
            tcg_out_opc_imm(s, OPC_SLTI, TCG_REG_TMP0, al, 0);
            tcg_out_opc_reg(s, opc_add, rl, al, al);
        } else {
            tcg_out_opc_reg(s, opc_add, rl, al, bl);
            tcg_out_opc_reg(s, OPC_SLTU, TCG_REG_TMP0,
                            rl, (rl == bl ? al : bl));
        }
        tcg_out_opc_reg(s, opc_add, rh, th, TCG_REG_TMP0);
    }
}
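
/*
 * Illustrative example: add2_i64 with constants bl = 1, bh = 0 (a 128-bit
 * increment) emits, assuming rl does not overlap ah:
 *   addi rl, al, 1; sltiu tmp0, rl, 1; add rh, ah, tmp0
 * where the SLTIU recovers the carry out of the low half.
 */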

static const struct {
    RISCVInsn op;
    bool swap;
} tcg_brcond_to_riscv[] = {
    [TCG_COND_EQ] =  { OPC_BEQ,  false },
    [TCG_COND_NE] =  { OPC_BNE,  false },
    [TCG_COND_LT] =  { OPC_BLT,  false },
    [TCG_COND_GE] =  { OPC_BGE,  false },
    [TCG_COND_LE] =  { OPC_BGE,  true  },
    [TCG_COND_GT] =  { OPC_BLT,  true  },
    [TCG_COND_LTU] = { OPC_BLTU, false },
    [TCG_COND_GEU] = { OPC_BGEU, false },
    [TCG_COND_LEU] = { OPC_BGEU, true  },
    [TCG_COND_GTU] = { OPC_BLTU, true  }
};

static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1,
                           TCGReg arg2, TCGLabel *l)
{
    RISCVInsn op = tcg_brcond_to_riscv[cond].op;

    tcg_debug_assert(op != 0);

    if (tcg_brcond_to_riscv[cond].swap) {
        TCGReg t = arg1;
        arg1 = arg2;
        arg2 = t;
    }

    tcg_out_reloc(s, s->code_ptr, R_RISCV_BRANCH, l, 0);
    tcg_out_opc_branch(s, op, arg1, arg2, 0);
}

#define SETCOND_INV    TCG_TARGET_NB_REGS
#define SETCOND_NEZ    (SETCOND_INV << 1)
#define SETCOND_FLAGS  (SETCOND_INV | SETCOND_NEZ)
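
/*
 * tcg_out_setcond_int returns a register number possibly or'ed with these
 * flags: e.g. (reg | SETCOND_NEZ) means "reg holds a zero/non-zero value,
 * treat non-zero as true", and SETCOND_INV additionally inverts the sense.
 * tcg_out_setcond and tcg_out_movcond decode and discharge the flags.
 */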

static int tcg_out_setcond_int(TCGContext *s, TCGCond cond, TCGReg ret,
                               TCGReg arg1, tcg_target_long arg2, bool c2)
{
    int flags = 0;

    switch (cond) {
    case TCG_COND_EQ:    /* -> NE  */
    case TCG_COND_GE:    /* -> LT  */
    case TCG_COND_GEU:   /* -> LTU */
    case TCG_COND_GT:    /* -> LE  */
    case TCG_COND_GTU:   /* -> LEU */
        cond = tcg_invert_cond(cond);
        flags ^= SETCOND_INV;
        break;
    default:
        break;
    }

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_LEU:
        /*
         * If we have a constant input, the most efficient way to implement
         * LE is by adding 1 and using LT.  Watch out for wrap around for LEU.
         * We don't need to care for this for LE because the constant input
         * is constrained to signed 12-bit, and 0x800 is representable in the
         * temporary register.
         */
        if (c2) {
            if (cond == TCG_COND_LEU) {
                /* unsigned <= -1 is true */
                if (arg2 == -1) {
                    tcg_out_movi(s, TCG_TYPE_REG, ret, !(flags & SETCOND_INV));
                    return ret;
                }
                cond = TCG_COND_LTU;
            } else {
                cond = TCG_COND_LT;
            }
            tcg_debug_assert(arg2 <= 0x7ff);
            if (++arg2 == 0x800) {
                tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_TMP0, arg2);
                arg2 = TCG_REG_TMP0;
                c2 = false;
            }
        } else {
            TCGReg tmp = arg2;
            arg2 = arg1;
            arg1 = tmp;
            cond = tcg_swap_cond(cond);    /* LE -> GE */
            cond = tcg_invert_cond(cond);  /* GE -> LT */
            flags ^= SETCOND_INV;
        }
        break;
    default:
        break;
    }

    switch (cond) {
    case TCG_COND_NE:
        flags |= SETCOND_NEZ;
        if (!c2) {
            tcg_out_opc_reg(s, OPC_XOR, ret, arg1, arg2);
        } else if (arg2 == 0) {
            ret = arg1;
        } else {
            tcg_out_opc_imm(s, OPC_XORI, ret, arg1, arg2);
        }
        break;

    case TCG_COND_LT:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SLTI, ret, arg1, arg2);
        } else {
            tcg_out_opc_reg(s, OPC_SLT, ret, arg1, arg2);
        }
        break;

    case TCG_COND_LTU:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SLTIU, ret, arg1, arg2);
        } else {
            tcg_out_opc_reg(s, OPC_SLTU, ret, arg1, arg2);
        }
        break;

    default:
        g_assert_not_reached();
    }

    return ret | flags;
}
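
/*
 * Worked example: setcond GTU x, y first inverts to LEU (setting
 * SETCOND_INV), then swaps the operands and inverts again to LTU,
 * emitting "sltu ret, y, x"; the two inversions cancel, so the
 * boolean needs no fixup.
 */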

static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret,
                            TCGReg arg1, tcg_target_long arg2, bool c2)
{
    int tmpflags = tcg_out_setcond_int(s, cond, ret, arg1, arg2, c2);

    if (tmpflags != ret) {
        TCGReg tmp = tmpflags & ~SETCOND_FLAGS;

        switch (tmpflags & SETCOND_FLAGS) {
        case SETCOND_INV:
            /* Intermediate result is boolean: simply invert. */
            tcg_out_opc_imm(s, OPC_XORI, ret, tmp, 1);
            break;
        case SETCOND_NEZ:
            /* Intermediate result is zero/non-zero: test != 0. */
            tcg_out_opc_reg(s, OPC_SLTU, ret, TCG_REG_ZERO, tmp);
            break;
        case SETCOND_NEZ | SETCOND_INV:
            /* Intermediate result is zero/non-zero: test == 0. */
            tcg_out_opc_imm(s, OPC_SLTIU, ret, tmp, 1);
            break;
        default:
            g_assert_not_reached();
        }
    }
}

static void tcg_out_movcond_zicond(TCGContext *s, TCGReg ret, TCGReg test_ne,
                                   int val1, bool c_val1,
                                   int val2, bool c_val2)
{
    if (val1 == 0) {
        if (c_val2) {
            tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_TMP1, val2);
            val2 = TCG_REG_TMP1;
        }
        tcg_out_opc_reg(s, OPC_CZERO_NEZ, ret, val2, test_ne);
        return;
    }

    if (val2 == 0) {
        if (c_val1) {
            tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_TMP1, val1);
            val1 = TCG_REG_TMP1;
        }
        tcg_out_opc_reg(s, OPC_CZERO_EQZ, ret, val1, test_ne);
        return;
    }

    if (c_val2) {
        if (c_val1) {
            tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_TMP1, val1 - val2);
        } else {
            tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_TMP1, val1, -val2);
        }
        tcg_out_opc_reg(s, OPC_CZERO_EQZ, ret, TCG_REG_TMP1, test_ne);
        tcg_out_opc_imm(s, OPC_ADDI, ret, ret, val2);
        return;
    }

    if (c_val1) {
        tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_TMP1, val2, -val1);
        tcg_out_opc_reg(s, OPC_CZERO_NEZ, ret, TCG_REG_TMP1, test_ne);
        tcg_out_opc_imm(s, OPC_ADDI, ret, ret, val1);
        return;
    }

    tcg_out_opc_reg(s, OPC_CZERO_NEZ, TCG_REG_TMP1, val2, test_ne);
    tcg_out_opc_reg(s, OPC_CZERO_EQZ, TCG_REG_TMP0, val1, test_ne);
    tcg_out_opc_reg(s, OPC_OR, ret, TCG_REG_TMP0, TCG_REG_TMP1);
}

static void tcg_out_movcond_br1(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg cmp1, TCGReg cmp2,
                                int val, bool c_val)
{
    RISCVInsn op;
    int disp = 8;

    tcg_debug_assert((unsigned)cond < ARRAY_SIZE(tcg_brcond_to_riscv));
    op = tcg_brcond_to_riscv[cond].op;
    tcg_debug_assert(op != 0);

    if (tcg_brcond_to_riscv[cond].swap) {
        tcg_out_opc_branch(s, op, cmp2, cmp1, disp);
    } else {
        tcg_out_opc_branch(s, op, cmp1, cmp2, disp);
    }
    if (c_val) {
        tcg_out_opc_imm(s, OPC_ADDI, ret, TCG_REG_ZERO, val);
    } else {
        tcg_out_opc_imm(s, OPC_ADDI, ret, val, 0);
    }
}

static void tcg_out_movcond_br2(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg cmp1, TCGReg cmp2,
                                int val1, bool c_val1,
                                int val2, bool c_val2)
{
    TCGReg tmp;

    /* TCG optimizer reorders to prefer ret matching val2. */
    if (!c_val2 && ret == val2) {
        cond = tcg_invert_cond(cond);
        tcg_out_movcond_br1(s, cond, ret, cmp1, cmp2, val1, c_val1);
        return;
    }

    if (!c_val1 && ret == val1) {
        tcg_out_movcond_br1(s, cond, ret, cmp1, cmp2, val2, c_val2);
        return;
    }

    tmp = (ret == cmp1 || ret == cmp2 ? TCG_REG_TMP1 : ret);
    if (c_val1) {
        tcg_out_movi(s, TCG_TYPE_REG, tmp, val1);
    } else {
        tcg_out_mov(s, TCG_TYPE_REG, tmp, val1);
    }
    tcg_out_movcond_br1(s, cond, tmp, cmp1, cmp2, val2, c_val2);
    tcg_out_mov(s, TCG_TYPE_REG, ret, tmp);
}

static void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGReg ret,
                            TCGReg cmp1, int cmp2, bool c_cmp2,
                            TCGReg val1, bool c_val1,
                            TCGReg val2, bool c_val2)
{
    int tmpflags;
    TCGReg t;

    if (!have_zicond && (!c_cmp2 || cmp2 == 0)) {
        tcg_out_movcond_br2(s, cond, ret, cmp1, cmp2,
                            val1, c_val1, val2, c_val2);
        return;
    }

    tmpflags = tcg_out_setcond_int(s, cond, TCG_REG_TMP0, cmp1, cmp2, c_cmp2);
    t = tmpflags & ~SETCOND_FLAGS;

    if (have_zicond) {
        if (tmpflags & SETCOND_INV) {
            tcg_out_movcond_zicond(s, ret, t, val2, c_val2, val1, c_val1);
        } else {
            tcg_out_movcond_zicond(s, ret, t, val1, c_val1, val2, c_val2);
        }
    } else {
        cond = tmpflags & SETCOND_INV ? TCG_COND_EQ : TCG_COND_NE;
        tcg_out_movcond_br2(s, cond, ret, t, TCG_REG_ZERO,
                            val1, c_val1, val2, c_val2);
    }
}

static void tcg_out_cltz(TCGContext *s, TCGType type, RISCVInsn insn,
                         TCGReg ret, TCGReg src1, int src2, bool c_src2)
{
    tcg_out_opc_imm(s, insn, ret, src1, 0);

    if (!c_src2 || src2 != (type == TCG_TYPE_I32 ? 32 : 64)) {
        /*
         * The requested zero result does not match the insn, so adjust.
         * Note that constraints put 'ret' in a new register, so the
         * computation above did not clobber either 'src1' or 'src2'.
         */
        tcg_out_movcond(s, TCG_COND_EQ, ret, src1, 0, true,
                        src2, c_src2, ret, false);
    }
}

static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *arg, bool tail)
{
    TCGReg link = tail ? TCG_REG_ZERO : TCG_REG_RA;
    ptrdiff_t offset = tcg_pcrel_diff(s, arg);
    int ret;

    tcg_debug_assert((offset & 1) == 0);
    if (offset == sextreg(offset, 0, 20)) {
        /* short jump: ±512 KiB, offset fits in signed 20 bits */
        tcg_out_opc_jump(s, OPC_JAL, link, offset);
    } else if (offset == (int32_t)offset) {
        /* long jump: ±2 GiB, offset fits in signed 32 bits */
        tcg_out_opc_upper(s, OPC_AUIPC, TCG_REG_TMP0, 0);
        tcg_out_opc_imm(s, OPC_JALR, link, TCG_REG_TMP0, 0);
        ret = reloc_call(s->code_ptr - 2, arg);
        tcg_debug_assert(ret == true);
    } else {
        /* far jump: 64-bit */
        tcg_target_long imm = sextreg((tcg_target_long)arg, 0, 12);
        tcg_target_long base = (tcg_target_long)arg - imm;
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, base);
        tcg_out_opc_imm(s, OPC_JALR, link, TCG_REG_TMP0, imm);
    }
}
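
/*
 * Worked example: a +64 MiB displacement fails the JAL range check but
 * fits in 32 bits, so AUIPC/JALR is emitted with zero immediates and
 * reloc_call patches in the hi/lo pair afterwards.
 */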

static void tcg_out_call(TCGContext *s, const tcg_insn_unit *arg,
                         const TCGHelperInfo *info)
{
    tcg_out_call_int(s, arg, false);
}

static void tcg_out_mb(TCGContext *s, TCGArg a0)
{
    tcg_insn_unit insn = OPC_FENCE;

    if (a0 & TCG_MO_LD_LD) {
        insn |= 0x02200000;
    }
    if (a0 & TCG_MO_ST_LD) {
        insn |= 0x01200000;
    }
    if (a0 & TCG_MO_LD_ST) {
        insn |= 0x02100000;
    }
    if (a0 & TCG_MO_ST_ST) {
        insn |= 0x01100000;
    }
    tcg_out32(s, insn);
}
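
/*
 * The constants above set the FENCE predecessor/successor bits:
 * PR = bit 25, PW = bit 24, SR = bit 21, SW = bit 20.  A full
 * TCG_MO_ALL barrier accumulates to "fence rw, rw" (0x0330000f).
 */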

/*
 * Load/store and TLB
 */

static void tcg_out_goto(TCGContext *s, const tcg_insn_unit *target)
{
    tcg_out_opc_jump(s, OPC_JAL, TCG_REG_ZERO, 0);
    bool ok = reloc_jimm20(s->code_ptr - 1, target);
    tcg_debug_assert(ok);
}

bool tcg_target_has_memory_bswap(MemOp memop)
{
    return false;
}

/* We have three temps, we might as well expose them. */
static const TCGLdstHelperParam ldst_helper_param = {
    .ntmp = 3, .tmp = { TCG_REG_TMP0, TCG_REG_TMP1, TCG_REG_TMP2 }
};

static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    MemOp opc = get_memop(l->oi);

    /* resolve label address */
    if (!reloc_sbimm12(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    /* call load helper */
    tcg_out_ld_helper_args(s, l, &ldst_helper_param);
    tcg_out_call_int(s, qemu_ld_helpers[opc & MO_SSIZE], false);
    tcg_out_ld_helper_ret(s, l, true, &ldst_helper_param);

    tcg_out_goto(s, l->raddr);
    return true;
}

static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    MemOp opc = get_memop(l->oi);

    /* resolve label address */
    if (!reloc_sbimm12(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    /* call store helper */
    tcg_out_st_helper_args(s, l, &ldst_helper_param);
    tcg_out_call_int(s, qemu_st_helpers[opc & MO_SIZE], false);

    tcg_out_goto(s, l->raddr);
    return true;
}

/* We expect to use a 12-bit negative offset from ENV.  */
#define MIN_TLB_MASK_TABLE_OFS  -(1 << 11)

/*
 * For softmmu, perform the TLB load and compare.
 * For useronly, perform any required alignment tests.
 * In both cases, return a TCGLabelQemuLdst structure if the slow path
 * is required and fill in @pbase with the host address for the fast path.
 */
static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, TCGReg *pbase,
                                           TCGReg addr_reg, MemOpIdx oi,
                                           bool is_ld)
{
    TCGType addr_type = s->addr_type;
    TCGLabelQemuLdst *ldst = NULL;
    MemOp opc = get_memop(oi);
    TCGAtomAlign aa;
    unsigned a_mask;

    aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, false);
    a_mask = (1u << aa.align) - 1;

#ifdef CONFIG_SOFTMMU
    unsigned s_bits = opc & MO_SIZE;
    unsigned s_mask = (1u << s_bits) - 1;
    int mem_index = get_mmuidx(oi);
    int fast_ofs = tlb_mask_table_ofs(s, mem_index);
    int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask);
    int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table);
    int compare_mask;
    TCGReg addr_adj;

    ldst = new_ldst_label(s);
    ldst->is_ld = is_ld;
    ldst->oi = oi;
    ldst->addrlo_reg = addr_reg;

    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_AREG0, mask_ofs);
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, table_ofs);

    tcg_out_opc_imm(s, OPC_SRLI, TCG_REG_TMP2, addr_reg,
                    s->page_bits - CPU_TLB_ENTRY_BITS);
    tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0);
    tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1);

    /*
     * For aligned accesses, we check the first byte and include the alignment
     * bits within the address.  For unaligned access, we check that we don't
     * cross pages using the address of the last byte of the access.
     */
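    /*
     * E.g. for a 4-byte access that may be unaligned (a_mask = 0,
     * s_mask = 3), addr_adj below becomes addr + 3, so the masked
     * comparison diverts any access that would cross a page boundary
     * to the slow path.
     */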
    addr_adj = addr_reg;
    if (a_mask < s_mask) {
        addr_adj = TCG_REG_TMP0;
        tcg_out_opc_imm(s, addr_type == TCG_TYPE_I32 ? OPC_ADDIW : OPC_ADDI,
                        addr_adj, addr_reg, s_mask - a_mask);
    }
    compare_mask = s->page_mask | a_mask;
    if (compare_mask == sextreg(compare_mask, 0, 12)) {
        tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addr_adj, compare_mask);
    } else {
        tcg_out_movi(s, addr_type, TCG_REG_TMP1, compare_mask);
        tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP1, TCG_REG_TMP1, addr_adj);
    }

    /* Load the tlb comparator and the addend.  */
    QEMU_BUILD_BUG_ON(HOST_BIG_ENDIAN);
    tcg_out_ld(s, addr_type, TCG_REG_TMP0, TCG_REG_TMP2,
               is_ld ? offsetof(CPUTLBEntry, addr_read)
                     : offsetof(CPUTLBEntry, addr_write));
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2,
               offsetof(CPUTLBEntry, addend));

    /* Compare masked address with the TLB entry. */
    ldst->label_ptr[0] = s->code_ptr;
    tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP0, TCG_REG_TMP1, 0);

    /* TLB Hit - translate address using addend.  */
    if (addr_type != TCG_TYPE_I32) {
        tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, addr_reg, TCG_REG_TMP2);
    } else if (have_zba) {
        tcg_out_opc_reg(s, OPC_ADD_UW, TCG_REG_TMP0, addr_reg, TCG_REG_TMP2);
    } else {
        tcg_out_ext32u(s, TCG_REG_TMP0, addr_reg);
        tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_REG_TMP0, TCG_REG_TMP2);
    }
    *pbase = TCG_REG_TMP0;
#else
    TCGReg base;

    if (a_mask) {
        ldst = new_ldst_label(s);
        ldst->is_ld = is_ld;
        ldst->oi = oi;
        ldst->addrlo_reg = addr_reg;

        /* We are expecting alignment max 7, so we can always use andi. */
        tcg_debug_assert(a_mask == sextreg(a_mask, 0, 12));
        tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addr_reg, a_mask);

        ldst->label_ptr[0] = s->code_ptr;
        tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP1, TCG_REG_ZERO, 0);
    }

    if (guest_base != 0) {
        base = TCG_REG_TMP0;
        if (addr_type != TCG_TYPE_I32) {
            tcg_out_opc_reg(s, OPC_ADD, base, addr_reg, TCG_GUEST_BASE_REG);
        } else if (have_zba) {
            tcg_out_opc_reg(s, OPC_ADD_UW, base, addr_reg, TCG_GUEST_BASE_REG);
        } else {
            tcg_out_ext32u(s, base, addr_reg);
            tcg_out_opc_reg(s, OPC_ADD, base, base, TCG_GUEST_BASE_REG);
        }
    } else if (addr_type != TCG_TYPE_I32) {
        base = addr_reg;
    } else {
        base = TCG_REG_TMP0;
        tcg_out_ext32u(s, base, addr_reg);
    }
    *pbase = base;
#endif

    return ldst;
}

static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg val,
                                   TCGReg base, MemOp opc, TCGType type)
{
    /* Byte swapping is left to middle-end expansion. */
    tcg_debug_assert((opc & MO_BSWAP) == 0);

    switch (opc & (MO_SSIZE)) {
    case MO_UB:
        tcg_out_opc_imm(s, OPC_LBU, val, base, 0);
        break;
    case MO_SB:
        tcg_out_opc_imm(s, OPC_LB, val, base, 0);
        break;
    case MO_UW:
        tcg_out_opc_imm(s, OPC_LHU, val, base, 0);
        break;
    case MO_SW:
        tcg_out_opc_imm(s, OPC_LH, val, base, 0);
        break;
    case MO_UL:
        if (type == TCG_TYPE_I64) {
            tcg_out_opc_imm(s, OPC_LWU, val, base, 0);
            break;
        }
        /* FALLTHRU */
    case MO_SL:
        tcg_out_opc_imm(s, OPC_LW, val, base, 0);
        break;
    case MO_UQ:
        tcg_out_opc_imm(s, OPC_LD, val, base, 0);
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
                            MemOpIdx oi, TCGType data_type)
{
    TCGLabelQemuLdst *ldst;
    TCGReg base;

    ldst = prepare_host_addr(s, &base, addr_reg, oi, true);
    tcg_out_qemu_ld_direct(s, data_reg, base, get_memop(oi), data_type);

    if (ldst) {
        ldst->type = data_type;
        ldst->datalo_reg = data_reg;
        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
    }
}

static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg val,
                                   TCGReg base, MemOp opc)
{
    /* Byte swapping is left to middle-end expansion. */
    tcg_debug_assert((opc & MO_BSWAP) == 0);

    switch (opc & (MO_SSIZE)) {
    case MO_8:
        tcg_out_opc_store(s, OPC_SB, base, val, 0);
        break;
    case MO_16:
        tcg_out_opc_store(s, OPC_SH, base, val, 0);
        break;
    case MO_32:
        tcg_out_opc_store(s, OPC_SW, base, val, 0);
        break;
    case MO_64:
        tcg_out_opc_store(s, OPC_SD, base, val, 0);
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
                            MemOpIdx oi, TCGType data_type)
{
    TCGLabelQemuLdst *ldst;
    TCGReg base;

    ldst = prepare_host_addr(s, &base, addr_reg, oi, false);
    tcg_out_qemu_st_direct(s, data_reg, base, get_memop(oi));

    if (ldst) {
        ldst->type = data_type;
        ldst->datalo_reg = data_reg;
        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
    }
}

static const tcg_insn_unit *tb_ret_addr;

static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
{
    /* Reuse the zeroing that exists for goto_ptr.  */
    if (a0 == 0) {
        tcg_out_call_int(s, tcg_code_gen_epilogue, true);
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A0, a0);
        tcg_out_call_int(s, tb_ret_addr, true);
    }
}

static void tcg_out_goto_tb(TCGContext *s, int which)
{
    /* Direct branch will be patched by tb_target_set_jmp_target. */
    set_jmp_insn_offset(s, which);
    tcg_out32(s, OPC_JAL);

    /* When branch is out of range, fall through to indirect. */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_REG_ZERO,
               get_jmp_target_addr(s, which));
    tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, TCG_REG_TMP0, 0);
    set_jmp_reset_offset(s, which);
}

void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
                              uintptr_t jmp_rx, uintptr_t jmp_rw)
{
    uintptr_t addr = tb->jmp_target_addr[n];
    ptrdiff_t offset = addr - jmp_rx;
    tcg_insn_unit insn;

    /* Either directly branch, or fall through to indirect branch. */
    if (offset == sextreg(offset, 0, 20)) {
        insn = encode_uj(OPC_JAL, TCG_REG_ZERO, offset);
    } else {
        insn = OPC_NOP;
    }
    qatomic_set((uint32_t *)jmp_rw, insn);
    flush_idcache_range(jmp_rx, jmp_rw, 4);
}

static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS])
{
    TCGArg a0 = args[0];
    TCGArg a1 = args[1];
    TCGArg a2 = args[2];
    int c2 = const_args[2];

    switch (opc) {
    case INDEX_op_goto_ptr:
        tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, a0, 0);
        break;

    case INDEX_op_br:
        tcg_out_reloc(s, s->code_ptr, R_RISCV_JAL, arg_label(a0), 0);
        tcg_out_opc_jump(s, OPC_JAL, TCG_REG_ZERO, 0);
        break;

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
        tcg_out_ldst(s, OPC_LBU, a0, a1, a2);
        break;
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
        tcg_out_ldst(s, OPC_LB, a0, a1, a2);
        break;
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
        tcg_out_ldst(s, OPC_LHU, a0, a1, a2);
        break;
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
        tcg_out_ldst(s, OPC_LH, a0, a1, a2);
        break;
    case INDEX_op_ld32u_i64:
        tcg_out_ldst(s, OPC_LWU, a0, a1, a2);
        break;
    case INDEX_op_ld_i32:
    case INDEX_op_ld32s_i64:
        tcg_out_ldst(s, OPC_LW, a0, a1, a2);
        break;
    case INDEX_op_ld_i64:
        tcg_out_ldst(s, OPC_LD, a0, a1, a2);
        break;

    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
        tcg_out_ldst(s, OPC_SB, a0, a1, a2);
        break;
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
        tcg_out_ldst(s, OPC_SH, a0, a1, a2);
        break;
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
        tcg_out_ldst(s, OPC_SW, a0, a1, a2);
        break;
    case INDEX_op_st_i64:
        tcg_out_ldst(s, OPC_SD, a0, a1, a2);
        break;

    case INDEX_op_add_i32:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ADDIW, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_ADDW, a0, a1, a2);
        }
        break;
    case INDEX_op_add_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ADDI, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_ADD, a0, a1, a2);
        }
        break;

    case INDEX_op_sub_i32:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ADDIW, a0, a1, -a2);
        } else {
            tcg_out_opc_reg(s, OPC_SUBW, a0, a1, a2);
        }
        break;
    case INDEX_op_sub_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ADDI, a0, a1, -a2);
        } else {
            tcg_out_opc_reg(s, OPC_SUB, a0, a1, a2);
        }
        break;

    case INDEX_op_and_i32:
    case INDEX_op_and_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ANDI, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_AND, a0, a1, a2);
        }
        break;

    case INDEX_op_or_i32:
    case INDEX_op_or_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ORI, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_OR, a0, a1, a2);
        }
        break;

    case INDEX_op_xor_i32:
    case INDEX_op_xor_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_XORI, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_XOR, a0, a1, a2);
        }
        break;

    case INDEX_op_andc_i32:
    case INDEX_op_andc_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ANDI, a0, a1, ~a2);
        } else {
            tcg_out_opc_reg(s, OPC_ANDN, a0, a1, a2);
        }
        break;
    case INDEX_op_orc_i32:
    case INDEX_op_orc_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ORI, a0, a1, ~a2);
        } else {
            tcg_out_opc_reg(s, OPC_ORN, a0, a1, a2);
        }
        break;
    case INDEX_op_eqv_i32:
    case INDEX_op_eqv_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_XORI, a0, a1, ~a2);
        } else {
            tcg_out_opc_reg(s, OPC_XNOR, a0, a1, a2);
        }
        break;

    case INDEX_op_not_i32:
    case INDEX_op_not_i64:
        tcg_out_opc_imm(s, OPC_XORI, a0, a1, -1);
        break;

    case INDEX_op_neg_i32:
        tcg_out_opc_reg(s, OPC_SUBW, a0, TCG_REG_ZERO, a1);
        break;
    case INDEX_op_neg_i64:
        tcg_out_opc_reg(s, OPC_SUB, a0, TCG_REG_ZERO, a1);
        break;

    case INDEX_op_mul_i32:
        tcg_out_opc_reg(s, OPC_MULW, a0, a1, a2);
        break;
    case INDEX_op_mul_i64:
        tcg_out_opc_reg(s, OPC_MUL, a0, a1, a2);
        break;

    case INDEX_op_div_i32:
        tcg_out_opc_reg(s, OPC_DIVW, a0, a1, a2);
        break;
    case INDEX_op_div_i64:
        tcg_out_opc_reg(s, OPC_DIV, a0, a1, a2);
        break;

    case INDEX_op_divu_i32:
        tcg_out_opc_reg(s, OPC_DIVUW, a0, a1, a2);
        break;
    case INDEX_op_divu_i64:
        tcg_out_opc_reg(s, OPC_DIVU, a0, a1, a2);
        break;

    case INDEX_op_rem_i32:
        tcg_out_opc_reg(s, OPC_REMW, a0, a1, a2);
        break;
    case INDEX_op_rem_i64:
        tcg_out_opc_reg(s, OPC_REM, a0, a1, a2);
        break;

    case INDEX_op_remu_i32:
        tcg_out_opc_reg(s, OPC_REMUW, a0, a1, a2);
        break;
    case INDEX_op_remu_i64:
        tcg_out_opc_reg(s, OPC_REMU, a0, a1, a2);
        break;

    case INDEX_op_shl_i32:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SLLIW, a0, a1, a2 & 0x1f);
        } else {
            tcg_out_opc_reg(s, OPC_SLLW, a0, a1, a2);
        }
        break;
    case INDEX_op_shl_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SLLI, a0, a1, a2 & 0x3f);
        } else {
            tcg_out_opc_reg(s, OPC_SLL, a0, a1, a2);
        }
        break;

    case INDEX_op_shr_i32:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SRLIW, a0, a1, a2 & 0x1f);
        } else {
            tcg_out_opc_reg(s, OPC_SRLW, a0, a1, a2);
        }
        break;
    case INDEX_op_shr_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SRLI, a0, a1, a2 & 0x3f);
        } else {
            tcg_out_opc_reg(s, OPC_SRL, a0, a1, a2);
        }
        break;

    case INDEX_op_sar_i32:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SRAIW, a0, a1, a2 & 0x1f);
        } else {
            tcg_out_opc_reg(s, OPC_SRAW, a0, a1, a2);
        }
        break;
    case INDEX_op_sar_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SRAI, a0, a1, a2 & 0x3f);
        } else {
            tcg_out_opc_reg(s, OPC_SRA, a0, a1, a2);
        }
        break;

    case INDEX_op_rotl_i32:
        if (c2) {
            tcg_out_opc_imm(s, OPC_RORIW, a0, a1, -a2 & 0x1f);
        } else {
            tcg_out_opc_reg(s, OPC_ROLW, a0, a1, a2);
        }
        break;
    case INDEX_op_rotl_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_RORI, a0, a1, -a2 & 0x3f);
        } else {
            tcg_out_opc_reg(s, OPC_ROL, a0, a1, a2);
        }
        break;

    case INDEX_op_rotr_i32:
        if (c2) {
            tcg_out_opc_imm(s, OPC_RORIW, a0, a1, a2 & 0x1f);
        } else {
            tcg_out_opc_reg(s, OPC_RORW, a0, a1, a2);
        }
        break;
    case INDEX_op_rotr_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_RORI, a0, a1, a2 & 0x3f);
        } else {
            tcg_out_opc_reg(s, OPC_ROR, a0, a1, a2);
        }
        break;
1713
1714    case INDEX_op_bswap64_i64:
1715        tcg_out_opc_imm(s, OPC_REV8, a0, a1, 0);
1716        break;
1717    case INDEX_op_bswap32_i32:
1718        a2 = 0;
1719        /* fall through */
1720    case INDEX_op_bswap32_i64:
1721        tcg_out_opc_imm(s, OPC_REV8, a0, a1, 0);
1722        if (a2 & TCG_BSWAP_OZ) {
1723            tcg_out_opc_imm(s, OPC_SRLI, a0, a0, 32);
1724        } else {
1725            tcg_out_opc_imm(s, OPC_SRAI, a0, a0, 32);
1726        }
1727        break;
1728    case INDEX_op_bswap16_i64:
1729    case INDEX_op_bswap16_i32:
1730        tcg_out_opc_imm(s, OPC_REV8, a0, a1, 0);
1731        if (a2 & TCG_BSWAP_OZ) {
1732            tcg_out_opc_imm(s, OPC_SRLI, a0, a0, 48);
1733        } else {
1734            tcg_out_opc_imm(s, OPC_SRAI, a0, a0, 48);
1735        }
1736        break;
1737
1738    case INDEX_op_ctpop_i32:
1739        tcg_out_opc_imm(s, OPC_CPOPW, a0, a1, 0);
1740        break;
1741    case INDEX_op_ctpop_i64:
1742        tcg_out_opc_imm(s, OPC_CPOP, a0, a1, 0);
1743        break;
1744
1745    case INDEX_op_clz_i32:
1746        tcg_out_cltz(s, TCG_TYPE_I32, OPC_CLZW, a0, a1, a2, c2);
1747        break;
1748    case INDEX_op_clz_i64:
1749        tcg_out_cltz(s, TCG_TYPE_I64, OPC_CLZ, a0, a1, a2, c2);
1750        break;
1751    case INDEX_op_ctz_i32:
1752        tcg_out_cltz(s, TCG_TYPE_I32, OPC_CTZW, a0, a1, a2, c2);
1753        break;
1754    case INDEX_op_ctz_i64:
1755        tcg_out_cltz(s, TCG_TYPE_I64, OPC_CTZ, a0, a1, a2, c2);
1756        break;
1757
1758    case INDEX_op_add2_i32:
1759        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
1760                        const_args[4], const_args[5], false, true);
1761        break;
1762    case INDEX_op_add2_i64:
1763        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
1764                        const_args[4], const_args[5], false, false);
1765        break;
1766    case INDEX_op_sub2_i32:
1767        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
1768                        const_args[4], const_args[5], true, true);
1769        break;
1770    case INDEX_op_sub2_i64:
1771        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
1772                        const_args[4], const_args[5], true, false);
1773        break;
1774
1775    case INDEX_op_brcond_i32:
1776    case INDEX_op_brcond_i64:
1777        tcg_out_brcond(s, a2, a0, a1, arg_label(args[3]));
1778        break;
1779
1780    case INDEX_op_setcond_i32:
1781    case INDEX_op_setcond_i64:
1782        tcg_out_setcond(s, args[3], a0, a1, a2, c2);
1783        break;
1784
1785    case INDEX_op_movcond_i32:
1786    case INDEX_op_movcond_i64:
1787        tcg_out_movcond(s, args[5], a0, a1, a2, c2,
1788                        args[3], const_args[3], args[4], const_args[4]);
1789        break;
1790
1791    case INDEX_op_qemu_ld_a32_i32:
1792    case INDEX_op_qemu_ld_a64_i32:
1793        tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32);
1794        break;
1795    case INDEX_op_qemu_ld_a32_i64:
1796    case INDEX_op_qemu_ld_a64_i64:
1797        tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64);
1798        break;
1799    case INDEX_op_qemu_st_a32_i32:
1800    case INDEX_op_qemu_st_a64_i32:
1801        tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32);
1802        break;
1803    case INDEX_op_qemu_st_a32_i64:
1804    case INDEX_op_qemu_st_a64_i64:
1805        tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64);
1806        break;
1807
1808    case INDEX_op_extrh_i64_i32:
1809        tcg_out_opc_imm(s, OPC_SRAI, a0, a1, 32);
1810        break;
1811
1812    case INDEX_op_mulsh_i32:
1813    case INDEX_op_mulsh_i64:
1814        tcg_out_opc_reg(s, OPC_MULH, a0, a1, a2);
1815        break;
1816
1817    case INDEX_op_muluh_i32:
1818    case INDEX_op_muluh_i64:
1819        tcg_out_opc_reg(s, OPC_MULHU, a0, a1, a2);
1820        break;
1821
1822    case INDEX_op_mb:
1823        tcg_out_mb(s, a0);
1824        break;
1825
1826    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
1827    case INDEX_op_mov_i64:
1828    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
1829    case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb.  */
1830    case INDEX_op_goto_tb:  /* Always emitted via tcg_out_goto_tb.  */
1831    case INDEX_op_ext8s_i32:  /* Always emitted via tcg_reg_alloc_op.  */
1832    case INDEX_op_ext8s_i64:
1833    case INDEX_op_ext8u_i32:
1834    case INDEX_op_ext8u_i64:
1835    case INDEX_op_ext16s_i32:
1836    case INDEX_op_ext16s_i64:
1837    case INDEX_op_ext16u_i32:
1838    case INDEX_op_ext16u_i64:
1839    case INDEX_op_ext32s_i64:
1840    case INDEX_op_ext32u_i64:
1841    case INDEX_op_ext_i32_i64:
1842    case INDEX_op_extu_i32_i64:
1843    case INDEX_op_extrl_i64_i32:
1844    default:
1845        g_assert_not_reached();
1846    }
1847}
1848
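/*
 * Operand constraint sets: C_Oo_Ii(...) lists o output constraints
 * followed by i input constraints.  "r" is any register; "rZ" also
 * accepts constant zero (materialized as x0); "rI" accepts a signed
 * 12-bit immediate (TCG_CT_CONST_S12); and C_N1_I2 marks an output
 * that must not overlap the inputs.  The remaining letters (J, N, M)
 * are backend-specific immediate ranges defined alongside the
 * TCG_CT_CONST_* bits above.
 */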
1849static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
1850{
1851    switch (op) {
1852    case INDEX_op_goto_ptr:
1853        return C_O0_I1(r);
1854
1855    case INDEX_op_ld8u_i32:
1856    case INDEX_op_ld8s_i32:
1857    case INDEX_op_ld16u_i32:
1858    case INDEX_op_ld16s_i32:
1859    case INDEX_op_ld_i32:
1860    case INDEX_op_not_i32:
1861    case INDEX_op_neg_i32:
1862    case INDEX_op_ld8u_i64:
1863    case INDEX_op_ld8s_i64:
1864    case INDEX_op_ld16u_i64:
1865    case INDEX_op_ld16s_i64:
1866    case INDEX_op_ld32s_i64:
1867    case INDEX_op_ld32u_i64:
1868    case INDEX_op_ld_i64:
1869    case INDEX_op_not_i64:
1870    case INDEX_op_neg_i64:
1871    case INDEX_op_ext8u_i32:
1872    case INDEX_op_ext8u_i64:
1873    case INDEX_op_ext16u_i32:
1874    case INDEX_op_ext16u_i64:
1875    case INDEX_op_ext32u_i64:
1876    case INDEX_op_extu_i32_i64:
1877    case INDEX_op_ext8s_i32:
1878    case INDEX_op_ext8s_i64:
1879    case INDEX_op_ext16s_i32:
1880    case INDEX_op_ext16s_i64:
1881    case INDEX_op_ext32s_i64:
1882    case INDEX_op_extrl_i64_i32:
1883    case INDEX_op_extrh_i64_i32:
1884    case INDEX_op_ext_i32_i64:
1885    case INDEX_op_bswap16_i32:
1886    case INDEX_op_bswap32_i32:
1887    case INDEX_op_bswap16_i64:
1888    case INDEX_op_bswap32_i64:
1889    case INDEX_op_bswap64_i64:
1890    case INDEX_op_ctpop_i32:
1891    case INDEX_op_ctpop_i64:
1892        return C_O1_I1(r, r);
1893
1894    case INDEX_op_st8_i32:
1895    case INDEX_op_st16_i32:
1896    case INDEX_op_st_i32:
1897    case INDEX_op_st8_i64:
1898    case INDEX_op_st16_i64:
1899    case INDEX_op_st32_i64:
1900    case INDEX_op_st_i64:
1901        return C_O0_I2(rZ, r);
1902
1903    case INDEX_op_add_i32:
1904    case INDEX_op_and_i32:
1905    case INDEX_op_or_i32:
1906    case INDEX_op_xor_i32:
1907    case INDEX_op_add_i64:
1908    case INDEX_op_and_i64:
1909    case INDEX_op_or_i64:
1910    case INDEX_op_xor_i64:
1911    case INDEX_op_setcond_i32:
1912    case INDEX_op_setcond_i64:
1913        return C_O1_I2(r, r, rI);
1914
1915    case INDEX_op_andc_i32:
1916    case INDEX_op_andc_i64:
1917    case INDEX_op_orc_i32:
1918    case INDEX_op_orc_i64:
1919    case INDEX_op_eqv_i32:
1920    case INDEX_op_eqv_i64:
1921        return C_O1_I2(r, r, rJ);
1922
1923    case INDEX_op_sub_i32:
1924    case INDEX_op_sub_i64:
1925        return C_O1_I2(r, rZ, rN);
1926
1927    case INDEX_op_mul_i32:
1928    case INDEX_op_mulsh_i32:
1929    case INDEX_op_muluh_i32:
1930    case INDEX_op_div_i32:
1931    case INDEX_op_divu_i32:
1932    case INDEX_op_rem_i32:
1933    case INDEX_op_remu_i32:
1934    case INDEX_op_mul_i64:
1935    case INDEX_op_mulsh_i64:
1936    case INDEX_op_muluh_i64:
1937    case INDEX_op_div_i64:
1938    case INDEX_op_divu_i64:
1939    case INDEX_op_rem_i64:
1940    case INDEX_op_remu_i64:
1941        return C_O1_I2(r, rZ, rZ);
1942
1943    case INDEX_op_shl_i32:
1944    case INDEX_op_shr_i32:
1945    case INDEX_op_sar_i32:
1946    case INDEX_op_rotl_i32:
1947    case INDEX_op_rotr_i32:
1948    case INDEX_op_shl_i64:
1949    case INDEX_op_shr_i64:
1950    case INDEX_op_sar_i64:
1951    case INDEX_op_rotl_i64:
1952    case INDEX_op_rotr_i64:
1953        return C_O1_I2(r, r, ri);
1954
1955    case INDEX_op_clz_i32:
1956    case INDEX_op_clz_i64:
1957    case INDEX_op_ctz_i32:
1958    case INDEX_op_ctz_i64:
1959        return C_N1_I2(r, r, rM);
1960
1961    case INDEX_op_brcond_i32:
1962    case INDEX_op_brcond_i64:
1963        return C_O0_I2(rZ, rZ);
1964
1965    case INDEX_op_movcond_i32:
1966    case INDEX_op_movcond_i64:
1967        return C_O1_I4(r, r, rI, rM, rM);
1968
1969    case INDEX_op_add2_i32:
1970    case INDEX_op_add2_i64:
1971    case INDEX_op_sub2_i32:
1972    case INDEX_op_sub2_i64:
1973        return C_O2_I4(r, r, rZ, rZ, rM, rM);
1974
1975    case INDEX_op_qemu_ld_a32_i32:
1976    case INDEX_op_qemu_ld_a64_i32:
1977    case INDEX_op_qemu_ld_a32_i64:
1978    case INDEX_op_qemu_ld_a64_i64:
1979        return C_O1_I1(r, r);
1980    case INDEX_op_qemu_st_a32_i32:
1981    case INDEX_op_qemu_st_a64_i32:
1982    case INDEX_op_qemu_st_a32_i64:
1983    case INDEX_op_qemu_st_a64_i64:
1984        return C_O0_I2(rZ, r);
1985
1986    default:
1987        g_assert_not_reached();
1988    }
1989}
1990
1991static const int tcg_target_callee_save_regs[] = {
1992    TCG_REG_S0,       /* used for the global env (TCG_AREG0) */
1993    TCG_REG_S1,
1994    TCG_REG_S2,
1995    TCG_REG_S3,
1996    TCG_REG_S4,
1997    TCG_REG_S5,
1998    TCG_REG_S6,
1999    TCG_REG_S7,
2000    TCG_REG_S8,
2001    TCG_REG_S9,
2002    TCG_REG_S10,
2003    TCG_REG_S11,
2004    TCG_REG_RA,       /* should be last for ABI compliance */
2005};
2006
2007/* Stack frame parameters.  */
2008#define REG_SIZE   (TCG_TARGET_REG_BITS / 8)
2009#define SAVE_SIZE  ((int)ARRAY_SIZE(tcg_target_callee_save_regs) * REG_SIZE)
2010#define TEMP_SIZE  (CPU_TEMP_BUF_NLONGS * (int)sizeof(long))
2011#define FRAME_SIZE ((TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE + SAVE_SIZE \
2012                     + TCG_TARGET_STACK_ALIGN - 1) \
2013                    & -TCG_TARGET_STACK_ALIGN)
2014#define SAVE_OFS   (TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE)
2015
2016/* We're expecting to be able to use an immediate for frame allocation.  */
2017QEMU_BUILD_BUG_ON(FRAME_SIZE > 0x7ff);
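/*
 * As a rough worked example, assuming the usual values of
 * TCG_STATIC_CALL_ARGS_SIZE = 128, CPU_TEMP_BUF_NLONGS = 128 and
 * 16-byte stack alignment on RV64: TEMP_SIZE = 128 * 8 = 1024,
 * SAVE_SIZE = 13 * 8 = 104, so FRAME_SIZE = (128 + 1024 + 104 + 15)
 * & -16 = 1264, well inside the 0x7ff bound checked above.
 */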
2018
2019/* Generate global QEMU prologue and epilogue code */
2020static void tcg_target_qemu_prologue(TCGContext *s)
2021{
2022    int i;
2023
2024    tcg_set_frame(s, TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE, TEMP_SIZE);
2025
2026    /* TB prologue */
2027    tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_SP, TCG_REG_SP, -FRAME_SIZE);
2028    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
2029        tcg_out_st(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
2030                   TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
2031    }
2032
2033#if !defined(CONFIG_SOFTMMU)
2034    tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base);
2035    tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
2036#endif
2037
2038    /* Call generated code */
2039    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
2040    tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, tcg_target_call_iarg_regs[1], 0);
2041
2042    /* Return path for goto_ptr. Set return value to 0 */
2043    tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
2044    tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_A0, TCG_REG_ZERO);
2045
2046    /* TB epilogue */
2047    tb_ret_addr = tcg_splitwx_to_rx(s->code_ptr);
2048    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
2049        tcg_out_ld(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
2050                   TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
2051    }
2052
2053    tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_SP, TCG_REG_SP, FRAME_SIZE);
2054    tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, TCG_REG_RA, 0);
2055}
2056
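/*
 * Runtime ISA probing: when an extension's presence is not known at
 * compile time, execute one of its instructions under a temporary
 * SIGILL handler.  If the instruction is unimplemented, the handler
 * below advances the PC past the 4-byte instruction and records the
 * failure in got_sigill.
 */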
2057static volatile sig_atomic_t got_sigill;
2058
2059static void sigill_handler(int signo, siginfo_t *si, void *data)
2060{
2061    /* Skip past the faulting instruction. */
2062    ucontext_t *uc = (ucontext_t *)data;
2063    uc->uc_mcontext.__gregs[REG_PC] += 4;
2064
2065    got_sigill = 1;
2066}
2067
2068static void tcg_target_detect_isa(void)
2069{
2070#if !defined(have_zba) || !defined(have_zbb) || !defined(have_zicond)
2071    /*
2072     * TODO: It is expected that this will be determinable via the
2073     * Linux riscv_hwprobe syscall, which is not yet merged.
2074     * In the meantime, test via sigill.
2075     */
2076
2077    struct sigaction sa_old, sa_new;
2078
2079    memset(&sa_new, 0, sizeof(sa_new));
2080    sa_new.sa_flags = SA_SIGINFO;
2081    sa_new.sa_sigaction = sigill_handler;
2082    sigaction(SIGILL, &sa_new, &sa_old);
2083
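    /*
     * Each probe encodes an R-type instruction explicitly so that it
     * assembles even when the extension is not enabled at build time:
     * ".insn r opcode, funct3, funct7, rd, rs1, rs2".
     */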
2084#ifndef have_zba
2085    /* Probe for Zba: add.uw zero,zero,zero. */
2086    got_sigill = 0;
2087    asm volatile(".insn r 0x3b, 0, 0x04, zero, zero, zero" : : : "memory");
2088    have_zba = !got_sigill;
2089#endif
2090
2091#ifndef have_zbb
2092    /* Probe for Zbb: andn zero,zero,zero. */
2093    got_sigill = 0;
2094    asm volatile(".insn r 0x33, 7, 0x20, zero, zero, zero" : : : "memory");
2095    have_zbb = !got_sigill;
2096#endif
2097
2098#ifndef have_zicond
2099    /* Probe for Zicond: czero.eqz zero,zero,zero. */
2100    got_sigill = 0;
2101    asm volatile(".insn r 0x33, 5, 0x07, zero, zero, zero" : : : "memory");
2102    have_zicond = !got_sigill;
2103#endif
2104
2105    sigaction(SIGILL, &sa_old, NULL);
2106#endif
2107}
2108
2109static void tcg_target_init(TCGContext *s)
2110{
2111    tcg_target_detect_isa();
2112
2113    tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffff;
2114    tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffff;
2115
2116    tcg_target_call_clobber_regs = -1u;
2117    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S0);
2118    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S1);
2119    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S2);
2120    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S3);
2121    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S4);
2122    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S5);
2123    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S6);
2124    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S7);
2125    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S8);
2126    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S9);
2127    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S10);
2128    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S11);
2129
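    /*
     * Reserve registers the register allocator may never hand out: the
     * hard-wired zero register, the three internal temporaries used by
     * this backend, and the ABI-fixed sp/gp/tp.
     */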
2130    s->reserved_regs = 0;
2131    tcg_regset_set_reg(s->reserved_regs, TCG_REG_ZERO);
2132    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP0);
2133    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1);
2134    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP2);
2135    tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP);
2136    tcg_regset_set_reg(s->reserved_regs, TCG_REG_GP);
2137    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TP);
2138}
2139
2140typedef struct {
2141    DebugFrameHeader h;
2142    uint8_t fde_def_cfa[4];
2143    uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2];
2144} DebugFrame;
2145
2146#define ELF_HOST_MACHINE EM_RISCV
2147
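/*
 * The DebugFrame below is a minimal DWARF CIE/FDE pair describing the
 * prologue's frame layout; tcg_register_jit passes it to
 * tcg_register_jit_int so that debuggers can unwind through generated
 * code.  With data_align = -8, each DW_CFA_offset operand is a factored
 * offset: e.g. "0x80 + 9, 12" places s1 at CFA - 12 * 8 = -96.
 */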
2148static const DebugFrame debug_frame = {
2149    .h.cie.len = sizeof(DebugFrameCIE) - 4, /* length after .len member */
2150    .h.cie.id = -1,
2151    .h.cie.version = 1,
2152    .h.cie.code_align = 1,
2153    .h.cie.data_align = -(TCG_TARGET_REG_BITS / 8) & 0x7f, /* sleb128 */
2154    .h.cie.return_column = TCG_REG_RA,
2155
2156    /* Total FDE size does not include the "len" member.  */
2157    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),
2158
2159    .fde_def_cfa = {
2160        12, TCG_REG_SP,                 /* DW_CFA_def_cfa sp, ... */
2161        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
2162        (FRAME_SIZE >> 7)
2163    },
2164    .fde_reg_ofs = {
2165        0x80 + 9,  12,                  /* DW_CFA_offset, s1,  -96 */
2166        0x80 + 18, 11,                  /* DW_CFA_offset, s2,  -88 */
2167        0x80 + 19, 10,                  /* DW_CFA_offset, s3,  -80 */
2168        0x80 + 20, 9,                   /* DW_CFA_offset, s4,  -72 */
2169        0x80 + 21, 8,                   /* DW_CFA_offset, s5,  -64 */
2170        0x80 + 22, 7,                   /* DW_CFA_offset, s6,  -56 */
2171        0x80 + 23, 6,                   /* DW_CFA_offset, s7,  -48 */
2172        0x80 + 24, 5,                   /* DW_CFA_offset, s8,  -40 */
2173        0x80 + 25, 4,                   /* DW_CFA_offset, s9,  -32 */
2174        0x80 + 26, 3,                   /* DW_CFA_offset, s10, -24 */
2175        0x80 + 27, 2,                   /* DW_CFA_offset, s11, -16 */
2176        0x80 + 1 , 1,                   /* DW_CFA_offset, ra,  -8 */
2177    }
2178};
2179
2180void tcg_register_jit(const void *buf, size_t buf_size)
2181{
2182    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
2183}
2184