xref: /openbmc/qemu/tcg/loongarch64/tcg-target.c.inc (revision a363e1e179445102d7940e92d394d6c00c126f13)
/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2021 WANG Xuerui <git@xen0n.name>
 *
 * Based on tcg/riscv/tcg-target.c.inc
 *
 * Copyright (c) 2018 SiFive, Inc
 * Copyright (c) 2008-2009 Arnaud Patard <arnaud.patard@rtp-net.org>
 * Copyright (c) 2009 Aurelien Jarno <aurelien@aurel32.net>
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include <asm/hwcap.h>

/* used for function call generation */
#define TCG_REG_CALL_STACK              TCG_REG_SP
#define TCG_TARGET_STACK_ALIGN          16
#define TCG_TARGET_CALL_STACK_OFFSET    0
#define TCG_TARGET_CALL_ARG_I32         TCG_CALL_ARG_NORMAL
#define TCG_TARGET_CALL_ARG_I64         TCG_CALL_ARG_NORMAL
#define TCG_TARGET_CALL_ARG_I128        TCG_CALL_ARG_NORMAL
#define TCG_TARGET_CALL_RET_I128        TCG_CALL_RET_NORMAL

#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "zero",
    "ra",
    "tp",
    "sp",
    "a0",
    "a1",
    "a2",
    "a3",
    "a4",
    "a5",
    "a6",
    "a7",
    "t0",
    "t1",
    "t2",
    "t3",
    "t4",
    "t5",
    "t6",
    "t7",
    "t8",
    "r21", /* reserved in the LP64* ABI, hence no ABI name */
    "s9",
    "s0",
    "s1",
    "s2",
    "s3",
    "s4",
    "s5",
    "s6",
    "s7",
    "s8",
    "vr0",
    "vr1",
    "vr2",
    "vr3",
    "vr4",
    "vr5",
    "vr6",
    "vr7",
    "vr8",
    "vr9",
    "vr10",
    "vr11",
    "vr12",
    "vr13",
    "vr14",
    "vr15",
    "vr16",
    "vr17",
    "vr18",
    "vr19",
    "vr20",
    "vr21",
    "vr22",
    "vr23",
    "vr24",
    "vr25",
    "vr26",
    "vr27",
    "vr28",
    "vr29",
    "vr30",
    "vr31",
};
#endif

static const int tcg_target_reg_alloc_order[] = {
    /* Registers preserved across calls */
    /* TCG_REG_S0 reserved for TCG_AREG0 */
    TCG_REG_S1,
    TCG_REG_S2,
    TCG_REG_S3,
    TCG_REG_S4,
    TCG_REG_S5,
    TCG_REG_S6,
    TCG_REG_S7,
    TCG_REG_S8,
    TCG_REG_S9,

    /* Registers (potentially) clobbered across calls */
    TCG_REG_T0,
    TCG_REG_T1,
    TCG_REG_T2,
    TCG_REG_T3,
    TCG_REG_T4,
    TCG_REG_T5,
    TCG_REG_T6,
    TCG_REG_T7,
    TCG_REG_T8,

    /* Argument registers, opposite order of allocation.  */
    TCG_REG_A7,
    TCG_REG_A6,
    TCG_REG_A5,
    TCG_REG_A4,
    TCG_REG_A3,
    TCG_REG_A2,
    TCG_REG_A1,
    TCG_REG_A0,

    /* Vector registers */
    TCG_REG_V0, TCG_REG_V1, TCG_REG_V2, TCG_REG_V3,
    TCG_REG_V4, TCG_REG_V5, TCG_REG_V6, TCG_REG_V7,
    TCG_REG_V8, TCG_REG_V9, TCG_REG_V10, TCG_REG_V11,
    TCG_REG_V12, TCG_REG_V13, TCG_REG_V14, TCG_REG_V15,
    TCG_REG_V16, TCG_REG_V17, TCG_REG_V18, TCG_REG_V19,
    TCG_REG_V20, TCG_REG_V21, TCG_REG_V22, TCG_REG_V23,
    /* V24 - V31 are caller-saved, and skipped.  */
};

static const int tcg_target_call_iarg_regs[] = {
    TCG_REG_A0,
    TCG_REG_A1,
    TCG_REG_A2,
    TCG_REG_A3,
    TCG_REG_A4,
    TCG_REG_A5,
    TCG_REG_A6,
    TCG_REG_A7,
};

static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
{
    tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
    tcg_debug_assert(slot >= 0 && slot <= 1);
    return TCG_REG_A0 + slot;
}

#define TCG_GUEST_BASE_REG TCG_REG_S1

#define TCG_CT_CONST_S12   0x100
#define TCG_CT_CONST_S32   0x200
#define TCG_CT_CONST_U12   0x400
#define TCG_CT_CONST_WSZ   0x800
#define TCG_CT_CONST_VCMP  0x1000
#define TCG_CT_CONST_VADD  0x2000

#define ALL_GENERAL_REGS   MAKE_64BIT_MASK(0, 32)
#define ALL_VECTOR_REGS    MAKE_64BIT_MASK(32, 32)

static inline tcg_target_long sextreg(tcg_target_long val, int pos, int len)
{
    return sextract64(val, pos, len);
}

/* test if a constant matches the constraint */
static bool tcg_target_const_match(int64_t val, int ct,
                                   TCGType type, TCGCond cond, int vece)
{
    if (ct & TCG_CT_CONST) {
        return true;
    }
    if ((ct & TCG_CT_CONST_S12) && val == sextreg(val, 0, 12)) {
        return true;
    }
    if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) {
        return true;
    }
    if ((ct & TCG_CT_CONST_U12) && val >= 0 && val <= 0xfff) {
        return true;
    }
    if ((ct & TCG_CT_CONST_WSZ) && val == (type == TCG_TYPE_I32 ? 32 : 64)) {
        return true;
    }
    if (ct & (TCG_CT_CONST_VCMP | TCG_CT_CONST_VADD)) {
        int64_t vec_val = sextract64(val, 0, 8 << vece);
        if (ct & TCG_CT_CONST_VCMP) {
            switch (cond) {
            case TCG_COND_EQ:
            case TCG_COND_LE:
            case TCG_COND_LT:
                return -0x10 <= vec_val && vec_val <= 0x0f;
            case TCG_COND_LEU:
            case TCG_COND_LTU:
                return 0x00 <= vec_val && vec_val <= 0x1f;
            default:
                return false;
            }
        }
        if ((ct & TCG_CT_CONST_VADD) && -0x1f <= vec_val && vec_val <= 0x1f) {
            return true;
        }
    }
    return false;
}
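
/*
 * Illustrative note (not from the original source): the TCG_CT_CONST_VCMP
 * ranges above mirror the LSX compare-immediate encodings: the signed si5
 * field of vseqi/vslei/vslti covers -0x10..0x0f, and the unsigned ui5
 * field of the unsigned variants covers 0..0x1f.  For example, with
 * vece == MO_8 and cond == TCG_COND_EQ, a splat of 0x0f can be emitted as
 * vseqi.b, while 0x10 must be materialized into a vector register first.
 * Likewise, TCG_CT_CONST_VADD's -0x1f..0x1f admits |val| in the ui5 field
 * of vaddi/vsubi.
 */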

/*
 * Relocations
 */

/*
 * The relocation records defined in the LoongArch ELF psABI v1.00 are way
 * too complicated; a whopping stack machine is needed to stuff the fields,
 * and at the very least one SOP_PUSH and one SOP_POP (of the correct format)
 * are needed.
 *
 * Hence, define our own simpler relocation types. Numbers are chosen so as
 * not to collide with potential future additions to the true ELF relocation
 * type enum.
 */

/* Field Sk16, shifted right by 2; suitable for conditional jumps */
#define R_LOONGARCH_BR_SK16     256
/* Field Sd10k16, shifted right by 2; suitable for B and BL */
#define R_LOONGARCH_BR_SD10K16  257

static bool reloc_br_sk16(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    intptr_t offset = (intptr_t)target - (intptr_t)src_rx;

    tcg_debug_assert((offset & 3) == 0);
    offset >>= 2;
    if (offset == sextreg(offset, 0, 16)) {
        *src_rw = deposit64(*src_rw, 10, 16, offset);
        return true;
    }

    return false;
}

static bool reloc_br_sd10k16(tcg_insn_unit *src_rw,
                             const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    intptr_t offset = (intptr_t)target - (intptr_t)src_rx;

    tcg_debug_assert((offset & 3) == 0);
    offset >>= 2;
    if (offset == sextreg(offset, 0, 26)) {
        *src_rw = deposit64(*src_rw, 0, 10, offset >> 16); /* slot d10 */
        *src_rw = deposit64(*src_rw, 10, 16, offset); /* slot k16 */
        return true;
    }

    return false;
}
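
/*
 * A worked example of the Sd10k16 split above, for illustration only:
 * a branch displacement of 0x1234560 bytes gives offset = 0x48d158 after
 * the >> 2.  deposit64(..., 0, 10, offset >> 16) stores 0x48 in the d10
 * slot and deposit64(..., 10, 16, offset) stores 0xd158 in the k16 slot,
 * which is how B and BL encode their 26-bit word offset as two fields.
 */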

static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    tcg_debug_assert(addend == 0);
    switch (type) {
    case R_LOONGARCH_BR_SK16:
        return reloc_br_sk16(code_ptr, (tcg_insn_unit *)value);
    case R_LOONGARCH_BR_SD10K16:
        return reloc_br_sd10k16(code_ptr, (tcg_insn_unit *)value);
    default:
        g_assert_not_reached();
    }
}

#include "tcg-insn-defs.c.inc"

/*
 * TCG intrinsics
 */

static void tcg_out_mb(TCGContext *s, TCGArg a0)
{
    /* Baseline LoongArch only has the full barrier, unfortunately.  */
    tcg_out_opc_dbar(s, 0);
}

static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    if (ret == arg) {
        return true;
    }
    switch (type) {
    case TCG_TYPE_I32:
    case TCG_TYPE_I64:
        if (ret < TCG_REG_V0) {
            if (arg < TCG_REG_V0) {
                /*
                 * The conventional register-register move on LoongArch is
                 * `or dst, src, zero`.
                 */
                tcg_out_opc_or(s, ret, arg, TCG_REG_ZERO);
            } else {
                tcg_out_opc_movfr2gr_d(s, ret, arg);
            }
        } else {
            if (arg < TCG_REG_V0) {
                tcg_out_opc_movgr2fr_d(s, ret, arg);
            } else {
                tcg_out_opc_fmov_d(s, ret, arg);
            }
        }
        break;
    case TCG_TYPE_V64:
    case TCG_TYPE_V128:
        tcg_out_opc_vori_b(s, ret, arg, 0);
        break;
    case TCG_TYPE_V256:
        tcg_out_opc_xvori_b(s, ret, arg, 0);
        break;
    default:
        g_assert_not_reached();
    }
    return true;
}

/* Loads a 32-bit immediate into rd, sign-extended.  */
static void tcg_out_movi_i32(TCGContext *s, TCGReg rd, int32_t val)
{
    tcg_target_long lo = sextreg(val, 0, 12);
    tcg_target_long hi12 = sextreg(val, 12, 20);

    /* Single-instruction cases.  */
    if (hi12 == 0) {
        /* val fits in uimm12: ori rd, zero, val */
        tcg_out_opc_ori(s, rd, TCG_REG_ZERO, val);
        return;
    }
    if (hi12 == sextreg(lo, 12, 20)) {
        /* val fits in simm12: addi.w rd, zero, val */
        tcg_out_opc_addi_w(s, rd, TCG_REG_ZERO, val);
        return;
    }

    /* The high bits are nonzero; load with lu12i.w plus an optional ori.  */
    tcg_out_opc_lu12i_w(s, rd, hi12);
    if (lo != 0) {
        tcg_out_opc_ori(s, rd, rd, lo & 0xfff);
    }
}
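
/*
 * A worked example of the above, for illustration only: val = 0x12345678
 * has hi12 = 0x12345 and lo = 0x678, so the emitted sequence is
 * `lu12i.w rd, 0x12345` (rd = 0x12345000) followed by
 * `ori rd, rd, 0x678` (rd = 0x12345678).
 */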

static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd,
                         tcg_target_long val)
{
    /*
     * LoongArch conventionally loads 64-bit immediates in at most 4 steps,
     * with dedicated instructions for filling the respective bitfields
     * below:
     *
     *        6                   5                   4               3
     *  3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2
     * +-----------------------+---------------------------------------+...
     * |          hi52         |                  hi32                 |
     * +-----------------------+---------------------------------------+...
     *       3                   2                   1
     *     1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
     * ...+-------------------------------------+-------------------------+
     *    |                 hi12                |            lo           |
     * ...+-------------------------------------+-------------------------+
     *
     * Check if val belongs to one of the several fast cases, before falling
     * back to the slow path.
     */

    intptr_t src_rx, pc_offset;
    tcg_target_long hi12, hi32, hi52;

    /* Value fits in signed i32.  */
    if (type == TCG_TYPE_I32 || val == (int32_t)val) {
        tcg_out_movi_i32(s, rd, val);
        return;
    }

    /* PC-relative cases.  */
    src_rx = (intptr_t)tcg_splitwx_to_rx(s->code_ptr);
    if ((val & 3) == 0) {
        pc_offset = val - src_rx;
        if (pc_offset == sextreg(pc_offset, 0, 22)) {
            /* Single pcaddu2i.  */
            tcg_out_opc_pcaddu2i(s, rd, pc_offset >> 2);
            return;
        }
    }

    pc_offset = (val >> 12) - (src_rx >> 12);
    if (pc_offset == sextreg(pc_offset, 0, 20)) {
        /* Load with pcalau12i + ori.  */
        tcg_target_long val_lo = val & 0xfff;
        tcg_out_opc_pcalau12i(s, rd, pc_offset);
        if (val_lo != 0) {
            tcg_out_opc_ori(s, rd, rd, val_lo);
        }
        return;
    }

    hi12 = sextreg(val, 12, 20);
    hi32 = sextreg(val, 32, 20);
    hi52 = sextreg(val, 52, 12);

    /* Single cu52i.d case.  */
    if ((hi52 != 0) && (ctz64(val) >= 52)) {
        tcg_out_opc_cu52i_d(s, rd, TCG_REG_ZERO, hi52);
        return;
    }

    /* Slow path.  Initialize the low 32 bits, then concat high bits.  */
    tcg_out_movi_i32(s, rd, val);

    /* Load hi32 and hi52 explicitly when they are unexpected values. */
    if (hi32 != sextreg(hi12, 20, 20)) {
        tcg_out_opc_cu32i_d(s, rd, hi32);
    }

    if (hi52 != sextreg(hi32, 20, 12)) {
        tcg_out_opc_cu52i_d(s, rd, rd, hi52);
    }
}
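
/*
 * A worked slow-path example, for illustration only: for
 * val = 0x123456789abcdef0, tcg_out_movi_i32() first materializes the
 * sign-extended low half 0xffffffff9abcdef0.  hi32 = 0x45678 differs
 * from the sign extension of hi12 (-1, since bit 31 of val is set), so
 * `cu32i.d rd, 0x45678` replaces bits 32-51; then hi52 = 0x123 differs
 * from the sign extension of hi32 (0), so `cu52i.d rd, rd, 0x123`
 * fills bits 52-63.
 */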

static void tcg_out_addi(TCGContext *s, TCGType type, TCGReg rd,
                         TCGReg rs, tcg_target_long imm)
{
    tcg_target_long lo12 = sextreg(imm, 0, 12);
    tcg_target_long hi16 = sextreg(imm - lo12, 16, 16);

    /*
     * Note that there's a hole in between hi16 and lo12:
     *
     *       3                   2                   1                   0
     *     1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
     * ...+-------------------------------+-------+-----------------------+
     *    |             hi16              |       |          lo12         |
     * ...+-------------------------------+-------+-----------------------+
     *
     * For bits within that hole, it's more efficient to use LU12I and ADD.
     */
    if (imm == (hi16 << 16) + lo12) {
        if (hi16) {
            tcg_out_opc_addu16i_d(s, rd, rs, hi16);
            rs = rd;
        }
        if (type == TCG_TYPE_I32) {
            tcg_out_opc_addi_w(s, rd, rs, lo12);
        } else if (lo12) {
            tcg_out_opc_addi_d(s, rd, rs, lo12);
        } else {
            tcg_out_mov(s, type, rd, rs);
        }
    } else {
        tcg_out_movi(s, type, TCG_REG_TMP0, imm);
        if (type == TCG_TYPE_I32) {
            tcg_out_opc_add_w(s, rd, rs, TCG_REG_TMP0);
        } else {
            tcg_out_opc_add_d(s, rd, rs, TCG_REG_TMP0);
        }
    }
}
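
/*
 * Worked examples of the split above, for illustration only:
 * imm = 0x7fff800 gives lo12 = -0x800 and hi16 = 0x800, and
 * (0x800 << 16) + (-0x800) reconstructs imm, so addu16i.d + addi.d
 * suffice.  imm = 0x12345 gives lo12 = 0x345 and hi16 = 0x1, but
 * (0x1 << 16) + 0x345 = 0x10345 != imm because bits 12-15 (the hole)
 * are nonzero, so the else branch materializes imm with tcg_out_movi()
 * and adds it with a plain add.
 */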

static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
{
    return false;
}

static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                             tcg_target_long imm)
{
    /* This function is only used for passing structs by reference. */
    g_assert_not_reached();
}

static void tcg_out_ext8u(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_andi(s, ret, arg, 0xff);
}

static void tcg_out_ext16u(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_bstrpick_w(s, ret, arg, 0, 15);
}

static void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_bstrpick_d(s, ret, arg, 0, 31);
}

static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_sext_b(s, ret, arg);
}

static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_sext_h(s, ret, arg);
}

static void tcg_out_ext32s(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_addi_w(s, ret, arg, 0);
}

static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg)
{
    if (ret != arg) {
        tcg_out_ext32s(s, ret, arg);
    }
}

static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_ext32u(s, ret, arg);
}

static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_ext32s(s, ret, arg);
}

#define SETCOND_INV    TCG_TARGET_NB_REGS
#define SETCOND_NEZ    (SETCOND_INV << 1)
#define SETCOND_FLAGS  (SETCOND_INV | SETCOND_NEZ)

static int tcg_out_setcond_int(TCGContext *s, TCGCond cond, TCGReg ret,
                               TCGReg arg1, tcg_target_long arg2, bool c2)
{
    int flags = 0;

    switch (cond) {
    case TCG_COND_EQ:    /* -> NE  */
    case TCG_COND_GE:    /* -> LT  */
    case TCG_COND_GEU:   /* -> LTU */
    case TCG_COND_GT:    /* -> LE  */
    case TCG_COND_GTU:   /* -> LEU */
        cond = tcg_invert_cond(cond);
        flags ^= SETCOND_INV;
        break;
    default:
        break;
    }

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_LEU:
        /*
         * If we have a constant input, the most efficient way to implement
         * LE is by adding 1 and using LT.  Watch out for wrap-around for LEU.
         * We don't need to care about this for LE because the constant input
         * is still constrained to int32_t, and INT32_MAX+1 is representable
         * in the 64-bit temporary register.
         */
        if (c2) {
            if (cond == TCG_COND_LEU) {
                /* unsigned <= -1 is true */
                if (arg2 == -1) {
                    tcg_out_movi(s, TCG_TYPE_REG, ret, !(flags & SETCOND_INV));
                    return ret;
                }
                cond = TCG_COND_LTU;
            } else {
                cond = TCG_COND_LT;
            }
            arg2 += 1;
        } else {
            TCGReg tmp = arg2;
            arg2 = arg1;
            arg1 = tmp;
            cond = tcg_swap_cond(cond);    /* LE -> GE */
            cond = tcg_invert_cond(cond);  /* GE -> LT */
            flags ^= SETCOND_INV;
        }
        break;
    default:
        break;
    }

    switch (cond) {
    case TCG_COND_NE:
        flags |= SETCOND_NEZ;
        if (!c2) {
            tcg_out_opc_xor(s, ret, arg1, arg2);
        } else if (arg2 == 0) {
            ret = arg1;
        } else if (arg2 >= 0 && arg2 <= 0xfff) {
            tcg_out_opc_xori(s, ret, arg1, arg2);
        } else {
            tcg_out_addi(s, TCG_TYPE_REG, ret, arg1, -arg2);
        }
        break;

    case TCG_COND_LT:
    case TCG_COND_LTU:
        if (c2) {
            if (arg2 >= -0x800 && arg2 <= 0x7ff) {
                if (cond == TCG_COND_LT) {
                    tcg_out_opc_slti(s, ret, arg1, arg2);
                } else {
                    tcg_out_opc_sltui(s, ret, arg1, arg2);
                }
                break;
            }
            tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_TMP0, arg2);
            arg2 = TCG_REG_TMP0;
        }
        if (cond == TCG_COND_LT) {
            tcg_out_opc_slt(s, ret, arg1, arg2);
        } else {
            tcg_out_opc_sltu(s, ret, arg1, arg2);
        }
        break;

    default:
        g_assert_not_reached();
    }

    return ret | flags;
}
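
/*
 * Worked examples, for illustration only: setcond LE with constant
 * arg2 = 5 becomes LT 6 and is emitted as `slti ret, arg1, 6`;
 * setcond GTU with constant arg2 = 7 is first inverted to LEU
 * (setting SETCOND_INV), then becomes LTU 8 emitted as
 * `sltui ret, arg1, 8`, and the caller below undoes the inversion
 * with `xori ret, ret, 1`.
 */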

static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret,
                            TCGReg arg1, tcg_target_long arg2,
                            bool c2, bool neg)
{
    int tmpflags = tcg_out_setcond_int(s, cond, ret, arg1, arg2, c2);
    TCGReg tmp = tmpflags & ~SETCOND_FLAGS;

    if (neg) {
        /* If intermediate result is zero/non-zero: test != 0. */
        if (tmpflags & SETCOND_NEZ) {
            tcg_out_opc_sltu(s, ret, TCG_REG_ZERO, tmp);
            tmp = ret;
        }
        /* Produce the 0/-1 result. */
        if (tmpflags & SETCOND_INV) {
            tcg_out_opc_addi_d(s, ret, tmp, -1);
        } else {
            tcg_out_opc_sub_d(s, ret, TCG_REG_ZERO, tmp);
        }
    } else {
        switch (tmpflags & SETCOND_FLAGS) {
        case 0:
            tcg_debug_assert(tmp == ret);
            break;
        case SETCOND_INV:
            /* Intermediate result is boolean: simply invert. */
            tcg_out_opc_xori(s, ret, tmp, 1);
            break;
        case SETCOND_NEZ:
            /* Intermediate result is zero/non-zero: test != 0. */
            tcg_out_opc_sltu(s, ret, TCG_REG_ZERO, tmp);
            break;
        case SETCOND_NEZ | SETCOND_INV:
            /* Intermediate result is zero/non-zero: test == 0. */
            tcg_out_opc_sltui(s, ret, tmp, 1);
            break;
        default:
            g_assert_not_reached();
        }
    }
}

static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond,
                         TCGReg dest, TCGReg arg1, TCGReg arg2)
{
    tcg_out_setcond(s, cond, dest, arg1, arg2, false, false);
}

static void tgen_setcondi(TCGContext *s, TCGType type, TCGCond cond,
                          TCGReg dest, TCGReg arg1, tcg_target_long arg2)
{
    tcg_out_setcond(s, cond, dest, arg1, arg2, true, false);
}

static const TCGOutOpSetcond outop_setcond = {
    .base.static_constraint = C_O1_I2(r, r, rJ),
    .out_rrr = tgen_setcond,
    .out_rri = tgen_setcondi,
};

static void tgen_negsetcond(TCGContext *s, TCGType type, TCGCond cond,
                            TCGReg dest, TCGReg arg1, TCGReg arg2)
{
    tcg_out_setcond(s, cond, dest, arg1, arg2, false, true);
}

static void tgen_negsetcondi(TCGContext *s, TCGType type, TCGCond cond,
                             TCGReg dest, TCGReg arg1, tcg_target_long arg2)
{
    tcg_out_setcond(s, cond, dest, arg1, arg2, true, true);
}

static const TCGOutOpSetcond outop_negsetcond = {
    .base.static_constraint = C_O1_I2(r, r, rJ),
    .out_rrr = tgen_negsetcond,
    .out_rri = tgen_negsetcondi,
};

static void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGReg ret,
                            TCGReg c1, tcg_target_long c2, bool const2,
                            TCGReg v1, TCGReg v2)
{
    int tmpflags = tcg_out_setcond_int(s, cond, TCG_REG_TMP0, c1, c2, const2);
    TCGReg t;

    /* Standardize the test below to t != 0. */
    if (tmpflags & SETCOND_INV) {
        t = v1, v1 = v2, v2 = t;
    }

    t = tmpflags & ~SETCOND_FLAGS;
    if (v1 == TCG_REG_ZERO) {
        tcg_out_opc_masknez(s, ret, v2, t);
    } else if (v2 == TCG_REG_ZERO) {
        tcg_out_opc_maskeqz(s, ret, v1, t);
    } else {
        tcg_out_opc_masknez(s, TCG_REG_TMP2, v2, t); /* t ? 0 : v2 */
        tcg_out_opc_maskeqz(s, TCG_REG_TMP1, v1, t); /* t ? v1 : 0 */
        tcg_out_opc_or(s, ret, TCG_REG_TMP1, TCG_REG_TMP2);
    }
}

/*
 * Branch helpers
 */

static const struct {
    LoongArchInsn op;
    bool swap;
} tcg_brcond_to_loongarch[] = {
    [TCG_COND_EQ] =  { OPC_BEQ,  false },
    [TCG_COND_NE] =  { OPC_BNE,  false },
    [TCG_COND_LT] =  { OPC_BGT,  true  },
    [TCG_COND_GE] =  { OPC_BLE,  true  },
    [TCG_COND_LE] =  { OPC_BLE,  false },
    [TCG_COND_GT] =  { OPC_BGT,  false },
    [TCG_COND_LTU] = { OPC_BGTU, true  },
    [TCG_COND_GEU] = { OPC_BLEU, true  },
    [TCG_COND_LEU] = { OPC_BLEU, false },
    [TCG_COND_GTU] = { OPC_BGTU, false }
};

static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1,
                           TCGReg arg2, TCGLabel *l)
{
    LoongArchInsn op = tcg_brcond_to_loongarch[cond].op;

    tcg_debug_assert(op != 0);

    if (tcg_brcond_to_loongarch[cond].swap) {
        TCGReg t = arg1;
        arg1 = arg2;
        arg2 = t;
    }

    /* All conditional branch insns belong to the DJSk16 format. */
    tcg_out_reloc(s, s->code_ptr, R_LOONGARCH_BR_SK16, l, 0);
    tcg_out32(s, encode_djsk16_insn(op, arg1, arg2, 0));
}

static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *arg, bool tail)
{
    TCGReg link = tail ? TCG_REG_ZERO : TCG_REG_RA;
    ptrdiff_t offset = tcg_pcrel_diff(s, arg);

    tcg_debug_assert((offset & 3) == 0);
    if (offset == sextreg(offset, 0, 28)) {
        /* short jump: +/- 256MiB */
        if (tail) {
            tcg_out_opc_b(s, offset >> 2);
        } else {
            tcg_out_opc_bl(s, offset >> 2);
        }
    } else if (offset == sextreg(offset, 0, 38)) {
        /* long jump: +/- 256GiB */
        tcg_target_long lo = sextreg(offset, 0, 18);
        tcg_target_long hi = offset - lo;
        tcg_out_opc_pcaddu18i(s, TCG_REG_TMP0, hi >> 18);
        tcg_out_opc_jirl(s, link, TCG_REG_TMP0, lo >> 2);
    } else {
        /* far jump: 64-bit */
        tcg_target_long lo = sextreg((tcg_target_long)arg, 0, 18);
        tcg_target_long hi = (tcg_target_long)arg - lo;
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, hi);
        tcg_out_opc_jirl(s, link, TCG_REG_TMP0, lo >> 2);
    }
}
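
/*
 * A worked example of the long-jump case, for illustration only: for
 * offset = 0x40012344, lo = 0x12344 and hi = 0x40000000, so
 * `pcaddu18i tmp0, 0x1000` computes pc + 0x40000000 and
 * `jirl link, tmp0, 0x48d1` adds 0x48d1 << 2 = 0x12344, landing
 * exactly at pc + offset.
 */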

static void tcg_out_call(TCGContext *s, const tcg_insn_unit *arg,
                         const TCGHelperInfo *info)
{
    tcg_out_call_int(s, arg, false);
}

/*
 * Load/store helpers
 */

static void tcg_out_ldst(TCGContext *s, LoongArchInsn opc, TCGReg data,
                         TCGReg addr, intptr_t offset)
{
    intptr_t imm12 = sextreg(offset, 0, 12);

    if (offset != imm12) {
        intptr_t diff = tcg_pcrel_diff(s, (void *)offset);

        if (addr == TCG_REG_ZERO && diff == (int32_t)diff) {
            imm12 = sextreg(diff, 0, 12);
            tcg_out_opc_pcaddu12i(s, TCG_REG_TMP2, (diff - imm12) >> 12);
        } else {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP2, offset - imm12);
            if (addr != TCG_REG_ZERO) {
                tcg_out_opc_add_d(s, TCG_REG_TMP2, TCG_REG_TMP2, addr);
            }
        }
        addr = TCG_REG_TMP2;
    }

    switch (opc) {
    case OPC_LD_B:
    case OPC_LD_BU:
    case OPC_LD_H:
    case OPC_LD_HU:
    case OPC_LD_W:
    case OPC_LD_WU:
    case OPC_LD_D:
    case OPC_ST_B:
    case OPC_ST_H:
    case OPC_ST_W:
    case OPC_ST_D:
        tcg_out32(s, encode_djsk12_insn(opc, data, addr, imm12));
        break;
    case OPC_FLD_S:
    case OPC_FLD_D:
    case OPC_FST_S:
    case OPC_FST_D:
        tcg_out32(s, encode_fdjsk12_insn(opc, data, addr, imm12));
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg dest,
                       TCGReg base, intptr_t offset)
{
    switch (type) {
    case TCG_TYPE_I32:
        if (dest < TCG_REG_V0) {
            tcg_out_ldst(s, OPC_LD_W, dest, base, offset);
        } else {
            tcg_out_ldst(s, OPC_FLD_S, dest, base, offset);
        }
        break;
    case TCG_TYPE_I64:
    case TCG_TYPE_V64:
        if (dest < TCG_REG_V0) {
            tcg_out_ldst(s, OPC_LD_D, dest, base, offset);
        } else {
            tcg_out_ldst(s, OPC_FLD_D, dest, base, offset);
        }
        break;
    case TCG_TYPE_V128:
        if (-0x800 <= offset && offset <= 0x7ff) {
            tcg_out_opc_vld(s, dest, base, offset);
        } else {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, offset);
            tcg_out_opc_vldx(s, dest, base, TCG_REG_TMP0);
        }
        break;
    case TCG_TYPE_V256:
        if (-0x800 <= offset && offset <= 0x7ff) {
            tcg_out_opc_xvld(s, dest, base, offset);
        } else {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, offset);
            tcg_out_opc_xvldx(s, dest, base, TCG_REG_TMP0);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_st(TCGContext *s, TCGType type, TCGReg src,
                       TCGReg base, intptr_t offset)
{
    switch (type) {
    case TCG_TYPE_I32:
        if (src < TCG_REG_V0) {
            tcg_out_ldst(s, OPC_ST_W, src, base, offset);
        } else {
            tcg_out_ldst(s, OPC_FST_S, src, base, offset);
        }
        break;
    case TCG_TYPE_I64:
    case TCG_TYPE_V64:
        if (src < TCG_REG_V0) {
            tcg_out_ldst(s, OPC_ST_D, src, base, offset);
        } else {
            tcg_out_ldst(s, OPC_FST_D, src, base, offset);
        }
        break;
    case TCG_TYPE_V128:
        if (-0x800 <= offset && offset <= 0x7ff) {
            tcg_out_opc_vst(s, src, base, offset);
        } else {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, offset);
            tcg_out_opc_vstx(s, src, base, TCG_REG_TMP0);
        }
        break;
    case TCG_TYPE_V256:
        if (-0x800 <= offset && offset <= 0x7ff) {
            tcg_out_opc_xvst(s, src, base, offset);
        } else {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, offset);
            tcg_out_opc_xvstx(s, src, base, TCG_REG_TMP0);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                        TCGReg base, intptr_t ofs)
{
    if (val == 0) {
        tcg_out_st(s, type, TCG_REG_ZERO, base, ofs);
        return true;
    }
    return false;
}

/*
 * Load/store helpers for SoftMMU, and qemu_ld/st implementations
 */

static bool tcg_out_goto(TCGContext *s, const tcg_insn_unit *target)
{
    tcg_out_opc_b(s, 0);
    return reloc_br_sd10k16(s->code_ptr - 1, target);
}

static const TCGLdstHelperParam ldst_helper_param = {
    .ntmp = 1, .tmp = { TCG_REG_TMP0 }
};

static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    MemOp opc = get_memop(l->oi);

    /* resolve label address */
    if (!reloc_br_sk16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    tcg_out_ld_helper_args(s, l, &ldst_helper_param);
    tcg_out_call_int(s, qemu_ld_helpers[opc & MO_SIZE], false);
    tcg_out_ld_helper_ret(s, l, false, &ldst_helper_param);
    return tcg_out_goto(s, l->raddr);
}

static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    MemOp opc = get_memop(l->oi);

    /* resolve label address */
    if (!reloc_br_sk16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    tcg_out_st_helper_args(s, l, &ldst_helper_param);
    tcg_out_call_int(s, qemu_st_helpers[opc & MO_SIZE], false);
    return tcg_out_goto(s, l->raddr);
}

typedef struct {
    TCGReg base;
    TCGReg index;
    TCGAtomAlign aa;
} HostAddress;

bool tcg_target_has_memory_bswap(MemOp memop)
{
    return false;
}

/* We expect to use a 12-bit negative offset from ENV.  */
#define MIN_TLB_MASK_TABLE_OFS  -(1 << 11)
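
/*
 * Illustrative note (not from the original source): the mask and table
 * words are fetched with a plain LD.D whose si12 offset field covers
 * [-0x800, 0x7ff], so -(1 << 11) is the lowest ENV offset still
 * reachable without materializing the offset in a scratch register.
 */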

/*
 * For system-mode, perform the TLB load and compare.
 * For user-mode, perform any required alignment tests.
 * In both cases, return a TCGLabelQemuLdst structure if the slow path
 * is required and fill in @h with the host address for the fast path.
 */
static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
                                           TCGReg addr_reg, MemOpIdx oi,
                                           bool is_ld)
{
    TCGType addr_type = s->addr_type;
    TCGLabelQemuLdst *ldst = NULL;
    MemOp opc = get_memop(oi);
    MemOp a_bits;

    h->aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, false);
    a_bits = h->aa.align;

    if (tcg_use_softmmu) {
        unsigned s_bits = opc & MO_SIZE;
        int mem_index = get_mmuidx(oi);
        int fast_ofs = tlb_mask_table_ofs(s, mem_index);
        int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask);
        int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table);

        ldst = new_ldst_label(s);
        ldst->is_ld = is_ld;
        ldst->oi = oi;
        ldst->addr_reg = addr_reg;

        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_AREG0, mask_ofs);
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, table_ofs);

        tcg_out_opc_srli_d(s, TCG_REG_TMP2, addr_reg,
                           s->page_bits - CPU_TLB_ENTRY_BITS);
        tcg_out_opc_and(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0);
        tcg_out_opc_add_d(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1);

        /* Load the tlb comparator and the addend.  */
        QEMU_BUILD_BUG_ON(HOST_BIG_ENDIAN);
        tcg_out_ld(s, addr_type, TCG_REG_TMP0, TCG_REG_TMP2,
                   is_ld ? offsetof(CPUTLBEntry, addr_read)
                         : offsetof(CPUTLBEntry, addr_write));
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2,
                   offsetof(CPUTLBEntry, addend));

        /*
         * For aligned accesses, we check the first byte and include the
         * alignment bits within the address.  For unaligned access, we
         * check that we don't cross pages using the address of the last
         * byte of the access.
         */
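        /*
         * An illustrative example (not from the original source): an 8-byte
         * access with 1-byte guaranteed alignment (a_bits = 0, s_bits = 3)
         * adds s_mask - a_mask = 7, so the masked comparison below is made
         * against the page of the access's last byte; a naturally aligned
         * access keeps its low a_bits, so a misaligned address fails the
         * comparison and takes the slow path.
         */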
        if (a_bits < s_bits) {
            unsigned a_mask = (1u << a_bits) - 1;
            unsigned s_mask = (1u << s_bits) - 1;
            tcg_out_addi(s, addr_type, TCG_REG_TMP1, addr_reg, s_mask - a_mask);
        } else {
            tcg_out_mov(s, addr_type, TCG_REG_TMP1, addr_reg);
        }
        tcg_out_opc_bstrins_d(s, TCG_REG_TMP1, TCG_REG_ZERO,
                              a_bits, s->page_bits - 1);

        /* Compare masked address with the TLB entry.  */
        ldst->label_ptr[0] = s->code_ptr;
        tcg_out_opc_bne(s, TCG_REG_TMP0, TCG_REG_TMP1, 0);

        h->index = TCG_REG_TMP2;
    } else {
        if (a_bits) {
            ldst = new_ldst_label(s);

            ldst->is_ld = is_ld;
            ldst->oi = oi;
            ldst->addr_reg = addr_reg;

            /*
             * Without micro-architecture details, we don't know which of
             * bstrpick or andi is faster, so use bstrpick as it's not
             * constrained by imm field width.  Not that alignments >= 2^12
             * are going to happen any time soon.
             */
            tcg_out_opc_bstrpick_d(s, TCG_REG_TMP1, addr_reg, 0, a_bits - 1);

            ldst->label_ptr[0] = s->code_ptr;
            tcg_out_opc_bne(s, TCG_REG_TMP1, TCG_REG_ZERO, 0);
        }

        h->index = guest_base ? TCG_GUEST_BASE_REG : TCG_REG_ZERO;
    }

    if (addr_type == TCG_TYPE_I32) {
        h->base = TCG_REG_TMP0;
        tcg_out_ext32u(s, h->base, addr_reg);
    } else {
        h->base = addr_reg;
    }

    return ldst;
}

static void tcg_out_qemu_ld_indexed(TCGContext *s, MemOp opc, TCGType type,
                                    TCGReg rd, HostAddress h)
{
    /* Byte swapping is left to middle-end expansion.  */
    tcg_debug_assert((opc & MO_BSWAP) == 0);

    switch (opc & MO_SSIZE) {
    case MO_UB:
        tcg_out_opc_ldx_bu(s, rd, h.base, h.index);
        break;
    case MO_SB:
        tcg_out_opc_ldx_b(s, rd, h.base, h.index);
        break;
    case MO_UW:
        tcg_out_opc_ldx_hu(s, rd, h.base, h.index);
        break;
    case MO_SW:
        tcg_out_opc_ldx_h(s, rd, h.base, h.index);
        break;
    case MO_UL:
        if (type == TCG_TYPE_I64) {
            tcg_out_opc_ldx_wu(s, rd, h.base, h.index);
            break;
        }
        /* fallthrough */
    case MO_SL:
        tcg_out_opc_ldx_w(s, rd, h.base, h.index);
        break;
    case MO_UQ:
        tcg_out_opc_ldx_d(s, rd, h.base, h.index);
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
                            MemOpIdx oi, TCGType data_type)
{
    TCGLabelQemuLdst *ldst;
    HostAddress h;

    ldst = prepare_host_addr(s, &h, addr_reg, oi, true);
    tcg_out_qemu_ld_indexed(s, get_memop(oi), data_type, data_reg, h);

    if (ldst) {
        ldst->type = data_type;
        ldst->datalo_reg = data_reg;
        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
    }
}

static void tcg_out_qemu_st_indexed(TCGContext *s, MemOp opc,
                                    TCGReg rd, HostAddress h)
{
    /* Byte swapping is left to middle-end expansion.  */
    tcg_debug_assert((opc & MO_BSWAP) == 0);

    switch (opc & MO_SIZE) {
    case MO_8:
        tcg_out_opc_stx_b(s, rd, h.base, h.index);
        break;
    case MO_16:
        tcg_out_opc_stx_h(s, rd, h.base, h.index);
        break;
    case MO_32:
        tcg_out_opc_stx_w(s, rd, h.base, h.index);
        break;
    case MO_64:
        tcg_out_opc_stx_d(s, rd, h.base, h.index);
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
                            MemOpIdx oi, TCGType data_type)
{
    TCGLabelQemuLdst *ldst;
    HostAddress h;

    ldst = prepare_host_addr(s, &h, addr_reg, oi, false);
    tcg_out_qemu_st_indexed(s, get_memop(oi), data_reg, h);

    if (ldst) {
        ldst->type = data_type;
        ldst->datalo_reg = data_reg;
        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
    }
}

static void tcg_out_qemu_ldst_i128(TCGContext *s, TCGReg data_lo, TCGReg data_hi,
                                   TCGReg addr_reg, MemOpIdx oi, bool is_ld)
{
    TCGLabelQemuLdst *ldst;
    HostAddress h;

    ldst = prepare_host_addr(s, &h, addr_reg, oi, is_ld);

    if (h.aa.atom == MO_128) {
        /*
         * Use VLDX/VSTX when 128-bit atomicity is required.
         * If the address is aligned to 16 bytes, the 128-bit load/store
         * is atomic.
         */
        if (is_ld) {
            tcg_out_opc_vldx(s, TCG_VEC_TMP0, h.base, h.index);
            tcg_out_opc_vpickve2gr_d(s, data_lo, TCG_VEC_TMP0, 0);
            tcg_out_opc_vpickve2gr_d(s, data_hi, TCG_VEC_TMP0, 1);
        } else {
            tcg_out_opc_vinsgr2vr_d(s, TCG_VEC_TMP0, data_lo, 0);
            tcg_out_opc_vinsgr2vr_d(s, TCG_VEC_TMP0, data_hi, 1);
            tcg_out_opc_vstx(s, TCG_VEC_TMP0, h.base, h.index);
        }
    } else {
        /* Otherwise use a pair of LD/ST. */
        TCGReg base = h.base;
        if (h.index != TCG_REG_ZERO) {
            base = TCG_REG_TMP0;
            tcg_out_opc_add_d(s, base, h.base, h.index);
        }
        if (is_ld) {
            /* The first load must not clobber the base register. */
            tcg_debug_assert(base != data_lo);
            tcg_out_opc_ld_d(s, data_lo, base, 0);
            tcg_out_opc_ld_d(s, data_hi, base, 8);
        } else {
            tcg_out_opc_st_d(s, data_lo, base, 0);
            tcg_out_opc_st_d(s, data_hi, base, 8);
        }
    }

    if (ldst) {
        ldst->type = TCG_TYPE_I128;
        ldst->datalo_reg = data_lo;
        ldst->datahi_reg = data_hi;
        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
    }
}

/*
 * Entry-points
 */

static const tcg_insn_unit *tb_ret_addr;

static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
{
    /* Reuse the zeroing that exists for goto_ptr.  */
    if (a0 == 0) {
        tcg_out_call_int(s, tcg_code_gen_epilogue, true);
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A0, a0);
        tcg_out_call_int(s, tb_ret_addr, true);
    }
}

static void tcg_out_goto_tb(TCGContext *s, int which)
{
    /*
     * Direct branch, or load indirect address, to be patched
     * by tb_target_set_jmp_target.  Check indirect load offset
     * in range early, regardless of direct branch distance,
     * via assert within tcg_out_opc_pcaddu2i.
     */
    uintptr_t i_addr = get_jmp_target_addr(s, which);
    intptr_t i_disp = tcg_pcrel_diff(s, (void *)i_addr);

    set_jmp_insn_offset(s, which);
    tcg_out_opc_pcaddu2i(s, TCG_REG_TMP0, i_disp >> 2);

    /* Finish the load and indirect branch. */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_REG_TMP0, 0);
    tcg_out_opc_jirl(s, TCG_REG_ZERO, TCG_REG_TMP0, 0);
    set_jmp_reset_offset(s, which);
}

void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
                              uintptr_t jmp_rx, uintptr_t jmp_rw)
{
    uintptr_t d_addr = tb->jmp_target_addr[n];
    ptrdiff_t d_disp = (ptrdiff_t)(d_addr - jmp_rx) >> 2;
    tcg_insn_unit insn;

    /* Either directly branch, or load slot address for indirect branch. */
    if (d_disp == sextreg(d_disp, 0, 26)) {
        insn = encode_sd10k16_insn(OPC_B, d_disp);
    } else {
        uintptr_t i_addr = (uintptr_t)&tb->jmp_target_addr[n];
        intptr_t i_disp = i_addr - jmp_rx;
        insn = encode_dsj20_insn(OPC_PCADDU2I, TCG_REG_TMP0, i_disp >> 2);
    }

    qatomic_set((tcg_insn_unit *)jmp_rw, insn);
    flush_idcache_range(jmp_rx, jmp_rw, 4);
}
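
/*
 * Illustrative note (not from the original source): tcg_out_goto_tb() above
 * emits PCADDU2I + LD.D + JIRL.  tb_target_set_jmp_target() only ever
 * rewrites the first of those instructions: either a direct B when the
 * displacement fits the signed 26-bit (+/- 128MiB) field, making the
 * following two instructions dead, or a PCADDU2I pointing TCG_REG_TMP0
 * back at the jump slot so that LD.D + JIRL perform the indirect branch.
 */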
1313
1314
1315static void tgen_add(TCGContext *s, TCGType type,
1316                     TCGReg a0, TCGReg a1, TCGReg a2)
1317{
1318    if (type == TCG_TYPE_I32) {
1319        tcg_out_opc_add_w(s, a0, a1, a2);
1320    } else {
1321        tcg_out_opc_add_d(s, a0, a1, a2);
1322    }
1323}
1324
1325static const TCGOutOpBinary outop_add = {
1326    .base.static_constraint = C_O1_I2(r, r, rJ),
1327    .out_rrr = tgen_add,
1328    .out_rri = tcg_out_addi,
1329};
1330
1331static void tgen_and(TCGContext *s, TCGType type,
1332                     TCGReg a0, TCGReg a1, TCGReg a2)
1333{
1334    tcg_out_opc_and(s, a0, a1, a2);
1335}
1336
1337static void tgen_andi(TCGContext *s, TCGType type,
1338                      TCGReg a0, TCGReg a1, tcg_target_long a2)
1339{
1340    tcg_out_opc_andi(s, a0, a1, a2);
1341}
1342
1343static const TCGOutOpBinary outop_and = {
1344    .base.static_constraint = C_O1_I2(r, r, rU),
1345    .out_rrr = tgen_and,
1346    .out_rri = tgen_andi,
1347};
1348
1349static void tgen_andc(TCGContext *s, TCGType type,
1350                      TCGReg a0, TCGReg a1, TCGReg a2)
1351{
1352    tcg_out_opc_andn(s, a0, a1, a2);
1353}
1354
1355static const TCGOutOpBinary outop_andc = {
1356    .base.static_constraint = C_O1_I2(r, r, r),
1357    .out_rrr = tgen_andc,
1358};
1359
1360static void tgen_clzi(TCGContext *s, TCGType type,
1361                      TCGReg a0, TCGReg a1, tcg_target_long a2)
1362{
1363    /* a2 is constrained to exactly the type width. */
1364    if (type == TCG_TYPE_I32) {
1365        tcg_out_opc_clz_w(s, a0, a1);
1366    } else {
1367        tcg_out_opc_clz_d(s, a0, a1);
1368    }
1369}
1370
1371static void tgen_clz(TCGContext *s, TCGType type,
1372                     TCGReg a0, TCGReg a1, TCGReg a2)
1373{
1374    tgen_clzi(s, type, TCG_REG_TMP0, a1, /* ignored */ 0);
1375    /* a0 = a1 ? REG_TMP0 : a2 */
1376    tcg_out_opc_maskeqz(s, TCG_REG_TMP0, TCG_REG_TMP0, a1);
1377    tcg_out_opc_masknez(s, a0, a2, a1);
1378    tcg_out_opc_or(s, a0, a0, TCG_REG_TMP0);
1379}
1380
1381static const TCGOutOpBinary outop_clz = {
1382    .base.static_constraint = C_O1_I2(r, r, rW),
1383    .out_rrr = tgen_clz,
1384    .out_rri = tgen_clzi,
1385};
1386
1387static const TCGOutOpUnary outop_ctpop = {
1388    .base.static_constraint = C_NotImplemented,
1389};
1390
1391static void tgen_ctzi(TCGContext *s, TCGType type,
1392                      TCGReg a0, TCGReg a1, tcg_target_long a2)
1393{
1394    /* a2 is constrained to exactly the type width. */
1395    if (type == TCG_TYPE_I32) {
1396        tcg_out_opc_ctz_w(s, a0, a1);
1397    } else {
1398        tcg_out_opc_ctz_d(s, a0, a1);
1399    }
1400}
1401
1402static void tgen_ctz(TCGContext *s, TCGType type,
1403                     TCGReg a0, TCGReg a1, TCGReg a2)
1404{
1405    tgen_ctzi(s, type, TCG_REG_TMP0, a1, /* ignored */ 0);
1406    /* a0 = a1 ? REG_TMP0 : a2 */
1407    tcg_out_opc_maskeqz(s, TCG_REG_TMP0, TCG_REG_TMP0, a1);
1408    tcg_out_opc_masknez(s, a0, a2, a1);
1409    tcg_out_opc_or(s, a0, a0, TCG_REG_TMP0);
1410}
1411
1412static const TCGOutOpBinary outop_ctz = {
1413    .base.static_constraint = C_O1_I2(r, r, rW),
1414    .out_rrr = tgen_ctz,
1415    .out_rri = tgen_ctzi,
1416};
1417
1418static void tgen_divs(TCGContext *s, TCGType type,
1419                      TCGReg a0, TCGReg a1, TCGReg a2)
1420{
1421    if (type == TCG_TYPE_I32) {
1422        tcg_out_opc_div_w(s, a0, a1, a2);
1423    } else {
1424        tcg_out_opc_div_d(s, a0, a1, a2);
1425    }
1426}
1427
1428static const TCGOutOpBinary outop_divs = {
1429    .base.static_constraint = C_O1_I2(r, r, r),
1430    .out_rrr = tgen_divs,
1431};
1432
1433static const TCGOutOpDivRem outop_divs2 = {
1434    .base.static_constraint = C_NotImplemented,
1435};
1436
1437static void tgen_divu(TCGContext *s, TCGType type,
1438                      TCGReg a0, TCGReg a1, TCGReg a2)
1439{
1440    if (type == TCG_TYPE_I32) {
1441        tcg_out_opc_div_wu(s, a0, a1, a2);
1442    } else {
1443        tcg_out_opc_div_du(s, a0, a1, a2);
1444    }
1445}
1446
1447static const TCGOutOpBinary outop_divu = {
1448    .base.static_constraint = C_O1_I2(r, r, r),
1449    .out_rrr = tgen_divu,
1450};
1451
1452static const TCGOutOpDivRem outop_divu2 = {
1453    .base.static_constraint = C_NotImplemented,
1454};
1455
1456static const TCGOutOpBinary outop_eqv = {
1457    .base.static_constraint = C_NotImplemented,
1458};
1459
1460static void tgen_mul(TCGContext *s, TCGType type,
1461                     TCGReg a0, TCGReg a1, TCGReg a2)
1462{
1463    if (type == TCG_TYPE_I32) {
1464        tcg_out_opc_mul_w(s, a0, a1, a2);
1465    } else {
1466        tcg_out_opc_mul_d(s, a0, a1, a2);
1467    }
1468}
1469
1470static const TCGOutOpBinary outop_mul = {
1471    .base.static_constraint = C_O1_I2(r, r, r),
1472    .out_rrr = tgen_mul,
1473};
1474
1475static const TCGOutOpMul2 outop_muls2 = {
1476    .base.static_constraint = C_NotImplemented,
1477};
1478
1479static void tgen_mulsh(TCGContext *s, TCGType type,
1480                       TCGReg a0, TCGReg a1, TCGReg a2)
1481{
1482    if (type == TCG_TYPE_I32) {
1483        tcg_out_opc_mulh_w(s, a0, a1, a2);
1484    } else {
1485        tcg_out_opc_mulh_d(s, a0, a1, a2);
1486    }
1487}
1488
1489static const TCGOutOpBinary outop_mulsh = {
1490    .base.static_constraint = C_O1_I2(r, r, r),
1491    .out_rrr = tgen_mulsh,
1492};
1493
1494static const TCGOutOpMul2 outop_mulu2 = {
1495    .base.static_constraint = C_NotImplemented,
1496};
1497
1498static void tgen_muluh(TCGContext *s, TCGType type,
1499                       TCGReg a0, TCGReg a1, TCGReg a2)
1500{
1501    if (type == TCG_TYPE_I32) {
1502        tcg_out_opc_mulh_wu(s, a0, a1, a2);
1503    } else {
1504        tcg_out_opc_mulh_du(s, a0, a1, a2);
1505    }
1506}
1507
1508static const TCGOutOpBinary outop_muluh = {
1509    .base.static_constraint = C_O1_I2(r, r, r),
1510    .out_rrr = tgen_muluh,
1511};
1512
1513static const TCGOutOpBinary outop_nand = {
1514    .base.static_constraint = C_NotImplemented,
1515};
1516
1517static void tgen_nor(TCGContext *s, TCGType type,
1518                      TCGReg a0, TCGReg a1, TCGReg a2)
1519{
1520    tcg_out_opc_nor(s, a0, a1, a2);
1521}
1522
1523static const TCGOutOpBinary outop_nor = {
1524    .base.static_constraint = C_O1_I2(r, r, r),
1525    .out_rrr = tgen_nor,
1526};
1527
1528static void tgen_or(TCGContext *s, TCGType type,
1529                    TCGReg a0, TCGReg a1, TCGReg a2)
1530{
1531    tcg_out_opc_or(s, a0, a1, a2);
1532}
1533
1534static void tgen_ori(TCGContext *s, TCGType type,
1535                     TCGReg a0, TCGReg a1, tcg_target_long a2)
1536{
1537    tcg_out_opc_ori(s, a0, a1, a2);
1538}
1539
1540static const TCGOutOpBinary outop_or = {
1541    .base.static_constraint = C_O1_I2(r, r, rU),
1542    .out_rrr = tgen_or,
1543    .out_rri = tgen_ori,
1544};
1545
1546static void tgen_orc(TCGContext *s, TCGType type,
1547                     TCGReg a0, TCGReg a1, TCGReg a2)
1548{
1549    tcg_out_opc_orn(s, a0, a1, a2);
1550}
1551
1552static const TCGOutOpBinary outop_orc = {
1553    .base.static_constraint = C_O1_I2(r, r, r),
1554    .out_rrr = tgen_orc,
1555};
1556
1557static void tgen_rems(TCGContext *s, TCGType type,
1558                      TCGReg a0, TCGReg a1, TCGReg a2)
1559{
1560    if (type == TCG_TYPE_I32) {
1561        tcg_out_opc_mod_w(s, a0, a1, a2);
1562    } else {
1563        tcg_out_opc_mod_d(s, a0, a1, a2);
1564    }
1565}
1566
1567static const TCGOutOpBinary outop_rems = {
1568    .base.static_constraint = C_O1_I2(r, r, r),
1569    .out_rrr = tgen_rems,
1570};
1571
1572static void tgen_remu(TCGContext *s, TCGType type,
1573                      TCGReg a0, TCGReg a1, TCGReg a2)
1574{
1575    if (type == TCG_TYPE_I32) {
1576        tcg_out_opc_mod_wu(s, a0, a1, a2);
1577    } else {
1578        tcg_out_opc_mod_du(s, a0, a1, a2);
1579    }
1580}
1581
1582static const TCGOutOpBinary outop_remu = {
1583    .base.static_constraint = C_O1_I2(r, r, r),
1584    .out_rrr = tgen_remu,
1585};
1586
1587static const TCGOutOpBinary outop_rotl = {
1588    .base.static_constraint = C_NotImplemented,
1589};
1590
1591static void tgen_rotr(TCGContext *s, TCGType type,
1592                      TCGReg a0, TCGReg a1, TCGReg a2)
1593{
1594    if (type == TCG_TYPE_I32) {
1595        tcg_out_opc_rotr_w(s, a0, a1, a2);
1596    } else {
1597        tcg_out_opc_rotr_d(s, a0, a1, a2);
1598    }
1599}
1600
1601static void tgen_rotri(TCGContext *s, TCGType type,
1602                       TCGReg a0, TCGReg a1, tcg_target_long a2)
1603{
1604    if (type == TCG_TYPE_I32) {
1605        tcg_out_opc_rotri_w(s, a0, a1, a2 & 0x1f);
1606    } else {
1607        tcg_out_opc_rotri_d(s, a0, a1, a2 & 0x3f);
1608    }
1609}
1610
1611static const TCGOutOpBinary outop_rotr = {
1612    .base.static_constraint = C_O1_I2(r, r, ri),
1613    .out_rrr = tgen_rotr,
1614    .out_rri = tgen_rotri,
1615};
1616
1617static void tgen_sar(TCGContext *s, TCGType type,
1618                     TCGReg a0, TCGReg a1, TCGReg a2)
1619{
1620    if (type == TCG_TYPE_I32) {
1621        tcg_out_opc_sra_w(s, a0, a1, a2);
1622    } else {
1623        tcg_out_opc_sra_d(s, a0, a1, a2);
1624    }
1625}
1626
1627static void tgen_sari(TCGContext *s, TCGType type,
1628                      TCGReg a0, TCGReg a1, tcg_target_long a2)
1629{
1630    if (type == TCG_TYPE_I32) {
1631        tcg_out_opc_srai_w(s, a0, a1, a2 & 0x1f);
1632    } else {
1633        tcg_out_opc_srai_d(s, a0, a1, a2 & 0x3f);
1634    }
1635}
1636
1637static const TCGOutOpBinary outop_sar = {
1638    .base.static_constraint = C_O1_I2(r, r, ri),
1639    .out_rrr = tgen_sar,
1640    .out_rri = tgen_sari,
1641};
1642
1643static void tgen_shl(TCGContext *s, TCGType type,
1644                     TCGReg a0, TCGReg a1, TCGReg a2)
1645{
1646    if (type == TCG_TYPE_I32) {
1647        tcg_out_opc_sll_w(s, a0, a1, a2);
1648    } else {
1649        tcg_out_opc_sll_d(s, a0, a1, a2);
1650    }
1651}
1652
1653static void tgen_shli(TCGContext *s, TCGType type,
1654                      TCGReg a0, TCGReg a1, tcg_target_long a2)
1655{
1656    if (type == TCG_TYPE_I32) {
1657        tcg_out_opc_slli_w(s, a0, a1, a2 & 0x1f);
1658    } else {
1659        tcg_out_opc_slli_d(s, a0, a1, a2 & 0x3f);
1660    }
1661}
1662
1663static const TCGOutOpBinary outop_shl = {
1664    .base.static_constraint = C_O1_I2(r, r, ri),
1665    .out_rrr = tgen_shl,
1666    .out_rri = tgen_shli,
1667};
1668
1669static void tgen_shr(TCGContext *s, TCGType type,
1670                     TCGReg a0, TCGReg a1, TCGReg a2)
1671{
1672    if (type == TCG_TYPE_I32) {
1673        tcg_out_opc_srl_w(s, a0, a1, a2);
1674    } else {
1675        tcg_out_opc_srl_d(s, a0, a1, a2);
1676    }
1677}
1678
1679static void tgen_shri(TCGContext *s, TCGType type,
1680                      TCGReg a0, TCGReg a1, tcg_target_long a2)
1681{
1682    if (type == TCG_TYPE_I32) {
1683        tcg_out_opc_srli_w(s, a0, a1, a2 & 0x1f);
1684    } else {
1685        tcg_out_opc_srli_d(s, a0, a1, a2 & 0x3f);
1686    }
1687}
1688
1689static const TCGOutOpBinary outop_shr = {
1690    .base.static_constraint = C_O1_I2(r, r, ri),
1691    .out_rrr = tgen_shr,
1692    .out_rri = tgen_shri,
1693};
1694
1695static void tgen_sub(TCGContext *s, TCGType type,
1696                     TCGReg a0, TCGReg a1, TCGReg a2)
1697{
1698    if (type == TCG_TYPE_I32) {
1699        tcg_out_opc_sub_w(s, a0, a1, a2);
1700    } else {
1701        tcg_out_opc_sub_d(s, a0, a1, a2);
1702    }
1703}
1704
1705static const TCGOutOpSubtract outop_sub = {
1706    .base.static_constraint = C_O1_I2(r, r, r),
1707    .out_rrr = tgen_sub,
1708};
1709
1710static void tgen_xor(TCGContext *s, TCGType type,
1711                     TCGReg a0, TCGReg a1, TCGReg a2)
1712{
1713    tcg_out_opc_xor(s, a0, a1, a2);
1714}
1715
1716static void tgen_xori(TCGContext *s, TCGType type,
1717                      TCGReg a0, TCGReg a1, tcg_target_long a2)
1718{
1719    tcg_out_opc_xori(s, a0, a1, a2);
1720}
1721
1722static const TCGOutOpBinary outop_xor = {
1723    .base.static_constraint = C_O1_I2(r, r, rU),
1724    .out_rrr = tgen_xor,
1725    .out_rri = tgen_xori,
1726};
1727
1728static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
1729{
1730    tgen_sub(s, type, a0, TCG_REG_ZERO, a1);
1731}
1732
1733static const TCGOutOpUnary outop_neg = {
1734    .base.static_constraint = C_O1_I1(r, r),
1735    .out_rr = tgen_neg,
1736};
1737
1738static void tgen_not(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
1739{
1740    tgen_nor(s, type, a0, a1, TCG_REG_ZERO);
1741}
1742
1743static const TCGOutOpUnary outop_not = {
1744    .base.static_constraint = C_O1_I1(r, r),
1745    .out_rr = tgen_not,
1746};
1747
1748
1749static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
1750                       const TCGArg args[TCG_MAX_OP_ARGS],
1751                       const int const_args[TCG_MAX_OP_ARGS])
1752{
1753    TCGArg a0 = args[0];
1754    TCGArg a1 = args[1];
1755    TCGArg a2 = args[2];
1756    TCGArg a3 = args[3];
1757    int c2 = const_args[2];
1758
1759    switch (opc) {
1760    case INDEX_op_mb:
1761        tcg_out_mb(s, a0);
1762        break;
1763
1764    case INDEX_op_goto_ptr:
1765        tcg_out_opc_jirl(s, TCG_REG_ZERO, a0, 0);
1766        break;
1767
1768    case INDEX_op_br:
1769        tcg_out_reloc(s, s->code_ptr, R_LOONGARCH_BR_SD10K16, arg_label(a0),
1770                      0);
1771        tcg_out_opc_b(s, 0);
1772        break;
1773
1774    case INDEX_op_brcond_i32:
1775    case INDEX_op_brcond_i64:
1776        tcg_out_brcond(s, a2, a0, a1, arg_label(args[3]));
1777        break;
1778
1779    case INDEX_op_extrh_i64_i32:
1780        tcg_out_opc_srai_d(s, a0, a1, 32);
1781        break;
1782
1783    case INDEX_op_extract_i32:
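        /* A low-bit extract of up to 12 bits fits andi's 12-bit immediate. */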
1784        if (a2 == 0 && args[3] <= 12) {
1785            tcg_out_opc_andi(s, a0, a1, (1 << args[3]) - 1);
1786        } else {
1787            tcg_out_opc_bstrpick_w(s, a0, a1, a2, a2 + args[3] - 1);
1788        }
1789        break;
1790    case INDEX_op_extract_i64:
1791        if (a2 == 0 && args[3] <= 12) {
1792            tcg_out_opc_andi(s, a0, a1, (1 << args[3]) - 1);
1793        } else {
1794            tcg_out_opc_bstrpick_d(s, a0, a1, a2, a2 + args[3] - 1);
1795        }
1796        break;
1797
1798    case INDEX_op_sextract_i64:
1799        if (a2 + args[3] == 32) {
1800            if (a2 == 0) {
1801                tcg_out_ext32s(s, a0, a1);
1802            } else {
1803                tcg_out_opc_srai_w(s, a0, a1, a2);
1804            }
1805            break;
1806        }
1807        /* FALLTHRU */
1808    case INDEX_op_sextract_i32:
1809        if (a2 == 0 && args[3] == 8) {
1810            tcg_out_ext8s(s, TCG_TYPE_REG, a0, a1);
1811        } else if (a2 == 0 && args[3] == 16) {
1812            tcg_out_ext16s(s, TCG_TYPE_REG, a0, a1);
1813        } else {
1814            g_assert_not_reached();
1815        }
1816        break;
1817
1818    case INDEX_op_deposit_i32:
1819        tcg_out_opc_bstrins_w(s, a0, a2, args[3], args[3] + args[4] - 1);
1820        break;
1821    case INDEX_op_deposit_i64:
1822        tcg_out_opc_bstrins_d(s, a0, a2, args[3], args[3] + args[4] - 1);
1823        break;
1824
1825    case INDEX_op_bswap16_i32:
1826    case INDEX_op_bswap16_i64:
1827        tcg_out_opc_revb_2h(s, a0, a1);
1828        if (a2 & TCG_BSWAP_OS) {
1829            tcg_out_ext16s(s, TCG_TYPE_REG, a0, a0);
1830        } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
1831            tcg_out_ext16u(s, a0, a0);
1832        }
1833        break;
1834
1835    case INDEX_op_bswap32_i32:
1836        /* All 32-bit values are computed sign-extended in the register.  */
1837        a2 = TCG_BSWAP_OS;
1838        /* fallthrough */
1839    case INDEX_op_bswap32_i64:
1840        tcg_out_opc_revb_2w(s, a0, a1);
1841        if (a2 & TCG_BSWAP_OS) {
1842            tcg_out_ext32s(s, a0, a0);
1843        } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
1844            tcg_out_ext32u(s, a0, a0);
1845        }
1846        break;
1847
1848    case INDEX_op_bswap64_i64:
1849        tcg_out_opc_revb_d(s, a0, a1);
1850        break;
1851
1852    case INDEX_op_movcond_i32:
1853    case INDEX_op_movcond_i64:
1854        tcg_out_movcond(s, args[5], a0, a1, a2, c2, args[3], args[4]);
1855        break;
1856
1857    case INDEX_op_ld8s_i32:
1858    case INDEX_op_ld8s_i64:
1859        tcg_out_ldst(s, OPC_LD_B, a0, a1, a2);
1860        break;
1861    case INDEX_op_ld8u_i32:
1862    case INDEX_op_ld8u_i64:
1863        tcg_out_ldst(s, OPC_LD_BU, a0, a1, a2);
1864        break;
1865    case INDEX_op_ld16s_i32:
1866    case INDEX_op_ld16s_i64:
1867        tcg_out_ldst(s, OPC_LD_H, a0, a1, a2);
1868        break;
1869    case INDEX_op_ld16u_i32:
1870    case INDEX_op_ld16u_i64:
1871        tcg_out_ldst(s, OPC_LD_HU, a0, a1, a2);
1872        break;
1873    case INDEX_op_ld_i32:
1874    case INDEX_op_ld32s_i64:
1875        tcg_out_ldst(s, OPC_LD_W, a0, a1, a2);
1876        break;
1877    case INDEX_op_ld32u_i64:
1878        tcg_out_ldst(s, OPC_LD_WU, a0, a1, a2);
1879        break;
1880    case INDEX_op_ld_i64:
1881        tcg_out_ldst(s, OPC_LD_D, a0, a1, a2);
1882        break;
1883
1884    case INDEX_op_st8_i32:
1885    case INDEX_op_st8_i64:
1886        tcg_out_ldst(s, OPC_ST_B, a0, a1, a2);
1887        break;
1888    case INDEX_op_st16_i32:
1889    case INDEX_op_st16_i64:
1890        tcg_out_ldst(s, OPC_ST_H, a0, a1, a2);
1891        break;
1892    case INDEX_op_st_i32:
1893    case INDEX_op_st32_i64:
1894        tcg_out_ldst(s, OPC_ST_W, a0, a1, a2);
1895        break;
1896    case INDEX_op_st_i64:
1897        tcg_out_ldst(s, OPC_ST_D, a0, a1, a2);
1898        break;
1899
1900    case INDEX_op_qemu_ld_i32:
1901        tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32);
1902        break;
1903    case INDEX_op_qemu_ld_i64:
1904        tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64);
1905        break;
1906    case INDEX_op_qemu_ld_i128:
1907        tcg_out_qemu_ldst_i128(s, a0, a1, a2, a3, true);
1908        break;
1909    case INDEX_op_qemu_st_i32:
1910        tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32);
1911        break;
1912    case INDEX_op_qemu_st_i64:
1913        tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64);
1914        break;
1915    case INDEX_op_qemu_st_i128:
1916        tcg_out_qemu_ldst_i128(s, a0, a1, a2, a3, false);
1917        break;
1918
1919    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
1920    case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb.  */
1921    case INDEX_op_goto_tb:  /* Always emitted via tcg_out_goto_tb.  */
1922    case INDEX_op_ext_i32_i64:  /* Always emitted via tcg_reg_alloc_op.  */
1923    case INDEX_op_extu_i32_i64:
1924    case INDEX_op_extrl_i64_i32:
1925    default:
1926        g_assert_not_reached();
1927    }
1928}
1929
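/* Duplicate a general register into every lane of an LSX/LASX vector. */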
1930static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
1931                            TCGReg rd, TCGReg rs)
1932{
1933    static const LoongArchInsn repl_insn[2][4] = {
1934        { OPC_VREPLGR2VR_B, OPC_VREPLGR2VR_H,
1935          OPC_VREPLGR2VR_W, OPC_VREPLGR2VR_D },
1936        { OPC_XVREPLGR2VR_B, OPC_XVREPLGR2VR_H,
1937          OPC_XVREPLGR2VR_W, OPC_XVREPLGR2VR_D },
1938    };
1939    bool lasx = type == TCG_TYPE_V256;
1940
1941    tcg_debug_assert(vece <= MO_64);
1942    tcg_out32(s, encode_vdj_insn(repl_insn[lasx][vece], rd, rs));
1943    return true;
1944}
1945
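/*
 * Duplicate a memory element into every lane via vldrepl/xvldrepl,
 * rebasing the address through TCG_REG_TMP0 when the scaled offset
 * cannot be encoded directly.
 */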
1946static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
1947                             TCGReg r, TCGReg base, intptr_t offset)
1948{
1949    bool lasx = type == TCG_TYPE_V256;
1950
1951    /* Handle offset overflow and scaling (vldrepl.d divides it by 8). */
1952    if (offset < -0x800 || offset > 0x7ff ||
1953        (offset & ((1 << vece) - 1)) != 0) {
1954        tcg_out_addi(s, TCG_TYPE_I64, TCG_REG_TMP0, base, offset);
1955        base = TCG_REG_TMP0;
1956        offset = 0;
1957    }
1958    offset >>= vece;
1959
1960    switch (vece) {
1961    case MO_8:
1962        if (lasx) {
1963            tcg_out_opc_xvldrepl_b(s, r, base, offset);
1964        } else {
1965            tcg_out_opc_vldrepl_b(s, r, base, offset);
1966        }
1967        break;
1968    case MO_16:
1969        if (lasx) {
1970            tcg_out_opc_xvldrepl_h(s, r, base, offset);
1971        } else {
1972            tcg_out_opc_vldrepl_h(s, r, base, offset);
1973        }
1974        break;
1975    case MO_32:
1976        if (lasx) {
1977            tcg_out_opc_xvldrepl_w(s, r, base, offset);
1978        } else {
1979            tcg_out_opc_vldrepl_w(s, r, base, offset);
1980        }
1981        break;
1982    case MO_64:
1983        if (lasx) {
1984            tcg_out_opc_xvldrepl_d(s, r, base, offset);
1985        } else {
1986            tcg_out_opc_vldrepl_d(s, r, base, offset);
1987        }
1988        break;
1989    default:
1990        g_assert_not_reached();
1991    }
1992    return true;
1993}
1994
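/*
 * Duplicate a constant into every lane: use a single vldi/xvldi when the
 * per-element value fits in a signed 10-bit immediate, otherwise build it
 * in TCG_REG_TMP0 and broadcast with tcg_out_dup_vec().
 */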
1995static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
1996                             TCGReg rd, int64_t v64)
1997{
1998    /* Try vldi if imm can fit */
1999    int64_t value = sextract64(v64, 0, 8 << vece);
2000    if (-0x200 <= value && value <= 0x1FF) {
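        /* Immediate layout: vece in bits 11:10, value in bits 9:0. */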
2001        uint32_t imm = (vece << 10) | ((uint32_t)v64 & 0x3FF);
2002
2003        if (type == TCG_TYPE_V256) {
2004            tcg_out_opc_xvldi(s, rd, imm);
2005        } else {
2006            tcg_out_opc_vldi(s, rd, imm);
2007        }
2008        return;
2009    }
2010
2011    /* TODO: vldi patterns selected when bit 12 of the immediate is set. */
2012
2013    tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP0, value);
2014    tcg_out_dup_vec(s, type, vece, rd, TCG_REG_TMP0);
2015}
2016
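/*
 * Emit a vector add or subtract.  Constant operands use the vaddi/vsubi
 * forms (unsigned 5-bit immediate, the sign folded into the choice of add
 * vs. sub); otherwise the three-register forms are used.
 */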
2017static void tcg_out_addsub_vec(TCGContext *s, bool lasx, unsigned vece,
2018                               TCGArg a0, TCGArg a1, TCGArg a2,
2019                               bool a2_is_const, bool is_add)
2020{
2021    static const LoongArchInsn add_vec_insn[2][4] = {
2022        { OPC_VADD_B, OPC_VADD_H, OPC_VADD_W, OPC_VADD_D },
2023        { OPC_XVADD_B, OPC_XVADD_H, OPC_XVADD_W, OPC_XVADD_D },
2024    };
2025    static const LoongArchInsn add_vec_imm_insn[2][4] = {
2026        { OPC_VADDI_BU, OPC_VADDI_HU, OPC_VADDI_WU, OPC_VADDI_DU },
2027        { OPC_XVADDI_BU, OPC_XVADDI_HU, OPC_XVADDI_WU, OPC_XVADDI_DU },
2028    };
2029    static const LoongArchInsn sub_vec_insn[2][4] = {
2030        { OPC_VSUB_B, OPC_VSUB_H, OPC_VSUB_W, OPC_VSUB_D },
2031        { OPC_XVSUB_B, OPC_XVSUB_H, OPC_XVSUB_W, OPC_XVSUB_D },
2032    };
2033    static const LoongArchInsn sub_vec_imm_insn[2][4] = {
2034        { OPC_VSUBI_BU, OPC_VSUBI_HU, OPC_VSUBI_WU, OPC_VSUBI_DU },
2035        { OPC_XVSUBI_BU, OPC_XVSUBI_HU, OPC_XVSUBI_WU, OPC_XVSUBI_DU },
2036    };
2037    LoongArchInsn insn;
2038
2039    if (a2_is_const) {
2040        int64_t value = sextract64(a2, 0, 8 << vece);
2041
2042        if (!is_add) {
2043            value = -value;
2044        }
2045        if (value < 0) {
2046            insn = sub_vec_imm_insn[lasx][vece];
2047            value = -value;
2048        } else {
2049            insn = add_vec_imm_insn[lasx][vece];
2050        }
2051
2052        /* Constraint TCG_CT_CONST_VADD ensures validity. */
2053        tcg_debug_assert(0 <= value && value <= 0x1f);
2054
2055        tcg_out32(s, encode_vdvjuk5_insn(insn, a0, a1, value));
2056        return;
2057    }
2058
2059    if (is_add) {
2060        insn = add_vec_insn[lasx][vece];
2061    } else {
2062        insn = sub_vec_insn[lasx][vece];
2063    }
2064    tcg_out32(s, encode_vdvjvk_insn(insn, a0, a1, a2));
2065}
2066
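/*
 * Emit one vector opcode for LSX (128-bit) or LASX (256-bit); the
 * [lasx][vece] tables below select the concrete instruction variant.
 */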
2067static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
2068                           unsigned vecl, unsigned vece,
2069                           const TCGArg args[TCG_MAX_OP_ARGS],
2070                           const int const_args[TCG_MAX_OP_ARGS])
2071{
2072    TCGType type = vecl + TCG_TYPE_V64;
2073    bool lasx = type == TCG_TYPE_V256;
2074    TCGArg a0, a1, a2, a3;
2075    LoongArchInsn insn;
2076
2077    static const LoongArchInsn cmp_vec_insn[16][2][4] = {
2078        [TCG_COND_EQ] = {
2079            { OPC_VSEQ_B, OPC_VSEQ_H, OPC_VSEQ_W, OPC_VSEQ_D },
2080            { OPC_XVSEQ_B, OPC_XVSEQ_H, OPC_XVSEQ_W, OPC_XVSEQ_D },
2081        },
2082        [TCG_COND_LE] = {
2083            { OPC_VSLE_B, OPC_VSLE_H, OPC_VSLE_W, OPC_VSLE_D },
2084            { OPC_XVSLE_B, OPC_XVSLE_H, OPC_XVSLE_W, OPC_XVSLE_D },
2085        },
2086        [TCG_COND_LEU] = {
2087            { OPC_VSLE_BU, OPC_VSLE_HU, OPC_VSLE_WU, OPC_VSLE_DU },
2088            { OPC_XVSLE_BU, OPC_XVSLE_HU, OPC_XVSLE_WU, OPC_XVSLE_DU },
2089        },
2090        [TCG_COND_LT] = {
2091            { OPC_VSLT_B, OPC_VSLT_H, OPC_VSLT_W, OPC_VSLT_D },
2092            { OPC_XVSLT_B, OPC_XVSLT_H, OPC_XVSLT_W, OPC_XVSLT_D },
2093        },
2094        [TCG_COND_LTU] = {
2095            { OPC_VSLT_BU, OPC_VSLT_HU, OPC_VSLT_WU, OPC_VSLT_DU },
2096            { OPC_XVSLT_BU, OPC_XVSLT_HU, OPC_XVSLT_WU, OPC_XVSLT_DU },
2097        }
2098    };
2099    static const LoongArchInsn cmp_vec_imm_insn[16][2][4] = {
2100        [TCG_COND_EQ] = {
2101            { OPC_VSEQI_B, OPC_VSEQI_H, OPC_VSEQI_W, OPC_VSEQI_D },
2102            { OPC_XVSEQI_B, OPC_XVSEQI_H, OPC_XVSEQI_W, OPC_XVSEQI_D },
2103        },
2104        [TCG_COND_LE] = {
2105            { OPC_VSLEI_B, OPC_VSLEI_H, OPC_VSLEI_W, OPC_VSLEI_D },
2106            { OPC_XVSLEI_B, OPC_XVSLEI_H, OPC_XVSLEI_W, OPC_XVSLEI_D },
2107        },
2108        [TCG_COND_LEU] = {
2109            { OPC_VSLEI_BU, OPC_VSLEI_HU, OPC_VSLEI_WU, OPC_VSLEI_DU },
2110            { OPC_XVSLEI_BU, OPC_XVSLEI_HU, OPC_XVSLEI_WU, OPC_XVSLEI_DU },
2111        },
2112        [TCG_COND_LT] = {
2113            { OPC_VSLTI_B, OPC_VSLTI_H, OPC_VSLTI_W, OPC_VSLTI_D },
2114            { OPC_XVSLTI_B, OPC_XVSLTI_H, OPC_XVSLTI_W, OPC_XVSLTI_D },
2115        },
2116        [TCG_COND_LTU] = {
2117            { OPC_VSLTI_BU, OPC_VSLTI_HU, OPC_VSLTI_WU, OPC_VSLTI_DU },
2118            { OPC_XVSLTI_BU, OPC_XVSLTI_HU, OPC_XVSLTI_WU, OPC_XVSLTI_DU },
2119        }
2120    };
2121    static const LoongArchInsn neg_vec_insn[2][4] = {
2122        { OPC_VNEG_B, OPC_VNEG_H, OPC_VNEG_W, OPC_VNEG_D },
2123        { OPC_XVNEG_B, OPC_XVNEG_H, OPC_XVNEG_W, OPC_XVNEG_D },
2124    };
2125    static const LoongArchInsn mul_vec_insn[2][4] = {
2126        { OPC_VMUL_B, OPC_VMUL_H, OPC_VMUL_W, OPC_VMUL_D },
2127        { OPC_XVMUL_B, OPC_XVMUL_H, OPC_XVMUL_W, OPC_XVMUL_D },
2128    };
2129    static const LoongArchInsn smin_vec_insn[2][4] = {
2130        { OPC_VMIN_B, OPC_VMIN_H, OPC_VMIN_W, OPC_VMIN_D },
2131        { OPC_XVMIN_B, OPC_XVMIN_H, OPC_XVMIN_W, OPC_XVMIN_D },
2132    };
2133    static const LoongArchInsn umin_vec_insn[2][4] = {
2134        { OPC_VMIN_BU, OPC_VMIN_HU, OPC_VMIN_WU, OPC_VMIN_DU },
2135        { OPC_XVMIN_BU, OPC_XVMIN_HU, OPC_XVMIN_WU, OPC_XVMIN_DU },
2136    };
2137    static const LoongArchInsn smax_vec_insn[2][4] = {
2138        { OPC_VMAX_B, OPC_VMAX_H, OPC_VMAX_W, OPC_VMAX_D },
2139        { OPC_XVMAX_B, OPC_XVMAX_H, OPC_XVMAX_W, OPC_XVMAX_D },
2140    };
2141    static const LoongArchInsn umax_vec_insn[2][4] = {
2142        { OPC_VMAX_BU, OPC_VMAX_HU, OPC_VMAX_WU, OPC_VMAX_DU },
2143        { OPC_XVMAX_BU, OPC_XVMAX_HU, OPC_XVMAX_WU, OPC_XVMAX_DU },
2144    };
2145    static const LoongArchInsn ssadd_vec_insn[2][4] = {
2146        { OPC_VSADD_B, OPC_VSADD_H, OPC_VSADD_W, OPC_VSADD_D },
2147        { OPC_XVSADD_B, OPC_XVSADD_H, OPC_XVSADD_W, OPC_XVSADD_D },
2148    };
2149    static const LoongArchInsn usadd_vec_insn[2][4] = {
2150        { OPC_VSADD_BU, OPC_VSADD_HU, OPC_VSADD_WU, OPC_VSADD_DU },
2151        { OPC_XVSADD_BU, OPC_XVSADD_HU, OPC_XVSADD_WU, OPC_XVSADD_DU },
2152    };
2153    static const LoongArchInsn sssub_vec_insn[2][4] = {
2154        { OPC_VSSUB_B, OPC_VSSUB_H, OPC_VSSUB_W, OPC_VSSUB_D },
2155        { OPC_XVSSUB_B, OPC_XVSSUB_H, OPC_XVSSUB_W, OPC_XVSSUB_D },
2156    };
2157    static const LoongArchInsn ussub_vec_insn[2][4] = {
2158        { OPC_VSSUB_BU, OPC_VSSUB_HU, OPC_VSSUB_WU, OPC_VSSUB_DU },
2159        { OPC_XVSSUB_BU, OPC_XVSSUB_HU, OPC_XVSSUB_WU, OPC_XVSSUB_DU },
2160    };
2161    static const LoongArchInsn shlv_vec_insn[2][4] = {
2162        { OPC_VSLL_B, OPC_VSLL_H, OPC_VSLL_W, OPC_VSLL_D },
2163        { OPC_XVSLL_B, OPC_XVSLL_H, OPC_XVSLL_W, OPC_XVSLL_D },
2164    };
2165    static const LoongArchInsn shrv_vec_insn[2][4] = {
2166        { OPC_VSRL_B, OPC_VSRL_H, OPC_VSRL_W, OPC_VSRL_D },
2167        { OPC_XVSRL_B, OPC_XVSRL_H, OPC_XVSRL_W, OPC_XVSRL_D },
2168    };
2169    static const LoongArchInsn sarv_vec_insn[2][4] = {
2170        { OPC_VSRA_B, OPC_VSRA_H, OPC_VSRA_W, OPC_VSRA_D },
2171        { OPC_XVSRA_B, OPC_XVSRA_H, OPC_XVSRA_W, OPC_XVSRA_D },
2172    };
2173    static const LoongArchInsn shli_vec_insn[2][4] = {
2174        { OPC_VSLLI_B, OPC_VSLLI_H, OPC_VSLLI_W, OPC_VSLLI_D },
2175        { OPC_XVSLLI_B, OPC_XVSLLI_H, OPC_XVSLLI_W, OPC_XVSLLI_D },
2176    };
2177    static const LoongArchInsn shri_vec_insn[2][4] = {
2178        { OPC_VSRLI_B, OPC_VSRLI_H, OPC_VSRLI_W, OPC_VSRLI_D },
2179        { OPC_XVSRLI_B, OPC_XVSRLI_H, OPC_XVSRLI_W, OPC_XVSRLI_D },
2180    };
2181    static const LoongArchInsn sari_vec_insn[2][4] = {
2182        { OPC_VSRAI_B, OPC_VSRAI_H, OPC_VSRAI_W, OPC_VSRAI_D },
2183        { OPC_XVSRAI_B, OPC_XVSRAI_H, OPC_XVSRAI_W, OPC_XVSRAI_D },
2184    };
2185    static const LoongArchInsn rotrv_vec_insn[2][4] = {
2186        { OPC_VROTR_B, OPC_VROTR_H, OPC_VROTR_W, OPC_VROTR_D },
2187        { OPC_XVROTR_B, OPC_XVROTR_H, OPC_XVROTR_W, OPC_XVROTR_D },
2188    };
2189    static const LoongArchInsn rotri_vec_insn[2][4] = {
2190        { OPC_VROTRI_B, OPC_VROTRI_H, OPC_VROTRI_W, OPC_VROTRI_D },
2191        { OPC_XVROTRI_B, OPC_XVROTRI_H, OPC_XVROTRI_W, OPC_XVROTRI_D },
2192    };
2193
2194    a0 = args[0];
2195    a1 = args[1];
2196    a2 = args[2];
2197    a3 = args[3];
2198
2199    switch (opc) {
2200    case INDEX_op_st_vec:
2201        tcg_out_st(s, type, a0, a1, a2);
2202        break;
2203    case INDEX_op_ld_vec:
2204        tcg_out_ld(s, type, a0, a1, a2);
2205        break;
2206    case INDEX_op_and_vec:
2207        insn = lasx ? OPC_XVAND_V : OPC_VAND_V;
2208        goto vdvjvk;
2209    case INDEX_op_andc_vec:
2210        /*
2211         * vandn vd, vj, vk: vd = vk & ~vj
2212         * andc_vec vd, vj, vk: vd = vj & ~vk
2213         * vj and vk are swapped
2214         */
2215        a1 = a2;
2216        a2 = args[1];
2217        insn = lasx ? OPC_XVANDN_V : OPC_VANDN_V;
2218        goto vdvjvk;
2219    case INDEX_op_or_vec:
2220        insn = lasx ? OPC_XVOR_V : OPC_VOR_V;
2221        goto vdvjvk;
2222    case INDEX_op_orc_vec:
2223        insn = lasx ? OPC_XVORN_V : OPC_VORN_V;
2224        goto vdvjvk;
2225    case INDEX_op_xor_vec:
2226        insn = lasx ? OPC_XVXOR_V : OPC_VXOR_V;
2227        goto vdvjvk;
2228    case INDEX_op_not_vec:
2229        a2 = a1;
2230        /* fall through */
2231    case INDEX_op_nor_vec:
2232        insn = lasx ? OPC_XVNOR_V : OPC_VNOR_V;
2233        goto vdvjvk;
2234    case INDEX_op_cmp_vec:
2235        {
2236            TCGCond cond = args[3];
2237
2238            if (const_args[2]) {
2239                /*
2240                 * cmp_vec dest, src, value
2241                 * Try vseqi/vslei/vslti
2242                 */
2243                int64_t value = sextract64(a2, 0, 8 << vece);
2244                switch (cond) {
2245                case TCG_COND_EQ:
2246                case TCG_COND_LE:
2247                case TCG_COND_LT:
2248                    insn = cmp_vec_imm_insn[cond][lasx][vece];
2249                    tcg_out32(s, encode_vdvjsk5_insn(insn, a0, a1, value));
2250                    break;
2251                case TCG_COND_LEU:
2252                case TCG_COND_LTU:
2253                    insn = cmp_vec_imm_insn[cond][lasx][vece];
2254                    tcg_out32(s, encode_vdvjuk5_insn(insn, a0, a1, value));
2255                    break;
2256                default:
2257                    g_assert_not_reached();
2258                }
2259                break;
2260            }
2261
2262            insn = cmp_vec_insn[cond][lasx][vece];
2263            if (insn == 0) {
2264                TCGArg t;
2265                t = a1, a1 = a2, a2 = t;
2266                cond = tcg_swap_cond(cond);
2267                insn = cmp_vec_insn[cond][lasx][vece];
2268                tcg_debug_assert(insn != 0);
2269            }
2270        }
2271        goto vdvjvk;
2272    case INDEX_op_add_vec:
2273        tcg_out_addsub_vec(s, lasx, vece, a0, a1, a2, const_args[2], true);
2274        break;
2275    case INDEX_op_sub_vec:
2276        tcg_out_addsub_vec(s, lasx, vece, a0, a1, a2, const_args[2], false);
2277        break;
2278    case INDEX_op_neg_vec:
2279        tcg_out32(s, encode_vdvj_insn(neg_vec_insn[lasx][vece], a0, a1));
2280        break;
2281    case INDEX_op_mul_vec:
2282        insn = mul_vec_insn[lasx][vece];
2283        goto vdvjvk;
2284    case INDEX_op_smin_vec:
2285        insn = smin_vec_insn[lasx][vece];
2286        goto vdvjvk;
2287    case INDEX_op_smax_vec:
2288        insn = smax_vec_insn[lasx][vece];
2289        goto vdvjvk;
2290    case INDEX_op_umin_vec:
2291        insn = umin_vec_insn[lasx][vece];
2292        goto vdvjvk;
2293    case INDEX_op_umax_vec:
2294        insn = umax_vec_insn[lasx][vece];
2295        goto vdvjvk;
2296    case INDEX_op_ssadd_vec:
2297        insn = ssadd_vec_insn[lasx][vece];
2298        goto vdvjvk;
2299    case INDEX_op_usadd_vec:
2300        insn = usadd_vec_insn[lasx][vece];
2301        goto vdvjvk;
2302    case INDEX_op_sssub_vec:
2303        insn = sssub_vec_insn[lasx][vece];
2304        goto vdvjvk;
2305    case INDEX_op_ussub_vec:
2306        insn = ussub_vec_insn[lasx][vece];
2307        goto vdvjvk;
2308    case INDEX_op_shlv_vec:
2309        insn = shlv_vec_insn[lasx][vece];
2310        goto vdvjvk;
2311    case INDEX_op_shrv_vec:
2312        insn = shrv_vec_insn[lasx][vece];
2313        goto vdvjvk;
2314    case INDEX_op_sarv_vec:
2315        insn = sarv_vec_insn[lasx][vece];
2316        goto vdvjvk;
2317    case INDEX_op_rotlv_vec:
2318        /* rotlv_vec a1, a2 = rotrv_vec a1, -a2 */
2319        tcg_out32(s, encode_vdvj_insn(neg_vec_insn[lasx][vece],
2320                                      TCG_VEC_TMP0, a2));
2321        a2 = TCG_VEC_TMP0;
2322        /* fall through */
2323    case INDEX_op_rotrv_vec:
2324        insn = rotrv_vec_insn[lasx][vece];
2325        goto vdvjvk;
2326    case INDEX_op_shli_vec:
2327        insn = shli_vec_insn[lasx][vece];
2328        goto vdvjukN;
2329    case INDEX_op_shri_vec:
2330        insn = shri_vec_insn[lasx][vece];
2331        goto vdvjukN;
2332    case INDEX_op_sari_vec:
2333        insn = sari_vec_insn[lasx][vece];
2334        goto vdvjukN;
2335    case INDEX_op_rotli_vec:
2336        /* rotli_vec a1, a2 = rotri_vec a1, -a2 */
2337        a2 = extract32(-a2, 0, 3 + vece);
2338        insn = rotri_vec_insn[lasx][vece];
2339        goto vdvjukN;
2340    case INDEX_op_bitsel_vec:
2341        /* vbitsel vd, vj, vk, va = bitsel_vec vd, va, vk, vj */
2342        if (lasx) {
2343            tcg_out_opc_xvbitsel_v(s, a0, a3, a2, a1);
2344        } else {
2345            tcg_out_opc_vbitsel_v(s, a0, a3, a2, a1);
2346        }
2347        break;
2348    case INDEX_op_dupm_vec:
2349        tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
2350        break;
2351    default:
2352        g_assert_not_reached();
2353    vdvjvk:
2354        tcg_out32(s, encode_vdvjvk_insn(insn, a0, a1, a2));
2355        break;
2356    vdvjukN:
2357        switch (vece) {
2358        case MO_8:
2359            tcg_out32(s, encode_vdvjuk3_insn(insn, a0, a1, a2));
2360            break;
2361        case MO_16:
2362            tcg_out32(s, encode_vdvjuk4_insn(insn, a0, a1, a2));
2363            break;
2364        case MO_32:
2365            tcg_out32(s, encode_vdvjuk5_insn(insn, a0, a1, a2));
2366            break;
2367        case MO_64:
2368            tcg_out32(s, encode_vdvjuk6_insn(insn, a0, a1, a2));
2369            break;
2370        default:
2371            g_assert_not_reached();
2372        }
2373        break;
2374    }
2375}
2376
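/*
 * Report which vector opcodes this backend emits directly; anything
 * returning 0 here is expanded by the generic vector code instead.
 */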
2377int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
2378{
2379    switch (opc) {
2380    case INDEX_op_ld_vec:
2381    case INDEX_op_st_vec:
2382    case INDEX_op_dup_vec:
2383    case INDEX_op_dupm_vec:
2384    case INDEX_op_cmp_vec:
2385    case INDEX_op_add_vec:
2386    case INDEX_op_sub_vec:
2387    case INDEX_op_and_vec:
2388    case INDEX_op_andc_vec:
2389    case INDEX_op_or_vec:
2390    case INDEX_op_orc_vec:
2391    case INDEX_op_xor_vec:
2392    case INDEX_op_nor_vec:
2393    case INDEX_op_not_vec:
2394    case INDEX_op_neg_vec:
2395    case INDEX_op_mul_vec:
2396    case INDEX_op_smin_vec:
2397    case INDEX_op_smax_vec:
2398    case INDEX_op_umin_vec:
2399    case INDEX_op_umax_vec:
2400    case INDEX_op_ssadd_vec:
2401    case INDEX_op_usadd_vec:
2402    case INDEX_op_sssub_vec:
2403    case INDEX_op_ussub_vec:
2404    case INDEX_op_shlv_vec:
2405    case INDEX_op_shrv_vec:
2406    case INDEX_op_sarv_vec:
2407    case INDEX_op_bitsel_vec:
2408        return 1;
2409    default:
2410        return 0;
2411    }
2412}
2413
2414void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
2415                       TCGArg a0, ...)
2416{
2417    g_assert_not_reached();
2418}
2419
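/* Return the operand constraint set for each opcode this backend emits. */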
2420static TCGConstraintSetIndex
2421tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
2422{
2423    switch (op) {
2424    case INDEX_op_goto_ptr:
2425        return C_O0_I1(r);
2426
2427    case INDEX_op_st8_i32:
2428    case INDEX_op_st8_i64:
2429    case INDEX_op_st16_i32:
2430    case INDEX_op_st16_i64:
2431    case INDEX_op_st32_i64:
2432    case INDEX_op_st_i32:
2433    case INDEX_op_st_i64:
2434    case INDEX_op_qemu_st_i32:
2435    case INDEX_op_qemu_st_i64:
2436        return C_O0_I2(rz, r);
2437
2438    case INDEX_op_qemu_ld_i128:
2439        return C_N2_I1(r, r, r);
2440
2441    case INDEX_op_qemu_st_i128:
2442        return C_O0_I3(r, r, r);
2443
2444    case INDEX_op_brcond_i32:
2445    case INDEX_op_brcond_i64:
2446        return C_O0_I2(rz, rz);
2447
2448    case INDEX_op_extu_i32_i64:
2449    case INDEX_op_extrl_i64_i32:
2450    case INDEX_op_extrh_i64_i32:
2451    case INDEX_op_ext_i32_i64:
2452    case INDEX_op_extract_i32:
2453    case INDEX_op_extract_i64:
2454    case INDEX_op_sextract_i32:
2455    case INDEX_op_sextract_i64:
2456    case INDEX_op_bswap16_i32:
2457    case INDEX_op_bswap16_i64:
2458    case INDEX_op_bswap32_i32:
2459    case INDEX_op_bswap32_i64:
2460    case INDEX_op_bswap64_i64:
2461    case INDEX_op_ld8s_i32:
2462    case INDEX_op_ld8s_i64:
2463    case INDEX_op_ld8u_i32:
2464    case INDEX_op_ld8u_i64:
2465    case INDEX_op_ld16s_i32:
2466    case INDEX_op_ld16s_i64:
2467    case INDEX_op_ld16u_i32:
2468    case INDEX_op_ld16u_i64:
2469    case INDEX_op_ld32s_i64:
2470    case INDEX_op_ld32u_i64:
2471    case INDEX_op_ld_i32:
2472    case INDEX_op_ld_i64:
2473    case INDEX_op_qemu_ld_i32:
2474    case INDEX_op_qemu_ld_i64:
2475        return C_O1_I1(r, r);
2476
2477    case INDEX_op_deposit_i32:
2478    case INDEX_op_deposit_i64:
2479        /* Must deposit into the same register as input */
2480        return C_O1_I2(r, 0, rz);
2481
2482    case INDEX_op_movcond_i32:
2483    case INDEX_op_movcond_i64:
2484        return C_O1_I4(r, rz, rJ, rz, rz);
2485
2486    case INDEX_op_ld_vec:
2487    case INDEX_op_dupm_vec:
2488    case INDEX_op_dup_vec:
2489        return C_O1_I1(w, r);
2490
2491    case INDEX_op_st_vec:
2492        return C_O0_I2(w, r);
2493
2494    case INDEX_op_cmp_vec:
2495        return C_O1_I2(w, w, wM);
2496
2497    case INDEX_op_add_vec:
2498    case INDEX_op_sub_vec:
2499        return C_O1_I2(w, w, wA);
2500
2501    case INDEX_op_and_vec:
2502    case INDEX_op_andc_vec:
2503    case INDEX_op_or_vec:
2504    case INDEX_op_orc_vec:
2505    case INDEX_op_xor_vec:
2506    case INDEX_op_nor_vec:
2507    case INDEX_op_mul_vec:
2508    case INDEX_op_smin_vec:
2509    case INDEX_op_smax_vec:
2510    case INDEX_op_umin_vec:
2511    case INDEX_op_umax_vec:
2512    case INDEX_op_ssadd_vec:
2513    case INDEX_op_usadd_vec:
2514    case INDEX_op_sssub_vec:
2515    case INDEX_op_ussub_vec:
2516    case INDEX_op_shlv_vec:
2517    case INDEX_op_shrv_vec:
2518    case INDEX_op_sarv_vec:
2519    case INDEX_op_rotrv_vec:
2520    case INDEX_op_rotlv_vec:
2521        return C_O1_I2(w, w, w);
2522
2523    case INDEX_op_not_vec:
2524    case INDEX_op_neg_vec:
2525    case INDEX_op_shli_vec:
2526    case INDEX_op_shri_vec:
2527    case INDEX_op_sari_vec:
2528    case INDEX_op_rotli_vec:
2529        return C_O1_I1(w, w);
2530
2531    case INDEX_op_bitsel_vec:
2532        return C_O1_I3(w, w, w, w);
2533
2534    default:
2535        return C_NotImplemented;
2536    }
2537}
2538
2539static const int tcg_target_callee_save_regs[] = {
2540    TCG_REG_S0,     /* used for the global env (TCG_AREG0) */
2541    TCG_REG_S1,
2542    TCG_REG_S2,
2543    TCG_REG_S3,
2544    TCG_REG_S4,
2545    TCG_REG_S5,
2546    TCG_REG_S6,
2547    TCG_REG_S7,
2548    TCG_REG_S8,
2549    TCG_REG_S9,
2550    TCG_REG_RA,     /* should be last for ABI compliance */
2551};
2552
2553/* Stack frame parameters.  */
2554#define REG_SIZE   (TCG_TARGET_REG_BITS / 8)
2555#define SAVE_SIZE  ((int)ARRAY_SIZE(tcg_target_callee_save_regs) * REG_SIZE)
2556#define TEMP_SIZE  (CPU_TEMP_BUF_NLONGS * (int)sizeof(long))
2557#define FRAME_SIZE ((TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE + SAVE_SIZE \
2558                     + TCG_TARGET_STACK_ALIGN - 1) \
2559                    & -TCG_TARGET_STACK_ALIGN)
2560#define SAVE_OFS   (TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE)
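/*
 * Resulting frame layout, from the stack pointer upwards:
 *   [0, TCG_STATIC_CALL_ARGS_SIZE)       outgoing call arguments
 *   [..., SAVE_OFS)                      CPU temp buffer (tcg_set_frame)
 *   [SAVE_OFS, SAVE_OFS + SAVE_SIZE)     callee-saved registers, ra last
 */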
2561
2562/* We're expecting to be able to use an immediate for frame allocation.  */
2563QEMU_BUILD_BUG_ON(FRAME_SIZE > 0x7ff);
2564
2565/* Generate global QEMU prologue and epilogue code */
2566static void tcg_target_qemu_prologue(TCGContext *s)
2567{
2568    int i;
2569
2570    tcg_set_frame(s, TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE, TEMP_SIZE);
2571
2572    /* TB prologue */
2573    tcg_out_opc_addi_d(s, TCG_REG_SP, TCG_REG_SP, -FRAME_SIZE);
2574    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
2575        tcg_out_st(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
2576                   TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
2577    }
2578
2579    if (!tcg_use_softmmu && guest_base) {
2580        tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base);
2581        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
2582    }
2583
2584    /* Call generated code */
2585    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
2586    tcg_out_opc_jirl(s, TCG_REG_ZERO, tcg_target_call_iarg_regs[1], 0);
2587
2588    /* Return path for goto_ptr. Set return value to 0 */
2589    tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
2590    tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_A0, TCG_REG_ZERO);
2591
2592    /* TB epilogue */
2593    tb_ret_addr = tcg_splitwx_to_rx(s->code_ptr);
2594    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
2595        tcg_out_ld(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
2596                   TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
2597    }
2598
2599    tcg_out_opc_addi_d(s, TCG_REG_SP, TCG_REG_SP, FRAME_SIZE);
2600    tcg_out_opc_jirl(s, TCG_REG_ZERO, TCG_REG_RA, 0);
2601}
2602
2603static void tcg_out_tb_start(TCGContext *s)
2604{
2605    /* nothing to do */
2606}
2607
2608static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
2609{
2610    for (int i = 0; i < count; ++i) {
2611        /* Canonical nop is andi r0,r0,0 */
2612        p[i] = OPC_ANDI;
2613    }
2614}
2615
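/*
 * One-time backend setup: require unaligned-access support, then
 * populate the available, call-clobbered and reserved register sets
 * (vector registers only when LSX/LASX is present).
 */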
2616static void tcg_target_init(TCGContext *s)
2617{
2618    unsigned long hwcap = qemu_getauxval(AT_HWCAP);
2619
2620    /* Server and desktop class cpus have UAL; embedded cpus do not. */
2621    if (!(hwcap & HWCAP_LOONGARCH_UAL)) {
2622        error_report("TCG: unaligned access support required; exiting");
2623        exit(EXIT_FAILURE);
2624    }
2625
2626    tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS;
2627    tcg_target_available_regs[TCG_TYPE_I64] = ALL_GENERAL_REGS;
2628
2629    tcg_target_call_clobber_regs = ALL_GENERAL_REGS | ALL_VECTOR_REGS;
2630    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S0);
2631    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S1);
2632    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S2);
2633    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S3);
2634    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S4);
2635    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S5);
2636    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S6);
2637    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S7);
2638    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S8);
2639    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S9);
2640
2641    if (cpuinfo & CPUINFO_LSX) {
2642        tcg_target_available_regs[TCG_TYPE_V64] = ALL_VECTOR_REGS;
2643        tcg_target_available_regs[TCG_TYPE_V128] = ALL_VECTOR_REGS;
2644        if (cpuinfo & CPUINFO_LASX) {
2645            tcg_target_available_regs[TCG_TYPE_V256] = ALL_VECTOR_REGS;
2646        }
2647        tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V24);
2648        tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V25);
2649        tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V26);
2650        tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V27);
2651        tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V28);
2652        tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V29);
2653        tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V30);
2654        tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V31);
2655    }
2656
2657    s->reserved_regs = 0;
2658    tcg_regset_set_reg(s->reserved_regs, TCG_REG_ZERO);
2659    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP0);
2660    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1);
2661    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP2);
2662    tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP);
2663    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TP);
2664    tcg_regset_set_reg(s->reserved_regs, TCG_REG_RESERVED);
2665    tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP0);
2666}
2667
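/*
 * DWARF CFI describing the frame built by tcg_target_qemu_prologue(),
 * registered with debuggers via tcg_register_jit().
 */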
2668typedef struct {
2669    DebugFrameHeader h;
2670    uint8_t fde_def_cfa[4];
2671    uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2];
2672} DebugFrame;
2673
2674#define ELF_HOST_MACHINE EM_LOONGARCH
2675
2676static const DebugFrame debug_frame = {
2677    .h.cie.len = sizeof(DebugFrameCIE) - 4, /* length after .len member */
2678    .h.cie.id = -1,
2679    .h.cie.version = 1,
2680    .h.cie.code_align = 1,
2681    .h.cie.data_align = -(TCG_TARGET_REG_BITS / 8) & 0x7f, /* sleb128 */
2682    .h.cie.return_column = TCG_REG_RA,
2683
2684    /* Total FDE size does not include the "len" member.  */
2685    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),
2686
2687    .fde_def_cfa = {
2688        12, TCG_REG_SP,                 /* DW_CFA_def_cfa sp, ...  */
2689        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
2690        (FRAME_SIZE >> 7)
2691    },
2692    .fde_reg_ofs = {
2693        0x80 + 23, 11,                  /* DW_CFA_offset, s0, -88 */
2694        0x80 + 24, 10,                  /* DW_CFA_offset, s1, -80 */
2695        0x80 + 25, 9,                   /* DW_CFA_offset, s2, -72 */
2696        0x80 + 26, 8,                   /* DW_CFA_offset, s3, -64 */
2697        0x80 + 27, 7,                   /* DW_CFA_offset, s4, -56 */
2698        0x80 + 28, 6,                   /* DW_CFA_offset, s5, -48 */
2699        0x80 + 29, 5,                   /* DW_CFA_offset, s6, -40 */
2700        0x80 + 30, 4,                   /* DW_CFA_offset, s7, -32 */
2701        0x80 + 31, 3,                   /* DW_CFA_offset, s8, -24 */
2702        0x80 + 22, 2,                   /* DW_CFA_offset, s9, -16 */
2703        0x80 + 1 , 1,                   /* DW_CFA_offset, ra, -8 */
2704    }
2705};
2706
2707void tcg_register_jit(const void *buf, size_t buf_size)
2708{
2709    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
2710}
2711