xref: /openbmc/qemu/tcg/loongarch64/tcg-target.c.inc (revision 003d35ad6c612d13ebf0a78f828b0c3ee4f44e3d)
1/*
2 * Tiny Code Generator for QEMU
3 *
4 * Copyright (c) 2021 WANG Xuerui <git@xen0n.name>
5 *
6 * Based on tcg/riscv/tcg-target.c.inc
7 *
8 * Copyright (c) 2018 SiFive, Inc
9 * Copyright (c) 2008-2009 Arnaud Patard <arnaud.patard@rtp-net.org>
10 * Copyright (c) 2009 Aurelien Jarno <aurelien@aurel32.net>
11 * Copyright (c) 2008 Fabrice Bellard
12 *
13 * Permission is hereby granted, free of charge, to any person obtaining a copy
14 * of this software and associated documentation files (the "Software"), to deal
15 * in the Software without restriction, including without limitation the rights
16 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
17 * copies of the Software, and to permit persons to whom the Software is
18 * furnished to do so, subject to the following conditions:
19 *
20 * The above copyright notice and this permission notice shall be included in
21 * all copies or substantial portions of the Software.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
26 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
28 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
29 * THE SOFTWARE.
30 */
31
32#include <asm/hwcap.h>
33
34/* used for function call generation */
35#define TCG_REG_CALL_STACK              TCG_REG_SP
36#define TCG_TARGET_STACK_ALIGN          16
37#define TCG_TARGET_CALL_STACK_OFFSET    0
38#define TCG_TARGET_CALL_ARG_I32         TCG_CALL_ARG_NORMAL
39#define TCG_TARGET_CALL_ARG_I64         TCG_CALL_ARG_NORMAL
40#define TCG_TARGET_CALL_ARG_I128        TCG_CALL_ARG_NORMAL
41#define TCG_TARGET_CALL_RET_I128        TCG_CALL_RET_NORMAL
42
43#ifdef CONFIG_DEBUG_TCG
44static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
45    "zero",
46    "ra",
47    "tp",
48    "sp",
49    "a0",
50    "a1",
51    "a2",
52    "a3",
53    "a4",
54    "a5",
55    "a6",
56    "a7",
57    "t0",
58    "t1",
59    "t2",
60    "t3",
61    "t4",
62    "t5",
63    "t6",
64    "t7",
65    "t8",
66    "r21", /* reserved in the LP64* ABI, hence no ABI name */
67    "s9",
68    "s0",
69    "s1",
70    "s2",
71    "s3",
72    "s4",
73    "s5",
74    "s6",
75    "s7",
76    "s8",
77    "vr0",
78    "vr1",
79    "vr2",
80    "vr3",
81    "vr4",
82    "vr5",
83    "vr6",
84    "vr7",
85    "vr8",
86    "vr9",
87    "vr10",
88    "vr11",
89    "vr12",
90    "vr13",
91    "vr14",
92    "vr15",
93    "vr16",
94    "vr17",
95    "vr18",
96    "vr19",
97    "vr20",
98    "vr21",
99    "vr22",
100    "vr23",
101    "vr24",
102    "vr25",
103    "vr26",
104    "vr27",
105    "vr28",
106    "vr29",
107    "vr30",
108    "vr31",
109};
110#endif
111
112static const int tcg_target_reg_alloc_order[] = {
113    /* Registers preserved across calls */
114    /* TCG_REG_S0 reserved for TCG_AREG0 */
115    TCG_REG_S1,
116    TCG_REG_S2,
117    TCG_REG_S3,
118    TCG_REG_S4,
119    TCG_REG_S5,
120    TCG_REG_S6,
121    TCG_REG_S7,
122    TCG_REG_S8,
123    TCG_REG_S9,
124
125    /* Registers (potentially) clobbered across calls */
126    TCG_REG_T0,
127    TCG_REG_T1,
128    TCG_REG_T2,
129    TCG_REG_T3,
130    TCG_REG_T4,
131    TCG_REG_T5,
132    TCG_REG_T6,
133    TCG_REG_T7,
134    TCG_REG_T8,
135
136    /* Argument registers, opposite order of allocation.  */
137    TCG_REG_A7,
138    TCG_REG_A6,
139    TCG_REG_A5,
140    TCG_REG_A4,
141    TCG_REG_A3,
142    TCG_REG_A2,
143    TCG_REG_A1,
144    TCG_REG_A0,
145
146    /* Vector registers */
147    TCG_REG_V0, TCG_REG_V1, TCG_REG_V2, TCG_REG_V3,
148    TCG_REG_V4, TCG_REG_V5, TCG_REG_V6, TCG_REG_V7,
149    TCG_REG_V8, TCG_REG_V9, TCG_REG_V10, TCG_REG_V11,
150    TCG_REG_V12, TCG_REG_V13, TCG_REG_V14, TCG_REG_V15,
151    TCG_REG_V16, TCG_REG_V17, TCG_REG_V18, TCG_REG_V19,
152    TCG_REG_V20, TCG_REG_V21, TCG_REG_V22, TCG_REG_V23,
153    /* V24 - V31 are caller-saved, and skipped.  */
154};
155
156static const int tcg_target_call_iarg_regs[] = {
157    TCG_REG_A0,
158    TCG_REG_A1,
159    TCG_REG_A2,
160    TCG_REG_A3,
161    TCG_REG_A4,
162    TCG_REG_A5,
163    TCG_REG_A6,
164    TCG_REG_A7,
165};
166
167static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
168{
169    tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
170    tcg_debug_assert(slot >= 0 && slot <= 1);
171    return TCG_REG_A0 + slot;
172}
173
174#define TCG_GUEST_BASE_REG TCG_REG_S1
175
176#define TCG_CT_CONST_S12   0x100
177#define TCG_CT_CONST_S32   0x200
178#define TCG_CT_CONST_U12   0x400
179#define TCG_CT_CONST_C12   0x800
180#define TCG_CT_CONST_WSZ   0x1000
181#define TCG_CT_CONST_VCMP  0x2000
182#define TCG_CT_CONST_VADD  0x4000
183
184#define ALL_GENERAL_REGS   MAKE_64BIT_MASK(0, 32)
185#define ALL_VECTOR_REGS    MAKE_64BIT_MASK(32, 32)
186
187static inline tcg_target_long sextreg(tcg_target_long val, int pos, int len)
188{
189    return sextract64(val, pos, len);
190}
191
192/* test if a constant matches the constraint */
193static bool tcg_target_const_match(int64_t val, int ct,
194                                   TCGType type, TCGCond cond, int vece)
195{
196    if (ct & TCG_CT_CONST) {
197        return true;
198    }
199    if ((ct & TCG_CT_CONST_S12) && val == sextreg(val, 0, 12)) {
200        return true;
201    }
202    if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) {
203        return true;
204    }
205    if ((ct & TCG_CT_CONST_U12) && val >= 0 && val <= 0xfff) {
206        return true;
207    }
208    if ((ct & TCG_CT_CONST_C12) && ~val >= 0 && ~val <= 0xfff) {
209        return true;
210    }
211    if ((ct & TCG_CT_CONST_WSZ) && val == (type == TCG_TYPE_I32 ? 32 : 64)) {
212        return true;
213    }
214    int64_t vec_val = sextract64(val, 0, 8 << vece);
215    if ((ct & TCG_CT_CONST_VCMP) && -0x10 <= vec_val && vec_val <= 0x1f) {
216        return true;
217    }
218    if ((ct & TCG_CT_CONST_VADD) && -0x1f <= vec_val && vec_val <= 0x1f) {
219        return true;
220    }
221    return false;
222}
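/*
 * Illustrative examples of the matcher above (not part of the original code):
 * for a constraint demanding only TCG_CT_CONST_S12, val = 0x7ff matches
 * because it equals its own 12-bit sign-extension, while val = 0x800 does not
 * (it sign-extends to -0x800).  Likewise TCG_CT_CONST_C12 accepts val = -1,
 * since ~(-1) = 0 lies within [0, 0xfff].
 */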
223
224/*
225 * Relocations
226 */
227
228/*
229 * The relocation records defined in the LoongArch ELF psABI v1.00 are far
230 * too complicated: a whole stack machine is needed to stuff the fields, with
231 * at the very least one SOP_PUSH and one SOP_POP (of the correct format)
232 * required.
233 *
234 * Hence, define our own simpler relocation types. Numbers are chosen so as
235 * not to collide with potential future additions to the true ELF relocation
236 * type enum.
237 */
238
239/* Field Sk16, shifted right by 2; suitable for conditional jumps */
240#define R_LOONGARCH_BR_SK16     256
241/* Field Sd10k16, shifted right by 2; suitable for B and BL */
242#define R_LOONGARCH_BR_SD10K16  257
243
244static bool reloc_br_sk16(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
245{
246    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
247    intptr_t offset = (intptr_t)target - (intptr_t)src_rx;
248
249    tcg_debug_assert((offset & 3) == 0);
250    offset >>= 2;
251    if (offset == sextreg(offset, 0, 16)) {
252        *src_rw = deposit64(*src_rw, 10, 16, offset);
253        return true;
254    }
255
256    return false;
257}
258
259static bool reloc_br_sd10k16(tcg_insn_unit *src_rw,
260                             const tcg_insn_unit *target)
261{
262    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
263    intptr_t offset = (intptr_t)target - (intptr_t)src_rx;
264
265    tcg_debug_assert((offset & 3) == 0);
266    offset >>= 2;
267    if (offset == sextreg(offset, 0, 26)) {
268        *src_rw = deposit64(*src_rw, 0, 10, offset >> 16); /* slot d10 */
269        *src_rw = deposit64(*src_rw, 10, 16, offset); /* slot k16 */
270        return true;
271    }
272
273    return false;
274}
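/*
 * Worked example (illustrative): a forward branch of 0x40008 bytes is a word
 * offset of 0x10002, so reloc_br_sd10k16 deposits 0x1 into the d10 slot
 * (bits 25:16 of the offset) and 0x0002 into the k16 slot.
 */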
275
276static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
277                        intptr_t value, intptr_t addend)
278{
279    tcg_debug_assert(addend == 0);
280    switch (type) {
281    case R_LOONGARCH_BR_SK16:
282        return reloc_br_sk16(code_ptr, (tcg_insn_unit *)value);
283    case R_LOONGARCH_BR_SD10K16:
284        return reloc_br_sd10k16(code_ptr, (tcg_insn_unit *)value);
285    default:
286        g_assert_not_reached();
287    }
288}
289
290#include "tcg-insn-defs.c.inc"
291
292/*
293 * TCG intrinsics
294 */
295
296static void tcg_out_mb(TCGContext *s, TCGArg a0)
297{
298    /* Baseline LoongArch only has the full barrier, unfortunately.  */
299    tcg_out_opc_dbar(s, 0);
300}
301
302static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
303{
304    if (ret == arg) {
305        return true;
306    }
307    switch (type) {
308    case TCG_TYPE_I32:
309    case TCG_TYPE_I64:
310        if (ret < TCG_REG_V0) {
311            if (arg < TCG_REG_V0) {
312                /*
313                 * Conventional register-register move used in LoongArch is
314                 * `or dst, src, zero`.
315                 */
316                tcg_out_opc_or(s, ret, arg, TCG_REG_ZERO);
317            } else {
318                tcg_out_opc_movfr2gr_d(s, ret, arg);
319            }
320        } else {
321            if (arg < TCG_REG_V0) {
322                tcg_out_opc_movgr2fr_d(s, ret, arg);
323            } else {
324                tcg_out_opc_fmov_d(s, ret, arg);
325            }
326        }
327        break;
328    case TCG_TYPE_V64:
329    case TCG_TYPE_V128:
330        tcg_out_opc_vori_b(s, ret, arg, 0);
331        break;
332    case TCG_TYPE_V256:
333        tcg_out_opc_xvori_b(s, ret, arg, 0);
334        break;
335    default:
336        g_assert_not_reached();
337    }
338    return true;
339}
340
341/* Loads a 32-bit immediate into rd, sign-extended.  */
342static void tcg_out_movi_i32(TCGContext *s, TCGReg rd, int32_t val)
343{
344    tcg_target_long lo = sextreg(val, 0, 12);
345    tcg_target_long hi12 = sextreg(val, 12, 20);
346
347    /* Single-instruction cases.  */
348    if (hi12 == 0) {
349        /* val fits in uimm12: ori rd, zero, val */
350        tcg_out_opc_ori(s, rd, TCG_REG_ZERO, val);
351        return;
352    }
353    if (hi12 == sextreg(lo, 12, 20)) {
354        /* val fits in simm12: addi.w rd, zero, val */
355        tcg_out_opc_addi_w(s, rd, TCG_REG_ZERO, val);
356        return;
357    }
358
359    /* Otherwise, load the high bits with lu12i.w, plus an optional ori.  */
360    tcg_out_opc_lu12i_w(s, rd, hi12);
361    if (lo != 0) {
362        tcg_out_opc_ori(s, rd, rd, lo & 0xfff);
363    }
364}
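/*
 * Worked examples of the cases above (illustrative; rd is a placeholder):
 * val = -1 hits the single addi.w case (hi12 equals the sign-extension of lo),
 * whereas val = 0x12345678 decomposes into hi12 = 0x12345, lo = 0x678 and
 * emits:
 *     lu12i.w  rd, 0x12345
 *     ori      rd, rd, 0x678
 */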
365
366static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd,
367                         tcg_target_long val)
368{
369    /*
370     * LoongArch conventionally loads 64-bit immediates in at most 4 steps,
371     * with dedicated instructions for filling the respective bitfields
372     * below:
373     *
374     *        6                   5                   4               3
375     *  3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2
376     * +-----------------------+---------------------------------------+...
377     * |          hi52         |                  hi32                 |
378     * +-----------------------+---------------------------------------+...
379     *       3                   2                   1
380     *     1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
381     * ...+-------------------------------------+-------------------------+
382     *    |                 hi12                |            lo           |
383     * ...+-------------------------------------+-------------------------+
384     *
385     * Check if val belongs to one of several fast cases before falling
386     * back to the slow path.
387     */
388
389    intptr_t src_rx, pc_offset;
390    tcg_target_long hi12, hi32, hi52;
391
392    /* Value fits in signed i32.  */
393    if (type == TCG_TYPE_I32 || val == (int32_t)val) {
394        tcg_out_movi_i32(s, rd, val);
395        return;
396    }
397
398    /* PC-relative cases.  */
399    src_rx = (intptr_t)tcg_splitwx_to_rx(s->code_ptr);
400    if ((val & 3) == 0) {
401        pc_offset = val - src_rx;
402        if (pc_offset == sextreg(pc_offset, 0, 22)) {
403            /* Single pcaddu2i.  */
404            tcg_out_opc_pcaddu2i(s, rd, pc_offset >> 2);
405            return;
406        }
407    }
408
409    pc_offset = (val >> 12) - (src_rx >> 12);
410    if (pc_offset == sextreg(pc_offset, 0, 20)) {
411        /* Load with pcalau12i + ori.  */
412        tcg_target_long val_lo = val & 0xfff;
413        tcg_out_opc_pcalau12i(s, rd, pc_offset);
414        if (val_lo != 0) {
415            tcg_out_opc_ori(s, rd, rd, val_lo);
416        }
417        return;
418    }
419
420    hi12 = sextreg(val, 12, 20);
421    hi32 = sextreg(val, 32, 20);
422    hi52 = sextreg(val, 52, 12);
423
424    /* Single cu52i.d case.  */
425    if ((hi52 != 0) && (ctz64(val) >= 52)) {
426        tcg_out_opc_cu52i_d(s, rd, TCG_REG_ZERO, hi52);
427        return;
428    }
429
430    /* Slow path.  Initialize the low 32 bits, then concat high bits.  */
431    tcg_out_movi_i32(s, rd, val);
432
433    /* Load hi32 and hi52 explicitly unless they are just sign-extensions. */
434    if (hi32 != sextreg(hi12, 20, 20)) {
435        tcg_out_opc_cu32i_d(s, rd, hi32);
436    }
437
438    if (hi52 != sextreg(hi32, 20, 12)) {
439        tcg_out_opc_cu52i_d(s, rd, rd, hi52);
440    }
441}
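/*
 * Sketch of the slow path above (illustrative, assuming none of the
 * PC-relative shortcuts apply): val = 0x0000001234567890 loads the low
 * 32 bits via lu12i.w/ori, then patches in hi32 with cu32i.d; hi52 is
 * already the sign-extension of hi32, so cu52i.d is skipped:
 *     lu12i.w  rd, 0x34567
 *     ori      rd, rd, 0x890
 *     cu32i.d  rd, 0x12
 */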
442
443static void tcg_out_addi(TCGContext *s, TCGType type, TCGReg rd,
444                         TCGReg rs, tcg_target_long imm)
445{
446    tcg_target_long lo12 = sextreg(imm, 0, 12);
447    tcg_target_long hi16 = sextreg(imm - lo12, 16, 16);
448
449    /*
450     * Note that there's a hole in between hi16 and lo12:
451     *
452     *       3                   2                   1                   0
453     *     1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
454     * ...+-------------------------------+-------+-----------------------+
455     *    |             hi16              |       |          lo12         |
456     * ...+-------------------------------+-------+-----------------------+
457     *
458     * For bits within that hole, it's more efficient to use LU12I and ADD.
459     */
460    if (imm == (hi16 << 16) + lo12) {
461        if (hi16) {
462            tcg_out_opc_addu16i_d(s, rd, rs, hi16);
463            rs = rd;
464        }
465        if (type == TCG_TYPE_I32) {
466            tcg_out_opc_addi_w(s, rd, rs, lo12);
467        } else if (lo12) {
468            tcg_out_opc_addi_d(s, rd, rs, lo12);
469        } else {
470            tcg_out_mov(s, type, rd, rs);
471        }
472    } else {
473        tcg_out_movi(s, type, TCG_REG_TMP0, imm);
474        if (type == TCG_TYPE_I32) {
475            tcg_out_opc_add_w(s, rd, rs, TCG_REG_TMP0);
476        } else {
477            tcg_out_opc_add_d(s, rd, rs, TCG_REG_TMP0);
478        }
479    }
480}
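/*
 * Worked examples (illustrative, TCG_TYPE_I64): imm = 0x30345 splits cleanly
 * into hi16 = 3 and lo12 = 0x345, giving
 *     addu16i.d  rd, rs, 3
 *     addi.d     rd, rd, 0x345
 * while imm = 0x12345 has bits set in the hole (bits 15:12), so it falls back
 * to loading the constant into TMP0 and a full add.d.
 */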
481
482static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
483{
484    return false;
485}
486
487static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
488                             tcg_target_long imm)
489{
490    /* This function is only used for passing structs by reference. */
491    g_assert_not_reached();
492}
493
494static void tcg_out_ext8u(TCGContext *s, TCGReg ret, TCGReg arg)
495{
496    tcg_out_opc_andi(s, ret, arg, 0xff);
497}
498
499static void tcg_out_ext16u(TCGContext *s, TCGReg ret, TCGReg arg)
500{
501    tcg_out_opc_bstrpick_w(s, ret, arg, 0, 15);
502}
503
504static void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg)
505{
506    tcg_out_opc_bstrpick_d(s, ret, arg, 0, 31);
507}
508
509static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
510{
511    tcg_out_opc_sext_b(s, ret, arg);
512}
513
514static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
515{
516    tcg_out_opc_sext_h(s, ret, arg);
517}
518
519static void tcg_out_ext32s(TCGContext *s, TCGReg ret, TCGReg arg)
520{
521    tcg_out_opc_addi_w(s, ret, arg, 0);
522}
523
524static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg)
525{
526    if (ret != arg) {
527        tcg_out_ext32s(s, ret, arg);
528    }
529}
530
531static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg)
532{
533    tcg_out_ext32u(s, ret, arg);
534}
535
536static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg ret, TCGReg arg)
537{
538    tcg_out_ext32s(s, ret, arg);
539}
540
541static void tcg_out_clzctz(TCGContext *s, LoongArchInsn opc,
542                           TCGReg a0, TCGReg a1, TCGReg a2,
543                           bool c2, bool is_32bit)
544{
545    if (c2) {
546        /*
547         * Fast path: semantics already satisfied due to constraint and
548         * insn behavior; a single instruction is enough.
549         */
550        tcg_debug_assert(a2 == (is_32bit ? 32 : 64));
551        /* all clz/ctz insns belong to DJ-format */
552        tcg_out32(s, encode_dj_insn(opc, a0, a1));
553        return;
554    }
555
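    /*
     * The hardware clz/ctz return the operand width for a zero input,
     * whereas TCG wants a2 in that case; select between the two below.
     */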
556    tcg_out32(s, encode_dj_insn(opc, TCG_REG_TMP0, a1));
557    /* a0 = a1 ? REG_TMP0 : a2 */
558    tcg_out_opc_maskeqz(s, TCG_REG_TMP0, TCG_REG_TMP0, a1);
559    tcg_out_opc_masknez(s, a0, a2, a1);
560    tcg_out_opc_or(s, a0, TCG_REG_TMP0, a0);
561}
562
563#define SETCOND_INV    TCG_TARGET_NB_REGS
564#define SETCOND_NEZ    (SETCOND_INV << 1)
565#define SETCOND_FLAGS  (SETCOND_INV | SETCOND_NEZ)
566
567static int tcg_out_setcond_int(TCGContext *s, TCGCond cond, TCGReg ret,
568                               TCGReg arg1, tcg_target_long arg2, bool c2)
569{
570    int flags = 0;
571
572    switch (cond) {
573    case TCG_COND_EQ:    /* -> NE  */
574    case TCG_COND_GE:    /* -> LT  */
575    case TCG_COND_GEU:   /* -> LTU */
576    case TCG_COND_GT:    /* -> LE  */
577    case TCG_COND_GTU:   /* -> LEU */
578        cond = tcg_invert_cond(cond);
579        flags ^= SETCOND_INV;
580        break;
581    default:
582        break;
583    }
584
585    switch (cond) {
586    case TCG_COND_LE:
587    case TCG_COND_LEU:
588        /*
589         * If we have a constant input, the most efficient way to implement
590         * LE is by adding 1 and using LT.  Watch out for wrap-around with LEU.
591         * We don't need to worry about this for LE because the constant input
592         * is still constrained to int32_t, and INT32_MAX+1 is representable
593         * in the 64-bit temporary register.
594         */
595        if (c2) {
596            if (cond == TCG_COND_LEU) {
597                /* unsigned <= -1 is true */
598                if (arg2 == -1) {
599                    tcg_out_movi(s, TCG_TYPE_REG, ret, !(flags & SETCOND_INV));
600                    return ret;
601                }
602                cond = TCG_COND_LTU;
603            } else {
604                cond = TCG_COND_LT;
605            }
606            arg2 += 1;
607        } else {
608            TCGReg tmp = arg2;
609            arg2 = arg1;
610            arg1 = tmp;
611            cond = tcg_swap_cond(cond);    /* LE -> GE */
612            cond = tcg_invert_cond(cond);  /* GE -> LT */
613            flags ^= SETCOND_INV;
614        }
615        break;
616    default:
617        break;
618    }
619
620    switch (cond) {
621    case TCG_COND_NE:
622        flags |= SETCOND_NEZ;
623        if (!c2) {
624            tcg_out_opc_xor(s, ret, arg1, arg2);
625        } else if (arg2 == 0) {
626            ret = arg1;
627        } else if (arg2 >= 0 && arg2 <= 0xfff) {
628            tcg_out_opc_xori(s, ret, arg1, arg2);
629        } else {
630            tcg_out_addi(s, TCG_TYPE_REG, ret, arg1, -arg2);
631        }
632        break;
633
634    case TCG_COND_LT:
635    case TCG_COND_LTU:
636        if (c2) {
637            if (arg2 >= -0x800 && arg2 <= 0x7ff) {
638                if (cond == TCG_COND_LT) {
639                    tcg_out_opc_slti(s, ret, arg1, arg2);
640                } else {
641                    tcg_out_opc_sltui(s, ret, arg1, arg2);
642                }
643                break;
644            }
645            tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_TMP0, arg2);
646            arg2 = TCG_REG_TMP0;
647        }
648        if (cond == TCG_COND_LT) {
649            tcg_out_opc_slt(s, ret, arg1, arg2);
650        } else {
651            tcg_out_opc_sltu(s, ret, arg1, arg2);
652        }
653        break;
654
655    default:
656        g_assert_not_reached();
657    }
658
659    return ret | flags;
660}
661
662static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret,
663                            TCGReg arg1, tcg_target_long arg2, bool c2)
664{
665    int tmpflags = tcg_out_setcond_int(s, cond, ret, arg1, arg2, c2);
666
667    if (tmpflags != ret) {
668        TCGReg tmp = tmpflags & ~SETCOND_FLAGS;
669
670        switch (tmpflags & SETCOND_FLAGS) {
671        case SETCOND_INV:
672            /* Intermediate result is boolean: simply invert. */
673            tcg_out_opc_xori(s, ret, tmp, 1);
674            break;
675        case SETCOND_NEZ:
676            /* Intermediate result is zero/non-zero: test != 0. */
677            tcg_out_opc_sltu(s, ret, TCG_REG_ZERO, tmp);
678            break;
679        case SETCOND_NEZ | SETCOND_INV:
680            /* Intermediate result is zero/non-zero: test == 0. */
681            tcg_out_opc_sltui(s, ret, tmp, 1);
682            break;
683        default:
684            g_assert_not_reached();
685        }
686    }
687}
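/*
 * Illustrative expansion: setcond GE with two register operands inverts to
 * LT and then flips the boolean result, i.e.
 *     slt   ret, arg1, arg2
 *     xori  ret, ret, 1
 */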
688
689static void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGReg ret,
690                            TCGReg c1, tcg_target_long c2, bool const2,
691                            TCGReg v1, TCGReg v2)
692{
693    int tmpflags = tcg_out_setcond_int(s, cond, TCG_REG_TMP0, c1, c2, const2);
694    TCGReg t;
695
696    /* Standardize the test below to t != 0. */
697    if (tmpflags & SETCOND_INV) {
698        t = v1, v1 = v2, v2 = t;
699    }
700
701    t = tmpflags & ~SETCOND_FLAGS;
702    if (v1 == TCG_REG_ZERO) {
703        tcg_out_opc_masknez(s, ret, v2, t);
704    } else if (v2 == TCG_REG_ZERO) {
705        tcg_out_opc_maskeqz(s, ret, v1, t);
706    } else {
707        tcg_out_opc_masknez(s, TCG_REG_TMP2, v2, t); /* t ? 0 : v2 */
708        tcg_out_opc_maskeqz(s, TCG_REG_TMP1, v1, t); /* t ? v1 : 0 */
709        tcg_out_opc_or(s, ret, TCG_REG_TMP1, TCG_REG_TMP2);
710    }
711}
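/*
 * Illustrative expansion for movcond LT with register operands and two
 * non-zero value registers:
 *     slt      TMP0, c1, c2
 *     masknez  TMP2, v2, TMP0      # TMP0 == 0 ? v2 : 0
 *     maskeqz  TMP1, v1, TMP0      # TMP0 != 0 ? v1 : 0
 *     or       ret, TMP1, TMP2
 */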
712
713/*
714 * Branch helpers
715 */
716
717static const struct {
718    LoongArchInsn op;
719    bool swap;
720} tcg_brcond_to_loongarch[] = {
721    [TCG_COND_EQ] =  { OPC_BEQ,  false },
722    [TCG_COND_NE] =  { OPC_BNE,  false },
723    [TCG_COND_LT] =  { OPC_BGT,  true  },
724    [TCG_COND_GE] =  { OPC_BLE,  true  },
725    [TCG_COND_LE] =  { OPC_BLE,  false },
726    [TCG_COND_GT] =  { OPC_BGT,  false },
727    [TCG_COND_LTU] = { OPC_BGTU, true  },
728    [TCG_COND_GEU] = { OPC_BLEU, true  },
729    [TCG_COND_LEU] = { OPC_BLEU, false },
730    [TCG_COND_GTU] = { OPC_BGTU, false }
731};
732
733static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1,
734                           TCGReg arg2, TCGLabel *l)
735{
736    LoongArchInsn op = tcg_brcond_to_loongarch[cond].op;
737
738    tcg_debug_assert(op != 0);
739
740    if (tcg_brcond_to_loongarch[cond].swap) {
741        TCGReg t = arg1;
742        arg1 = arg2;
743        arg2 = t;
744    }
745
746    /* all conditional branch insns belong to DJSk16-format */
747    tcg_out_reloc(s, s->code_ptr, R_LOONGARCH_BR_SK16, l, 0);
748    tcg_out32(s, encode_djsk16_insn(op, arg1, arg2, 0));
749}
750
751static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *arg, bool tail)
752{
753    TCGReg link = tail ? TCG_REG_ZERO : TCG_REG_RA;
754    ptrdiff_t offset = tcg_pcrel_diff(s, arg);
755
756    tcg_debug_assert((offset & 3) == 0);
757    if (offset == sextreg(offset, 0, 28)) {
758        /* short jump: +/- 128MiB */
759        if (tail) {
760            tcg_out_opc_b(s, offset >> 2);
761        } else {
762            tcg_out_opc_bl(s, offset >> 2);
763        }
764    } else if (offset == sextreg(offset, 0, 38)) {
765        /* long jump: +/- 128GiB */
766        tcg_target_long lo = sextreg(offset, 0, 18);
767        tcg_target_long hi = offset - lo;
768        tcg_out_opc_pcaddu18i(s, TCG_REG_TMP0, hi >> 18);
769        tcg_out_opc_jirl(s, link, TCG_REG_TMP0, lo >> 2);
770    } else {
771        /* far jump: 64-bit */
772        tcg_target_long lo = sextreg((tcg_target_long)arg, 0, 18);
773        tcg_target_long hi = (tcg_target_long)arg - lo;
774        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, hi);
775        tcg_out_opc_jirl(s, link, TCG_REG_TMP0, lo >> 2);
776    }
777}
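/*
 * Illustrative long-jump expansion: a call whose PC-relative byte offset is
 * 0x12340000 (outside the +/- 128MiB range of b/bl) splits into
 * hi = 0x12340000 and lo = 0, emitting:
 *     pcaddu18i  TMP0, 0x48d
 *     jirl       ra, TMP0, 0
 */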
778
779static void tcg_out_call(TCGContext *s, const tcg_insn_unit *arg,
780                         const TCGHelperInfo *info)
781{
782    tcg_out_call_int(s, arg, false);
783}
784
785/*
786 * Load/store helpers
787 */
788
789static void tcg_out_ldst(TCGContext *s, LoongArchInsn opc, TCGReg data,
790                         TCGReg addr, intptr_t offset)
791{
792    intptr_t imm12 = sextreg(offset, 0, 12);
793
794    if (offset != imm12) {
795        intptr_t diff = tcg_pcrel_diff(s, (void *)offset);
796
797        if (addr == TCG_REG_ZERO && diff == (int32_t)diff) {
798            imm12 = sextreg(diff, 0, 12);
799            tcg_out_opc_pcaddu12i(s, TCG_REG_TMP2, (diff - imm12) >> 12);
800        } else {
801            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP2, offset - imm12);
802            if (addr != TCG_REG_ZERO) {
803                tcg_out_opc_add_d(s, TCG_REG_TMP2, TCG_REG_TMP2, addr);
804            }
805        }
806        addr = TCG_REG_TMP2;
807    }
808
809    switch (opc) {
810    case OPC_LD_B:
811    case OPC_LD_BU:
812    case OPC_LD_H:
813    case OPC_LD_HU:
814    case OPC_LD_W:
815    case OPC_LD_WU:
816    case OPC_LD_D:
817    case OPC_ST_B:
818    case OPC_ST_H:
819    case OPC_ST_W:
820    case OPC_ST_D:
821        tcg_out32(s, encode_djsk12_insn(opc, data, addr, imm12));
822        break;
823    case OPC_FLD_S:
824    case OPC_FLD_D:
825    case OPC_FST_S:
826    case OPC_FST_D:
827        tcg_out32(s, encode_fdjsk12_insn(opc, data, addr, imm12));
828        break;
829    default:
830        g_assert_not_reached();
831    }
832}
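/*
 * Illustrative expansion for an out-of-range offset (register names are
 * placeholders): a 64-bit load at addr + 0x12345 keeps imm12 = 0x345 and
 * materializes the remaining 0x12000 first:
 *     lu12i.w  TMP2, 0x12
 *     add.d    TMP2, TMP2, addr
 *     ld.d     data, TMP2, 0x345
 */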
833
834static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg dest,
835                       TCGReg base, intptr_t offset)
836{
837    switch (type) {
838    case TCG_TYPE_I32:
839        if (dest < TCG_REG_V0) {
840            tcg_out_ldst(s, OPC_LD_W, dest, base, offset);
841        } else {
842            tcg_out_ldst(s, OPC_FLD_S, dest, base, offset);
843        }
844        break;
845    case TCG_TYPE_I64:
846    case TCG_TYPE_V64:
847        if (dest < TCG_REG_V0) {
848            tcg_out_ldst(s, OPC_LD_D, dest, base, offset);
849        } else {
850            tcg_out_ldst(s, OPC_FLD_D, dest, base, offset);
851        }
852        break;
853    case TCG_TYPE_V128:
854        if (-0x800 <= offset && offset <= 0x7ff) {
855            tcg_out_opc_vld(s, dest, base, offset);
856        } else {
857            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, offset);
858            tcg_out_opc_vldx(s, dest, base, TCG_REG_TMP0);
859        }
860        break;
861    case TCG_TYPE_V256:
862        if (-0x800 <= offset && offset <= 0x7ff) {
863            tcg_out_opc_xvld(s, dest, base, offset);
864        } else {
865            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, offset);
866            tcg_out_opc_xvldx(s, dest, base, TCG_REG_TMP0);
867        }
868        break;
869    default:
870        g_assert_not_reached();
871    }
872}
873
874static void tcg_out_st(TCGContext *s, TCGType type, TCGReg src,
875                       TCGReg base, intptr_t offset)
876{
877    switch (type) {
878    case TCG_TYPE_I32:
879        if (src < TCG_REG_V0) {
880            tcg_out_ldst(s, OPC_ST_W, src, base, offset);
881        } else {
882            tcg_out_ldst(s, OPC_FST_S, src, base, offset);
883        }
884        break;
885    case TCG_TYPE_I64:
886    case TCG_TYPE_V64:
887        if (src < TCG_REG_V0) {
888            tcg_out_ldst(s, OPC_ST_D, src, base, offset);
889        } else {
890            tcg_out_ldst(s, OPC_FST_D, src, base, offset);
891        }
892        break;
893    case TCG_TYPE_V128:
894        if (-0x800 <= offset && offset <= 0x7ff) {
895            tcg_out_opc_vst(s, src, base, offset);
896        } else {
897            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, offset);
898            tcg_out_opc_vstx(s, src, base, TCG_REG_TMP0);
899        }
900        break;
901    case TCG_TYPE_V256:
902        if (-0x800 <= offset && offset <= 0x7ff) {
903            tcg_out_opc_xvst(s, src, base, offset);
904        } else {
905            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, offset);
906            tcg_out_opc_xvstx(s, src, base, TCG_REG_TMP0);
907        }
908        break;
909    default:
910        g_assert_not_reached();
911    }
912}
913
914static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
915                        TCGReg base, intptr_t ofs)
916{
917    if (val == 0) {
918        tcg_out_st(s, type, TCG_REG_ZERO, base, ofs);
919        return true;
920    }
921    return false;
922}
923
924/*
925 * Load/store helpers for SoftMMU, and qemu_ld/st implementations
926 */
927
928static bool tcg_out_goto(TCGContext *s, const tcg_insn_unit *target)
929{
930    tcg_out_opc_b(s, 0);
931    return reloc_br_sd10k16(s->code_ptr - 1, target);
932}
933
934static const TCGLdstHelperParam ldst_helper_param = {
935    .ntmp = 1, .tmp = { TCG_REG_TMP0 }
936};
937
938static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
939{
940    MemOp opc = get_memop(l->oi);
941
942    /* resolve label address */
943    if (!reloc_br_sk16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
944        return false;
945    }
946
947    tcg_out_ld_helper_args(s, l, &ldst_helper_param);
948    tcg_out_call_int(s, qemu_ld_helpers[opc & MO_SIZE], false);
949    tcg_out_ld_helper_ret(s, l, false, &ldst_helper_param);
950    return tcg_out_goto(s, l->raddr);
951}
952
953static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
954{
955    MemOp opc = get_memop(l->oi);
956
957    /* resolve label address */
958    if (!reloc_br_sk16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
959        return false;
960    }
961
962    tcg_out_st_helper_args(s, l, &ldst_helper_param);
963    tcg_out_call_int(s, qemu_st_helpers[opc & MO_SIZE], false);
964    return tcg_out_goto(s, l->raddr);
965}
966
967typedef struct {
968    TCGReg base;
969    TCGReg index;
970    TCGAtomAlign aa;
971} HostAddress;
972
973bool tcg_target_has_memory_bswap(MemOp memop)
974{
975    return false;
976}
977
978/* We expect to use a 12-bit negative offset from ENV.  */
979#define MIN_TLB_MASK_TABLE_OFS  -(1 << 11)
980
981/*
982 * For system-mode, perform the TLB load and compare.
983 * For user-mode, perform any required alignment tests.
984 * In both cases, return a TCGLabelQemuLdst structure if the slow path
985 * is required and fill in @h with the host address for the fast path.
986 */
987static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
988                                           TCGReg addr_reg, MemOpIdx oi,
989                                           bool is_ld)
990{
991    TCGType addr_type = s->addr_type;
992    TCGLabelQemuLdst *ldst = NULL;
993    MemOp opc = get_memop(oi);
994    MemOp a_bits;
995
996    h->aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, false);
997    a_bits = h->aa.align;
998
999    if (tcg_use_softmmu) {
1000        unsigned s_bits = opc & MO_SIZE;
1001        int mem_index = get_mmuidx(oi);
1002        int fast_ofs = tlb_mask_table_ofs(s, mem_index);
1003        int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask);
1004        int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table);
1005
1006        ldst = new_ldst_label(s);
1007        ldst->is_ld = is_ld;
1008        ldst->oi = oi;
1009        ldst->addr_reg = addr_reg;
1010
1011        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_AREG0, mask_ofs);
1012        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, table_ofs);
1013
1014        tcg_out_opc_srli_d(s, TCG_REG_TMP2, addr_reg,
1015                           s->page_bits - CPU_TLB_ENTRY_BITS);
1016        tcg_out_opc_and(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0);
1017        tcg_out_opc_add_d(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1);
1018
1019        /* Load the tlb comparator and the addend.  */
1020        QEMU_BUILD_BUG_ON(HOST_BIG_ENDIAN);
1021        tcg_out_ld(s, addr_type, TCG_REG_TMP0, TCG_REG_TMP2,
1022                   is_ld ? offsetof(CPUTLBEntry, addr_read)
1023                         : offsetof(CPUTLBEntry, addr_write));
1024        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2,
1025                   offsetof(CPUTLBEntry, addend));
1026
1027        /*
1028         * For aligned accesses, we check the first byte and include the
1029         * alignment bits within the address.  For unaligned access, we
1030         * check that we don't cross pages using the address of the last
1031         * byte of the access.
1032         */
1033        if (a_bits < s_bits) {
1034            unsigned a_mask = (1u << a_bits) - 1;
1035            unsigned s_mask = (1u << s_bits) - 1;
1036            tcg_out_addi(s, addr_type, TCG_REG_TMP1, addr_reg, s_mask - a_mask);
1037        } else {
1038            tcg_out_mov(s, addr_type, TCG_REG_TMP1, addr_reg);
1039        }
1040        tcg_out_opc_bstrins_d(s, TCG_REG_TMP1, TCG_REG_ZERO,
1041                              a_bits, s->page_bits - 1);
1042
1043        /* Compare masked address with the TLB entry.  */
1044        ldst->label_ptr[0] = s->code_ptr;
1045        tcg_out_opc_bne(s, TCG_REG_TMP0, TCG_REG_TMP1, 0);
1046
1047        h->index = TCG_REG_TMP2;
1048    } else {
1049        if (a_bits) {
1050            ldst = new_ldst_label(s);
1051
1052            ldst->is_ld = is_ld;
1053            ldst->oi = oi;
1054            ldst->addr_reg = addr_reg;
1055
1056            /*
1057             * Without micro-architecture details, we don't know which of
1058             * bstrpick or andi is faster, so use bstrpick as it's not
1059             * constrained by the imm field width.  Not that alignments >= 2^12
1060             * are going to happen any time soon.
1061             */
1062            tcg_out_opc_bstrpick_d(s, TCG_REG_TMP1, addr_reg, 0, a_bits - 1);
1063
1064            ldst->label_ptr[0] = s->code_ptr;
1065            tcg_out_opc_bne(s, TCG_REG_TMP1, TCG_REG_ZERO, 0);
1066        }
1067
1068        h->index = guest_base ? TCG_GUEST_BASE_REG : TCG_REG_ZERO;
1069    }
1070
1071    if (addr_type == TCG_TYPE_I32) {
1072        h->base = TCG_REG_TMP0;
1073        tcg_out_ext32u(s, h->base, addr_reg);
1074    } else {
1075        h->base = addr_reg;
1076    }
1077
1078    return ldst;
1079}
1080
1081static void tcg_out_qemu_ld_indexed(TCGContext *s, MemOp opc, TCGType type,
1082                                    TCGReg rd, HostAddress h)
1083{
1084    /* Byte swapping is left to middle-end expansion.  */
1085    tcg_debug_assert((opc & MO_BSWAP) == 0);
1086
1087    switch (opc & MO_SSIZE) {
1088    case MO_UB:
1089        tcg_out_opc_ldx_bu(s, rd, h.base, h.index);
1090        break;
1091    case MO_SB:
1092        tcg_out_opc_ldx_b(s, rd, h.base, h.index);
1093        break;
1094    case MO_UW:
1095        tcg_out_opc_ldx_hu(s, rd, h.base, h.index);
1096        break;
1097    case MO_SW:
1098        tcg_out_opc_ldx_h(s, rd, h.base, h.index);
1099        break;
1100    case MO_UL:
1101        if (type == TCG_TYPE_I64) {
1102            tcg_out_opc_ldx_wu(s, rd, h.base, h.index);
1103            break;
1104        }
1105        /* fallthrough */
1106    case MO_SL:
1107        tcg_out_opc_ldx_w(s, rd, h.base, h.index);
1108        break;
1109    case MO_UQ:
1110        tcg_out_opc_ldx_d(s, rd, h.base, h.index);
1111        break;
1112    default:
1113        g_assert_not_reached();
1114    }
1115}
1116
1117static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
1118                            MemOpIdx oi, TCGType data_type)
1119{
1120    TCGLabelQemuLdst *ldst;
1121    HostAddress h;
1122
1123    ldst = prepare_host_addr(s, &h, addr_reg, oi, true);
1124    tcg_out_qemu_ld_indexed(s, get_memop(oi), data_type, data_reg, h);
1125
1126    if (ldst) {
1127        ldst->type = data_type;
1128        ldst->datalo_reg = data_reg;
1129        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
1130    }
1131}
1132
1133static void tcg_out_qemu_st_indexed(TCGContext *s, MemOp opc,
1134                                    TCGReg rd, HostAddress h)
1135{
1136    /* Byte swapping is left to middle-end expansion.  */
1137    tcg_debug_assert((opc & MO_BSWAP) == 0);
1138
1139    switch (opc & MO_SIZE) {
1140    case MO_8:
1141        tcg_out_opc_stx_b(s, rd, h.base, h.index);
1142        break;
1143    case MO_16:
1144        tcg_out_opc_stx_h(s, rd, h.base, h.index);
1145        break;
1146    case MO_32:
1147        tcg_out_opc_stx_w(s, rd, h.base, h.index);
1148        break;
1149    case MO_64:
1150        tcg_out_opc_stx_d(s, rd, h.base, h.index);
1151        break;
1152    default:
1153        g_assert_not_reached();
1154    }
1155}
1156
1157static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
1158                            MemOpIdx oi, TCGType data_type)
1159{
1160    TCGLabelQemuLdst *ldst;
1161    HostAddress h;
1162
1163    ldst = prepare_host_addr(s, &h, addr_reg, oi, false);
1164    tcg_out_qemu_st_indexed(s, get_memop(oi), data_reg, h);
1165
1166    if (ldst) {
1167        ldst->type = data_type;
1168        ldst->datalo_reg = data_reg;
1169        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
1170    }
1171}
1172
1173static void tcg_out_qemu_ldst_i128(TCGContext *s, TCGReg data_lo, TCGReg data_hi,
1174                                   TCGReg addr_reg, MemOpIdx oi, bool is_ld)
1175{
1176    TCGLabelQemuLdst *ldst;
1177    HostAddress h;
1178
1179    ldst = prepare_host_addr(s, &h, addr_reg, oi, is_ld);
1180
1181    if (h.aa.atom == MO_128) {
1182        /*
1183         * Use VLDX/VSTX when 128-bit atomicity is required.
1184         * If the address is 16-byte aligned, the 128-bit load/store is atomic.
1185         */
1186        if (is_ld) {
1187            tcg_out_opc_vldx(s, TCG_VEC_TMP0, h.base, h.index);
1188            tcg_out_opc_vpickve2gr_d(s, data_lo, TCG_VEC_TMP0, 0);
1189            tcg_out_opc_vpickve2gr_d(s, data_hi, TCG_VEC_TMP0, 1);
1190        } else {
1191            tcg_out_opc_vinsgr2vr_d(s, TCG_VEC_TMP0, data_lo, 0);
1192            tcg_out_opc_vinsgr2vr_d(s, TCG_VEC_TMP0, data_hi, 1);
1193            tcg_out_opc_vstx(s, TCG_VEC_TMP0, h.base, h.index);
1194        }
1195    } else {
1196        /* Otherwise use a pair of LD/ST. */
1197        TCGReg base = h.base;
1198        if (h.index != TCG_REG_ZERO) {
1199            base = TCG_REG_TMP0;
1200            tcg_out_opc_add_d(s, base, h.base, h.index);
1201        }
1202        if (is_ld) {
1203            tcg_debug_assert(base != data_lo);
1204            tcg_out_opc_ld_d(s, data_lo, base, 0);
1205            tcg_out_opc_ld_d(s, data_hi, base, 8);
1206        } else {
1207            tcg_out_opc_st_d(s, data_lo, base, 0);
1208            tcg_out_opc_st_d(s, data_hi, base, 8);
1209        }
1210    }
1211
1212    if (ldst) {
1213        ldst->type = TCG_TYPE_I128;
1214        ldst->datalo_reg = data_lo;
1215        ldst->datahi_reg = data_hi;
1216        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
1217    }
1218}
1219
1220/*
1221 * Entry-points
1222 */
1223
1224static const tcg_insn_unit *tb_ret_addr;
1225
1226static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
1227{
1228    /* Reuse the zeroing that exists for goto_ptr.  */
1229    if (a0 == 0) {
1230        tcg_out_call_int(s, tcg_code_gen_epilogue, true);
1231    } else {
1232        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A0, a0);
1233        tcg_out_call_int(s, tb_ret_addr, true);
1234    }
1235}
1236
1237static void tcg_out_goto_tb(TCGContext *s, int which)
1238{
1239    /*
1240     * Direct branch, or load indirect address, to be patched
1241     * by tb_target_set_jmp_target.  Check that the indirect load offset
1242     * is in range early, regardless of direct branch distance,
1243     * via assert within tcg_out_opc_pcaddu2i.
1244     */
1245    uintptr_t i_addr = get_jmp_target_addr(s, which);
1246    intptr_t i_disp = tcg_pcrel_diff(s, (void *)i_addr);
1247
1248    set_jmp_insn_offset(s, which);
1249    tcg_out_opc_pcaddu2i(s, TCG_REG_TMP0, i_disp >> 2);
1250
1251    /* Finish the load and indirect branch. */
1252    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_REG_TMP0, 0);
1253    tcg_out_opc_jirl(s, TCG_REG_ZERO, TCG_REG_TMP0, 0);
1254    set_jmp_reset_offset(s, which);
1255}
1256
1257void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
1258                              uintptr_t jmp_rx, uintptr_t jmp_rw)
1259{
1260    uintptr_t d_addr = tb->jmp_target_addr[n];
1261    ptrdiff_t d_disp = (ptrdiff_t)(d_addr - jmp_rx) >> 2;
1262    tcg_insn_unit insn;
1263
1264    /* Either directly branch, or load slot address for indirect branch. */
1265    if (d_disp == sextreg(d_disp, 0, 26)) {
1266        insn = encode_sd10k16_insn(OPC_B, d_disp);
1267    } else {
1268        uintptr_t i_addr = (uintptr_t)&tb->jmp_target_addr[n];
1269        intptr_t i_disp = i_addr - jmp_rx;
1270        insn = encode_dsj20_insn(OPC_PCADDU2I, TCG_REG_TMP0, i_disp >> 2);
1271    }
1272
1273    qatomic_set((tcg_insn_unit *)jmp_rw, insn);
1274    flush_idcache_range(jmp_rx, jmp_rw, 4);
1275}
1276
1277static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
1278                       const TCGArg args[TCG_MAX_OP_ARGS],
1279                       const int const_args[TCG_MAX_OP_ARGS])
1280{
1281    TCGArg a0 = args[0];
1282    TCGArg a1 = args[1];
1283    TCGArg a2 = args[2];
1284    TCGArg a3 = args[3];
1285    int c2 = const_args[2];
1286
1287    switch (opc) {
1288    case INDEX_op_mb:
1289        tcg_out_mb(s, a0);
1290        break;
1291
1292    case INDEX_op_goto_ptr:
1293        tcg_out_opc_jirl(s, TCG_REG_ZERO, a0, 0);
1294        break;
1295
1296    case INDEX_op_br:
1297        tcg_out_reloc(s, s->code_ptr, R_LOONGARCH_BR_SD10K16, arg_label(a0),
1298                      0);
1299        tcg_out_opc_b(s, 0);
1300        break;
1301
1302    case INDEX_op_brcond_i32:
1303    case INDEX_op_brcond_i64:
1304        tcg_out_brcond(s, a2, a0, a1, arg_label(args[3]));
1305        break;
1306
1307    case INDEX_op_extrh_i64_i32:
1308        tcg_out_opc_srai_d(s, a0, a1, 32);
1309        break;
1310
1311    case INDEX_op_not_i32:
1312    case INDEX_op_not_i64:
1313        tcg_out_opc_nor(s, a0, a1, TCG_REG_ZERO);
1314        break;
1315
1316    case INDEX_op_nor_i32:
1317    case INDEX_op_nor_i64:
1318        if (c2) {
1319            tcg_out_opc_ori(s, a0, a1, a2);
1320            tcg_out_opc_nor(s, a0, a0, TCG_REG_ZERO);
1321        } else {
1322            tcg_out_opc_nor(s, a0, a1, a2);
1323        }
1324        break;
1325
1326    case INDEX_op_andc_i32:
1327    case INDEX_op_andc_i64:
1328        if (c2) {
1329            /* guaranteed to fit due to constraint */
1330            tcg_out_opc_andi(s, a0, a1, ~a2);
1331        } else {
1332            tcg_out_opc_andn(s, a0, a1, a2);
1333        }
1334        break;
1335
1336    case INDEX_op_orc_i32:
1337    case INDEX_op_orc_i64:
1338        if (c2) {
1339            /* guaranteed to fit due to constraint */
1340            tcg_out_opc_ori(s, a0, a1, ~a2);
1341        } else {
1342            tcg_out_opc_orn(s, a0, a1, a2);
1343        }
1344        break;
1345
1346    case INDEX_op_and_i32:
1347    case INDEX_op_and_i64:
1348        if (c2) {
1349            tcg_out_opc_andi(s, a0, a1, a2);
1350        } else {
1351            tcg_out_opc_and(s, a0, a1, a2);
1352        }
1353        break;
1354
1355    case INDEX_op_or_i32:
1356    case INDEX_op_or_i64:
1357        if (c2) {
1358            tcg_out_opc_ori(s, a0, a1, a2);
1359        } else {
1360            tcg_out_opc_or(s, a0, a1, a2);
1361        }
1362        break;
1363
1364    case INDEX_op_xor_i32:
1365    case INDEX_op_xor_i64:
1366        if (c2) {
1367            tcg_out_opc_xori(s, a0, a1, a2);
1368        } else {
1369            tcg_out_opc_xor(s, a0, a1, a2);
1370        }
1371        break;
1372
1373    case INDEX_op_extract_i32:
1374        if (a2 == 0 && args[3] <= 12) {
1375            tcg_out_opc_andi(s, a0, a1, (1 << args[3]) - 1);
1376        } else {
1377            tcg_out_opc_bstrpick_w(s, a0, a1, a2, a2 + args[3] - 1);
1378        }
1379        break;
1380    case INDEX_op_extract_i64:
1381        if (a2 == 0 && args[3] <= 12) {
1382            tcg_out_opc_andi(s, a0, a1, (1 << args[3]) - 1);
1383        } else {
1384            tcg_out_opc_bstrpick_d(s, a0, a1, a2, a2 + args[3] - 1);
1385        }
1386        break;
1387
1388    case INDEX_op_sextract_i64:
1389        if (a2 + args[3] == 32) {
1390            if (a2 == 0) {
1391                tcg_out_ext32s(s, a0, a1);
1392            } else {
1393                tcg_out_opc_srai_w(s, a0, a1, a2);
1394            }
1395            break;
1396        }
1397        /* FALLTHRU */
1398    case INDEX_op_sextract_i32:
1399        if (a2 == 0 && args[3] == 8) {
1400            tcg_out_ext8s(s, TCG_TYPE_REG, a0, a1);
1401        } else if (a2 == 0 && args[3] == 16) {
1402            tcg_out_ext16s(s, TCG_TYPE_REG, a0, a1);
1403        } else {
1404            g_assert_not_reached();
1405        }
1406        break;
1407
1408    case INDEX_op_deposit_i32:
1409        tcg_out_opc_bstrins_w(s, a0, a2, args[3], args[3] + args[4] - 1);
1410        break;
1411    case INDEX_op_deposit_i64:
1412        tcg_out_opc_bstrins_d(s, a0, a2, args[3], args[3] + args[4] - 1);
1413        break;
1414
1415    case INDEX_op_bswap16_i32:
1416    case INDEX_op_bswap16_i64:
1417        tcg_out_opc_revb_2h(s, a0, a1);
1418        if (a2 & TCG_BSWAP_OS) {
1419            tcg_out_ext16s(s, TCG_TYPE_REG, a0, a0);
1420        } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
1421            tcg_out_ext16u(s, a0, a0);
1422        }
1423        break;
1424
1425    case INDEX_op_bswap32_i32:
1426        /* All 32-bit values are computed sign-extended in the register.  */
1427        a2 = TCG_BSWAP_OS;
1428        /* fallthrough */
1429    case INDEX_op_bswap32_i64:
1430        tcg_out_opc_revb_2w(s, a0, a1);
1431        if (a2 & TCG_BSWAP_OS) {
1432            tcg_out_ext32s(s, a0, a0);
1433        } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
1434            tcg_out_ext32u(s, a0, a0);
1435        }
1436        break;
1437
1438    case INDEX_op_bswap64_i64:
1439        tcg_out_opc_revb_d(s, a0, a1);
1440        break;
1441
1442    case INDEX_op_clz_i32:
1443        tcg_out_clzctz(s, OPC_CLZ_W, a0, a1, a2, c2, true);
1444        break;
1445    case INDEX_op_clz_i64:
1446        tcg_out_clzctz(s, OPC_CLZ_D, a0, a1, a2, c2, false);
1447        break;
1448
1449    case INDEX_op_ctz_i32:
1450        tcg_out_clzctz(s, OPC_CTZ_W, a0, a1, a2, c2, true);
1451        break;
1452    case INDEX_op_ctz_i64:
1453        tcg_out_clzctz(s, OPC_CTZ_D, a0, a1, a2, c2, false);
1454        break;
1455
1456    case INDEX_op_shl_i32:
1457        if (c2) {
1458            tcg_out_opc_slli_w(s, a0, a1, a2 & 0x1f);
1459        } else {
1460            tcg_out_opc_sll_w(s, a0, a1, a2);
1461        }
1462        break;
1463    case INDEX_op_shl_i64:
1464        if (c2) {
1465            tcg_out_opc_slli_d(s, a0, a1, a2 & 0x3f);
1466        } else {
1467            tcg_out_opc_sll_d(s, a0, a1, a2);
1468        }
1469        break;
1470
1471    case INDEX_op_shr_i32:
1472        if (c2) {
1473            tcg_out_opc_srli_w(s, a0, a1, a2 & 0x1f);
1474        } else {
1475            tcg_out_opc_srl_w(s, a0, a1, a2);
1476        }
1477        break;
1478    case INDEX_op_shr_i64:
1479        if (c2) {
1480            tcg_out_opc_srli_d(s, a0, a1, a2 & 0x3f);
1481        } else {
1482            tcg_out_opc_srl_d(s, a0, a1, a2);
1483        }
1484        break;
1485
1486    case INDEX_op_sar_i32:
1487        if (c2) {
1488            tcg_out_opc_srai_w(s, a0, a1, a2 & 0x1f);
1489        } else {
1490            tcg_out_opc_sra_w(s, a0, a1, a2);
1491        }
1492        break;
1493    case INDEX_op_sar_i64:
1494        if (c2) {
1495            tcg_out_opc_srai_d(s, a0, a1, a2 & 0x3f);
1496        } else {
1497            tcg_out_opc_sra_d(s, a0, a1, a2);
1498        }
1499        break;
1500
1501    case INDEX_op_rotl_i32:
1502        /* transform into equivalent rotr/rotri */
1503        if (c2) {
1504            tcg_out_opc_rotri_w(s, a0, a1, (32 - a2) & 0x1f);
1505        } else {
1506            tcg_out_opc_sub_w(s, TCG_REG_TMP0, TCG_REG_ZERO, a2);
1507            tcg_out_opc_rotr_w(s, a0, a1, TCG_REG_TMP0);
1508        }
1509        break;
1510    case INDEX_op_rotl_i64:
1511        /* transform into equivalent rotr/rotri */
1512        if (c2) {
1513            tcg_out_opc_rotri_d(s, a0, a1, (64 - a2) & 0x3f);
1514        } else {
1515            tcg_out_opc_sub_w(s, TCG_REG_TMP0, TCG_REG_ZERO, a2);
1516            tcg_out_opc_rotr_d(s, a0, a1, TCG_REG_TMP0);
1517        }
1518        break;
1519
1520    case INDEX_op_rotr_i32:
1521        if (c2) {
1522            tcg_out_opc_rotri_w(s, a0, a1, a2 & 0x1f);
1523        } else {
1524            tcg_out_opc_rotr_w(s, a0, a1, a2);
1525        }
1526        break;
1527    case INDEX_op_rotr_i64:
1528        if (c2) {
1529            tcg_out_opc_rotri_d(s, a0, a1, a2 & 0x3f);
1530        } else {
1531            tcg_out_opc_rotr_d(s, a0, a1, a2);
1532        }
1533        break;
1534
1535    case INDEX_op_add_i32:
1536        if (c2) {
1537            tcg_out_addi(s, TCG_TYPE_I32, a0, a1, a2);
1538        } else {
1539            tcg_out_opc_add_w(s, a0, a1, a2);
1540        }
1541        break;
1542    case INDEX_op_add_i64:
1543        if (c2) {
1544            tcg_out_addi(s, TCG_TYPE_I64, a0, a1, a2);
1545        } else {
1546            tcg_out_opc_add_d(s, a0, a1, a2);
1547        }
1548        break;
1549
1550    case INDEX_op_sub_i32:
1551        if (c2) {
1552            tcg_out_addi(s, TCG_TYPE_I32, a0, a1, -a2);
1553        } else {
1554            tcg_out_opc_sub_w(s, a0, a1, a2);
1555        }
1556        break;
1557    case INDEX_op_sub_i64:
1558        if (c2) {
1559            tcg_out_addi(s, TCG_TYPE_I64, a0, a1, -a2);
1560        } else {
1561            tcg_out_opc_sub_d(s, a0, a1, a2);
1562        }
1563        break;
1564
1565    case INDEX_op_neg_i32:
1566        tcg_out_opc_sub_w(s, a0, TCG_REG_ZERO, a1);
1567        break;
1568    case INDEX_op_neg_i64:
1569        tcg_out_opc_sub_d(s, a0, TCG_REG_ZERO, a1);
1570        break;
1571
1572    case INDEX_op_mul_i32:
1573        tcg_out_opc_mul_w(s, a0, a1, a2);
1574        break;
1575    case INDEX_op_mul_i64:
1576        tcg_out_opc_mul_d(s, a0, a1, a2);
1577        break;
1578
1579    case INDEX_op_mulsh_i32:
1580        tcg_out_opc_mulh_w(s, a0, a1, a2);
1581        break;
1582    case INDEX_op_mulsh_i64:
1583        tcg_out_opc_mulh_d(s, a0, a1, a2);
1584        break;
1585
1586    case INDEX_op_muluh_i32:
1587        tcg_out_opc_mulh_wu(s, a0, a1, a2);
1588        break;
1589    case INDEX_op_muluh_i64:
1590        tcg_out_opc_mulh_du(s, a0, a1, a2);
1591        break;
1592
1593    case INDEX_op_div_i32:
1594        tcg_out_opc_div_w(s, a0, a1, a2);
1595        break;
1596    case INDEX_op_div_i64:
1597        tcg_out_opc_div_d(s, a0, a1, a2);
1598        break;
1599
1600    case INDEX_op_divu_i32:
1601        tcg_out_opc_div_wu(s, a0, a1, a2);
1602        break;
1603    case INDEX_op_divu_i64:
1604        tcg_out_opc_div_du(s, a0, a1, a2);
1605        break;
1606
1607    case INDEX_op_rem_i32:
1608        tcg_out_opc_mod_w(s, a0, a1, a2);
1609        break;
1610    case INDEX_op_rem_i64:
1611        tcg_out_opc_mod_d(s, a0, a1, a2);
1612        break;
1613
1614    case INDEX_op_remu_i32:
1615        tcg_out_opc_mod_wu(s, a0, a1, a2);
1616        break;
1617    case INDEX_op_remu_i64:
1618        tcg_out_opc_mod_du(s, a0, a1, a2);
1619        break;
1620
1621    case INDEX_op_setcond_i32:
1622    case INDEX_op_setcond_i64:
1623        tcg_out_setcond(s, args[3], a0, a1, a2, c2);
1624        break;
1625
1626    case INDEX_op_movcond_i32:
1627    case INDEX_op_movcond_i64:
1628        tcg_out_movcond(s, args[5], a0, a1, a2, c2, args[3], args[4]);
1629        break;
1630
1631    case INDEX_op_ld8s_i32:
1632    case INDEX_op_ld8s_i64:
1633        tcg_out_ldst(s, OPC_LD_B, a0, a1, a2);
1634        break;
1635    case INDEX_op_ld8u_i32:
1636    case INDEX_op_ld8u_i64:
1637        tcg_out_ldst(s, OPC_LD_BU, a0, a1, a2);
1638        break;
1639    case INDEX_op_ld16s_i32:
1640    case INDEX_op_ld16s_i64:
1641        tcg_out_ldst(s, OPC_LD_H, a0, a1, a2);
1642        break;
1643    case INDEX_op_ld16u_i32:
1644    case INDEX_op_ld16u_i64:
1645        tcg_out_ldst(s, OPC_LD_HU, a0, a1, a2);
1646        break;
1647    case INDEX_op_ld_i32:
1648    case INDEX_op_ld32s_i64:
1649        tcg_out_ldst(s, OPC_LD_W, a0, a1, a2);
1650        break;
1651    case INDEX_op_ld32u_i64:
1652        tcg_out_ldst(s, OPC_LD_WU, a0, a1, a2);
1653        break;
1654    case INDEX_op_ld_i64:
1655        tcg_out_ldst(s, OPC_LD_D, a0, a1, a2);
1656        break;
1657
1658    case INDEX_op_st8_i32:
1659    case INDEX_op_st8_i64:
1660        tcg_out_ldst(s, OPC_ST_B, a0, a1, a2);
1661        break;
1662    case INDEX_op_st16_i32:
1663    case INDEX_op_st16_i64:
1664        tcg_out_ldst(s, OPC_ST_H, a0, a1, a2);
1665        break;
1666    case INDEX_op_st_i32:
1667    case INDEX_op_st32_i64:
1668        tcg_out_ldst(s, OPC_ST_W, a0, a1, a2);
1669        break;
1670    case INDEX_op_st_i64:
1671        tcg_out_ldst(s, OPC_ST_D, a0, a1, a2);
1672        break;
1673
1674    case INDEX_op_qemu_ld_i32:
1675        tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32);
1676        break;
1677    case INDEX_op_qemu_ld_i64:
1678        tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64);
1679        break;
1680    case INDEX_op_qemu_ld_i128:
1681        tcg_out_qemu_ldst_i128(s, a0, a1, a2, a3, true);
1682        break;
1683    case INDEX_op_qemu_st_i32:
1684        tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32);
1685        break;
1686    case INDEX_op_qemu_st_i64:
1687        tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64);
1688        break;
1689    case INDEX_op_qemu_st_i128:
1690        tcg_out_qemu_ldst_i128(s, a0, a1, a2, a3, false);
1691        break;
1692
1693    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
1694    case INDEX_op_mov_i64:
1695    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
1696    case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb.  */
1697    case INDEX_op_goto_tb:  /* Always emitted via tcg_out_goto_tb.  */
1698    case INDEX_op_ext8s_i32:  /* Always emitted via tcg_reg_alloc_op.  */
1699    case INDEX_op_ext8s_i64:
1700    case INDEX_op_ext8u_i32:
1701    case INDEX_op_ext8u_i64:
1702    case INDEX_op_ext16s_i32:
1703    case INDEX_op_ext16s_i64:
1704    case INDEX_op_ext16u_i32:
1705    case INDEX_op_ext16u_i64:
1706    case INDEX_op_ext32s_i64:
1707    case INDEX_op_ext32u_i64:
1708    case INDEX_op_ext_i32_i64:
1709    case INDEX_op_extu_i32_i64:
1710    case INDEX_op_extrl_i64_i32:
1711    default:
1712        g_assert_not_reached();
1713    }
1714}
1715
1716static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
1717                            TCGReg rd, TCGReg rs)
1718{
1719    static const LoongArchInsn repl_insn[2][4] = {
1720        { OPC_VREPLGR2VR_B, OPC_VREPLGR2VR_H,
1721          OPC_VREPLGR2VR_W, OPC_VREPLGR2VR_D },
1722        { OPC_XVREPLGR2VR_B, OPC_XVREPLGR2VR_H,
1723          OPC_XVREPLGR2VR_W, OPC_XVREPLGR2VR_D },
1724    };
1725    bool lasx = type == TCG_TYPE_V256;
1726
1727    tcg_debug_assert(vece <= MO_64);
1728    tcg_out32(s, encode_vdj_insn(repl_insn[lasx][vece], rd, rs));
1729    return true;
1730}
1731
1732static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
1733                             TCGReg r, TCGReg base, intptr_t offset)
1734{
1735    bool lasx = type == TCG_TYPE_V256;
1736
1737    /* Handle imm overflow and scaling (the vldrepl imm is scaled by the element size). */
1738    if (offset < -0x800 || offset > 0x7ff ||
1739        (offset & ((1 << vece) - 1)) != 0) {
1740        tcg_out_addi(s, TCG_TYPE_I64, TCG_REG_TMP0, base, offset);
1741        base = TCG_REG_TMP0;
1742        offset = 0;
1743    }
1744    offset >>= vece;
1745
1746    switch (vece) {
1747    case MO_8:
1748        if (lasx) {
1749            tcg_out_opc_xvldrepl_b(s, r, base, offset);
1750        } else {
1751            tcg_out_opc_vldrepl_b(s, r, base, offset);
1752        }
1753        break;
1754    case MO_16:
1755        if (lasx) {
1756            tcg_out_opc_xvldrepl_h(s, r, base, offset);
1757        } else {
1758            tcg_out_opc_vldrepl_h(s, r, base, offset);
1759        }
1760        break;
1761    case MO_32:
1762        if (lasx) {
1763            tcg_out_opc_xvldrepl_w(s, r, base, offset);
1764        } else {
1765            tcg_out_opc_vldrepl_w(s, r, base, offset);
1766        }
1767        break;
1768    case MO_64:
1769        if (lasx) {
1770            tcg_out_opc_xvldrepl_d(s, r, base, offset);
1771        } else {
1772            tcg_out_opc_vldrepl_d(s, r, base, offset);
1773        }
1774        break;
1775    default:
1776        g_assert_not_reached();
1777    }
1778    return true;
1779}
1780
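/*
 * Duplicate an immediate into a vector register.  As used below, the
 * 13-bit vldi immediate has bit 12 clear, bits 11:10 holding the
 * element size (MO_8..MO_64) and bits 9:0 a signed value broadcast to
 * every lane; e.g. vece == MO_32 with value 7 becomes
 * "vldi vd, (MO_32 << 10) | 7".  Other constants are loaded into a GPR
 * and broadcast via tcg_out_dup_vec() instead.
 */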
1781static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
1782                             TCGReg rd, int64_t v64)
1783{
1784    /* Try vldi if the immediate fits its signed 10-bit field */
1785    int64_t value = sextract64(v64, 0, 8 << vece);
1786    if (-0x200 <= value && value <= 0x1FF) {
1787        uint32_t imm = (vece << 10) | ((uint32_t)v64 & 0x3FF);
1788
1789        if (type == TCG_TYPE_V256) {
1790            tcg_out_opc_xvldi(s, rd, imm);
1791        } else {
1792            tcg_out_opc_vldi(s, rd, imm);
1793        }
1794        return;
1795    }
1796
1797    /* TODO: vldi patterns when imm 12 is set */
1798    /* TODO: vldi patterns when bit 12 of the immediate is set */
1799    tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP0, value);
1800    tcg_out_dup_vec(s, type, vece, rd, TCG_REG_TMP0);
1801}
1802
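/*
 * Vector add/subtract.  A constant operand is folded into vaddi/vsubi
 * with an unsigned 5-bit immediate, flipping between add and subtract
 * as needed so that the magnitude lands in the 0..31 range.
 */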
1803static void tcg_out_addsub_vec(TCGContext *s, bool lasx, unsigned vece,
1804                               TCGArg a0, TCGArg a1, TCGArg a2,
1805                               bool a2_is_const, bool is_add)
1806{
1807    static const LoongArchInsn add_vec_insn[2][4] = {
1808        { OPC_VADD_B, OPC_VADD_H, OPC_VADD_W, OPC_VADD_D },
1809        { OPC_XVADD_B, OPC_XVADD_H, OPC_XVADD_W, OPC_XVADD_D },
1810    };
1811    static const LoongArchInsn add_vec_imm_insn[2][4] = {
1812        { OPC_VADDI_BU, OPC_VADDI_HU, OPC_VADDI_WU, OPC_VADDI_DU },
1813        { OPC_XVADDI_BU, OPC_XVADDI_HU, OPC_XVADDI_WU, OPC_XVADDI_DU },
1814    };
1815    static const LoongArchInsn sub_vec_insn[2][4] = {
1816        { OPC_VSUB_B, OPC_VSUB_H, OPC_VSUB_W, OPC_VSUB_D },
1817        { OPC_XVSUB_B, OPC_XVSUB_H, OPC_XVSUB_W, OPC_XVSUB_D },
1818    };
1819    static const LoongArchInsn sub_vec_imm_insn[2][4] = {
1820        { OPC_VSUBI_BU, OPC_VSUBI_HU, OPC_VSUBI_WU, OPC_VSUBI_DU },
1821        { OPC_XVSUBI_BU, OPC_XVSUBI_HU, OPC_XVSUBI_WU, OPC_XVSUBI_DU },
1822    };
1823    LoongArchInsn insn;
1824
1825    if (a2_is_const) {
1826        int64_t value = sextract64(a2, 0, 8 << vece);
1827
1828        if (!is_add) {
1829            value = -value;
1830        }
1831        if (value < 0) {
1832            insn = sub_vec_imm_insn[lasx][vece];
1833            value = -value;
1834        } else {
1835            insn = add_vec_imm_insn[lasx][vece];
1836        }
1837
1838        /* Constraint TCG_CT_CONST_VADD ensures validity. */
1839        tcg_debug_assert(0 <= value && value <= 0x1f);
1840
1841        tcg_out32(s, encode_vdvjuk5_insn(insn, a0, a1, value));
1842        return;
1843    }
1844
1845    if (is_add) {
1846        insn = add_vec_insn[lasx][vece];
1847    } else {
1848        insn = sub_vec_insn[lasx][vece];
1849    }
1850    tcg_out32(s, encode_vdvjvk_insn(insn, a0, a1, a2));
1851}
1852
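/*
 * Main vector opcode emitter.  Most cases select an instruction from a
 * [lasx][vece] table and then share the generic vd,vj,vk or vd,vj,ukN
 * encoders via the vdvjvk/vdvjukN labels at the bottom of the switch.
 */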
1853static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
1854                           unsigned vecl, unsigned vece,
1855                           const TCGArg args[TCG_MAX_OP_ARGS],
1856                           const int const_args[TCG_MAX_OP_ARGS])
1857{
1858    TCGType type = vecl + TCG_TYPE_V64;
1859    bool lasx = type == TCG_TYPE_V256;
1860    TCGArg a0, a1, a2, a3;
1861    LoongArchInsn insn;
1862
1863    static const LoongArchInsn cmp_vec_insn[16][2][4] = {
1864        [TCG_COND_EQ] = {
1865            { OPC_VSEQ_B, OPC_VSEQ_H, OPC_VSEQ_W, OPC_VSEQ_D },
1866            { OPC_XVSEQ_B, OPC_XVSEQ_H, OPC_XVSEQ_W, OPC_XVSEQ_D },
1867        },
1868        [TCG_COND_LE] = {
1869            { OPC_VSLE_B, OPC_VSLE_H, OPC_VSLE_W, OPC_VSLE_D },
1870            { OPC_XVSLE_B, OPC_XVSLE_H, OPC_XVSLE_W, OPC_XVSLE_D },
1871        },
1872        [TCG_COND_LEU] = {
1873            { OPC_VSLE_BU, OPC_VSLE_HU, OPC_VSLE_WU, OPC_VSLE_DU },
1874            { OPC_XVSLE_BU, OPC_XVSLE_HU, OPC_XVSLE_WU, OPC_XVSLE_DU },
1875        },
1876        [TCG_COND_LT] = {
1877            { OPC_VSLT_B, OPC_VSLT_H, OPC_VSLT_W, OPC_VSLT_D },
1878            { OPC_XVSLT_B, OPC_XVSLT_H, OPC_XVSLT_W, OPC_XVSLT_D },
1879        },
1880        [TCG_COND_LTU] = {
1881            { OPC_VSLT_BU, OPC_VSLT_HU, OPC_VSLT_WU, OPC_VSLT_DU },
1882            { OPC_XVSLT_BU, OPC_XVSLT_HU, OPC_XVSLT_WU, OPC_XVSLT_DU },
1883        }
1884    };
1885    static const LoongArchInsn cmp_vec_imm_insn[16][2][4] = {
1886        [TCG_COND_EQ] = {
1887            { OPC_VSEQI_B, OPC_VSEQI_H, OPC_VSEQI_W, OPC_VSEQI_D },
1888            { OPC_XVSEQI_B, OPC_XVSEQI_H, OPC_XVSEQI_W, OPC_XVSEQI_D },
1889        },
1890        [TCG_COND_LE] = {
1891            { OPC_VSLEI_B, OPC_VSLEI_H, OPC_VSLEI_W, OPC_VSLEI_D },
1892            { OPC_XVSLEI_B, OPC_XVSLEI_H, OPC_XVSLEI_W, OPC_XVSLEI_D },
1893        },
1894        [TCG_COND_LEU] = {
1895            { OPC_VSLEI_BU, OPC_VSLEI_HU, OPC_VSLEI_WU, OPC_VSLEI_DU },
1896            { OPC_XVSLEI_BU, OPC_XVSLEI_HU, OPC_XVSLEI_WU, OPC_XVSLEI_DU },
1897        },
1898        [TCG_COND_LT] = {
1899            { OPC_VSLTI_B, OPC_VSLTI_H, OPC_VSLTI_W, OPC_VSLTI_D },
1900            { OPC_XVSLTI_B, OPC_XVSLTI_H, OPC_XVSLTI_W, OPC_XVSLTI_D },
1901        },
1902        [TCG_COND_LTU] = {
1903            { OPC_VSLTI_BU, OPC_VSLTI_HU, OPC_VSLTI_WU, OPC_VSLTI_DU },
1904            { OPC_XVSLTI_BU, OPC_XVSLTI_HU, OPC_XVSLTI_WU, OPC_XVSLTI_DU },
1905        }
1906    };
1907    static const LoongArchInsn neg_vec_insn[2][4] = {
1908        { OPC_VNEG_B, OPC_VNEG_H, OPC_VNEG_W, OPC_VNEG_D },
1909        { OPC_XVNEG_B, OPC_XVNEG_H, OPC_XVNEG_W, OPC_XVNEG_D },
1910    };
1911    static const LoongArchInsn mul_vec_insn[2][4] = {
1912        { OPC_VMUL_B, OPC_VMUL_H, OPC_VMUL_W, OPC_VMUL_D },
1913        { OPC_XVMUL_B, OPC_XVMUL_H, OPC_XVMUL_W, OPC_XVMUL_D },
1914    };
1915    static const LoongArchInsn smin_vec_insn[2][4] = {
1916        { OPC_VMIN_B, OPC_VMIN_H, OPC_VMIN_W, OPC_VMIN_D },
1917        { OPC_XVMIN_B, OPC_XVMIN_H, OPC_XVMIN_W, OPC_XVMIN_D },
1918    };
1919    static const LoongArchInsn umin_vec_insn[2][4] = {
1920        { OPC_VMIN_BU, OPC_VMIN_HU, OPC_VMIN_WU, OPC_VMIN_DU },
1921        { OPC_XVMIN_BU, OPC_XVMIN_HU, OPC_XVMIN_WU, OPC_XVMIN_DU },
1922    };
1923    static const LoongArchInsn smax_vec_insn[2][4] = {
1924        { OPC_VMAX_B, OPC_VMAX_H, OPC_VMAX_W, OPC_VMAX_D },
1925        { OPC_XVMAX_B, OPC_XVMAX_H, OPC_XVMAX_W, OPC_XVMAX_D },
1926    };
1927    static const LoongArchInsn umax_vec_insn[2][4] = {
1928        { OPC_VMAX_BU, OPC_VMAX_HU, OPC_VMAX_WU, OPC_VMAX_DU },
1929        { OPC_XVMAX_BU, OPC_XVMAX_HU, OPC_XVMAX_WU, OPC_XVMAX_DU },
1930    };
1931    static const LoongArchInsn ssadd_vec_insn[2][4] = {
1932        { OPC_VSADD_B, OPC_VSADD_H, OPC_VSADD_W, OPC_VSADD_D },
1933        { OPC_XVSADD_B, OPC_XVSADD_H, OPC_XVSADD_W, OPC_XVSADD_D },
1934    };
1935    static const LoongArchInsn usadd_vec_insn[2][4] = {
1936        { OPC_VSADD_BU, OPC_VSADD_HU, OPC_VSADD_WU, OPC_VSADD_DU },
1937        { OPC_XVSADD_BU, OPC_XVSADD_HU, OPC_XVSADD_WU, OPC_XVSADD_DU },
1938    };
1939    static const LoongArchInsn sssub_vec_insn[2][4] = {
1940        { OPC_VSSUB_B, OPC_VSSUB_H, OPC_VSSUB_W, OPC_VSSUB_D },
1941        { OPC_XVSSUB_B, OPC_XVSSUB_H, OPC_XVSSUB_W, OPC_XVSSUB_D },
1942    };
1943    static const LoongArchInsn ussub_vec_insn[2][4] = {
1944        { OPC_VSSUB_BU, OPC_VSSUB_HU, OPC_VSSUB_WU, OPC_VSSUB_DU },
1945        { OPC_XVSSUB_BU, OPC_XVSSUB_HU, OPC_XVSSUB_WU, OPC_XVSSUB_DU },
1946    };
1947    static const LoongArchInsn shlv_vec_insn[2][4] = {
1948        { OPC_VSLL_B, OPC_VSLL_H, OPC_VSLL_W, OPC_VSLL_D },
1949        { OPC_XVSLL_B, OPC_XVSLL_H, OPC_XVSLL_W, OPC_XVSLL_D },
1950    };
1951    static const LoongArchInsn shrv_vec_insn[2][4] = {
1952        { OPC_VSRL_B, OPC_VSRL_H, OPC_VSRL_W, OPC_VSRL_D },
1953        { OPC_XVSRL_B, OPC_XVSRL_H, OPC_XVSRL_W, OPC_XVSRL_D },
1954    };
1955    static const LoongArchInsn sarv_vec_insn[2][4] = {
1956        { OPC_VSRA_B, OPC_VSRA_H, OPC_VSRA_W, OPC_VSRA_D },
1957        { OPC_XVSRA_B, OPC_XVSRA_H, OPC_XVSRA_W, OPC_XVSRA_D },
1958    };
1959    static const LoongArchInsn shli_vec_insn[2][4] = {
1960        { OPC_VSLLI_B, OPC_VSLLI_H, OPC_VSLLI_W, OPC_VSLLI_D },
1961        { OPC_XVSLLI_B, OPC_XVSLLI_H, OPC_XVSLLI_W, OPC_XVSLLI_D },
1962    };
1963    static const LoongArchInsn shri_vec_insn[2][4] = {
1964        { OPC_VSRLI_B, OPC_VSRLI_H, OPC_VSRLI_W, OPC_VSRLI_D },
1965        { OPC_XVSRLI_B, OPC_XVSRLI_H, OPC_XVSRLI_W, OPC_XVSRLI_D },
1966    };
1967    static const LoongArchInsn sari_vec_insn[2][4] = {
1968        { OPC_VSRAI_B, OPC_VSRAI_H, OPC_VSRAI_W, OPC_VSRAI_D },
1969        { OPC_XVSRAI_B, OPC_XVSRAI_H, OPC_XVSRAI_W, OPC_XVSRAI_D },
1970    };
1971    static const LoongArchInsn rotrv_vec_insn[2][4] = {
1972        { OPC_VROTR_B, OPC_VROTR_H, OPC_VROTR_W, OPC_VROTR_D },
1973        { OPC_XVROTR_B, OPC_XVROTR_H, OPC_XVROTR_W, OPC_XVROTR_D },
1974    };
1975    static const LoongArchInsn rotri_vec_insn[2][4] = {
1976        { OPC_VROTRI_B, OPC_VROTRI_H, OPC_VROTRI_W, OPC_VROTRI_D },
1977        { OPC_XVROTRI_B, OPC_XVROTRI_H, OPC_XVROTRI_W, OPC_XVROTRI_D },
1978    };
1979
1980    a0 = args[0];
1981    a1 = args[1];
1982    a2 = args[2];
1983    a3 = args[3];
1984
1985    switch (opc) {
1986    case INDEX_op_st_vec:
1987        tcg_out_st(s, type, a0, a1, a2);
1988        break;
1989    case INDEX_op_ld_vec:
1990        tcg_out_ld(s, type, a0, a1, a2);
1991        break;
1992    case INDEX_op_and_vec:
1993        insn = lasx ? OPC_XVAND_V : OPC_VAND_V;
1994        goto vdvjvk;
1995    case INDEX_op_andc_vec:
1996        /*
1997         * vandn vd, vj, vk: vd = vk & ~vj
1998         * andc_vec vd, vj, vk: vd = vj & ~vk
1999         * hence vj and vk must be swapped
2000         */
2001        a1 = a2;
2002        a2 = args[1];
2003        insn = lasx ? OPC_XVANDN_V : OPC_VANDN_V;
2004        goto vdvjvk;
2005    case INDEX_op_or_vec:
2006        insn = lasx ? OPC_XVOR_V : OPC_VOR_V;
2007        goto vdvjvk;
2008    case INDEX_op_orc_vec:
2009        insn = lasx ? OPC_XVORN_V : OPC_VORN_V;
2010        goto vdvjvk;
2011    case INDEX_op_xor_vec:
2012        insn = lasx ? OPC_XVXOR_V : OPC_VXOR_V;
2013        goto vdvjvk;
2014    case INDEX_op_not_vec:
2015        a2 = a1;
2016        /* fall through */
2017    case INDEX_op_nor_vec:
2018        insn = lasx ? OPC_XVNOR_V : OPC_VNOR_V;
2019        goto vdvjvk;
2020    case INDEX_op_cmp_vec:
2021        {
2022            TCGCond cond = args[3];
2023
2024            if (const_args[2]) {
2025                /*
2026                 * cmp_vec dest, src, value
2027                 * Try vseqi/vslei/vslti
2028                 */
2029                int64_t value = sextract64(a2, 0, 8 << vece);
2030                if ((cond == TCG_COND_EQ ||
2031                     cond == TCG_COND_LE ||
2032                     cond == TCG_COND_LT) &&
2033                    (-0x10 <= value && value <= 0x0f)) {
2034                    insn = cmp_vec_imm_insn[cond][lasx][vece];
2035                    tcg_out32(s, encode_vdvjsk5_insn(insn, a0, a1, value));
2036                    break;
2037                } else if ((cond == TCG_COND_LEU ||
2038                            cond == TCG_COND_LTU) &&
2039                           (0x00 <= value && value <= 0x1f)) {
2040                    insn = cmp_vec_imm_insn[cond][lasx][vece];
2041                    tcg_out32(s, encode_vdvjuk5_insn(insn, a0, a1, value));
2042                    break;
2043                }
2044
2045                /*
2046                 * Fall back to:
2047                 * dupi_vec temp, a2
2048                 * cmp_vec a0, a1, temp, cond
2049                 */
2050                tcg_out_dupi_vec(s, type, vece, TCG_VEC_TMP0, a2);
2051                a2 = TCG_VEC_TMP0;
2052            }
2053
2054            insn = cmp_vec_insn[cond][lasx][vece];
2055            if (insn == 0) {
2056                TCGArg t;
2057                t = a1, a1 = a2, a2 = t;
2058                cond = tcg_swap_cond(cond);
2059                insn = cmp_vec_insn[cond][lasx][vece];
2060                tcg_debug_assert(insn != 0);
2061            }
2062        }
2063        goto vdvjvk;
2064    case INDEX_op_add_vec:
2065        tcg_out_addsub_vec(s, lasx, vece, a0, a1, a2, const_args[2], true);
2066        break;
2067    case INDEX_op_sub_vec:
2068        tcg_out_addsub_vec(s, lasx, vece, a0, a1, a2, const_args[2], false);
2069        break;
2070    case INDEX_op_neg_vec:
2071        tcg_out32(s, encode_vdvj_insn(neg_vec_insn[lasx][vece], a0, a1));
2072        break;
2073    case INDEX_op_mul_vec:
2074        insn = mul_vec_insn[lasx][vece];
2075        goto vdvjvk;
2076    case INDEX_op_smin_vec:
2077        insn = smin_vec_insn[lasx][vece];
2078        goto vdvjvk;
2079    case INDEX_op_smax_vec:
2080        insn = smax_vec_insn[lasx][vece];
2081        goto vdvjvk;
2082    case INDEX_op_umin_vec:
2083        insn = umin_vec_insn[lasx][vece];
2084        goto vdvjvk;
2085    case INDEX_op_umax_vec:
2086        insn = umax_vec_insn[lasx][vece];
2087        goto vdvjvk;
2088    case INDEX_op_ssadd_vec:
2089        insn = ssadd_vec_insn[lasx][vece];
2090        goto vdvjvk;
2091    case INDEX_op_usadd_vec:
2092        insn = usadd_vec_insn[lasx][vece];
2093        goto vdvjvk;
2094    case INDEX_op_sssub_vec:
2095        insn = sssub_vec_insn[lasx][vece];
2096        goto vdvjvk;
2097    case INDEX_op_ussub_vec:
2098        insn = ussub_vec_insn[lasx][vece];
2099        goto vdvjvk;
2100    case INDEX_op_shlv_vec:
2101        insn = shlv_vec_insn[lasx][vece];
2102        goto vdvjvk;
2103    case INDEX_op_shrv_vec:
2104        insn = shrv_vec_insn[lasx][vece];
2105        goto vdvjvk;
2106    case INDEX_op_sarv_vec:
2107        insn = sarv_vec_insn[lasx][vece];
2108        goto vdvjvk;
2109    case INDEX_op_rotlv_vec:
2110        /* rotlv_vec a1, a2 = rotrv_vec a1, -a2 */
2111        tcg_out32(s, encode_vdvj_insn(neg_vec_insn[lasx][vece],
2112                                      TCG_VEC_TMP0, a2));
2113        a2 = TCG_VEC_TMP0;
2114        /* fall through */
2115    case INDEX_op_rotrv_vec:
2116        insn = rotrv_vec_insn[lasx][vece];
2117        goto vdvjvk;
2118    case INDEX_op_shli_vec:
2119        insn = shli_vec_insn[lasx][vece];
2120        goto vdvjukN;
2121    case INDEX_op_shri_vec:
2122        insn = shri_vec_insn[lasx][vece];
2123        goto vdvjukN;
2124    case INDEX_op_sari_vec:
2125        insn = sari_vec_insn[lasx][vece];
2126        goto vdvjukN;
2127    case INDEX_op_rotli_vec:
2128        /* rotli_vec a1, a2 = rotri_vec a1, -a2 */
2129        a2 = extract32(-a2, 0, 3 + vece);
2130        insn = rotri_vec_insn[lasx][vece];
2131        goto vdvjukN;
2132    case INDEX_op_bitsel_vec:
2133        /* vbitsel vd, vj, vk, va = bitsel_vec vd, va, vk, vj */
2134        if (lasx) {
2135            tcg_out_opc_xvbitsel_v(s, a0, a3, a2, a1);
2136        } else {
2137            tcg_out_opc_vbitsel_v(s, a0, a3, a2, a1);
2138        }
2139        break;
2140    case INDEX_op_dupm_vec:
2141        tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
2142        break;
2143    default:
2144        g_assert_not_reached();
2145    vdvjvk:
2146        tcg_out32(s, encode_vdvjvk_insn(insn, a0, a1, a2));
2147        break;
2148    vdvjukN:
2149        switch (vece) {
2150        case MO_8:
2151            tcg_out32(s, encode_vdvjuk3_insn(insn, a0, a1, a2));
2152            break;
2153        case MO_16:
2154            tcg_out32(s, encode_vdvjuk4_insn(insn, a0, a1, a2));
2155            break;
2156        case MO_32:
2157            tcg_out32(s, encode_vdvjuk5_insn(insn, a0, a1, a2));
2158            break;
2159        case MO_64:
2160            tcg_out32(s, encode_vdvjuk6_insn(insn, a0, a1, a2));
2161            break;
2162        default:
2163            g_assert_not_reached();
2164        }
2165        break;
2166    }
2167}
2168
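/*
 * Report which vector opcodes the backend emits natively: 1 if handled
 * by tcg_out_vec_op() above, 0 otherwise.  Nothing requests custom
 * expansion, so tcg_expand_vec_op() below should never be called.
 */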
2169int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
2170{
2171    switch (opc) {
2172    case INDEX_op_ld_vec:
2173    case INDEX_op_st_vec:
2174    case INDEX_op_dup_vec:
2175    case INDEX_op_dupm_vec:
2176    case INDEX_op_cmp_vec:
2177    case INDEX_op_add_vec:
2178    case INDEX_op_sub_vec:
2179    case INDEX_op_and_vec:
2180    case INDEX_op_andc_vec:
2181    case INDEX_op_or_vec:
2182    case INDEX_op_orc_vec:
2183    case INDEX_op_xor_vec:
2184    case INDEX_op_nor_vec:
2185    case INDEX_op_not_vec:
2186    case INDEX_op_neg_vec:
2187    case INDEX_op_mul_vec:
2188    case INDEX_op_smin_vec:
2189    case INDEX_op_smax_vec:
2190    case INDEX_op_umin_vec:
2191    case INDEX_op_umax_vec:
2192    case INDEX_op_ssadd_vec:
2193    case INDEX_op_usadd_vec:
2194    case INDEX_op_sssub_vec:
2195    case INDEX_op_ussub_vec:
2196    case INDEX_op_shlv_vec:
2197    case INDEX_op_shrv_vec:
2198    case INDEX_op_sarv_vec:
2199    case INDEX_op_bitsel_vec:
2200        return 1;
2201    default:
2202        return 0;
2203    }
2204}
2205
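/* Unused: see tcg_can_emit_vec_op() above. */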
2206void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
2207                       TCGArg a0, ...)
2208{
2209    g_assert_not_reached();
2210}
2211
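/*
 * Return the operand-constraint set for each opcode: 'r' selects a
 * general register, 'w' a vector register, and the remaining letters
 * select the zero/constant classes declared earlier in this file.
 */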
2212static TCGConstraintSetIndex
2213tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
2214{
2215    switch (op) {
2216    case INDEX_op_goto_ptr:
2217        return C_O0_I1(r);
2218
2219    case INDEX_op_st8_i32:
2220    case INDEX_op_st8_i64:
2221    case INDEX_op_st16_i32:
2222    case INDEX_op_st16_i64:
2223    case INDEX_op_st32_i64:
2224    case INDEX_op_st_i32:
2225    case INDEX_op_st_i64:
2226    case INDEX_op_qemu_st_i32:
2227    case INDEX_op_qemu_st_i64:
2228        return C_O0_I2(rz, r);
2229
2230    case INDEX_op_qemu_ld_i128:
2231        return C_N2_I1(r, r, r);
2232
2233    case INDEX_op_qemu_st_i128:
2234        return C_O0_I3(r, r, r);
2235
2236    case INDEX_op_brcond_i32:
2237    case INDEX_op_brcond_i64:
2238        return C_O0_I2(rz, rz);
2239
2240    case INDEX_op_ext8s_i32:
2241    case INDEX_op_ext8s_i64:
2242    case INDEX_op_ext8u_i32:
2243    case INDEX_op_ext8u_i64:
2244    case INDEX_op_ext16s_i32:
2245    case INDEX_op_ext16s_i64:
2246    case INDEX_op_ext16u_i32:
2247    case INDEX_op_ext16u_i64:
2248    case INDEX_op_ext32s_i64:
2249    case INDEX_op_ext32u_i64:
2250    case INDEX_op_extu_i32_i64:
2251    case INDEX_op_extrl_i64_i32:
2252    case INDEX_op_extrh_i64_i32:
2253    case INDEX_op_ext_i32_i64:
2254    case INDEX_op_neg_i32:
2255    case INDEX_op_neg_i64:
2256    case INDEX_op_not_i32:
2257    case INDEX_op_not_i64:
2258    case INDEX_op_extract_i32:
2259    case INDEX_op_extract_i64:
2260    case INDEX_op_sextract_i32:
2261    case INDEX_op_sextract_i64:
2262    case INDEX_op_bswap16_i32:
2263    case INDEX_op_bswap16_i64:
2264    case INDEX_op_bswap32_i32:
2265    case INDEX_op_bswap32_i64:
2266    case INDEX_op_bswap64_i64:
2267    case INDEX_op_ld8s_i32:
2268    case INDEX_op_ld8s_i64:
2269    case INDEX_op_ld8u_i32:
2270    case INDEX_op_ld8u_i64:
2271    case INDEX_op_ld16s_i32:
2272    case INDEX_op_ld16s_i64:
2273    case INDEX_op_ld16u_i32:
2274    case INDEX_op_ld16u_i64:
2275    case INDEX_op_ld32s_i64:
2276    case INDEX_op_ld32u_i64:
2277    case INDEX_op_ld_i32:
2278    case INDEX_op_ld_i64:
2279    case INDEX_op_qemu_ld_i32:
2280    case INDEX_op_qemu_ld_i64:
2281        return C_O1_I1(r, r);
2282
2283    case INDEX_op_andc_i32:
2284    case INDEX_op_andc_i64:
2285    case INDEX_op_orc_i32:
2286    case INDEX_op_orc_i64:
2287        /*
2288         * LoongArch insns for these ops don't have reg-imm forms, but we
2289         * can express them using andi/ori if ~constant satisfies
2290         * TCG_CT_CONST_U12.
2291         */
2292        return C_O1_I2(r, r, rC);
2293
2294    case INDEX_op_shl_i32:
2295    case INDEX_op_shl_i64:
2296    case INDEX_op_shr_i32:
2297    case INDEX_op_shr_i64:
2298    case INDEX_op_sar_i32:
2299    case INDEX_op_sar_i64:
2300    case INDEX_op_rotl_i32:
2301    case INDEX_op_rotl_i64:
2302    case INDEX_op_rotr_i32:
2303    case INDEX_op_rotr_i64:
2304        return C_O1_I2(r, r, ri);
2305
2306    case INDEX_op_add_i32:
2307        return C_O1_I2(r, r, ri);
2308    case INDEX_op_add_i64:
2309        return C_O1_I2(r, r, rJ);
2310
2311    case INDEX_op_and_i32:
2312    case INDEX_op_and_i64:
2313    case INDEX_op_nor_i32:
2314    case INDEX_op_nor_i64:
2315    case INDEX_op_or_i32:
2316    case INDEX_op_or_i64:
2317    case INDEX_op_xor_i32:
2318    case INDEX_op_xor_i64:
2319        /* LoongArch reg-imm bitops have their imms ZERO-extended */
2320        return C_O1_I2(r, r, rU);
2321
2322    case INDEX_op_clz_i32:
2323    case INDEX_op_clz_i64:
2324    case INDEX_op_ctz_i32:
2325    case INDEX_op_ctz_i64:
2326        return C_O1_I2(r, r, rW);
2327
2328    case INDEX_op_deposit_i32:
2329    case INDEX_op_deposit_i64:
2330        /* Must deposit into the same register as input */
2331        return C_O1_I2(r, 0, rz);
2332
2333    case INDEX_op_sub_i32:
2334    case INDEX_op_setcond_i32:
2335        return C_O1_I2(r, rz, ri);
2336    case INDEX_op_sub_i64:
2337    case INDEX_op_setcond_i64:
2338        return C_O1_I2(r, rz, rJ);
2339
2340    case INDEX_op_mul_i32:
2341    case INDEX_op_mul_i64:
2342    case INDEX_op_mulsh_i32:
2343    case INDEX_op_mulsh_i64:
2344    case INDEX_op_muluh_i32:
2345    case INDEX_op_muluh_i64:
2346    case INDEX_op_div_i32:
2347    case INDEX_op_div_i64:
2348    case INDEX_op_divu_i32:
2349    case INDEX_op_divu_i64:
2350    case INDEX_op_rem_i32:
2351    case INDEX_op_rem_i64:
2352    case INDEX_op_remu_i32:
2353    case INDEX_op_remu_i64:
2354        return C_O1_I2(r, rz, rz);
2355
2356    case INDEX_op_movcond_i32:
2357    case INDEX_op_movcond_i64:
2358        return C_O1_I4(r, rz, rJ, rz, rz);
2359
2360    case INDEX_op_ld_vec:
2361    case INDEX_op_dupm_vec:
2362    case INDEX_op_dup_vec:
2363        return C_O1_I1(w, r);
2364
2365    case INDEX_op_st_vec:
2366        return C_O0_I2(w, r);
2367
2368    case INDEX_op_cmp_vec:
2369        return C_O1_I2(w, w, wM);
2370
2371    case INDEX_op_add_vec:
2372    case INDEX_op_sub_vec:
2373        return C_O1_I2(w, w, wA);
2374
2375    case INDEX_op_and_vec:
2376    case INDEX_op_andc_vec:
2377    case INDEX_op_or_vec:
2378    case INDEX_op_orc_vec:
2379    case INDEX_op_xor_vec:
2380    case INDEX_op_nor_vec:
2381    case INDEX_op_mul_vec:
2382    case INDEX_op_smin_vec:
2383    case INDEX_op_smax_vec:
2384    case INDEX_op_umin_vec:
2385    case INDEX_op_umax_vec:
2386    case INDEX_op_ssadd_vec:
2387    case INDEX_op_usadd_vec:
2388    case INDEX_op_sssub_vec:
2389    case INDEX_op_ussub_vec:
2390    case INDEX_op_shlv_vec:
2391    case INDEX_op_shrv_vec:
2392    case INDEX_op_sarv_vec:
2393    case INDEX_op_rotrv_vec:
2394    case INDEX_op_rotlv_vec:
2395        return C_O1_I2(w, w, w);
2396
2397    case INDEX_op_not_vec:
2398    case INDEX_op_neg_vec:
2399    case INDEX_op_shli_vec:
2400    case INDEX_op_shri_vec:
2401    case INDEX_op_sari_vec:
2402    case INDEX_op_rotli_vec:
2403        return C_O1_I1(w, w);
2404
2405    case INDEX_op_bitsel_vec:
2406        return C_O1_I3(w, w, w, w);
2407
2408    default:
2409        return C_NotImplemented;
2410    }
2411}
2412
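/* Registers saved by the prologue and restored by the epilogue below. */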
2413static const int tcg_target_callee_save_regs[] = {
2414    TCG_REG_S0,     /* used for the global env (TCG_AREG0) */
2415    TCG_REG_S1,
2416    TCG_REG_S2,
2417    TCG_REG_S3,
2418    TCG_REG_S4,
2419    TCG_REG_S5,
2420    TCG_REG_S6,
2421    TCG_REG_S7,
2422    TCG_REG_S8,
2423    TCG_REG_S9,
2424    TCG_REG_RA,     /* should be last for ABI compliance */
2425};
2426
2427/* Stack frame parameters.  */
2428#define REG_SIZE   (TCG_TARGET_REG_BITS / 8)
2429#define SAVE_SIZE  ((int)ARRAY_SIZE(tcg_target_callee_save_regs) * REG_SIZE)
2430#define TEMP_SIZE  (CPU_TEMP_BUF_NLONGS * (int)sizeof(long))
2431#define FRAME_SIZE ((TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE + SAVE_SIZE \
2432                     + TCG_TARGET_STACK_ALIGN - 1) \
2433                    & -TCG_TARGET_STACK_ALIGN)
2434#define SAVE_OFS   (TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE)
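/*
 * Resulting frame layout, per the definitions above (SP as adjusted by
 * the prologue):
 *
 *   SP + FRAME_SIZE                  incoming stack pointer
 *   SP + SAVE_OFS                    callee-saved registers (SAVE_SIZE)
 *   SP + TCG_STATIC_CALL_ARGS_SIZE   TCG temporary buffer (TEMP_SIZE)
 *   SP                               outgoing call argument area
 */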
2435
2436/* We're expecting to be able to use an immediate for frame allocation.  */
2437QEMU_BUILD_BUG_ON(FRAME_SIZE > 0x7ff);
2438
2439/* Generate global QEMU prologue and epilogue code */
2440static void tcg_target_qemu_prologue(TCGContext *s)
2441{
2442    int i;
2443
2444    tcg_set_frame(s, TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE, TEMP_SIZE);
2445
2446    /* TB prologue */
2447    tcg_out_opc_addi_d(s, TCG_REG_SP, TCG_REG_SP, -FRAME_SIZE);
2448    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
2449        tcg_out_st(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
2450                   TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
2451    }
2452
2453    if (!tcg_use_softmmu && guest_base) {
2454        tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base);
2455        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
2456    }
2457
2458    /* Call generated code */
2459    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
2460    tcg_out_opc_jirl(s, TCG_REG_ZERO, tcg_target_call_iarg_regs[1], 0);
2461
2462    /* Return path for goto_ptr. Set return value to 0 */
2463    tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
2464    tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_A0, TCG_REG_ZERO);
2465
2466    /* TB epilogue */
2467    tb_ret_addr = tcg_splitwx_to_rx(s->code_ptr);
2468    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
2469        tcg_out_ld(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
2470                   TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
2471    }
2472
2473    tcg_out_opc_addi_d(s, TCG_REG_SP, TCG_REG_SP, FRAME_SIZE);
2474    tcg_out_opc_jirl(s, TCG_REG_ZERO, TCG_REG_RA, 0);
2475}
2476
2477static void tcg_out_tb_start(TCGContext *s)
2478{
2479    /* nothing to do */
2480}
2481
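/* Fill a range of the code buffer with canonical nop instructions. */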
2482static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
2483{
2484    for (int i = 0; i < count; ++i) {
2485        /* Canonical nop is andi r0,r0,0 */
2486        p[i] = OPC_ANDI;
2487    }
2488}
2489
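/*
 * One-time backend initialization: require unaligned access support,
 * declare the register sets available to the allocator (vector types
 * only with LSX/LASX present), mark the call-clobbered registers, and
 * reserve the fixed-purpose ones.
 */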
2490static void tcg_target_init(TCGContext *s)
2491{
2492    unsigned long hwcap = qemu_getauxval(AT_HWCAP);
2493
2494    /* Server and desktop class cpus have UAL; embedded cpus do not. */
2495    if (!(hwcap & HWCAP_LOONGARCH_UAL)) {
2496        error_report("TCG: unaligned access support required; exiting");
2497        exit(EXIT_FAILURE);
2498    }
2499
2500    tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS;
2501    tcg_target_available_regs[TCG_TYPE_I64] = ALL_GENERAL_REGS;
2502
2503    tcg_target_call_clobber_regs = ALL_GENERAL_REGS | ALL_VECTOR_REGS;
2504    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S0);
2505    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S1);
2506    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S2);
2507    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S3);
2508    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S4);
2509    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S5);
2510    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S6);
2511    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S7);
2512    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S8);
2513    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S9);
2514
2515    if (cpuinfo & CPUINFO_LSX) {
2516        tcg_target_available_regs[TCG_TYPE_V64] = ALL_VECTOR_REGS;
2517        tcg_target_available_regs[TCG_TYPE_V128] = ALL_VECTOR_REGS;
2518        if (cpuinfo & CPUINFO_LASX) {
2519            tcg_target_available_regs[TCG_TYPE_V256] = ALL_VECTOR_REGS;
2520        }
2521        tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V24);
2522        tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V25);
2523        tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V26);
2524        tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V27);
2525        tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V28);
2526        tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V29);
2527        tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V30);
2528        tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V31);
2529    }
2530
2531    s->reserved_regs = 0;
2532    tcg_regset_set_reg(s->reserved_regs, TCG_REG_ZERO);
2533    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP0);
2534    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1);
2535    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP2);
2536    tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP);
2537    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TP);
2538    tcg_regset_set_reg(s->reserved_regs, TCG_REG_RESERVED);
2539    tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP0);
2540}
2541
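/*
 * Debug-frame (DWARF CFI) description of the prologue above, consisting
 * of one CIE and one FDE, handed to tcg_register_jit() so host
 * debuggers can unwind through generated code.
 */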
2542typedef struct {
2543    DebugFrameHeader h;
2544    uint8_t fde_def_cfa[4];
2545    uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2];
2546} DebugFrame;
2547
2548#define ELF_HOST_MACHINE EM_LOONGARCH
2549
2550static const DebugFrame debug_frame = {
2551    .h.cie.len = sizeof(DebugFrameCIE) - 4, /* length after .len member */
2552    .h.cie.id = -1,
2553    .h.cie.version = 1,
2554    .h.cie.code_align = 1,
2555    .h.cie.data_align = -(TCG_TARGET_REG_BITS / 8) & 0x7f, /* sleb128 */
2556    .h.cie.return_column = TCG_REG_RA,
2557
2558    /* Total FDE size does not include the "len" member.  */
2559    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),
2560
2561    .fde_def_cfa = {
2562        12, TCG_REG_SP,                 /* DW_CFA_def_cfa sp, ...  */
2563        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
2564        (FRAME_SIZE >> 7)
2565    },
2566    .fde_reg_ofs = {
2567        0x80 + 23, 11,                  /* DW_CFA_offset, s0, -88 */
2568        0x80 + 24, 10,                  /* DW_CFA_offset, s1, -80 */
2569        0x80 + 25, 9,                   /* DW_CFA_offset, s2, -72 */
2570        0x80 + 26, 8,                   /* DW_CFA_offset, s3, -64 */
2571        0x80 + 27, 7,                   /* DW_CFA_offset, s4, -56 */
2572        0x80 + 28, 6,                   /* DW_CFA_offset, s5, -48 */
2573        0x80 + 29, 5,                   /* DW_CFA_offset, s6, -40 */
2574        0x80 + 30, 4,                   /* DW_CFA_offset, s7, -32 */
2575        0x80 + 31, 3,                   /* DW_CFA_offset, s8, -24 */
2576        0x80 + 22, 2,                   /* DW_CFA_offset, s9, -16 */
2577        0x80 + 1 , 1,                   /* DW_CFA_offset, ra, -8 */
2578    }
2579};
2580
2581void tcg_register_jit(const void *buf, size_t buf_size)
2582{
2583    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
2584}
2585