/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* We only support generating code for 64-bit mode.  */
#ifndef __arch64__
#error "unsupported code generation mode"
#endif

/* Used for function call generation. */
#define TCG_REG_CALL_STACK              TCG_REG_O6
#define TCG_TARGET_STACK_BIAS           2047
#define TCG_TARGET_STACK_ALIGN          16
#define TCG_TARGET_CALL_STACK_OFFSET    (128 + 6 * 8 + TCG_TARGET_STACK_BIAS)
#define TCG_TARGET_CALL_ARG_I32         TCG_CALL_ARG_EXTEND
#define TCG_TARGET_CALL_ARG_I64         TCG_CALL_ARG_NORMAL
#define TCG_TARGET_CALL_ARG_I128        TCG_CALL_ARG_NORMAL
#define TCG_TARGET_CALL_RET_I128        TCG_CALL_RET_NORMAL
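
/*
 * The V9 ABI biases %sp by 2047 bytes; beneath the biased pointer live
 * the 16-word register window save area (128 bytes) and six outgoing
 * argument slots (48 bytes).  Hence the first free byte of the frame
 * is at 128 + 6 * 8 + 2047 = 2223 bytes above the raw stack pointer.
 */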

#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%g0",
    "%g1",
    "%g2",
    "%g3",
    "%g4",
    "%g5",
    "%g6",
    "%g7",
    "%o0",
    "%o1",
    "%o2",
    "%o3",
    "%o4",
    "%o5",
    "%o6",
    "%o7",
    "%l0",
    "%l1",
    "%l2",
    "%l3",
    "%l4",
    "%l5",
    "%l6",
    "%l7",
    "%i0",
    "%i1",
    "%i2",
    "%i3",
    "%i4",
    "%i5",
    "%i6",
    "%i7",
};
#endif

#define TCG_CT_CONST_S11  0x100
#define TCG_CT_CONST_S13  0x200

#define ALL_GENERAL_REGS  MAKE_64BIT_MASK(0, 32)

/* Define some temporary registers.  T3 is used for constant generation.  */
#define TCG_REG_T1  TCG_REG_G1
#define TCG_REG_T2  TCG_REG_G2
#define TCG_REG_T3  TCG_REG_O7

#ifndef CONFIG_SOFTMMU
# define TCG_GUEST_BASE_REG TCG_REG_I5
#endif

#define TCG_REG_TB  TCG_REG_I1

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_L0,
    TCG_REG_L1,
    TCG_REG_L2,
    TCG_REG_L3,
    TCG_REG_L4,
    TCG_REG_L5,
    TCG_REG_L6,
    TCG_REG_L7,

    TCG_REG_I0,
    TCG_REG_I1,
    TCG_REG_I2,
    TCG_REG_I3,
    TCG_REG_I4,
    TCG_REG_I5,

    TCG_REG_G3,
    TCG_REG_G4,
    TCG_REG_G5,

    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
    TCG_REG_O4,
    TCG_REG_O5,
};

static const int tcg_target_call_iarg_regs[6] = {
    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
    TCG_REG_O4,
    TCG_REG_O5,
};

static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
{
    tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
    tcg_debug_assert(slot >= 0 && slot <= 3);
    return TCG_REG_O0 + slot;
}

#define INSN_OP(x)  ((x) << 30)
#define INSN_OP2(x) ((x) << 22)
#define INSN_OP3(x) ((x) << 19)
#define INSN_OPF(x) ((x) << 5)
#define INSN_RD(x)  ((x) << 25)
#define INSN_RS1(x) ((x) << 14)
#define INSN_RS2(x) (x)
#define INSN_ASI(x) ((x) << 5)

#define INSN_IMM10(x) ((1 << 13) | ((x) & 0x3ff))
#define INSN_IMM11(x) ((1 << 13) | ((x) & 0x7ff))
#define INSN_IMM13(x) ((1 << 13) | ((x) & 0x1fff))
#define INSN_OFF16(x) ((((x) >> 2) & 0x3fff) | ((((x) >> 16) & 3) << 20))
#define INSN_OFF19(x) (((x) >> 2) & 0x07ffff)
#define INSN_COND(x) ((x) << 25)
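
/*
 * Example: "add %o0, %o1, %o2" is ARITH_ADD | INSN_RD(TCG_REG_O2)
 * | INSN_RS1(TCG_REG_O0) | INSN_RS2(TCG_REG_O1), i.e. the word
 * 0x94020009.
 */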

#define COND_N     0x0
#define COND_E     0x1
#define COND_LE    0x2
#define COND_L     0x3
#define COND_LEU   0x4
#define COND_CS    0x5
#define COND_NEG   0x6
#define COND_VS    0x7
#define COND_A     0x8
#define COND_NE    0x9
#define COND_G     0xa
#define COND_GE    0xb
#define COND_GU    0xc
#define COND_CC    0xd
#define COND_POS   0xe
#define COND_VC    0xf
#define BA         (INSN_OP(0) | INSN_COND(COND_A) | INSN_OP2(0x2))

#define RCOND_Z    1
#define RCOND_LEZ  2
#define RCOND_LZ   3
#define RCOND_NZ   5
#define RCOND_GZ   6
#define RCOND_GEZ  7

#define MOVCC_ICC  (1 << 18)
#define MOVCC_XCC  (1 << 18 | 1 << 12)

#define BPCC_ICC   0
#define BPCC_XCC   (2 << 20)
#define BPCC_PT    (1 << 19)
#define BPCC_PN    0
#define BPCC_A     (1 << 29)

#define BPR_PT     BPCC_PT

#define ARITH_ADD  (INSN_OP(2) | INSN_OP3(0x00))
#define ARITH_ADDCC (INSN_OP(2) | INSN_OP3(0x10))
#define ARITH_AND  (INSN_OP(2) | INSN_OP3(0x01))
#define ARITH_ANDCC (INSN_OP(2) | INSN_OP3(0x11))
#define ARITH_ANDN (INSN_OP(2) | INSN_OP3(0x05))
#define ARITH_OR   (INSN_OP(2) | INSN_OP3(0x02))
#define ARITH_ORCC (INSN_OP(2) | INSN_OP3(0x12))
#define ARITH_ORN  (INSN_OP(2) | INSN_OP3(0x06))
#define ARITH_XOR  (INSN_OP(2) | INSN_OP3(0x03))
#define ARITH_SUB  (INSN_OP(2) | INSN_OP3(0x04))
#define ARITH_SUBCC (INSN_OP(2) | INSN_OP3(0x14))
#define ARITH_ADDC (INSN_OP(2) | INSN_OP3(0x08))
#define ARITH_SUBC (INSN_OP(2) | INSN_OP3(0x0c))
#define ARITH_UMUL (INSN_OP(2) | INSN_OP3(0x0a))
#define ARITH_SMUL (INSN_OP(2) | INSN_OP3(0x0b))
#define ARITH_UDIV (INSN_OP(2) | INSN_OP3(0x0e))
#define ARITH_SDIV (INSN_OP(2) | INSN_OP3(0x0f))
#define ARITH_MULX (INSN_OP(2) | INSN_OP3(0x09))
#define ARITH_UDIVX (INSN_OP(2) | INSN_OP3(0x0d))
#define ARITH_SDIVX (INSN_OP(2) | INSN_OP3(0x2d))
#define ARITH_MOVCC (INSN_OP(2) | INSN_OP3(0x2c))
#define ARITH_MOVR (INSN_OP(2) | INSN_OP3(0x2f))

#define ARITH_ADDXC (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x11))
#define ARITH_UMULXHI (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x16))

#define SHIFT_SLL  (INSN_OP(2) | INSN_OP3(0x25))
#define SHIFT_SRL  (INSN_OP(2) | INSN_OP3(0x26))
#define SHIFT_SRA  (INSN_OP(2) | INSN_OP3(0x27))

#define SHIFT_SLLX (INSN_OP(2) | INSN_OP3(0x25) | (1 << 12))
#define SHIFT_SRLX (INSN_OP(2) | INSN_OP3(0x26) | (1 << 12))
#define SHIFT_SRAX (INSN_OP(2) | INSN_OP3(0x27) | (1 << 12))

#define RDY        (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(0))
#define WRY        (INSN_OP(2) | INSN_OP3(0x30) | INSN_RD(0))
#define JMPL       (INSN_OP(2) | INSN_OP3(0x38))
#define RETURN     (INSN_OP(2) | INSN_OP3(0x39))
#define SAVE       (INSN_OP(2) | INSN_OP3(0x3c))
#define RESTORE    (INSN_OP(2) | INSN_OP3(0x3d))
#define SETHI      (INSN_OP(0) | INSN_OP2(0x4))
#define CALL       INSN_OP(1)
#define LDUB       (INSN_OP(3) | INSN_OP3(0x01))
#define LDSB       (INSN_OP(3) | INSN_OP3(0x09))
#define LDUH       (INSN_OP(3) | INSN_OP3(0x02))
#define LDSH       (INSN_OP(3) | INSN_OP3(0x0a))
#define LDUW       (INSN_OP(3) | INSN_OP3(0x00))
#define LDSW       (INSN_OP(3) | INSN_OP3(0x08))
#define LDX        (INSN_OP(3) | INSN_OP3(0x0b))
#define STB        (INSN_OP(3) | INSN_OP3(0x05))
#define STH        (INSN_OP(3) | INSN_OP3(0x06))
#define STW        (INSN_OP(3) | INSN_OP3(0x04))
#define STX        (INSN_OP(3) | INSN_OP3(0x0e))
#define LDUBA      (INSN_OP(3) | INSN_OP3(0x11))
#define LDSBA      (INSN_OP(3) | INSN_OP3(0x19))
#define LDUHA      (INSN_OP(3) | INSN_OP3(0x12))
#define LDSHA      (INSN_OP(3) | INSN_OP3(0x1a))
#define LDUWA      (INSN_OP(3) | INSN_OP3(0x10))
#define LDSWA      (INSN_OP(3) | INSN_OP3(0x18))
#define LDXA       (INSN_OP(3) | INSN_OP3(0x1b))
#define STBA       (INSN_OP(3) | INSN_OP3(0x15))
#define STHA       (INSN_OP(3) | INSN_OP3(0x16))
#define STWA       (INSN_OP(3) | INSN_OP3(0x14))
#define STXA       (INSN_OP(3) | INSN_OP3(0x1e))

#define MEMBAR     (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(15) | (1 << 13))

#define NOP        (SETHI | INSN_RD(TCG_REG_G0) | 0)
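/* NOP is the canonical V9 nop, "sethi 0, %g0", i.e. the word 0x01000000.  */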

#ifndef ASI_PRIMARY_LITTLE
#define ASI_PRIMARY_LITTLE 0x88
#endif

#define LDUH_LE    (LDUHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSH_LE    (LDSHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDUW_LE    (LDUWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSW_LE    (LDSWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDX_LE     (LDXA  | INSN_ASI(ASI_PRIMARY_LITTLE))

#define STH_LE     (STHA  | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STW_LE     (STWA  | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STX_LE     (STXA  | INSN_ASI(ASI_PRIMARY_LITTLE))

#ifndef use_vis3_instructions
bool use_vis3_instructions;
#endif

static bool check_fit_i64(int64_t val, unsigned int bits)
{
    return val == sextract64(val, 0, bits);
}

static bool check_fit_i32(int32_t val, unsigned int bits)
{
    return val == sextract32(val, 0, bits);
}

#define check_fit_tl    check_fit_i64
#define check_fit_ptr   check_fit_i64
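
/*
 * A value fits an n-bit signed field iff sign-extending its low n bits
 * reproduces it; e.g. check_fit_i64(val, 13) accepts the simm13 range
 * -4096 <= val <= 4095.
 */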

static bool patch_reloc(tcg_insn_unit *src_rw, int type,
                        intptr_t value, intptr_t addend)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    uint32_t insn = *src_rw;
    intptr_t pcrel;

    value += addend;
    pcrel = tcg_ptr_byte_diff((tcg_insn_unit *)value, src_rx);

    switch (type) {
    case R_SPARC_WDISP16:
        if (!check_fit_ptr(pcrel >> 2, 16)) {
            return false;
        }
        insn &= ~INSN_OFF16(-1);
        insn |= INSN_OFF16(pcrel);
        break;
    case R_SPARC_WDISP19:
        if (!check_fit_ptr(pcrel >> 2, 19)) {
            return false;
        }
        insn &= ~INSN_OFF19(-1);
        insn |= INSN_OFF19(pcrel);
        break;
    case R_SPARC_13:
        if (!check_fit_ptr(value, 13)) {
            return false;
        }
        insn &= ~INSN_IMM13(-1);
        insn |= INSN_IMM13(value);
        break;
    default:
        g_assert_not_reached();
    }

    *src_rw = insn;
    return true;
}

/* test if a constant matches the constraint */
static bool tcg_target_const_match(int64_t val, int ct,
                                   TCGType type, TCGCond cond, int vece)
{
    if (ct & TCG_CT_CONST) {
        return 1;
    }

    if (type == TCG_TYPE_I32) {
        val = (int32_t)val;
    }

    if ((ct & TCG_CT_CONST_S11) && check_fit_tl(val, 11)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_S13) && check_fit_tl(val, 13)) {
        return 1;
    } else {
        return 0;
    }
}

static void tcg_out_nop(TCGContext *s)
{
    tcg_out32(s, NOP);
}

static void tcg_out_arith(TCGContext *s, TCGReg rd, TCGReg rs1,
                          TCGReg rs2, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_RS2(rs2));
}

static void tcg_out_arithi(TCGContext *s, TCGReg rd, TCGReg rs1,
                           int32_t offset, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_IMM13(offset));
}

static void tcg_out_arithc(TCGContext *s, TCGReg rd, TCGReg rs1,
                           int32_t val2, int val2const, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1)
              | (val2const ? INSN_IMM13(val2) : INSN_RS2(val2)));
}

static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    if (ret != arg) {
        tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR);
    }
    return true;
}

static void tcg_out_mov_delay(TCGContext *s, TCGReg ret, TCGReg arg)
{
    if (ret != arg) {
        tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR);
    } else {
        tcg_out_nop(s);
    }
}

static void tcg_out_sethi(TCGContext *s, TCGReg ret, uint32_t arg)
{
    tcg_out32(s, SETHI | INSN_RD(ret) | ((arg & 0xfffffc00) >> 10));
}

/* A 13-bit constant sign-extended to 64 bits.  */
static void tcg_out_movi_s13(TCGContext *s, TCGReg ret, int32_t arg)
{
    tcg_out_arithi(s, ret, TCG_REG_G0, arg, ARITH_OR);
}

/* A 32-bit constant sign-extended to 64 bits.  */
static void tcg_out_movi_s32(TCGContext *s, TCGReg ret, int32_t arg)
{
    tcg_out_sethi(s, ret, ~arg);
    tcg_out_arithi(s, ret, ret, (arg & 0x3ff) | -0x400, ARITH_XOR);
}
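
/*
 * Worked example for the two-insn sequence above: arg = 0xffff8000.
 * sethi(~arg) leaves ret = 0x00007c00 with the high 32 bits clear;
 * xor with the sign-extended immediate 0xfffffffffffffc00 | (arg & 0x3ff)
 * restores bits 10..31, copies bits 0..9, and sets bits 32..63, giving
 * 0xffffffffffff8000.  Bits 32..63 always come out set, so this sequence
 * is only used for arg with bit 31 set.
 */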

/* A 32-bit constant zero-extended to 64 bits.  */
static void tcg_out_movi_u32(TCGContext *s, TCGReg ret, uint32_t arg)
{
    tcg_out_sethi(s, ret, arg);
    if (arg & 0x3ff) {
        tcg_out_arithi(s, ret, ret, arg & 0x3ff, ARITH_OR);
    }
}

static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
                             tcg_target_long arg, bool in_prologue,
                             TCGReg scratch)
{
    tcg_target_long hi, lo = (int32_t)arg;
    tcg_target_long test, lsb;

    /* A 13-bit constant sign-extended to 64-bits.  */
    if (check_fit_tl(arg, 13)) {
        tcg_out_movi_s13(s, ret, arg);
        return;
    }

    /* A 32-bit constant, or 32-bit zero-extended to 64-bits.  */
    if (type == TCG_TYPE_I32 || arg == (uint32_t)arg) {
        tcg_out_movi_u32(s, ret, arg);
        return;
    }

    /* A 13-bit constant relative to the TB.  */
    if (!in_prologue) {
        test = tcg_tbrel_diff(s, (void *)arg);
        if (check_fit_ptr(test, 13)) {
            tcg_out_arithi(s, ret, TCG_REG_TB, test, ARITH_ADD);
            return;
        }
    }

    /* A 32-bit constant sign-extended to 64-bits.  */
    if (arg == lo) {
        tcg_out_movi_s32(s, ret, arg);
        return;
    }

    /* A 32-bit constant, shifted.  */
    lsb = ctz64(arg);
    test = (tcg_target_long)arg >> lsb;
    if (lsb > 10 && test == extract64(test, 0, 21)) {
        tcg_out_sethi(s, ret, test << 10);
        tcg_out_arithi(s, ret, ret, lsb - 10, SHIFT_SLLX);
        return;
    } else if (test == (uint32_t)test || test == (int32_t)test) {
        tcg_out_movi_int(s, TCG_TYPE_I64, ret, test, in_prologue, scratch);
        tcg_out_arithi(s, ret, ret, lsb, SHIFT_SLLX);
        return;
    }

    /* Use the constant pool, if possible. */
    if (!in_prologue) {
        new_pool_label(s, arg, R_SPARC_13, s->code_ptr,
                       tcg_tbrel_diff(s, NULL));
        tcg_out32(s, LDX | INSN_RD(ret) | INSN_RS1(TCG_REG_TB));
        return;
    }

    /* A 64-bit constant decomposed into 2 32-bit pieces.  */
    if (check_fit_i32(lo, 13)) {
        hi = (arg - lo) >> 32;
        tcg_out_movi_u32(s, ret, hi);
        tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
        tcg_out_arithi(s, ret, ret, lo, ARITH_ADD);
    } else {
        hi = arg >> 32;
        tcg_out_movi_u32(s, ret, hi);
        tcg_out_movi_u32(s, scratch, lo);
        tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
        tcg_out_arith(s, ret, ret, scratch, ARITH_OR);
    }
}

static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg)
{
    tcg_debug_assert(ret != TCG_REG_T3);
    tcg_out_movi_int(s, type, ret, arg, false, TCG_REG_T3);
}

static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
{
    g_assert_not_reached();
}

static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
{
    g_assert_not_reached();
}

static void tcg_out_ext8u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_arithi(s, rd, rs, 0xff, ARITH_AND);
}

static void tcg_out_ext16u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_arithi(s, rd, rs, 16, SHIFT_SLL);
    tcg_out_arithi(s, rd, rd, 16, SHIFT_SRL);
}

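/*
 * The V9 32-bit SRA/SRL forms operate on the low 32 bits of the source
 * and sign-/zero-extend the 32-bit result to 64 bits, so a shift by
 * zero is a free extension.
 */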
static void tcg_out_ext32s(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_arithi(s, rd, rs, 0, SHIFT_SRA);
}

static void tcg_out_ext32u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_arithi(s, rd, rs, 0, SHIFT_SRL);
}

static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_ext32s(s, rd, rs);
}

static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_ext32u(s, rd, rs);
}

static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_ext32u(s, rd, rs);
}

static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
{
    return false;
}

static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                             tcg_target_long imm)
{
    /* This function is only used for passing structs by reference. */
    g_assert_not_reached();
}

static void tcg_out_ldst_rr(TCGContext *s, TCGReg data, TCGReg a1,
                            TCGReg a2, int op)
{
    tcg_out32(s, op | INSN_RD(data) | INSN_RS1(a1) | INSN_RS2(a2));
}

static void tcg_out_ldst(TCGContext *s, TCGReg ret, TCGReg addr,
                         intptr_t offset, int op)
{
    if (check_fit_ptr(offset, 13)) {
        tcg_out32(s, op | INSN_RD(ret) | INSN_RS1(addr) |
                  INSN_IMM13(offset));
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, offset);
        tcg_out_ldst_rr(s, ret, addr, TCG_REG_T1, op);
    }
}

static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
                       TCGReg arg1, intptr_t arg2)
{
    tcg_out_ldst(s, ret, arg1, arg2, (type == TCG_TYPE_I32 ? LDUW : LDX));
}

static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg arg1, intptr_t arg2)
{
    tcg_out_ldst(s, arg, arg1, arg2, (type == TCG_TYPE_I32 ? STW : STX));
}

static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                        TCGReg base, intptr_t ofs)
{
    if (val == 0) {
        tcg_out_st(s, type, TCG_REG_G0, base, ofs);
        return true;
    }
    return false;
}

static void tcg_out_sety(TCGContext *s, TCGReg rs)
{
    tcg_out32(s, WRY | INSN_RS1(TCG_REG_G0) | INSN_RS2(rs));
}

static const uint8_t tcg_cond_to_bcond[16] = {
    [TCG_COND_EQ] = COND_E,
    [TCG_COND_NE] = COND_NE,
    [TCG_COND_TSTEQ] = COND_E,
    [TCG_COND_TSTNE] = COND_NE,
    [TCG_COND_LT] = COND_L,
    [TCG_COND_GE] = COND_GE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_G,
    [TCG_COND_LTU] = COND_CS,
    [TCG_COND_GEU] = COND_CC,
    [TCG_COND_LEU] = COND_LEU,
    [TCG_COND_GTU] = COND_GU,
};

static const uint8_t tcg_cond_to_rcond[16] = {
    [TCG_COND_EQ] = RCOND_Z,
    [TCG_COND_NE] = RCOND_NZ,
    [TCG_COND_LT] = RCOND_LZ,
    [TCG_COND_GT] = RCOND_GZ,
    [TCG_COND_LE] = RCOND_LEZ,
    [TCG_COND_GE] = RCOND_GEZ
};

static void tcg_out_bpcc0(TCGContext *s, int scond, int flags, int off19)
{
    tcg_out32(s, INSN_OP(0) | INSN_OP2(1) | INSN_COND(scond) | flags | off19);
}

static void tcg_out_bpcc(TCGContext *s, int scond, int flags, TCGLabel *l)
{
    int off19 = 0;

    if (l->has_value) {
        off19 = INSN_OFF19(tcg_pcrel_diff(s, l->u.value_ptr));
    } else {
        tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP19, l, 0);
    }
    tcg_out_bpcc0(s, scond, flags, off19);
}

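/*
 * TSTEQ/TSTNE test (c1 & c2) against zero, so they compare with ANDcc
 * instead of SUBcc and then reuse the ordinary E/NE condition codes.
 */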
static void tcg_out_cmp(TCGContext *s, TCGCond cond,
                        TCGReg c1, int32_t c2, int c2const)
{
    tcg_out_arithc(s, TCG_REG_G0, c1, c2, c2const,
                   is_tst_cond(cond) ? ARITH_ANDCC : ARITH_SUBCC);
}

static void tcg_out_brcond_i32(TCGContext *s, TCGCond cond, TCGReg arg1,
                               int32_t arg2, int const_arg2, TCGLabel *l)
{
    tcg_out_cmp(s, cond, arg1, arg2, const_arg2);
    tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_ICC | BPCC_PT, l);
    tcg_out_nop(s);
}

static void tcg_out_movcc(TCGContext *s, TCGCond cond, int cc, TCGReg ret,
                          int32_t v1, int v1const)
{
    tcg_out32(s, ARITH_MOVCC | cc | INSN_RD(ret)
              | INSN_RS1(tcg_cond_to_bcond[cond])
              | (v1const ? INSN_IMM11(v1) : INSN_RS2(v1)));
}

static void tcg_out_movcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const,
                                int32_t v1, int v1const)
{
    tcg_out_cmp(s, cond, c1, c2, c2const);
    tcg_out_movcc(s, cond, MOVCC_ICC, ret, v1, v1const);
}

static void tcg_out_brcond_i64(TCGContext *s, TCGCond cond, TCGReg arg1,
                               int32_t arg2, int const_arg2, TCGLabel *l)
{
    /* For 64-bit signed comparisons vs zero, we can avoid the compare.  */
    int rcond = tcg_cond_to_rcond[cond];
    if (arg2 == 0 && rcond) {
        int off16 = 0;

        if (l->has_value) {
            off16 = INSN_OFF16(tcg_pcrel_diff(s, l->u.value_ptr));
        } else {
            tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP16, l, 0);
        }
        tcg_out32(s, INSN_OP(0) | INSN_OP2(3) | BPR_PT | INSN_RS1(arg1)
                  | INSN_COND(rcond) | off16);
    } else {
        tcg_out_cmp(s, cond, arg1, arg2, const_arg2);
        tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_XCC | BPCC_PT, l);
    }
    tcg_out_nop(s);
}

static void tcg_out_movr(TCGContext *s, int rcond, TCGReg ret, TCGReg c1,
                         int32_t v1, int v1const)
{
    tcg_out32(s, ARITH_MOVR | INSN_RD(ret) | INSN_RS1(c1) | (rcond << 10)
              | (v1const ? INSN_IMM10(v1) : INSN_RS2(v1)));
}

static void tcg_out_movcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const,
                                int32_t v1, int v1const)
{
    /* For 64-bit signed comparisons vs zero, we can avoid the compare.
       Note that the immediate range is one bit smaller, so we must check
       for that as well.  */
    int rcond = tcg_cond_to_rcond[cond];
    if (c2 == 0 && rcond && (!v1const || check_fit_i32(v1, 10))) {
        tcg_out_movr(s, rcond, ret, c1, v1, v1const);
    } else {
        tcg_out_cmp(s, cond, c1, c2, c2const);
        tcg_out_movcc(s, cond, MOVCC_XCC, ret, v1, v1const);
    }
}

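/*
 * A sketch of the carry games played below: after SUBcc, the icc carry
 * flag C is set exactly when c1 <u c2.  Then "addc %g0, 0, ret" computes
 * 0 + 0 + C, the LTU result, and "subc %g0, 0, ret" computes 0 - 0 - C,
 * its negation; the -1 variants yield C - 1 and 1 - C for GEU.
 */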
static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, bool c2const, bool neg)
{
    /* For 32-bit comparisons, we can play games with ADDC/SUBC.  */
    switch (cond) {
    case TCG_COND_LTU:
    case TCG_COND_GEU:
        /* The result of the comparison is in the carry bit.  */
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For equality, we can transform to inequality vs zero.  */
        if (c2 != 0) {
            tcg_out_arithc(s, TCG_REG_T1, c1, c2, c2const, ARITH_XOR);
            c2 = TCG_REG_T1;
        } else {
            c2 = c1;
        }
        c1 = TCG_REG_G0, c2const = 0;
        cond = (cond == TCG_COND_EQ ? TCG_COND_GEU : TCG_COND_LTU);
        break;

    case TCG_COND_TSTEQ:
    case TCG_COND_TSTNE:
        /* Transform to inequality vs zero.  */
        tcg_out_arithc(s, TCG_REG_T1, c1, c2, c2const, ARITH_AND);
        c1 = TCG_REG_G0;
        c2 = TCG_REG_T1, c2const = 0;
        cond = (cond == TCG_COND_TSTEQ ? TCG_COND_GEU : TCG_COND_LTU);
        break;

    case TCG_COND_GTU:
    case TCG_COND_LEU:
        /* If we don't need to load a constant into a register, we can
           swap the operands on GTU/LEU.  There's no benefit to loading
           the constant into a temporary register.  */
        if (!c2const || c2 == 0) {
            TCGReg t = c1;
            c1 = c2;
            c2 = t;
            c2const = 0;
            cond = tcg_swap_cond(cond);
            break;
        }
        /* FALLTHRU */

    default:
        tcg_out_cmp(s, cond, c1, c2, c2const);
        tcg_out_movi_s13(s, ret, 0);
        tcg_out_movcc(s, cond, MOVCC_ICC, ret, neg ? -1 : 1, 1);
        return;
    }

    tcg_out_cmp(s, cond, c1, c2, c2const);
    if (cond == TCG_COND_LTU) {
        if (neg) {
            /* 0 - 0 - C = -C = (C ? -1 : 0) */
            tcg_out_arithi(s, ret, TCG_REG_G0, 0, ARITH_SUBC);
        } else {
            /* 0 + 0 + C =  C = (C ? 1 : 0) */
            tcg_out_arithi(s, ret, TCG_REG_G0, 0, ARITH_ADDC);
        }
    } else {
        if (neg) {
            /* 0 + -1 + C = C - 1 = (C ? 0 : -1) */
            tcg_out_arithi(s, ret, TCG_REG_G0, -1, ARITH_ADDC);
        } else {
            /* 0 - -1 - C = 1 - C = (C ? 0 : 1) */
            tcg_out_arithi(s, ret, TCG_REG_G0, -1, ARITH_SUBC);
        }
    }
}

static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, bool c2const, bool neg)
{
    int rcond;

    if (use_vis3_instructions && !neg) {
        switch (cond) {
        case TCG_COND_NE:
            if (c2 != 0) {
                break;
            }
            c2 = c1, c2const = 0, c1 = TCG_REG_G0;
            /* FALLTHRU */
        case TCG_COND_LTU:
            tcg_out_cmp(s, cond, c1, c2, c2const);
            tcg_out_arith(s, ret, TCG_REG_G0, TCG_REG_G0, ARITH_ADDXC);
            return;
        default:
            break;
        }
    }

    /* For 64-bit signed comparisons vs zero, we can avoid the compare
       if the input does not overlap the output.  */
    rcond = tcg_cond_to_rcond[cond];
    if (c2 == 0 && rcond && c1 != ret) {
        tcg_out_movi_s13(s, ret, 0);
        tcg_out_movr(s, rcond, ret, c1, neg ? -1 : 1, 1);
    } else {
        tcg_out_cmp(s, cond, c1, c2, c2const);
        tcg_out_movi_s13(s, ret, 0);
        tcg_out_movcc(s, cond, MOVCC_XCC, ret, neg ? -1 : 1, 1);
    }
}

static void tcg_out_setcond(TCGContext *s, TCGType type, TCGCond cond,
                            TCGReg ret, TCGReg c1,
                            TCGArg c2, bool c2const, bool neg)
{
    if (type == TCG_TYPE_I32) {
        tcg_out_setcond_i32(s, cond, ret, c1, c2, c2const, neg);
    } else {
        tcg_out_setcond_i64(s, cond, ret, c1, c2, c2const, neg);
    }
}

static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond,
                         TCGReg dest, TCGReg arg1, TCGReg arg2)
{
    tcg_out_setcond(s, type, cond, dest, arg1, arg2, false, false);
}

static void tgen_setcondi(TCGContext *s, TCGType type, TCGCond cond,
                          TCGReg dest, TCGReg arg1, tcg_target_long arg2)
{
    tcg_out_setcond(s, type, cond, dest, arg1, arg2, true, false);
}

static const TCGOutOpSetcond outop_setcond = {
    .base.static_constraint = C_O1_I2(r, r, rJ),
    .out_rrr = tgen_setcond,
    .out_rri = tgen_setcondi,
};

static void tgen_negsetcond(TCGContext *s, TCGType type, TCGCond cond,
                            TCGReg dest, TCGReg arg1, TCGReg arg2)
{
    tcg_out_setcond(s, type, cond, dest, arg1, arg2, false, true);
}

static void tgen_negsetcondi(TCGContext *s, TCGType type, TCGCond cond,
                             TCGReg dest, TCGReg arg1, tcg_target_long arg2)
{
    tcg_out_setcond(s, type, cond, dest, arg1, arg2, true, true);
}

static const TCGOutOpSetcond outop_negsetcond = {
    .base.static_constraint = C_O1_I2(r, r, rJ),
    .out_rrr = tgen_negsetcond,
    .out_rri = tgen_negsetcondi,
};

static void tcg_out_addsub2_i32(TCGContext *s, TCGReg rl, TCGReg rh,
                                TCGReg al, TCGReg ah, int32_t bl, int blconst,
                                int32_t bh, int bhconst, int opl, int oph)
{
    TCGReg tmp = TCG_REG_T1;

    /* Note that the low parts are fully consumed before tmp is set.  */
    if (rl != ah && (bhconst || rl != bh)) {
        tmp = rl;
    }

    tcg_out_arithc(s, tmp, al, bl, blconst, opl);
    tcg_out_arithc(s, rh, ah, bh, bhconst, oph);
    tcg_out_mov(s, TCG_TYPE_I32, rl, tmp);
}

static void tcg_out_addsub2_i64(TCGContext *s, TCGReg rl, TCGReg rh,
                                TCGReg al, TCGReg ah, int32_t bl, int blconst,
                                int32_t bh, int bhconst, bool is_sub)
{
    TCGReg tmp = TCG_REG_T1;

    /* Note that the low parts are fully consumed before tmp is set.  */
    if (rl != ah && (bhconst || rl != bh)) {
        tmp = rl;
    }

    tcg_out_arithc(s, tmp, al, bl, blconst, is_sub ? ARITH_SUBCC : ARITH_ADDCC);

    if (use_vis3_instructions && !is_sub) {
        /* Note that ADDXC doesn't accept immediates.  */
        if (bhconst && bh != 0) {
            tcg_out_movi_s13(s, TCG_REG_T2, bh);
            bh = TCG_REG_T2;
        }
        tcg_out_arith(s, rh, ah, bh, ARITH_ADDXC);
    } else if (bh == TCG_REG_G0) {
        /* If we have a zero, we can perform the operation in two insns,
           with the arithmetic first, and a conditional move into place.  */
        if (rh == ah) {
            tcg_out_arithi(s, TCG_REG_T2, ah, 1,
                           is_sub ? ARITH_SUB : ARITH_ADD);
            tcg_out_movcc(s, TCG_COND_LTU, MOVCC_XCC, rh, TCG_REG_T2, 0);
        } else {
            tcg_out_arithi(s, rh, ah, 1, is_sub ? ARITH_SUB : ARITH_ADD);
            tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, rh, ah, 0);
        }
    } else {
        /*
         * Otherwise adjust BH as if there is carry into T2.
         * Note that constant BH is constrained to 11 bits for the MOVCC,
         * so the adjustment fits 12 bits.
         */
        if (bhconst) {
            tcg_out_movi_s13(s, TCG_REG_T2, bh + (is_sub ? -1 : 1));
        } else {
            tcg_out_arithi(s, TCG_REG_T2, bh, 1,
                           is_sub ? ARITH_SUB : ARITH_ADD);
        }
        /* ... smoosh T2 back to original BH if carry is clear ... */
        tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, TCG_REG_T2, bh, bhconst);
        /* ... and finally perform the arithmetic with the new operand.  */
        tcg_out_arith(s, rh, ah, TCG_REG_T2, is_sub ? ARITH_SUB : ARITH_ADD);
    }

    tcg_out_mov(s, TCG_TYPE_I64, rl, tmp);
}

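/*
 * Build dest & ~0xfff in T1 and supply the low 12 bits as the signed
 * 13-bit immediate of JMPL; linking through %o7 makes this a call,
 * through %g0 a tail call.
 */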
static void tcg_out_jmpl_const(TCGContext *s, const tcg_insn_unit *dest,
                               bool in_prologue, bool tail_call)
{
    uintptr_t desti = (uintptr_t)dest;

    tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_REG_T1,
                     desti & ~0xfff, in_prologue, TCG_REG_T2);
    tcg_out_arithi(s, tail_call ? TCG_REG_G0 : TCG_REG_O7,
                   TCG_REG_T1, desti & 0xfff, JMPL);
}

static void tcg_out_call_nodelay(TCGContext *s, const tcg_insn_unit *dest,
                                 bool in_prologue)
{
    ptrdiff_t disp = tcg_pcrel_diff(s, dest);

    if (disp == (int32_t)disp) {
        tcg_out32(s, CALL | (uint32_t)disp >> 2);
    } else {
        tcg_out_jmpl_const(s, dest, in_prologue, false);
    }
}

static void tcg_out_call(TCGContext *s, const tcg_insn_unit *dest,
                         const TCGHelperInfo *info)
{
    tcg_out_call_nodelay(s, dest, false);
    tcg_out_nop(s);
}

static void tcg_out_mb(TCGContext *s, TCGArg a0)
{
    /* Note that the TCG memory order constants mirror the Sparc MEMBAR.  */
    tcg_out32(s, MEMBAR | (a0 & TCG_MO_ALL));
}

/* Generate global QEMU prologue and epilogue code */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int tmp_buf_size, frame_size;

    /*
     * The TCG temp buffer is at the top of the frame, immediately
     * below the frame pointer.  Use the logical (aligned) offset here;
     * the stack bias is applied in temp_allocate_frame().
     */
    tmp_buf_size = CPU_TEMP_BUF_NLONGS * (int)sizeof(long);
    tcg_set_frame(s, TCG_REG_I6, -tmp_buf_size, tmp_buf_size);

    /*
     * TCG_TARGET_CALL_STACK_OFFSET includes the stack bias, but is
     * otherwise the minimal frame usable by callees.
     */
    frame_size = TCG_TARGET_CALL_STACK_OFFSET - TCG_TARGET_STACK_BIAS;
    frame_size += TCG_STATIC_CALL_ARGS_SIZE + tmp_buf_size;
    frame_size += TCG_TARGET_STACK_ALIGN - 1;
    frame_size &= -TCG_TARGET_STACK_ALIGN;
    tcg_out32(s, SAVE | INSN_RD(TCG_REG_O6) | INSN_RS1(TCG_REG_O6) |
              INSN_IMM13(-frame_size));

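    /*
     * For example, assuming CPU_TEMP_BUF_NLONGS = 128 and
     * TCG_STATIC_CALL_ARGS_SIZE = 128: frame_size =
     * (2223 - 2047) + 128 + 1024 = 1328, already 16-byte aligned.
     */
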
#ifndef CONFIG_SOFTMMU
    if (guest_base != 0) {
        tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG,
                         guest_base, true, TCG_REG_T1);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }
#endif

    /* We choose TCG_REG_TB such that no move is required.  */
    QEMU_BUILD_BUG_ON(TCG_REG_TB != TCG_REG_I1);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TB);

    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I1, 0, JMPL);
    /* delay slot */
    tcg_out_nop(s);

    /* Epilogue for goto_ptr.  */
    tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
    /* delay slot */
    tcg_out_movi_s13(s, TCG_REG_O0, 0);
}

static void tcg_out_tb_start(TCGContext *s)
{
    /* nothing to do */
}

static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
    int i;
    for (i = 0; i < count; ++i) {
        p[i] = NOP;
    }
}

static const TCGLdstHelperParam ldst_helper_param = {
    .ntmp = 1, .tmp = { TCG_REG_T1 }
};

static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    MemOp opc = get_memop(lb->oi);
    MemOp sgn;

    if (!patch_reloc(lb->label_ptr[0], R_SPARC_WDISP19,
                     (intptr_t)tcg_splitwx_to_rx(s->code_ptr), 0)) {
        return false;
    }

    /* Use inline tcg_out_ext32s; otherwise let the helper sign-extend. */
    sgn = (opc & MO_SIZE) < MO_32 ? MO_SIGN : 0;

    tcg_out_ld_helper_args(s, lb, &ldst_helper_param);
    tcg_out_call(s, qemu_ld_helpers[opc & (MO_SIZE | sgn)], NULL);
    tcg_out_ld_helper_ret(s, lb, sgn, &ldst_helper_param);

    tcg_out_bpcc0(s, COND_A, BPCC_A | BPCC_PT, 0);
    return patch_reloc(s->code_ptr - 1, R_SPARC_WDISP19,
                       (intptr_t)lb->raddr, 0);
}

static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    MemOp opc = get_memop(lb->oi);

    if (!patch_reloc(lb->label_ptr[0], R_SPARC_WDISP19,
                     (intptr_t)tcg_splitwx_to_rx(s->code_ptr), 0)) {
        return false;
    }

    tcg_out_st_helper_args(s, lb, &ldst_helper_param);
    tcg_out_call(s, qemu_st_helpers[opc & MO_SIZE], NULL);

    tcg_out_bpcc0(s, COND_A, BPCC_A | BPCC_PT, 0);
    return patch_reloc(s->code_ptr - 1, R_SPARC_WDISP19,
                       (intptr_t)lb->raddr, 0);
}

typedef struct {
    TCGReg base;
    TCGReg index;
    TCGAtomAlign aa;
} HostAddress;

bool tcg_target_has_memory_bswap(MemOp memop)
{
    return true;
}

/* We expect to use a 13-bit negative offset from ENV.  */
#define MIN_TLB_MASK_TABLE_OFS  -(1 << 12)
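/* (A signed 13-bit displacement covers [-0x1000, 0xfff].)  */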

/*
 * For system-mode, perform the TLB load and compare.
 * For user-mode, perform any required alignment tests.
 * In both cases, return a TCGLabelQemuLdst structure if the slow path
 * is required and fill in @h with the host address for the fast path.
 */
static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
                                           TCGReg addr_reg, MemOpIdx oi,
                                           bool is_ld)
{
    TCGType addr_type = s->addr_type;
    TCGLabelQemuLdst *ldst = NULL;
    MemOp opc = get_memop(oi);
    MemOp s_bits = opc & MO_SIZE;
    unsigned a_mask;

    /* We don't support unaligned accesses. */
    h->aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, false);
    h->aa.align = MAX(h->aa.align, s_bits);
    a_mask = (1u << h->aa.align) - 1;

#ifdef CONFIG_SOFTMMU
    int mem_index = get_mmuidx(oi);
    int fast_off = tlb_mask_table_ofs(s, mem_index);
    int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
    int table_off = fast_off + offsetof(CPUTLBDescFast, table);
    int cmp_off = is_ld ? offsetof(CPUTLBEntry, addr_read)
                        : offsetof(CPUTLBEntry, addr_write);
    int add_off = offsetof(CPUTLBEntry, addend);
    int compare_mask;
    int cc;

    /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx].  */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_T2, TCG_AREG0, mask_off);
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_T3, TCG_AREG0, table_off);

    /* Extract the page index, shifted into place for tlb index.  */
    tcg_out_arithi(s, TCG_REG_T1, addr_reg,
                   s->page_bits - CPU_TLB_ENTRY_BITS, SHIFT_SRL);
    tcg_out_arith(s, TCG_REG_T1, TCG_REG_T1, TCG_REG_T2, ARITH_AND);

    /* Add the tlb_table pointer, creating the CPUTLBEntry address in T1.  */
    tcg_out_arith(s, TCG_REG_T1, TCG_REG_T1, TCG_REG_T3, ARITH_ADD);

    /*
     * Load the tlb comparator and the addend.
     * Always load the entire 64-bit comparator for simplicity.
     * We will ignore the high bits via BPCC_ICC below.
     */
    tcg_out_ld(s, TCG_TYPE_I64, TCG_REG_T2, TCG_REG_T1, cmp_off);
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_T1, TCG_REG_T1, add_off);
    h->base = TCG_REG_T1;

    /* Mask out the page offset, except for the required alignment. */
    compare_mask = s->page_mask | a_mask;
    if (check_fit_tl(compare_mask, 13)) {
        tcg_out_arithi(s, TCG_REG_T3, addr_reg, compare_mask, ARITH_AND);
    } else {
        tcg_out_movi_s32(s, TCG_REG_T3, compare_mask);
        tcg_out_arith(s, TCG_REG_T3, addr_reg, TCG_REG_T3, ARITH_AND);
    }
    tcg_out_cmp(s, TCG_COND_NE, TCG_REG_T2, TCG_REG_T3, 0);

    ldst = new_ldst_label(s);
    ldst->is_ld = is_ld;
    ldst->oi = oi;
    ldst->addr_reg = addr_reg;
    ldst->label_ptr[0] = s->code_ptr;

    /* bne,pn %[xi]cc, label0 */
    cc = addr_type == TCG_TYPE_I32 ? BPCC_ICC : BPCC_XCC;
    tcg_out_bpcc0(s, COND_NE, BPCC_PN | cc, 0);
#else
    /*
     * If the size equals the required alignment, we can skip the test
     * and allow host SIGBUS to deliver SIGBUS to the guest.
     * Otherwise, test for at least natural alignment and defer
     * everything else to the helper functions.
     */
    if (s_bits != memop_alignment_bits(opc)) {
        tcg_debug_assert(check_fit_tl(a_mask, 13));
        tcg_out_arithi(s, TCG_REG_G0, addr_reg, a_mask, ARITH_ANDCC);

        ldst = new_ldst_label(s);
        ldst->is_ld = is_ld;
        ldst->oi = oi;
        ldst->addr_reg = addr_reg;
        ldst->label_ptr[0] = s->code_ptr;

        /* bne,pn %icc, label0 */
        tcg_out_bpcc0(s, COND_NE, BPCC_PN | BPCC_ICC, 0);
    }
    h->base = guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0;
#endif

    /* If the guest address must be zero-extended, do in the delay slot.  */
    if (addr_type == TCG_TYPE_I32) {
        tcg_out_ext32u(s, TCG_REG_T2, addr_reg);
        h->index = TCG_REG_T2;
    } else {
        if (ldst) {
            tcg_out_nop(s);
        }
        h->index = addr_reg;
    }
    return ldst;
}

static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
                            MemOpIdx oi, TCGType data_type)
{
    static const int ld_opc[(MO_SSIZE | MO_BSWAP) + 1] = {
        [MO_UB]   = LDUB,
        [MO_SB]   = LDSB,
        [MO_UB | MO_LE] = LDUB,
        [MO_SB | MO_LE] = LDSB,

        [MO_BEUW] = LDUH,
        [MO_BESW] = LDSH,
        [MO_BEUL] = LDUW,
        [MO_BESL] = LDSW,
        [MO_BEUQ] = LDX,
        [MO_BESQ] = LDX,

        [MO_LEUW] = LDUH_LE,
        [MO_LESW] = LDSH_LE,
        [MO_LEUL] = LDUW_LE,
        [MO_LESL] = LDSW_LE,
        [MO_LEUQ] = LDX_LE,
        [MO_LESQ] = LDX_LE,
    };

    TCGLabelQemuLdst *ldst;
    HostAddress h;

    ldst = prepare_host_addr(s, &h, addr, oi, true);

    tcg_out_ldst_rr(s, data, h.base, h.index,
                    ld_opc[get_memop(oi) & (MO_BSWAP | MO_SSIZE)]);

    if (ldst) {
        ldst->type = data_type;
        ldst->datalo_reg = data;
        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
    }
}

static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr,
                            MemOpIdx oi, TCGType data_type)
{
    static const int st_opc[(MO_SIZE | MO_BSWAP) + 1] = {
        [MO_UB]   = STB,

        [MO_BEUW] = STH,
        [MO_BEUL] = STW,
        [MO_BEUQ] = STX,

        [MO_LEUW] = STH_LE,
        [MO_LEUL] = STW_LE,
        [MO_LEUQ] = STX_LE,
    };

    TCGLabelQemuLdst *ldst;
    HostAddress h;

    ldst = prepare_host_addr(s, &h, addr, oi, false);

    tcg_out_ldst_rr(s, data, h.base, h.index,
                    st_opc[get_memop(oi) & (MO_BSWAP | MO_SIZE)]);

    if (ldst) {
        ldst->type = data_type;
        ldst->datalo_reg = data;
        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
    }
}

static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
{
    if (check_fit_ptr(a0, 13)) {
        tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
        tcg_out_movi_s13(s, TCG_REG_O0, a0);
        return;
    } else {
        intptr_t tb_diff = tcg_tbrel_diff(s, (void *)a0);
        if (check_fit_ptr(tb_diff, 13)) {
            tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
            /* Note that TCG_REG_TB has been unwound to O1.  */
            tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O1, tb_diff, ARITH_ADD);
            return;
        }
    }
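    /*
     * RETURN restores the register window, so the %i0 written here is
     * read back as %o0 by the delay-slot OR below.
     */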
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I0, a0 & ~0x3ff);
    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
    tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O0, a0 & 0x3ff, ARITH_OR);
}

static void tcg_out_goto_tb(TCGContext *s, int which)
{
    ptrdiff_t off = tcg_tbrel_diff(s, (void *)get_jmp_target_addr(s, which));

    /* Load link and indirect branch. */
    set_jmp_insn_offset(s, which);
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TB, TCG_REG_TB, off);
    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_TB, 0, JMPL);
    /* delay slot */
    tcg_out_nop(s);
    set_jmp_reset_offset(s, which);

    /*
     * For the unlinked path of goto_tb, we need to reset TCG_REG_TB
     * to the beginning of this TB.
     */
    off = -tcg_current_code_size(s);
    if (check_fit_i32(off, 13)) {
        tcg_out_arithi(s, TCG_REG_TB, TCG_REG_TB, off, ARITH_ADD);
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, off);
        tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB, TCG_REG_T1, ARITH_ADD);
    }
}

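/*
 * goto_tb branches indirectly through the jump table entry loaded
 * above, so linking a TB only rewrites that table entry; there is no
 * code to patch here.
 */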
void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
                              uintptr_t jmp_rx, uintptr_t jmp_rw)
{
}

static void tgen_add(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_arith(s, a0, a1, a2, ARITH_ADD);
}

static void tgen_addi(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, tcg_target_long a2)
{
    tcg_out_arithi(s, a0, a1, a2, ARITH_ADD);
}

static const TCGOutOpBinary outop_add = {
    .base.static_constraint = C_O1_I2(r, r, rJ),
    .out_rrr = tgen_add,
    .out_rri = tgen_addi,
};

static void tgen_and(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_arith(s, a0, a1, a2, ARITH_AND);
}

static void tgen_andi(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, tcg_target_long a2)
{
    tcg_out_arithi(s, a0, a1, a2, ARITH_AND);
}

static const TCGOutOpBinary outop_and = {
    .base.static_constraint = C_O1_I2(r, r, rJ),
    .out_rrr = tgen_and,
    .out_rri = tgen_andi,
};

static void tgen_andc(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_arith(s, a0, a1, a2, ARITH_ANDN);
}

static const TCGOutOpBinary outop_andc = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_andc,
};

static const TCGOutOpBinary outop_clz = {
    .base.static_constraint = C_NotImplemented,
};

static const TCGOutOpUnary outop_ctpop = {
    .base.static_constraint = C_NotImplemented,
};

static const TCGOutOpBinary outop_ctz = {
    .base.static_constraint = C_NotImplemented,
};

static void tgen_divs_rJ(TCGContext *s, TCGType type,
                         TCGReg a0, TCGReg a1, TCGArg a2, bool c2)
{
    uint32_t insn;

    if (type == TCG_TYPE_I32) {
        /* Load Y with the sign extension of a1 to 64-bits.  */
        tcg_out_arithi(s, TCG_REG_T1, a1, 31, SHIFT_SRA);
        tcg_out_sety(s, TCG_REG_T1);
        insn = ARITH_SDIV;
    } else {
        insn = ARITH_SDIVX;
    }
    tcg_out_arithc(s, a0, a1, a2, c2, insn);
}

static void tgen_divs(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    tgen_divs_rJ(s, type, a0, a1, a2, false);
}

static void tgen_divsi(TCGContext *s, TCGType type,
                       TCGReg a0, TCGReg a1, tcg_target_long a2)
{
    tgen_divs_rJ(s, type, a0, a1, a2, true);
}

static const TCGOutOpBinary outop_divs = {
    .base.static_constraint = C_O1_I2(r, r, rJ),
    .out_rrr = tgen_divs,
    .out_rri = tgen_divsi,
};

static const TCGOutOpDivRem outop_divs2 = {
    .base.static_constraint = C_NotImplemented,
};

static void tgen_divu_rJ(TCGContext *s, TCGType type,
                         TCGReg a0, TCGReg a1, TCGArg a2, bool c2)
{
    uint32_t insn;

    if (type == TCG_TYPE_I32) {
        /* Load Y with the zero extension to 64-bits.  */
        tcg_out_sety(s, TCG_REG_G0);
        insn = ARITH_UDIV;
    } else {
        insn = ARITH_UDIVX;
    }
    tcg_out_arithc(s, a0, a1, a2, c2, insn);
}

static void tgen_divu(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    tgen_divu_rJ(s, type, a0, a1, a2, false);
}

static void tgen_divui(TCGContext *s, TCGType type,
                       TCGReg a0, TCGReg a1, tcg_target_long a2)
{
    tgen_divu_rJ(s, type, a0, a1, a2, true);
}

static const TCGOutOpBinary outop_divu = {
    .base.static_constraint = C_O1_I2(r, r, rJ),
    .out_rrr = tgen_divu,
    .out_rri = tgen_divui,
};

static const TCGOutOpDivRem outop_divu2 = {
    .base.static_constraint = C_NotImplemented,
};

static const TCGOutOpBinary outop_eqv = {
    .base.static_constraint = C_NotImplemented,
};

static void tgen_mul(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    uint32_t insn = type == TCG_TYPE_I32 ? ARITH_UMUL : ARITH_MULX;
    tcg_out_arith(s, a0, a1, a2, insn);
}

static void tgen_muli(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, tcg_target_long a2)
{
    uint32_t insn = type == TCG_TYPE_I32 ? ARITH_UMUL : ARITH_MULX;
    tcg_out_arithi(s, a0, a1, a2, insn);
}

static const TCGOutOpBinary outop_mul = {
    .base.static_constraint = C_O1_I2(r, r, rJ),
    .out_rrr = tgen_mul,
    .out_rri = tgen_muli,
};

/*
 * The 32-bit multiply insns produce a full 64-bit result.
 * Supporting 32-bit mul[us]2 opcodes avoids sign/zero-extensions
 * before the actual multiply; we need only extract the high part
 * into the separate operand.
 */
static TCGConstraintSetIndex cset_mul2(TCGType type, unsigned flags)
{
    return type == TCG_TYPE_I32 ? C_O2_I2(r, r, r, r) : C_NotImplemented;
}

static void tgen_muls2(TCGContext *s, TCGType type,
                       TCGReg a0, TCGReg a1, TCGReg a2, TCGReg a3)
{
    tcg_out_arith(s, a0, a2, a3, ARITH_SMUL);
    tcg_out_arithi(s, a1, a0, 32, SHIFT_SRLX);
}

static const TCGOutOpMul2 outop_muls2 = {
    .base.static_constraint = C_Dynamic,
    .base.dynamic_constraint = cset_mul2,
    .out_rrrr = tgen_muls2,
};

static const TCGOutOpBinary outop_mulsh = {
    .base.static_constraint = C_NotImplemented,
};

static void tgen_mulu2(TCGContext *s, TCGType type,
                       TCGReg a0, TCGReg a1, TCGReg a2, TCGReg a3)
{
    tcg_out_arith(s, a0, a2, a3, ARITH_UMUL);
    tcg_out_arithi(s, a1, a0, 32, SHIFT_SRLX);
}

static const TCGOutOpMul2 outop_mulu2 = {
    .base.static_constraint = C_Dynamic,
    .base.dynamic_constraint = cset_mul2,
    .out_rrrr = tgen_mulu2,
};

static void tgen_muluh(TCGContext *s, TCGType type,
                       TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_arith(s, a0, a1, a2, ARITH_UMULXHI);
}

static TCGConstraintSetIndex cset_muluh(TCGType type, unsigned flags)
{
    return (type == TCG_TYPE_I64 && use_vis3_instructions
            ? C_O1_I2(r, r, r) : C_NotImplemented);
}

static const TCGOutOpBinary outop_muluh = {
    .base.static_constraint = C_Dynamic,
    .base.dynamic_constraint = cset_muluh,
    .out_rrr = tgen_muluh,
};

static const TCGOutOpBinary outop_nand = {
    .base.static_constraint = C_NotImplemented,
};

static const TCGOutOpBinary outop_nor = {
    .base.static_constraint = C_NotImplemented,
};

static void tgen_or(TCGContext *s, TCGType type,
                    TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_arith(s, a0, a1, a2, ARITH_OR);
}

static void tgen_ori(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, tcg_target_long a2)
{
    tcg_out_arithi(s, a0, a1, a2, ARITH_OR);
}

static const TCGOutOpBinary outop_or = {
    .base.static_constraint = C_O1_I2(r, r, rJ),
    .out_rrr = tgen_or,
    .out_rri = tgen_ori,
};

static void tgen_orc(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_arith(s, a0, a1, a2, ARITH_ORN);
}

static const TCGOutOpBinary outop_orc = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_orc,
};

static const TCGOutOpBinary outop_rems = {
    .base.static_constraint = C_NotImplemented,
};

static const TCGOutOpBinary outop_remu = {
    .base.static_constraint = C_NotImplemented,
};

static const TCGOutOpBinary outop_rotl = {
    .base.static_constraint = C_NotImplemented,
};

static const TCGOutOpBinary outop_rotr = {
    .base.static_constraint = C_NotImplemented,
};

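/*
 * The immediate shift forms below mask the count to 5 bits (32-bit) or
 * 6 bits (64-bit), the field widths of the V9 shift instructions.
 */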
1591static void tgen_sar(TCGContext *s, TCGType type,
1592                     TCGReg a0, TCGReg a1, TCGReg a2)
1593{
1594    uint32_t insn = type == TCG_TYPE_I32 ? SHIFT_SRA : SHIFT_SRAX;
1595    tcg_out_arith(s, a0, a1, a2, insn);
1596}
1597
1598static void tgen_sari(TCGContext *s, TCGType type,
1599                      TCGReg a0, TCGReg a1, tcg_target_long a2)
1600{
1601    uint32_t insn = type == TCG_TYPE_I32 ? SHIFT_SRA : SHIFT_SRAX;
1602    uint32_t mask = type == TCG_TYPE_I32 ? 31 : 63;
1603    tcg_out_arithi(s, a0, a1, a2 & mask, insn);
1604}
1605
1606static const TCGOutOpBinary outop_sar = {
1607    .base.static_constraint = C_O1_I2(r, r, rJ),
1608    .out_rrr = tgen_sar,
1609    .out_rri = tgen_sari,
1610};
1611
1612static void tgen_shl(TCGContext *s, TCGType type,
1613                     TCGReg a0, TCGReg a1, TCGReg a2)
1614{
1615    uint32_t insn = type == TCG_TYPE_I32 ? SHIFT_SLL : SHIFT_SLLX;
1616    tcg_out_arith(s, a0, a1, a2, insn);
1617}
1618
1619static void tgen_shli(TCGContext *s, TCGType type,
1620                      TCGReg a0, TCGReg a1, tcg_target_long a2)
1621{
1622    uint32_t insn = type == TCG_TYPE_I32 ? SHIFT_SLL : SHIFT_SLLX;
1623    uint32_t mask = type == TCG_TYPE_I32 ? 31 : 63;
1624    tcg_out_arithi(s, a0, a1, a2 & mask, insn);
1625}
1626
1627static const TCGOutOpBinary outop_shl = {
1628    .base.static_constraint = C_O1_I2(r, r, rJ),
1629    .out_rrr = tgen_shl,
1630    .out_rri = tgen_shli,
1631};
1632
1633static void tgen_shr(TCGContext *s, TCGType type,
1634                     TCGReg a0, TCGReg a1, TCGReg a2)
1635{
1636    uint32_t insn = type == TCG_TYPE_I32 ? SHIFT_SRL : SHIFT_SRLX;
1637    tcg_out_arith(s, a0, a1, a2, insn);
1638}
1639
1640static void tgen_shri(TCGContext *s, TCGType type,
1641                      TCGReg a0, TCGReg a1, tcg_target_long a2)
1642{
1643    uint32_t insn = type == TCG_TYPE_I32 ? SHIFT_SRL : SHIFT_SRLX;
1644    uint32_t mask = type == TCG_TYPE_I32 ? 31 : 63;
1645    tcg_out_arithi(s, a0, a1, a2 & mask, insn);
1646}
1647
1648static const TCGOutOpBinary outop_shr = {
1649    .base.static_constraint = C_O1_I2(r, r, rJ),
1650    .out_rrr = tgen_shr,
1651    .out_rri = tgen_shri,
1652};
1653
1654static void tgen_sub(TCGContext *s, TCGType type,
1655                     TCGReg a0, TCGReg a1, TCGReg a2)
1656{
1657    tcg_out_arith(s, a0, a1, a2, ARITH_SUB);
1658}
1659
1660static const TCGOutOpSubtract outop_sub = {
1661    .base.static_constraint = C_O1_I2(r, r, r),
1662    .out_rrr = tgen_sub,
1663};
1664
1665static void tgen_xor(TCGContext *s, TCGType type,
1666                     TCGReg a0, TCGReg a1, TCGReg a2)
1667{
1668    tcg_out_arith(s, a0, a1, a2, ARITH_XOR);
1669}
1670
1671static void tgen_xori(TCGContext *s, TCGType type,
1672                      TCGReg a0, TCGReg a1, tcg_target_long a2)
1673{
1674    tcg_out_arithi(s, a0, a1, a2, ARITH_XOR);
1675}
1676
1677static const TCGOutOpBinary outop_xor = {
1678    .base.static_constraint = C_O1_I2(r, r, rJ),
1679    .out_rrr = tgen_xor,
1680    .out_rri = tgen_xori,
1681};
1682
1683static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
1684{
1685     tgen_sub(s, type, a0, TCG_REG_G0, a1);
1686}
1687
1688static const TCGOutOpUnary outop_neg = {
1689    .base.static_constraint = C_O1_I1(r, r),
1690    .out_rr = tgen_neg,
1691};
1692
static void tgen_not(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
    tgen_orc(s, type, a0, TCG_REG_G0, a1);
}

static const TCGOutOpUnary outop_not = {
    .base.static_constraint = C_O1_I1(r, r),
    .out_rr = tgen_not,
};

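/*
 * Central dispatcher for the remaining opcodes; anything that is always
 * emitted elsewhere (calls, TB exits, extensions) traps in the default case.
 */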
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS])
{
    TCGArg a0, a1, a2;
    int c2;

    /* Hoist the loads of the most common arguments.  */
    a0 = args[0];
    a1 = args[1];
    a2 = args[2];
    c2 = const_args[2];

    switch (opc) {
    case INDEX_op_goto_ptr:
        tcg_out_arithi(s, TCG_REG_G0, a0, 0, JMPL);
        tcg_out_mov_delay(s, TCG_REG_TB, a0);
        break;
    case INDEX_op_br:
        tcg_out_bpcc(s, COND_A, BPCC_PT, arg_label(a0));
        tcg_out_nop(s);
        break;

#define OP_32_64(x)                             \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64)

    OP_32_64(ld8u):
        tcg_out_ldst(s, a0, a1, a2, LDUB);
        break;
    OP_32_64(ld8s):
        tcg_out_ldst(s, a0, a1, a2, LDSB);
        break;
    OP_32_64(ld16u):
        tcg_out_ldst(s, a0, a1, a2, LDUH);
        break;
    OP_32_64(ld16s):
        tcg_out_ldst(s, a0, a1, a2, LDSH);
        break;
    case INDEX_op_ld_i32:
    case INDEX_op_ld32u_i64:
        tcg_out_ldst(s, a0, a1, a2, LDUW);
        break;
    OP_32_64(st8):
        tcg_out_ldst(s, a0, a1, a2, STB);
        break;
    OP_32_64(st16):
        tcg_out_ldst(s, a0, a1, a2, STH);
        break;
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
        tcg_out_ldst(s, a0, a1, a2, STW);
        break;

    case INDEX_op_brcond_i32:
        tcg_out_brcond_i32(s, a2, a0, a1, const_args[1], arg_label(args[3]));
        break;
    case INDEX_op_movcond_i32:
        tcg_out_movcond_i32(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
        break;

    case INDEX_op_add2_i32:
        tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3],
                            args[4], const_args[4], args[5], const_args[5],
                            ARITH_ADDCC, ARITH_ADDC);
        break;
    case INDEX_op_sub2_i32:
        tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3],
                            args[4], const_args[4], args[5], const_args[5],
                            ARITH_SUBCC, ARITH_SUBC);
        break;

    case INDEX_op_qemu_ld_i32:
        tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32);
        break;
    case INDEX_op_qemu_ld_i64:
        tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64);
        break;
    case INDEX_op_qemu_st_i32:
        tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32);
        break;
    case INDEX_op_qemu_st_i64:
        tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64);
        break;

    case INDEX_op_ld32s_i64:
        tcg_out_ldst(s, a0, a1, a2, LDSW);
        break;
    case INDEX_op_ld_i64:
        tcg_out_ldst(s, a0, a1, a2, LDX);
        break;
    case INDEX_op_st_i64:
        tcg_out_ldst(s, a0, a1, a2, STX);
        break;

    case INDEX_op_brcond_i64:
        tcg_out_brcond_i64(s, a2, a0, a1, const_args[1], arg_label(args[3]));
        break;
    case INDEX_op_movcond_i64:
        tcg_out_movcond_i64(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
        break;
    case INDEX_op_add2_i64:
        tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4],
                            const_args[4], args[5], const_args[5], false);
        break;
    case INDEX_op_sub2_i64:
        tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4],
                            const_args[4], args[5], const_args[5], true);
        break;

    case INDEX_op_mb:
        tcg_out_mb(s, a0);
        break;

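    /*
     * Only extractions with position + length == 32 are selected here,
     * so a single 32-bit right shift (logical or arithmetic) suffices.
     */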
    case INDEX_op_extract_i64:
        tcg_debug_assert(a2 + args[3] == 32);
        tcg_out_arithi(s, a0, a1, a2, SHIFT_SRL);
        break;
    case INDEX_op_sextract_i64:
        tcg_debug_assert(a2 + args[3] == 32);
        tcg_out_arithi(s, a0, a1, a2, SHIFT_SRA);
        break;

    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
    case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb.  */
    case INDEX_op_goto_tb:  /* Always emitted via tcg_out_goto_tb.  */
    case INDEX_op_ext_i32_i64:  /* Always emitted via tcg_reg_alloc_op.  */
    case INDEX_op_extu_i32_i64:
    default:
        g_assert_not_reached();
    }
}

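/* Constraint lookup for opcodes without dedicated TCGOutOp structures.  */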
static TCGConstraintSetIndex
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
{
    switch (op) {
    case INDEX_op_goto_ptr:
        return C_O0_I1(r);

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld_i32:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_extract_i64:
    case INDEX_op_sextract_i64:
    case INDEX_op_qemu_ld_i32:
    case INDEX_op_qemu_ld_i64:
        return C_O1_I1(r, r);

    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
    case INDEX_op_qemu_st_i32:
    case INDEX_op_qemu_st_i64:
        return C_O0_I2(rz, r);

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        return C_O0_I2(rz, rJ);
    case INDEX_op_movcond_i32:
    case INDEX_op_movcond_i64:
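        /* The matching constraint "0" ties the last input to the output.  */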
        return C_O1_I4(r, rz, rJ, rI, 0);
    case INDEX_op_add2_i32:
    case INDEX_op_add2_i64:
    case INDEX_op_sub2_i32:
    case INDEX_op_sub2_i64:
        return C_O2_I4(r, r, rz, rz, rJ, rJ);

    default:
        return C_NotImplemented;
    }
}

static void tcg_target_init(TCGContext *s)
{
    /*
     * Only probe for the platform and capabilities if we haven't already
     * determined maximum values at compile time.
     */
#ifndef use_vis3_instructions
    {
        unsigned long hwcap = qemu_getauxval(AT_HWCAP);
        use_vis3_instructions = (hwcap & HWCAP_SPARC_VIS3) != 0;
    }
#endif

    tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS;
    tcg_target_available_regs[TCG_TYPE_I64] = ALL_GENERAL_REGS;

    tcg_target_call_clobber_regs = 0;
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G1);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G2);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G3);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G4);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G5);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G6);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G7);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O0);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O1);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O2);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O3);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O4);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O5);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O6);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O7);

    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G0); /* zero */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G6); /* reserved for os */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G7); /* thread pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I6); /* frame pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I7); /* return address */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_O6); /* stack pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_T1); /* for internal use */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_T2); /* for internal use */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_T3); /* for internal use */
}

#define ELF_HOST_MACHINE  EM_SPARCV9

typedef struct {
    DebugFrameHeader h;
    uint8_t fde_def_cfa[4];
    uint8_t fde_win_save;
    uint8_t fde_ret_save[3];
} DebugFrame;

static const DebugFrame debug_frame = {
    .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
    .h.cie.id = -1,
    .h.cie.version = 1,
    .h.cie.code_align = 1,
    .h.cie.data_align = -sizeof(void *) & 0x7f,
    .h.cie.return_column = 15,            /* o7 */

    /* Total FDE size does not include the "len" member.  */
    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),

    .fde_def_cfa = {
        12, 30,                         /* DW_CFA_def_cfa i6, 2047 */
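        /* two-byte ULEB128 encoding of the 2047 stack bias */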
        (2047 & 0x7f) | 0x80, (2047 >> 7)
    },
    .fde_win_save = 0x2d,               /* DW_CFA_GNU_window_save */
    .fde_ret_save = { 9, 15, 31 },      /* DW_CFA_register o7, i7 */
};

void tcg_register_jit(const void *buf, size_t buf_size)
{
    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}