/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* We only support generating code for 64-bit mode.  */
#ifndef __arch64__
#error "unsupported code generation mode"
#endif

/* Used for function call generation. */
#define TCG_REG_CALL_STACK              TCG_REG_O6
#define TCG_TARGET_STACK_BIAS           2047
#define TCG_TARGET_STACK_ALIGN          16
#define TCG_TARGET_CALL_STACK_OFFSET    (128 + 6 * 8 + TCG_TARGET_STACK_BIAS)
#define TCG_TARGET_CALL_ARG_I32         TCG_CALL_ARG_EXTEND
#define TCG_TARGET_CALL_ARG_I64         TCG_CALL_ARG_NORMAL
#define TCG_TARGET_CALL_ARG_I128        TCG_CALL_ARG_NORMAL
#define TCG_TARGET_CALL_RET_I128        TCG_CALL_RET_NORMAL

#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%g0",
    "%g1",
    "%g2",
    "%g3",
    "%g4",
    "%g5",
    "%g6",
    "%g7",
    "%o0",
    "%o1",
    "%o2",
    "%o3",
    "%o4",
    "%o5",
    "%o6",
    "%o7",
    "%l0",
    "%l1",
    "%l2",
    "%l3",
    "%l4",
    "%l5",
    "%l6",
    "%l7",
    "%i0",
    "%i1",
    "%i2",
    "%i3",
    "%i4",
    "%i5",
    "%i6",
    "%i7",
};
#endif

#define TCG_CT_CONST_S11  0x100
#define TCG_CT_CONST_S13  0x200

#define ALL_GENERAL_REGS  MAKE_64BIT_MASK(0, 32)

/* Define some temporary registers.  T3 is used for constant generation.  */
#define TCG_REG_T1  TCG_REG_G1
#define TCG_REG_T2  TCG_REG_G2
#define TCG_REG_T3  TCG_REG_O7

#ifndef CONFIG_SOFTMMU
# define TCG_GUEST_BASE_REG TCG_REG_I5
#endif

#define TCG_REG_TB  TCG_REG_I1

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_L0,
    TCG_REG_L1,
    TCG_REG_L2,
    TCG_REG_L3,
    TCG_REG_L4,
    TCG_REG_L5,
    TCG_REG_L6,
    TCG_REG_L7,

    TCG_REG_I0,
    TCG_REG_I1,
    TCG_REG_I2,
    TCG_REG_I3,
    TCG_REG_I4,
    TCG_REG_I5,

    TCG_REG_G3,
    TCG_REG_G4,
    TCG_REG_G5,

    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
    TCG_REG_O4,
    TCG_REG_O5,
};

static const int tcg_target_call_iarg_regs[6] = {
    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
    TCG_REG_O4,
    TCG_REG_O5,
};

static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
{
    tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
    tcg_debug_assert(slot >= 0 && slot <= 3);
    return TCG_REG_O0 + slot;
}

#define INSN_OP(x)  ((x) << 30)
#define INSN_OP2(x) ((x) << 22)
#define INSN_OP3(x) ((x) << 19)
#define INSN_OPF(x) ((x) << 5)
#define INSN_RD(x)  ((x) << 25)
#define INSN_RS1(x) ((x) << 14)
#define INSN_RS2(x) (x)
#define INSN_ASI(x) ((x) << 5)

#define INSN_IMM10(x) ((1 << 13) | ((x) & 0x3ff))
#define INSN_IMM11(x) ((1 << 13) | ((x) & 0x7ff))
#define INSN_IMM13(x) ((1 << 13) | ((x) & 0x1fff))
#define INSN_OFF16(x) ((((x) >> 2) & 0x3fff) | ((((x) >> 16) & 3) << 20))
#define INSN_OFF19(x) (((x) >> 2) & 0x07ffff)
#define INSN_COND(x) ((x) << 25)
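
/*
 * Illustrative example of how the fields compose: "add %o1, %o2, %o0"
 * encodes as INSN_OP(2) | INSN_OP3(0x00) | INSN_RD(TCG_REG_O0)
 * | INSN_RS1(TCG_REG_O1) | INSN_RS2(TCG_REG_O2), with op in bits 30-31,
 * rd in bits 25-29, op3 in bits 19-24, rs1 in bits 14-18 and rs2 in
 * bits 0-4.  Bit 13 selects the immediate form: INSN_IMM13 sets it and
 * supplies a sign-extended 13-bit constant in place of rs2.
 */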

#define COND_N     0x0
#define COND_E     0x1
#define COND_LE    0x2
#define COND_L     0x3
#define COND_LEU   0x4
#define COND_CS    0x5
#define COND_NEG   0x6
#define COND_VS    0x7
#define COND_A     0x8
#define COND_NE    0x9
#define COND_G     0xa
#define COND_GE    0xb
#define COND_GU    0xc
#define COND_CC    0xd
#define COND_POS   0xe
#define COND_VC    0xf
#define BA         (INSN_OP(0) | INSN_COND(COND_A) | INSN_OP2(0x2))

#define RCOND_Z    1
#define RCOND_LEZ  2
#define RCOND_LZ   3
#define RCOND_NZ   5
#define RCOND_GZ   6
#define RCOND_GEZ  7

#define MOVCC_ICC  (1 << 18)
#define MOVCC_XCC  (1 << 18 | 1 << 12)

#define BPCC_ICC   0
#define BPCC_XCC   (2 << 20)
#define BPCC_PT    (1 << 19)
#define BPCC_PN    0
#define BPCC_A     (1 << 29)

#define BPR_PT     BPCC_PT

#define ARITH_ADD  (INSN_OP(2) | INSN_OP3(0x00))
#define ARITH_ADDCC (INSN_OP(2) | INSN_OP3(0x10))
#define ARITH_AND  (INSN_OP(2) | INSN_OP3(0x01))
#define ARITH_ANDCC (INSN_OP(2) | INSN_OP3(0x11))
#define ARITH_ANDN (INSN_OP(2) | INSN_OP3(0x05))
#define ARITH_OR   (INSN_OP(2) | INSN_OP3(0x02))
#define ARITH_ORCC (INSN_OP(2) | INSN_OP3(0x12))
#define ARITH_ORN  (INSN_OP(2) | INSN_OP3(0x06))
#define ARITH_XOR  (INSN_OP(2) | INSN_OP3(0x03))
#define ARITH_SUB  (INSN_OP(2) | INSN_OP3(0x04))
#define ARITH_SUBCC (INSN_OP(2) | INSN_OP3(0x14))
#define ARITH_ADDC (INSN_OP(2) | INSN_OP3(0x08))
#define ARITH_ADDCCC (INSN_OP(2) | INSN_OP3(0x18))
#define ARITH_SUBC (INSN_OP(2) | INSN_OP3(0x0c))
#define ARITH_SUBCCC (INSN_OP(2) | INSN_OP3(0x1c))
#define ARITH_UMUL (INSN_OP(2) | INSN_OP3(0x0a))
#define ARITH_SMUL (INSN_OP(2) | INSN_OP3(0x0b))
#define ARITH_UDIV (INSN_OP(2) | INSN_OP3(0x0e))
#define ARITH_SDIV (INSN_OP(2) | INSN_OP3(0x0f))
#define ARITH_MULX (INSN_OP(2) | INSN_OP3(0x09))
#define ARITH_UDIVX (INSN_OP(2) | INSN_OP3(0x0d))
#define ARITH_SDIVX (INSN_OP(2) | INSN_OP3(0x2d))
#define ARITH_MOVCC (INSN_OP(2) | INSN_OP3(0x2c))
#define ARITH_MOVR (INSN_OP(2) | INSN_OP3(0x2f))

#define ARITH_ADDXC (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x11))
#define ARITH_ADDXCCC (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x13))
#define ARITH_UMULXHI (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x16))

#define SHIFT_SLL  (INSN_OP(2) | INSN_OP3(0x25))
#define SHIFT_SRL  (INSN_OP(2) | INSN_OP3(0x26))
#define SHIFT_SRA  (INSN_OP(2) | INSN_OP3(0x27))

#define SHIFT_SLLX (INSN_OP(2) | INSN_OP3(0x25) | (1 << 12))
#define SHIFT_SRLX (INSN_OP(2) | INSN_OP3(0x26) | (1 << 12))
#define SHIFT_SRAX (INSN_OP(2) | INSN_OP3(0x27) | (1 << 12))

#define RDY        (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(0))
#define WRY        (INSN_OP(2) | INSN_OP3(0x30) | INSN_RD(0))
#define WRCCR      (INSN_OP(2) | INSN_OP3(0x30) | INSN_RD(2))
#define JMPL       (INSN_OP(2) | INSN_OP3(0x38))
#define RETURN     (INSN_OP(2) | INSN_OP3(0x39))
#define SAVE       (INSN_OP(2) | INSN_OP3(0x3c))
#define RESTORE    (INSN_OP(2) | INSN_OP3(0x3d))
#define SETHI      (INSN_OP(0) | INSN_OP2(0x4))
#define CALL       INSN_OP(1)
#define LDUB       (INSN_OP(3) | INSN_OP3(0x01))
#define LDSB       (INSN_OP(3) | INSN_OP3(0x09))
#define LDUH       (INSN_OP(3) | INSN_OP3(0x02))
#define LDSH       (INSN_OP(3) | INSN_OP3(0x0a))
#define LDUW       (INSN_OP(3) | INSN_OP3(0x00))
#define LDSW       (INSN_OP(3) | INSN_OP3(0x08))
#define LDX        (INSN_OP(3) | INSN_OP3(0x0b))
#define STB        (INSN_OP(3) | INSN_OP3(0x05))
#define STH        (INSN_OP(3) | INSN_OP3(0x06))
#define STW        (INSN_OP(3) | INSN_OP3(0x04))
#define STX        (INSN_OP(3) | INSN_OP3(0x0e))
#define LDUBA      (INSN_OP(3) | INSN_OP3(0x11))
#define LDSBA      (INSN_OP(3) | INSN_OP3(0x19))
#define LDUHA      (INSN_OP(3) | INSN_OP3(0x12))
#define LDSHA      (INSN_OP(3) | INSN_OP3(0x1a))
#define LDUWA      (INSN_OP(3) | INSN_OP3(0x10))
#define LDSWA      (INSN_OP(3) | INSN_OP3(0x18))
#define LDXA       (INSN_OP(3) | INSN_OP3(0x1b))
#define STBA       (INSN_OP(3) | INSN_OP3(0x15))
#define STHA       (INSN_OP(3) | INSN_OP3(0x16))
#define STWA       (INSN_OP(3) | INSN_OP3(0x14))
#define STXA       (INSN_OP(3) | INSN_OP3(0x1e))

#define MEMBAR     (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(15) | (1 << 13))

#define NOP        (SETHI | INSN_RD(TCG_REG_G0) | 0)

#ifndef ASI_PRIMARY_LITTLE
#define ASI_PRIMARY_LITTLE 0x88
#endif

#define LDUH_LE    (LDUHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSH_LE    (LDSHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDUW_LE    (LDUWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSW_LE    (LDSWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDX_LE     (LDXA  | INSN_ASI(ASI_PRIMARY_LITTLE))

#define STH_LE     (STHA  | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STW_LE     (STWA  | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STX_LE     (STXA  | INSN_ASI(ASI_PRIMARY_LITTLE))

#ifndef use_vis3_instructions
bool use_vis3_instructions;
#endif

static bool check_fit_i64(int64_t val, unsigned int bits)
{
    return val == sextract64(val, 0, bits);
}

static bool check_fit_i32(int32_t val, unsigned int bits)
{
    return val == sextract32(val, 0, bits);
}

#define check_fit_tl    check_fit_i64
#define check_fit_ptr   check_fit_i64
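
/* e.g. check_fit_ptr(4095, 13) holds but check_fit_ptr(4096, 13) does
   not: the signed 13-bit range is [-4096, 4095]. */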

static bool patch_reloc(tcg_insn_unit *src_rw, int type,
                        intptr_t value, intptr_t addend)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    uint32_t insn = *src_rw;
    intptr_t pcrel;

    value += addend;
    pcrel = tcg_ptr_byte_diff((tcg_insn_unit *)value, src_rx);

    switch (type) {
    case R_SPARC_WDISP16:
        if (!check_fit_ptr(pcrel >> 2, 16)) {
            return false;
        }
        insn &= ~INSN_OFF16(-1);
        insn |= INSN_OFF16(pcrel);
        break;
    case R_SPARC_WDISP19:
        if (!check_fit_ptr(pcrel >> 2, 19)) {
            return false;
        }
        insn &= ~INSN_OFF19(-1);
        insn |= INSN_OFF19(pcrel);
        break;
    case R_SPARC_13:
        if (!check_fit_ptr(value, 13)) {
            return false;
        }
        insn &= ~INSN_IMM13(-1);
        insn |= INSN_IMM13(value);
        break;
    default:
        g_assert_not_reached();
    }

    *src_rw = insn;
    return true;
}

/* test if a constant matches the constraint */
static bool tcg_target_const_match(int64_t val, int ct,
                                   TCGType type, TCGCond cond, int vece)
{
    if (ct & TCG_CT_CONST) {
        return 1;
    }

    if (type == TCG_TYPE_I32) {
        val = (int32_t)val;
    }

    if ((ct & TCG_CT_CONST_S11) && check_fit_tl(val, 11)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_S13) && check_fit_tl(val, 13)) {
        return 1;
    } else {
        return 0;
    }
}

static void tcg_out_nop(TCGContext *s)
{
    tcg_out32(s, NOP);
}

static void tcg_out_arith(TCGContext *s, TCGReg rd, TCGReg rs1,
                          TCGReg rs2, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_RS2(rs2));
}

static void tcg_out_arithi(TCGContext *s, TCGReg rd, TCGReg rs1,
                           int32_t offset, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_IMM13(offset));
}

static void tcg_out_arithc(TCGContext *s, TCGReg rd, TCGReg rs1,
                           int32_t val2, int val2const, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1)
              | (val2const ? INSN_IMM13(val2) : INSN_RS2(val2)));
}

static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    if (ret != arg) {
        tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR);
    }
    return true;
}

static void tcg_out_mov_delay(TCGContext *s, TCGReg ret, TCGReg arg)
{
    if (ret != arg) {
        tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR);
    } else {
        tcg_out_nop(s);
    }
}

static void tcg_out_sethi(TCGContext *s, TCGReg ret, uint32_t arg)
{
    tcg_out32(s, SETHI | INSN_RD(ret) | ((arg & 0xfffffc00) >> 10));
}

/* A 13-bit constant sign-extended to 64 bits.  */
static void tcg_out_movi_s13(TCGContext *s, TCGReg ret, int32_t arg)
{
    tcg_out_arithi(s, ret, TCG_REG_G0, arg, ARITH_OR);
}

/*
 * A 32-bit constant sign-extended to 64 bits.  SETHI zero-extends into
 * the upper word, so load the high bits of ~arg with SETHI, then XOR in
 * a sign-extended 13-bit immediate holding the low 10 bits of arg: this
 * flips bits 10-31 back to arg and fills bits 32-63 with ones, the
 * correct extension for the negative values that reach this path.
 */
static void tcg_out_movi_s32(TCGContext *s, TCGReg ret, int32_t arg)
{
    tcg_out_sethi(s, ret, ~arg);
    tcg_out_arithi(s, ret, ret, (arg & 0x3ff) | -0x400, ARITH_XOR);
}

/* A 32-bit constant zero-extended to 64 bits.  */
static void tcg_out_movi_u32(TCGContext *s, TCGReg ret, uint32_t arg)
{
    tcg_out_sethi(s, ret, arg);
    if (arg & 0x3ff) {
        tcg_out_arithi(s, ret, ret, arg & 0x3ff, ARITH_OR);
    }
}

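/*
 * Build an arbitrary constant, preferring the cheapest encoding:
 * s13 immediate; u32 sethi+or; a 13-bit offset from the TB; s32 via
 * the sethi/xor trick; a (possibly shifted) 32-bit value; the constant
 * pool; else two 32-bit halves merged with sllx plus add/or.  As an
 * illustrative example, 0x123456789abcdef0 in the prologue, where the
 * constant pool is unavailable, builds both halves with
 * tcg_out_movi_u32 and combines them with sllx 32 and or.
 */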
static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
                             tcg_target_long arg, bool in_prologue,
                             TCGReg scratch)
{
    tcg_target_long hi, lo = (int32_t)arg;
    tcg_target_long test, lsb;

    /* A 13-bit constant sign-extended to 64-bits.  */
    if (check_fit_tl(arg, 13)) {
        tcg_out_movi_s13(s, ret, arg);
        return;
    }

    /* A 32-bit constant, or 32-bit zero-extended to 64-bits.  */
    if (type == TCG_TYPE_I32 || arg == (uint32_t)arg) {
        tcg_out_movi_u32(s, ret, arg);
        return;
    }

    /* A 13-bit constant relative to the TB.  */
    if (!in_prologue) {
        test = tcg_tbrel_diff(s, (void *)arg);
        if (check_fit_ptr(test, 13)) {
            tcg_out_arithi(s, ret, TCG_REG_TB, test, ARITH_ADD);
            return;
        }
    }

    /* A 32-bit constant sign-extended to 64-bits.  */
    if (arg == lo) {
        tcg_out_movi_s32(s, ret, arg);
        return;
    }

    /* A 32-bit constant, shifted.  */
    lsb = ctz64(arg);
    test = (tcg_target_long)arg >> lsb;
    if (lsb > 10 && test == extract64(test, 0, 21)) {
        tcg_out_sethi(s, ret, test << 10);
        tcg_out_arithi(s, ret, ret, lsb - 10, SHIFT_SLLX);
        return;
    } else if (test == (uint32_t)test || test == (int32_t)test) {
        tcg_out_movi_int(s, TCG_TYPE_I64, ret, test, in_prologue, scratch);
        tcg_out_arithi(s, ret, ret, lsb, SHIFT_SLLX);
        return;
    }

    /* Use the constant pool, if possible. */
    if (!in_prologue) {
        new_pool_label(s, arg, R_SPARC_13, s->code_ptr,
                       tcg_tbrel_diff(s, NULL));
        tcg_out32(s, LDX | INSN_RD(ret) | INSN_RS1(TCG_REG_TB));
        return;
    }

    /* A 64-bit constant decomposed into 2 32-bit pieces.  */
    if (check_fit_i32(lo, 13)) {
        hi = (arg - lo) >> 32;
        tcg_out_movi_u32(s, ret, hi);
        tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
        tcg_out_arithi(s, ret, ret, lo, ARITH_ADD);
    } else {
        hi = arg >> 32;
        tcg_out_movi_u32(s, ret, hi);
        tcg_out_movi_u32(s, scratch, lo);
        tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
        tcg_out_arith(s, ret, ret, scratch, ARITH_OR);
    }
}

static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg)
{
    tcg_debug_assert(ret != TCG_REG_T3);
    tcg_out_movi_int(s, type, ret, arg, false, TCG_REG_T3);
}

static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
{
    g_assert_not_reached();
}

static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
{
    g_assert_not_reached();
}

static void tcg_out_ext8u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_arithi(s, rd, rs, 0xff, ARITH_AND);
}

static void tcg_out_ext16u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_arithi(s, rd, rs, 16, SHIFT_SLL);
    tcg_out_arithi(s, rd, rd, 16, SHIFT_SRL);
}

static void tcg_out_ext32s(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_arithi(s, rd, rs, 0, SHIFT_SRA);
}

static void tcg_out_ext32u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_arithi(s, rd, rs, 0, SHIFT_SRL);
}

static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_ext32s(s, rd, rs);
}

static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_ext32u(s, rd, rs);
}

static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_ext32u(s, rd, rs);
}

static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
{
    return false;
}

static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                             tcg_target_long imm)
{
    /* This function is only used for passing structs by reference. */
    g_assert_not_reached();
}

static void tcg_out_ldst_rr(TCGContext *s, TCGReg data, TCGReg a1,
                            TCGReg a2, int op)
{
    tcg_out32(s, op | INSN_RD(data) | INSN_RS1(a1) | INSN_RS2(a2));
}

static void tcg_out_ldst(TCGContext *s, TCGReg ret, TCGReg addr,
                         intptr_t offset, int op)
{
    if (check_fit_ptr(offset, 13)) {
        tcg_out32(s, op | INSN_RD(ret) | INSN_RS1(addr) |
                  INSN_IMM13(offset));
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, offset);
        tcg_out_ldst_rr(s, ret, addr, TCG_REG_T1, op);
    }
}

static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
                       TCGReg arg1, intptr_t arg2)
{
    tcg_out_ldst(s, ret, arg1, arg2, (type == TCG_TYPE_I32 ? LDUW : LDX));
}

static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg arg1, intptr_t arg2)
{
    tcg_out_ldst(s, arg, arg1, arg2, (type == TCG_TYPE_I32 ? STW : STX));
}

static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                        TCGReg base, intptr_t ofs)
{
    if (val == 0) {
        tcg_out_st(s, type, TCG_REG_G0, base, ofs);
        return true;
    }
    return false;
}

static void tcg_out_sety(TCGContext *s, TCGReg rs)
{
    tcg_out32(s, WRY | INSN_RS1(TCG_REG_G0) | INSN_RS2(rs));
}

static const uint8_t tcg_cond_to_bcond[16] = {
    [TCG_COND_EQ] = COND_E,
    [TCG_COND_NE] = COND_NE,
    [TCG_COND_TSTEQ] = COND_E,
    [TCG_COND_TSTNE] = COND_NE,
    [TCG_COND_LT] = COND_L,
    [TCG_COND_GE] = COND_GE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_G,
    [TCG_COND_LTU] = COND_CS,
    [TCG_COND_GEU] = COND_CC,
    [TCG_COND_LEU] = COND_LEU,
    [TCG_COND_GTU] = COND_GU,
};
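
/*
 * TSTEQ/TSTNE reuse the E/NE branch conditions: tcg_out_cmp lowers
 * test comparisons with ANDCC rather than SUBCC, after which the Z
 * flag answers the question exactly as for equality against zero.
 */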

static const uint8_t tcg_cond_to_rcond[16] = {
    [TCG_COND_EQ] = RCOND_Z,
    [TCG_COND_NE] = RCOND_NZ,
    [TCG_COND_LT] = RCOND_LZ,
    [TCG_COND_GT] = RCOND_GZ,
    [TCG_COND_LE] = RCOND_LEZ,
    [TCG_COND_GE] = RCOND_GEZ
};

static void tcg_out_bpcc0(TCGContext *s, int scond, int flags, int off19)
{
    tcg_out32(s, INSN_OP(0) | INSN_OP2(1) | INSN_COND(scond) | flags | off19);
}

static void tcg_out_bpcc(TCGContext *s, int scond, int flags, TCGLabel *l)
{
    int off19 = 0;

    if (l->has_value) {
        off19 = INSN_OFF19(tcg_pcrel_diff(s, l->u.value_ptr));
    } else {
        tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP19, l, 0);
    }
    tcg_out_bpcc0(s, scond, flags, off19);
}

static void tcg_out_cmp(TCGContext *s, TCGCond cond,
                        TCGReg c1, int32_t c2, int c2const)
{
    tcg_out_arithc(s, TCG_REG_G0, c1, c2, c2const,
                   is_tst_cond(cond) ? ARITH_ANDCC : ARITH_SUBCC);
}

static void tcg_out_brcond_i32(TCGContext *s, TCGCond cond, TCGReg arg1,
                               int32_t arg2, int const_arg2, TCGLabel *l)
{
    tcg_out_cmp(s, cond, arg1, arg2, const_arg2);
    tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_ICC | BPCC_PT, l);
    tcg_out_nop(s);
}

static void tcg_out_movcc(TCGContext *s, int scond, int cc, TCGReg ret,
                          int32_t v1, int v1const)
{
    tcg_out32(s, ARITH_MOVCC | cc | INSN_RD(ret) | INSN_RS1(scond)
              | (v1const ? INSN_IMM11(v1) : INSN_RS2(v1)));
}

static void tcg_out_movcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const,
                                int32_t v1, int v1const)
{
    tcg_out_cmp(s, cond, c1, c2, c2const);
    tcg_out_movcc(s, tcg_cond_to_bcond[cond], MOVCC_ICC, ret, v1, v1const);
}

static void tcg_out_brcond_i64(TCGContext *s, TCGCond cond, TCGReg arg1,
                               int32_t arg2, int const_arg2, TCGLabel *l)
{
    /* For 64-bit signed comparisons vs zero, we can avoid the compare.  */
    int rcond = tcg_cond_to_rcond[cond];
    if (arg2 == 0 && rcond) {
        int off16 = 0;

        if (l->has_value) {
            off16 = INSN_OFF16(tcg_pcrel_diff(s, l->u.value_ptr));
        } else {
            tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP16, l, 0);
        }
        tcg_out32(s, INSN_OP(0) | INSN_OP2(3) | BPR_PT | INSN_RS1(arg1)
                  | INSN_COND(rcond) | off16);
    } else {
        tcg_out_cmp(s, cond, arg1, arg2, const_arg2);
        tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_XCC | BPCC_PT, l);
    }
    tcg_out_nop(s);
}

static void tcg_out_movr(TCGContext *s, int rcond, TCGReg ret, TCGReg c1,
                         int32_t v1, int v1const)
{
    tcg_out32(s, ARITH_MOVR | INSN_RD(ret) | INSN_RS1(c1) | (rcond << 10)
              | (v1const ? INSN_IMM10(v1) : INSN_RS2(v1)));
}

static void tcg_out_movcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const,
                                int32_t v1, int v1const)
{
    /* For 64-bit signed comparisons vs zero, we can avoid the compare.
       Note that the immediate range is one bit smaller, so we must check
       for that as well.  */
    int rcond = tcg_cond_to_rcond[cond];
    if (c2 == 0 && rcond && (!v1const || check_fit_i32(v1, 10))) {
        tcg_out_movr(s, rcond, ret, c1, v1, v1const);
    } else {
        tcg_out_cmp(s, cond, c1, c2, c2const);
        tcg_out_movcc(s, tcg_cond_to_bcond[cond], MOVCC_XCC, ret, v1, v1const);
    }
}

static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, bool c2const, bool neg)
{
    /* For 32-bit comparisons, we can play games with ADDC/SUBC.  */
    switch (cond) {
    case TCG_COND_LTU:
    case TCG_COND_GEU:
        /*
         * The result of the comparison is in the carry bit:
         * SUBCC sets C exactly when c1 is less than c2 unsigned.
         */
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For equality, we can transform to inequality vs zero.  */
        if (c2 != 0) {
            tcg_out_arithc(s, TCG_REG_T1, c1, c2, c2const, ARITH_XOR);
            c2 = TCG_REG_T1;
        } else {
            c2 = c1;
        }
        c1 = TCG_REG_G0, c2const = 0;
        cond = (cond == TCG_COND_EQ ? TCG_COND_GEU : TCG_COND_LTU);
        break;

    case TCG_COND_TSTEQ:
    case TCG_COND_TSTNE:
        /* Transform to inequality vs zero.  */
        tcg_out_arithc(s, TCG_REG_T1, c1, c2, c2const, ARITH_AND);
        c1 = TCG_REG_G0;
        c2 = TCG_REG_T1, c2const = 0;
        cond = (cond == TCG_COND_TSTEQ ? TCG_COND_GEU : TCG_COND_LTU);
        break;

    case TCG_COND_GTU:
    case TCG_COND_LEU:
        /* If we don't need to load a constant into a register, we can
           swap the operands on GTU/LEU.  There's no benefit to loading
           the constant into a temporary register.  */
        if (!c2const || c2 == 0) {
            TCGReg t = c1;
            c1 = c2;
            c2 = t;
            c2const = 0;
            cond = tcg_swap_cond(cond);
            break;
        }
        /* FALLTHRU */

    default:
        tcg_out_cmp(s, cond, c1, c2, c2const);
        tcg_out_movi_s13(s, ret, 0);
        tcg_out_movcc(s, tcg_cond_to_bcond[cond],
                      MOVCC_ICC, ret, neg ? -1 : 1, 1);
        return;
    }

    tcg_out_cmp(s, cond, c1, c2, c2const);
    if (cond == TCG_COND_LTU) {
        if (neg) {
            /* 0 - 0 - C = -C = (C ? -1 : 0) */
            tcg_out_arithi(s, ret, TCG_REG_G0, 0, ARITH_SUBC);
        } else {
            /* 0 + 0 + C =  C = (C ? 1 : 0) */
            tcg_out_arithi(s, ret, TCG_REG_G0, 0, ARITH_ADDC);
        }
    } else {
        if (neg) {
            /* 0 + -1 + C = C - 1 = (C ? 0 : -1) */
            tcg_out_arithi(s, ret, TCG_REG_G0, -1, ARITH_ADDC);
        } else {
            /* 0 - -1 - C = 1 - C = (C ? 0 : 1) */
            tcg_out_arithi(s, ret, TCG_REG_G0, -1, ARITH_SUBC);
        }
    }
}

static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, bool c2const, bool neg)
{
    int rcond;

    if (use_vis3_instructions && !neg) {
        switch (cond) {
        case TCG_COND_NE:
            if (c2 != 0) {
                break;
            }
            c2 = c1, c2const = 0, c1 = TCG_REG_G0;
            /* FALLTHRU */
        case TCG_COND_LTU:
            tcg_out_cmp(s, cond, c1, c2, c2const);
            tcg_out_arith(s, ret, TCG_REG_G0, TCG_REG_G0, ARITH_ADDXC);
            return;
        default:
            break;
        }
    }

    /* For 64-bit signed comparisons vs zero, we can avoid the compare
       if the input does not overlap the output.  */
    rcond = tcg_cond_to_rcond[cond];
    if (c2 == 0 && rcond && c1 != ret) {
        tcg_out_movi_s13(s, ret, 0);
        tcg_out_movr(s, rcond, ret, c1, neg ? -1 : 1, 1);
    } else {
        tcg_out_cmp(s, cond, c1, c2, c2const);
        tcg_out_movi_s13(s, ret, 0);
        tcg_out_movcc(s, tcg_cond_to_bcond[cond],
                      MOVCC_XCC, ret, neg ? -1 : 1, 1);
    }
}

static void tcg_out_brcond(TCGContext *s, TCGType type, TCGCond cond,
                           TCGReg arg1, TCGArg arg2, bool const_arg2,
                           TCGLabel *l)
{
    if (type == TCG_TYPE_I32) {
        tcg_out_brcond_i32(s, cond, arg1, arg2, const_arg2, l);
    } else {
        tcg_out_brcond_i64(s, cond, arg1, arg2, const_arg2, l);
    }
}

static void tgen_brcond(TCGContext *s, TCGType type, TCGCond cond,
                        TCGReg arg1, TCGReg arg2, TCGLabel *l)
{
    tcg_out_brcond(s, type, cond, arg1, arg2, false, l);
}

static void tgen_brcondi(TCGContext *s, TCGType type, TCGCond cond,
                         TCGReg arg1, tcg_target_long arg2, TCGLabel *l)
{
    tcg_out_brcond(s, type, cond, arg1, arg2, true, l);
}

static const TCGOutOpBrcond outop_brcond = {
    .base.static_constraint = C_O0_I2(r, rJ),
    .out_rr = tgen_brcond,
    .out_ri = tgen_brcondi,
};

static void tcg_out_setcond(TCGContext *s, TCGType type, TCGCond cond,
                            TCGReg ret, TCGReg c1,
                            TCGArg c2, bool c2const, bool neg)
{
    if (type == TCG_TYPE_I32) {
        tcg_out_setcond_i32(s, cond, ret, c1, c2, c2const, neg);
    } else {
        tcg_out_setcond_i64(s, cond, ret, c1, c2, c2const, neg);
    }
}

static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond,
                         TCGReg dest, TCGReg arg1, TCGReg arg2)
{
    tcg_out_setcond(s, type, cond, dest, arg1, arg2, false, false);
}

static void tgen_setcondi(TCGContext *s, TCGType type, TCGCond cond,
                          TCGReg dest, TCGReg arg1, tcg_target_long arg2)
{
    tcg_out_setcond(s, type, cond, dest, arg1, arg2, true, false);
}

static const TCGOutOpSetcond outop_setcond = {
    .base.static_constraint = C_O1_I2(r, r, rJ),
    .out_rrr = tgen_setcond,
    .out_rri = tgen_setcondi,
};

static void tgen_negsetcond(TCGContext *s, TCGType type, TCGCond cond,
                            TCGReg dest, TCGReg arg1, TCGReg arg2)
{
    tcg_out_setcond(s, type, cond, dest, arg1, arg2, false, true);
}

static void tgen_negsetcondi(TCGContext *s, TCGType type, TCGCond cond,
                             TCGReg dest, TCGReg arg1, tcg_target_long arg2)
{
    tcg_out_setcond(s, type, cond, dest, arg1, arg2, true, true);
}

static const TCGOutOpSetcond outop_negsetcond = {
    .base.static_constraint = C_O1_I2(r, r, rJ),
    .out_rrr = tgen_negsetcond,
    .out_rri = tgen_negsetcondi,
};

static void tgen_movcond(TCGContext *s, TCGType type, TCGCond cond,
                         TCGReg ret, TCGReg c1, TCGArg c2, bool c2const,
                         TCGArg v1, bool v1const, TCGArg v2, bool v2const)
{
    if (type == TCG_TYPE_I32) {
        tcg_out_movcond_i32(s, cond, ret, c1, c2, c2const, v1, v1const);
    } else {
        tcg_out_movcond_i64(s, cond, ret, c1, c2, c2const, v1, v1const);
    }
}

static const TCGOutOpMovcond outop_movcond = {
    .base.static_constraint = C_O1_I4(r, r, rJ, rI, 0),
    .out = tgen_movcond,
};

static void tcg_out_jmpl_const(TCGContext *s, const tcg_insn_unit *dest,
                               bool in_prologue, bool tail_call)
{
    uintptr_t desti = (uintptr_t)dest;

    tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_REG_T1,
                     desti & ~0xfff, in_prologue, TCG_REG_T2);
    tcg_out_arithi(s, tail_call ? TCG_REG_G0 : TCG_REG_O7,
                   TCG_REG_T1, desti & 0xfff, JMPL);
}

static void tcg_out_call_nodelay(TCGContext *s, const tcg_insn_unit *dest,
                                 bool in_prologue)
{
    ptrdiff_t disp = tcg_pcrel_diff(s, dest);

    if (disp == (int32_t)disp) {
        tcg_out32(s, CALL | (uint32_t)disp >> 2);
    } else {
        tcg_out_jmpl_const(s, dest, in_prologue, false);
    }
}

static void tcg_out_call(TCGContext *s, const tcg_insn_unit *dest,
                         const TCGHelperInfo *info)
{
    tcg_out_call_nodelay(s, dest, false);
    tcg_out_nop(s);
}

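/*
 * The TCG_MO_* bits (LD_LD = 1, ST_LD = 2, LD_ST = 4, ST_ST = 8)
 * coincide with the MEMBAR mmask bits #LoadLoad, #StoreLoad,
 * #LoadStore and #StoreStore, so the argument below can be masked
 * directly into the instruction.
 */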
static void tcg_out_mb(TCGContext *s, TCGArg a0)
{
    /* Note that the TCG memory order constants mirror the Sparc MEMBAR.  */
    tcg_out32(s, MEMBAR | (a0 & TCG_MO_ALL));
}

/* Generate global QEMU prologue and epilogue code */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int tmp_buf_size, frame_size;

    /*
     * The TCG temp buffer is at the top of the frame, immediately
     * below the frame pointer.  Use the logical (aligned) offset here;
     * the stack bias is applied in temp_allocate_frame().
     */
    tmp_buf_size = CPU_TEMP_BUF_NLONGS * (int)sizeof(long);
    tcg_set_frame(s, TCG_REG_I6, -tmp_buf_size, tmp_buf_size);

    /*
     * TCG_TARGET_CALL_STACK_OFFSET includes the stack bias, but is
     * otherwise the minimal frame usable by callees.
     */
    frame_size = TCG_TARGET_CALL_STACK_OFFSET - TCG_TARGET_STACK_BIAS;
    frame_size += TCG_STATIC_CALL_ARGS_SIZE + tmp_buf_size;
    frame_size += TCG_TARGET_STACK_ALIGN - 1;
    frame_size &= -TCG_TARGET_STACK_ALIGN;
    tcg_out32(s, SAVE | INSN_RD(TCG_REG_O6) | INSN_RS1(TCG_REG_O6) |
              INSN_IMM13(-frame_size));

#ifndef CONFIG_SOFTMMU
    if (guest_base != 0) {
        tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG,
                         guest_base, true, TCG_REG_T1);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }
#endif

    /* We choose TCG_REG_TB such that no move is required.  */
    QEMU_BUILD_BUG_ON(TCG_REG_TB != TCG_REG_I1);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TB);

    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I1, 0, JMPL);
    /* delay slot */
    tcg_out_nop(s);

    /* Epilogue for goto_ptr.  */
    tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
    /* delay slot */
    tcg_out_movi_s13(s, TCG_REG_O0, 0);
}

static void tcg_out_tb_start(TCGContext *s)
{
    /* nothing to do */
}

static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
    int i;
    for (i = 0; i < count; ++i) {
        p[i] = NOP;
    }
}

static const TCGLdstHelperParam ldst_helper_param = {
    .ntmp = 1, .tmp = { TCG_REG_T1 }
};

static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    MemOp opc = get_memop(lb->oi);
    MemOp sgn;

    if (!patch_reloc(lb->label_ptr[0], R_SPARC_WDISP19,
                     (intptr_t)tcg_splitwx_to_rx(s->code_ptr), 0)) {
        return false;
    }

    /* Use inline tcg_out_ext32s; otherwise let the helper sign-extend. */
    sgn = (opc & MO_SIZE) < MO_32 ? MO_SIGN : 0;

    tcg_out_ld_helper_args(s, lb, &ldst_helper_param);
    tcg_out_call(s, qemu_ld_helpers[opc & (MO_SIZE | sgn)], NULL);
    tcg_out_ld_helper_ret(s, lb, sgn, &ldst_helper_param);

    tcg_out_bpcc0(s, COND_A, BPCC_A | BPCC_PT, 0);
    return patch_reloc(s->code_ptr - 1, R_SPARC_WDISP19,
                       (intptr_t)lb->raddr, 0);
}

static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    MemOp opc = get_memop(lb->oi);

    if (!patch_reloc(lb->label_ptr[0], R_SPARC_WDISP19,
                     (intptr_t)tcg_splitwx_to_rx(s->code_ptr), 0)) {
        return false;
    }

    tcg_out_st_helper_args(s, lb, &ldst_helper_param);
    tcg_out_call(s, qemu_st_helpers[opc & MO_SIZE], NULL);

    tcg_out_bpcc0(s, COND_A, BPCC_A | BPCC_PT, 0);
    return patch_reloc(s->code_ptr - 1, R_SPARC_WDISP19,
                       (intptr_t)lb->raddr, 0);
}

typedef struct {
    TCGReg base;
    TCGReg index;
    TCGAtomAlign aa;
} HostAddress;

bool tcg_target_has_memory_bswap(MemOp memop)
{
    return true;
}

/* We expect to use a 13-bit negative offset from ENV.  */
#define MIN_TLB_MASK_TABLE_OFS  -(1 << 12)

/*
 * For system-mode, perform the TLB load and compare.
 * For user-mode, perform any required alignment tests.
 * In both cases, return a TCGLabelQemuLdst structure if the slow path
 * is required and fill in @h with the host address for the fast path.
 */
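/*
 * A rough sketch of the softmmu fast path emitted below (with
 * T1 = %g1, T2 = %g2, T3 = %o7):
 *   srl    addr, page_bits - CPU_TLB_ENTRY_BITS, T1
 *   and    T1, T2(mask), T1
 *   add    T1, T3(table), T1       ! T1 = &CPUTLBEntry
 *   ldx    [T1 + cmp_off], T2      ! comparator
 *   ldx    [T1 + add_off], T1      ! addend
 *   and    addr, compare_mask, T3
 *   subcc  T2, T3, %g0
 *   bne,pn %xcc (or %icc), slow path
 */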
static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
                                           TCGReg addr_reg, MemOpIdx oi,
                                           bool is_ld)
{
    TCGType addr_type = s->addr_type;
    TCGLabelQemuLdst *ldst = NULL;
    MemOp opc = get_memop(oi);
    MemOp s_bits = opc & MO_SIZE;
    unsigned a_mask;

    /* We don't support unaligned accesses. */
    h->aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, false);
    h->aa.align = MAX(h->aa.align, s_bits);
    a_mask = (1u << h->aa.align) - 1;

#ifdef CONFIG_SOFTMMU
    int mem_index = get_mmuidx(oi);
    int fast_off = tlb_mask_table_ofs(s, mem_index);
    int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
    int table_off = fast_off + offsetof(CPUTLBDescFast, table);
    int cmp_off = is_ld ? offsetof(CPUTLBEntry, addr_read)
                        : offsetof(CPUTLBEntry, addr_write);
    int add_off = offsetof(CPUTLBEntry, addend);
    int compare_mask;
    int cc;

    /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx].  */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_T2, TCG_AREG0, mask_off);
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_T3, TCG_AREG0, table_off);

    /* Extract the page index, shifted into place for tlb index.  */
    tcg_out_arithi(s, TCG_REG_T1, addr_reg,
                   s->page_bits - CPU_TLB_ENTRY_BITS, SHIFT_SRL);
    tcg_out_arith(s, TCG_REG_T1, TCG_REG_T1, TCG_REG_T2, ARITH_AND);
    /* Add the tlb_table pointer, creating the CPUTLBEntry address in T1.  */
    tcg_out_arith(s, TCG_REG_T1, TCG_REG_T1, TCG_REG_T3, ARITH_ADD);

    /*
     * Load the tlb comparator and the addend.
     * Always load the entire 64-bit comparator for simplicity.
     * We will ignore the high bits via BPCC_ICC below.
     */
    tcg_out_ld(s, TCG_TYPE_I64, TCG_REG_T2, TCG_REG_T1, cmp_off);
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_T1, TCG_REG_T1, add_off);
    h->base = TCG_REG_T1;

    /* Mask out the page offset, except for the required alignment. */
    compare_mask = s->page_mask | a_mask;
    if (check_fit_tl(compare_mask, 13)) {
        tcg_out_arithi(s, TCG_REG_T3, addr_reg, compare_mask, ARITH_AND);
    } else {
        tcg_out_movi_s32(s, TCG_REG_T3, compare_mask);
        tcg_out_arith(s, TCG_REG_T3, addr_reg, TCG_REG_T3, ARITH_AND);
    }
    tcg_out_cmp(s, TCG_COND_NE, TCG_REG_T2, TCG_REG_T3, 0);

    ldst = new_ldst_label(s);
    ldst->is_ld = is_ld;
    ldst->oi = oi;
    ldst->addr_reg = addr_reg;
    ldst->label_ptr[0] = s->code_ptr;

    /* bne,pn %[xi]cc, label0 */
    cc = addr_type == TCG_TYPE_I32 ? BPCC_ICC : BPCC_XCC;
    tcg_out_bpcc0(s, COND_NE, BPCC_PN | cc, 0);
#else
    /*
     * If the size equals the required alignment, we can skip the test
     * and allow the host SIGBUS to be delivered to the guest.
     * Otherwise, test for at least natural alignment and defer
     * everything else to the helper functions.
     */
    if (s_bits != memop_alignment_bits(opc)) {
        tcg_debug_assert(check_fit_tl(a_mask, 13));
        tcg_out_arithi(s, TCG_REG_G0, addr_reg, a_mask, ARITH_ANDCC);

        ldst = new_ldst_label(s);
        ldst->is_ld = is_ld;
        ldst->oi = oi;
        ldst->addr_reg = addr_reg;
        ldst->label_ptr[0] = s->code_ptr;

        /* bne,pn %icc, label0 */
        tcg_out_bpcc0(s, COND_NE, BPCC_PN | BPCC_ICC, 0);
    }
    h->base = guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0;
#endif

    /* If the guest address must be zero-extended, do so in the delay slot.  */
    if (addr_type == TCG_TYPE_I32) {
        tcg_out_ext32u(s, TCG_REG_T2, addr_reg);
        h->index = TCG_REG_T2;
    } else {
        if (ldst) {
            tcg_out_nop(s);
        }
        h->index = addr_reg;
    }
    return ldst;
}

static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
                            MemOpIdx oi, TCGType data_type)
{
    static const int ld_opc[(MO_SSIZE | MO_BSWAP) + 1] = {
        [MO_UB]   = LDUB,
        [MO_SB]   = LDSB,
        [MO_UB | MO_LE] = LDUB,
        [MO_SB | MO_LE] = LDSB,

        [MO_BEUW] = LDUH,
        [MO_BESW] = LDSH,
        [MO_BEUL] = LDUW,
        [MO_BESL] = LDSW,
        [MO_BEUQ] = LDX,
        [MO_BESQ] = LDX,

        [MO_LEUW] = LDUH_LE,
        [MO_LESW] = LDSH_LE,
        [MO_LEUL] = LDUW_LE,
        [MO_LESL] = LDSW_LE,
        [MO_LEUQ] = LDX_LE,
        [MO_LESQ] = LDX_LE,
    };

    TCGLabelQemuLdst *ldst;
    HostAddress h;

    ldst = prepare_host_addr(s, &h, addr, oi, true);

    tcg_out_ldst_rr(s, data, h.base, h.index,
                    ld_opc[get_memop(oi) & (MO_BSWAP | MO_SSIZE)]);

    if (ldst) {
        ldst->type = data_type;
        ldst->datalo_reg = data;
        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
    }
}

static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr,
                            MemOpIdx oi, TCGType data_type)
{
    static const int st_opc[(MO_SIZE | MO_BSWAP) + 1] = {
        [MO_UB]   = STB,

        [MO_BEUW] = STH,
        [MO_BEUL] = STW,
        [MO_BEUQ] = STX,

        [MO_LEUW] = STH_LE,
        [MO_LEUL] = STW_LE,
        [MO_LEUQ] = STX_LE,
    };

    TCGLabelQemuLdst *ldst;
    HostAddress h;

    ldst = prepare_host_addr(s, &h, addr, oi, false);

    tcg_out_ldst_rr(s, data, h.base, h.index,
                    st_opc[get_memop(oi) & (MO_BSWAP | MO_SIZE)]);

    if (ldst) {
        ldst->type = data_type;
        ldst->datalo_reg = data;
        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
    }
}

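/*
 * TB exits return through "return %i7 + 8", which pops the register
 * window and skips the caller's call + delay slot.  The RETURN's own
 * delay slot then sets the value in O0.  Note that the window restore
 * renames registers: %i0 becomes %o0 and TCG_REG_TB (%i1) becomes %o1,
 * which is what the comments below rely on.
 */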
static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
{
    if (check_fit_ptr(a0, 13)) {
        tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
        tcg_out_movi_s13(s, TCG_REG_O0, a0);
        return;
    } else {
        intptr_t tb_diff = tcg_tbrel_diff(s, (void *)a0);
        if (check_fit_ptr(tb_diff, 13)) {
            tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
            /* Note that TCG_REG_TB has been unwound to O1.  */
            tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O1, tb_diff, ARITH_ADD);
            return;
        }
    }
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I0, a0 & ~0x3ff);
    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
    tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O0, a0 & 0x3ff, ARITH_OR);
}

static void tcg_out_goto_tb(TCGContext *s, int which)
{
    ptrdiff_t off = tcg_tbrel_diff(s, (void *)get_jmp_target_addr(s, which));

    /* Load link and indirect branch. */
    set_jmp_insn_offset(s, which);
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TB, TCG_REG_TB, off);
    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_TB, 0, JMPL);
    /* delay slot */
    tcg_out_nop(s);
    set_jmp_reset_offset(s, which);

    /*
     * For the unlinked path of goto_tb, we need to reset TCG_REG_TB
     * to the beginning of this TB.
     */
    off = -tcg_current_code_size(s);
    if (check_fit_i32(off, 13)) {
        tcg_out_arithi(s, TCG_REG_TB, TCG_REG_TB, off, ARITH_ADD);
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, off);
        tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB, TCG_REG_T1, ARITH_ADD);
    }
}

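/*
 * goto_tb branches indirectly through the address loaded from the
 * jump table above; relinking a TB only updates that table entry
 * (done by common code), so there is no instruction to patch here.
 */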
void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
                              uintptr_t jmp_rx, uintptr_t jmp_rw)
{
}


static void tgen_add(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_arith(s, a0, a1, a2, ARITH_ADD);
}

static void tgen_addi(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, tcg_target_long a2)
{
    tcg_out_arithi(s, a0, a1, a2, ARITH_ADD);
}

static const TCGOutOpBinary outop_add = {
    .base.static_constraint = C_O1_I2(r, r, rJ),
    .out_rrr = tgen_add,
    .out_rri = tgen_addi,
};

static void tgen_addco_rrr(TCGContext *s, TCGType type,
                           TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_arith(s, a0, a1, a2, ARITH_ADDCC);
}

static void tgen_addco_rri(TCGContext *s, TCGType type,
                           TCGReg a0, TCGReg a1, tcg_target_long a2)
{
    tcg_out_arithi(s, a0, a1, a2, ARITH_ADDCC);
}

static const TCGOutOpBinary outop_addco = {
    .base.static_constraint = C_O1_I2(r, r, rJ),
    .out_rrr = tgen_addco_rrr,
    .out_rri = tgen_addco_rri,
};

static void tgen_addci_rrr(TCGContext *s, TCGType type,
                           TCGReg a0, TCGReg a1, TCGReg a2)
{
    if (type == TCG_TYPE_I32) {
        tcg_out_arith(s, a0, a1, a2, ARITH_ADDC);
    } else if (use_vis3_instructions) {
        tcg_out_arith(s, a0, a1, a2, ARITH_ADDXC);
    } else {
        tcg_out_arith(s, TCG_REG_T1, a1, a2, ARITH_ADD);  /* for CC */
        tcg_out_arithi(s, a0, TCG_REG_T1, 1, ARITH_ADD);  /* for CS */
        /* Select the correct result based on actual carry value. */
        tcg_out_movcc(s, COND_CC, MOVCC_XCC, a0, TCG_REG_T1, false);
    }
}

static void tgen_addci_rri(TCGContext *s, TCGType type,
                           TCGReg a0, TCGReg a1, tcg_target_long a2)
{
    if (type == TCG_TYPE_I32) {
        tcg_out_arithi(s, a0, a1, a2, ARITH_ADDC);
        return;
    }
    /* !use_vis3_instructions */
    if (a2 != 0) {
        tcg_out_arithi(s, TCG_REG_T1, a1, a2, ARITH_ADD); /* for CC */
        tcg_out_arithi(s, a0, TCG_REG_T1, 1, ARITH_ADD);  /* for CS */
        tcg_out_movcc(s, COND_CC, MOVCC_XCC, a0, TCG_REG_T1, false);
    } else if (a0 == a1) {
        /* The input must survive until the movcc; compute into T1. */
        tcg_out_arithi(s, TCG_REG_T1, a1, 1, ARITH_ADD);
        tcg_out_movcc(s, COND_CS, MOVCC_XCC, a0, TCG_REG_T1, false);
    } else {
        /* Speculatively add 1, and undo it if the carry was clear. */
        tcg_out_arithi(s, a0, a1, 1, ARITH_ADD);
        tcg_out_movcc(s, COND_CC, MOVCC_XCC, a0, a1, false);
    }
}

static TCGConstraintSetIndex cset_addci(TCGType type, unsigned flags)
{
    if (use_vis3_instructions && type == TCG_TYPE_I64) {
        /* Note that ADDXC doesn't accept immediates.  */
        return C_O1_I2(r, rz, rz);
    }
    return C_O1_I2(r, rz, rJ);
}

static const TCGOutOpAddSubCarry outop_addci = {
    .base.static_constraint = C_Dynamic,
    .base.dynamic_constraint = cset_addci,
    .out_rrr = tgen_addci_rrr,
    .out_rri = tgen_addci_rri,
};

/* Copy %xcc.c to %icc.c */
static void tcg_out_dup_xcc_c(TCGContext *s)
{
    if (use_vis3_instructions) {
        tcg_out_arith(s, TCG_REG_T1, TCG_REG_G0, TCG_REG_G0, ARITH_ADDXC);
    } else {
        tcg_out_movi_s13(s, TCG_REG_T1, 0);
        tcg_out_movcc(s, COND_CS, MOVCC_XCC, TCG_REG_T1, 1, true);
    }
    /*
     * Write carry-in into %icc via {0,1} + -1: adding all-ones to 1
     * produces a carry out, adding it to 0 does not.
     */
    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_T1, -1, ARITH_ADDCC);
}

static void tgen_addcio_rrr(TCGContext *s, TCGType type,
                            TCGReg a0, TCGReg a1, TCGReg a2)
{
    if (type != TCG_TYPE_I32) {
        if (use_vis3_instructions) {
            tcg_out_arith(s, a0, a1, a2, ARITH_ADDXCCC);
            return;
        }
        tcg_out_dup_xcc_c(s);
    }
    tcg_out_arith(s, a0, a1, a2, ARITH_ADDCCC);
}

static void tgen_addcio_rri(TCGContext *s, TCGType type,
                            TCGReg a0, TCGReg a1, tcg_target_long a2)
{
    if (type != TCG_TYPE_I32) {
        /* !use_vis3_instructions */
        tcg_out_dup_xcc_c(s);
    }
    tcg_out_arithi(s, a0, a1, a2, ARITH_ADDCCC);
}

static TCGConstraintSetIndex cset_addcio(TCGType type, unsigned flags)
{
    if (use_vis3_instructions && type == TCG_TYPE_I64) {
        /* Note that ADDXCCC doesn't accept immediates.  */
        return C_O1_I2(r, rz, rz);
    }
    return C_O1_I2(r, rz, rJ);
}

static const TCGOutOpBinary outop_addcio = {
    .base.static_constraint = C_Dynamic,
    .base.dynamic_constraint = cset_addcio,
    .out_rrr = tgen_addcio_rrr,
    .out_rri = tgen_addcio_rri,
};

static void tcg_out_set_carry(TCGContext *s)
{
    /* 0x11 -> xcc = nzvC, icc = nzvC */
    tcg_out_arithi(s, 0, TCG_REG_G0, 0x11, WRCCR);
}

static void tgen_and(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_arith(s, a0, a1, a2, ARITH_AND);
}

static void tgen_andi(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, tcg_target_long a2)
{
    tcg_out_arithi(s, a0, a1, a2, ARITH_AND);
}

static const TCGOutOpBinary outop_and = {
    .base.static_constraint = C_O1_I2(r, r, rJ),
    .out_rrr = tgen_and,
    .out_rri = tgen_andi,
};

static void tgen_andc(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_arith(s, a0, a1, a2, ARITH_ANDN);
}

static const TCGOutOpBinary outop_andc = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_andc,
};

static const TCGOutOpBinary outop_clz = {
    .base.static_constraint = C_NotImplemented,
};

static const TCGOutOpUnary outop_ctpop = {
    .base.static_constraint = C_NotImplemented,
};

static const TCGOutOpBinary outop_ctz = {
    .base.static_constraint = C_NotImplemented,
};

static void tgen_divs_rJ(TCGContext *s, TCGType type,
                         TCGReg a0, TCGReg a1, TCGArg a2, bool c2)
{
    uint32_t insn;

    if (type == TCG_TYPE_I32) {
        /* Load Y with the sign extension of a1 to 64-bits.  */
        tcg_out_arithi(s, TCG_REG_T1, a1, 31, SHIFT_SRA);
        tcg_out_sety(s, TCG_REG_T1);
        insn = ARITH_SDIV;
    } else {
        insn = ARITH_SDIVX;
    }
    tcg_out_arithc(s, a0, a1, a2, c2, insn);
}

static void tgen_divs(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    tgen_divs_rJ(s, type, a0, a1, a2, false);
}

static void tgen_divsi(TCGContext *s, TCGType type,
                       TCGReg a0, TCGReg a1, tcg_target_long a2)
{
    tgen_divs_rJ(s, type, a0, a1, a2, true);
}

static const TCGOutOpBinary outop_divs = {
    .base.static_constraint = C_O1_I2(r, r, rJ),
    .out_rrr = tgen_divs,
    .out_rri = tgen_divsi,
};

static const TCGOutOpDivRem outop_divs2 = {
    .base.static_constraint = C_NotImplemented,
};

static void tgen_divu_rJ(TCGContext *s, TCGType type,
                         TCGReg a0, TCGReg a1, TCGArg a2, bool c2)
{
    uint32_t insn;

    if (type == TCG_TYPE_I32) {
        /* Load Y with the zero extension to 64-bits.  */
        tcg_out_sety(s, TCG_REG_G0);
        insn = ARITH_UDIV;
    } else {
        insn = ARITH_UDIVX;
    }
    tcg_out_arithc(s, a0, a1, a2, c2, insn);
}

static void tgen_divu(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    tgen_divu_rJ(s, type, a0, a1, a2, false);
}

static void tgen_divui(TCGContext *s, TCGType type,
                       TCGReg a0, TCGReg a1, tcg_target_long a2)
{
    tgen_divu_rJ(s, type, a0, a1, a2, true);
}

static const TCGOutOpBinary outop_divu = {
    .base.static_constraint = C_O1_I2(r, r, rJ),
    .out_rrr = tgen_divu,
    .out_rri = tgen_divui,
};

static const TCGOutOpDivRem outop_divu2 = {
    .base.static_constraint = C_NotImplemented,
};

static const TCGOutOpBinary outop_eqv = {
    .base.static_constraint = C_NotImplemented,
};

static void tgen_extrh_i64_i32(TCGContext *s, TCGType t, TCGReg a0, TCGReg a1)
{
    tcg_out_arithi(s, a0, a1, 32, SHIFT_SRLX);
}

static const TCGOutOpUnary outop_extrh_i64_i32 = {
    .base.static_constraint = C_O1_I1(r, r),
    .out_rr = tgen_extrh_i64_i32,
};

static void tgen_mul(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    uint32_t insn = type == TCG_TYPE_I32 ? ARITH_UMUL : ARITH_MULX;
    tcg_out_arith(s, a0, a1, a2, insn);
}

static void tgen_muli(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, tcg_target_long a2)
{
    uint32_t insn = type == TCG_TYPE_I32 ? ARITH_UMUL : ARITH_MULX;
    tcg_out_arithi(s, a0, a1, a2, insn);
}

static const TCGOutOpBinary outop_mul = {
    .base.static_constraint = C_O1_I2(r, r, rJ),
    .out_rrr = tgen_mul,
    .out_rri = tgen_muli,
};

/*
 * The 32-bit multiply insns produce a full 64-bit result.
 * Supporting 32-bit mul[us]2 opcodes avoids sign/zero-extensions
 * before the actual multiply; we only need to extract the high part
1603 * into the separate operand.
1604 */
1605static TCGConstraintSetIndex cset_mul2(TCGType type, unsigned flags)
1606{
1607    return type == TCG_TYPE_I32 ? C_O2_I2(r, r, r, r) : C_NotImplemented;
1608}
1609
1610static void tgen_muls2(TCGContext *s, TCGType type,
1611                       TCGReg a0, TCGReg a1, TCGReg a2, TCGReg a3)
1612{
1613    tcg_out_arith(s, a0, a2, a3, ARITH_SMUL);
1614    tcg_out_arithi(s, a1, a0, 32, SHIFT_SRLX);
1615}
1616
1617static const TCGOutOpMul2 outop_muls2 = {
1618    .base.static_constraint = C_Dynamic,
1619    .base.dynamic_constraint = cset_mul2,
1620    .out_rrrr = tgen_muls2,
1621};
1622
1623static const TCGOutOpBinary outop_mulsh = {
1624    .base.static_constraint = C_NotImplemented,
1625};
1626
1627static void tgen_mulu2(TCGContext *s, TCGType type,
1628                       TCGReg a0, TCGReg a1, TCGReg a2, TCGReg a3)
1629{
1630    tcg_out_arith(s, a0, a2, a3, ARITH_UMUL);
1631    tcg_out_arithi(s, a1, a0, 32, SHIFT_SRLX);
1632}
1633
1634static const TCGOutOpMul2 outop_mulu2 = {
1635    .base.static_constraint = C_Dynamic,
1636    .base.dynamic_constraint = cset_mul2,
1637    .out_rrrr = tgen_mulu2,
1638};
1639
1640static void tgen_muluh(TCGContext *s, TCGType type,
1641                       TCGReg a0, TCGReg a1, TCGReg a2)
1642{
1643    tcg_out_arith(s, a0, a1, a2, ARITH_UMULXHI);
1644}
1645
1646static TCGConstraintSetIndex cset_muluh(TCGType type, unsigned flags)
1647{
1648    return (type == TCG_TYPE_I64 && use_vis3_instructions
1649            ? C_O1_I2(r, r, r) : C_NotImplemented);
1650}
1651
1652static const TCGOutOpBinary outop_muluh = {
1653    .base.static_constraint = C_Dynamic,
1654    .base.dynamic_constraint = cset_muluh,
1655    .out_rrr = tgen_muluh,
1656};

static const TCGOutOpBinary outop_nand = {
    .base.static_constraint = C_NotImplemented,
};

static const TCGOutOpBinary outop_nor = {
    .base.static_constraint = C_NotImplemented,
};

static void tgen_or(TCGContext *s, TCGType type,
                    TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_arith(s, a0, a1, a2, ARITH_OR);
}

static void tgen_ori(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, tcg_target_long a2)
{
    tcg_out_arithi(s, a0, a1, a2, ARITH_OR);
}

static const TCGOutOpBinary outop_or = {
    .base.static_constraint = C_O1_I2(r, r, rJ),
    .out_rrr = tgen_or,
    .out_rri = tgen_ori,
};

static void tgen_orc(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_arith(s, a0, a1, a2, ARITH_ORN);
}

static const TCGOutOpBinary outop_orc = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_orc,
};

static const TCGOutOpBinary outop_rems = {
    .base.static_constraint = C_NotImplemented,
};

static const TCGOutOpBinary outop_remu = {
    .base.static_constraint = C_NotImplemented,
};

static const TCGOutOpBinary outop_rotl = {
    .base.static_constraint = C_NotImplemented,
};

static const TCGOutOpBinary outop_rotr = {
    .base.static_constraint = C_NotImplemented,
};
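
/*
 * The shift helpers below mask immediate counts to 31 (I32) or
 * 63 (I64).  This matches the hardware, which considers only the low
 * five bits of the count for SLL/SRL/SRA and the low six bits for the
 * 64-bit SLLX/SRLX/SRAX forms.
 */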

static void tgen_sar(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    uint32_t insn = type == TCG_TYPE_I32 ? SHIFT_SRA : SHIFT_SRAX;
    tcg_out_arith(s, a0, a1, a2, insn);
}

static void tgen_sari(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, tcg_target_long a2)
{
    uint32_t insn = type == TCG_TYPE_I32 ? SHIFT_SRA : SHIFT_SRAX;
    uint32_t mask = type == TCG_TYPE_I32 ? 31 : 63;
    tcg_out_arithi(s, a0, a1, a2 & mask, insn);
}

static const TCGOutOpBinary outop_sar = {
    .base.static_constraint = C_O1_I2(r, r, rJ),
    .out_rrr = tgen_sar,
    .out_rri = tgen_sari,
};

static void tgen_shl(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    uint32_t insn = type == TCG_TYPE_I32 ? SHIFT_SLL : SHIFT_SLLX;
    tcg_out_arith(s, a0, a1, a2, insn);
}

static void tgen_shli(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, tcg_target_long a2)
{
    uint32_t insn = type == TCG_TYPE_I32 ? SHIFT_SLL : SHIFT_SLLX;
    uint32_t mask = type == TCG_TYPE_I32 ? 31 : 63;
    tcg_out_arithi(s, a0, a1, a2 & mask, insn);
}

static const TCGOutOpBinary outop_shl = {
    .base.static_constraint = C_O1_I2(r, r, rJ),
    .out_rrr = tgen_shl,
    .out_rri = tgen_shli,
};

static void tgen_shr(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    uint32_t insn = type == TCG_TYPE_I32 ? SHIFT_SRL : SHIFT_SRLX;
    tcg_out_arith(s, a0, a1, a2, insn);
}

static void tgen_shri(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, tcg_target_long a2)
{
    uint32_t insn = type == TCG_TYPE_I32 ? SHIFT_SRL : SHIFT_SRLX;
    uint32_t mask = type == TCG_TYPE_I32 ? 31 : 63;
    tcg_out_arithi(s, a0, a1, a2 & mask, insn);
}

static const TCGOutOpBinary outop_shr = {
    .base.static_constraint = C_O1_I2(r, r, rJ),
    .out_rrr = tgen_shr,
    .out_rri = tgen_shri,
};

static void tgen_sub(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_arith(s, a0, a1, a2, ARITH_SUB);
}

static const TCGOutOpSubtract outop_sub = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_sub,
};

static void tgen_subbo_rrr(TCGContext *s, TCGType type,
                           TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_arith(s, a0, a1, a2, ARITH_SUBCC);
}

static void tgen_subbo_rri(TCGContext *s, TCGType type,
                           TCGReg a0, TCGReg a1, tcg_target_long a2)
{
    tcg_out_arithi(s, a0, a1, a2, ARITH_SUBCC);
}

static const TCGOutOpAddSubCarry outop_subbo = {
    .base.static_constraint = C_O1_I2(r, rz, rJ),
    .out_rrr = tgen_subbo_rrr,
    .out_rri = tgen_subbo_rri,
};

static void tgen_subbi_rrr(TCGContext *s, TCGType type,
                           TCGReg a0, TCGReg a1, TCGReg a2)
{
    /* TODO: OSA 2015 added SUBXC */
    if (type == TCG_TYPE_I32) {
        tcg_out_arith(s, a0, a1, a2, ARITH_SUBC);
    } else {
        /* Compute both possible results, then select on the borrow bit. */
        tcg_out_arith(s, TCG_REG_T1, a1, a2, ARITH_SUB);  /* borrow clear */
        tcg_out_arithi(s, a0, TCG_REG_T1, 1, ARITH_SUB);  /* borrow set */
        tcg_out_movcc(s, COND_CC, MOVCC_XCC, a0, TCG_REG_T1, false);
    }
}
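
/*
 * A rough sketch of the I64 sequence above, with the incoming borrow
 * in %xcc.c and t1 standing for TCG_REG_T1:
 *     sub   a1, a2, t1        ! result if the borrow is clear
 *     sub   t1, 1, a0         ! result if the borrow is set
 *     movcc %xcc, t1, a0      ! borrow clear: replace a0 with t1
 */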

static void tgen_subbi_rri(TCGContext *s, TCGType type,
                           TCGReg a0, TCGReg a1, tcg_target_long a2)
{
    if (type == TCG_TYPE_I32) {
        tcg_out_arithi(s, a0, a1, a2, ARITH_SUBC);
    } else if (a2 != 0) {
        tcg_out_arithi(s, TCG_REG_T1, a1, a2, ARITH_SUB);  /* borrow clear */
        tcg_out_arithi(s, a0, TCG_REG_T1, 1, ARITH_SUB);   /* borrow set */
        tcg_out_movcc(s, COND_CC, MOVCC_XCC, a0, TCG_REG_T1, false);
    } else if (a0 == a1) {
        /*
         * a2 == 0 and the output aliases the input: compute a1 - 1 into
         * the scratch and copy it back only when the borrow is set.
         */
        tcg_out_arithi(s, TCG_REG_T1, a1, 1, ARITH_SUB);
        tcg_out_movcc(s, COND_CS, MOVCC_XCC, a0, TCG_REG_T1, false);
    } else {
        /*
         * a2 == 0 and a0 != a1: compute a1 - 1 into a0 directly and
         * restore the original value when the borrow is clear.
         */
        tcg_out_arithi(s, a0, a1, 1, ARITH_SUB);
        tcg_out_movcc(s, COND_CC, MOVCC_XCC, a0, a1, false);
    }
}

static const TCGOutOpAddSubCarry outop_subbi = {
    .base.static_constraint = C_O1_I2(r, rz, rJ),
    .out_rrr = tgen_subbi_rrr,
    .out_rri = tgen_subbi_rri,
};

static void tgen_subbio_rrr(TCGContext *s, TCGType type,
                            TCGReg a0, TCGReg a1, TCGReg a2)
{
    if (type != TCG_TYPE_I32) {
        /*
         * TODO: OSA 2015 added SUBXCCC.  Until then, SUBCCC consumes
         * the borrow from %icc, so mirror %xcc.c into %icc.c first.
         */
        tcg_out_dup_xcc_c(s);
    }
    tcg_out_arith(s, a0, a1, a2, ARITH_SUBCCC);
}

static void tgen_subbio_rri(TCGContext *s, TCGType type,
                            TCGReg a0, TCGReg a1, tcg_target_long a2)
{
    if (type != TCG_TYPE_I32) {
        /* As above, mirror %xcc.c into %icc.c for SUBCCC. */
        tcg_out_dup_xcc_c(s);
    }
    tcg_out_arithi(s, a0, a1, a2, ARITH_SUBCCC);
}

static const TCGOutOpAddSubCarry outop_subbio = {
    .base.static_constraint = C_O1_I2(r, rz, rJ),
    .out_rrr = tgen_subbio_rrr,
    .out_rri = tgen_subbio_rri,
};

static void tcg_out_set_borrow(TCGContext *s)
{
    tcg_out_set_carry(s);  /* borrow == carry */
}

static void tgen_xor(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_arith(s, a0, a1, a2, ARITH_XOR);
}

static void tgen_xori(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, tcg_target_long a2)
{
    tcg_out_arithi(s, a0, a1, a2, ARITH_XOR);
}

static const TCGOutOpBinary outop_xor = {
    .base.static_constraint = C_O1_I2(r, r, rJ),
    .out_rrr = tgen_xor,
    .out_rri = tgen_xori,
};

static const TCGOutOpBswap outop_bswap16 = {
    .base.static_constraint = C_NotImplemented,
};

static const TCGOutOpBswap outop_bswap32 = {
    .base.static_constraint = C_NotImplemented,
};

static const TCGOutOpUnary outop_bswap64 = {
    .base.static_constraint = C_NotImplemented,
};

static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
    tgen_sub(s, type, a0, TCG_REG_G0, a1);
}

static const TCGOutOpUnary outop_neg = {
    .base.static_constraint = C_O1_I1(r, r),
    .out_rr = tgen_neg,
};

static void tgen_not(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
    tgen_orc(s, type, a0, TCG_REG_G0, a1);
}

static const TCGOutOpUnary outop_not = {
    .base.static_constraint = C_O1_I1(r, r),
    .out_rr = tgen_not,
};
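
/*
 * Both unary helpers above rely on %g0 reading as zero:
 * neg a1 is sub(%g0, a1) = 0 - a1, and not a1 is orn(%g0, a1)
 * = 0 | ~a1 = ~a1.
 */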

static const TCGOutOpDeposit outop_deposit = {
    .base.static_constraint = C_NotImplemented,
};

static void tgen_extract(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
                         unsigned ofs, unsigned len)
{
    tcg_debug_assert(ofs + len == 32);
    tcg_out_arithi(s, a0, a1, ofs, SHIFT_SRL);
}

static const TCGOutOpExtract outop_extract = {
    .base.static_constraint = C_O1_I1(r, r),
    .out_rr = tgen_extract,
};

static void tgen_sextract(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
                          unsigned ofs, unsigned len)
{
    tcg_debug_assert(ofs + len == 32);
    tcg_out_arithi(s, a0, a1, ofs, SHIFT_SRA);
}

static const TCGOutOpExtract outop_sextract = {
    .base.static_constraint = C_O1_I1(r, r),
    .out_rr = tgen_sextract,
};
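
/*
 * Only extractions of the most significant bits of a 32-bit value are
 * accepted here (ofs + len == 32), because those reduce to a single
 * shift: e.g. extract(x, 24, 8) is simply x >> 24, logical for the
 * unsigned case and arithmetic for the signed one.
 */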

static const TCGOutOpExtract2 outop_extract2 = {
    .base.static_constraint = C_NotImplemented,
};

static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS])
{
    TCGArg a0, a1, a2;

    /* Hoist the loads of the most common arguments.  */
    a0 = args[0];
    a1 = args[1];
    a2 = args[2];

    switch (opc) {
    case INDEX_op_goto_ptr:
        tcg_out_arithi(s, TCG_REG_G0, a0, 0, JMPL);
        tcg_out_mov_delay(s, TCG_REG_TB, a0);
        break;
    case INDEX_op_br:
        tcg_out_bpcc(s, COND_A, BPCC_PT, arg_label(a0));
        tcg_out_nop(s);
        break;

#define OP_32_64(x)                             \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64)
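
    /*
     * OP_32_64(ld8u) expands, via glue(), to the pair of labels
     *     case INDEX_op_ld8u_i32:
     *     case INDEX_op_ld8u_i64:
     * so each use below covers both widths of the same operation.
     */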

    OP_32_64(ld8u):
        tcg_out_ldst(s, a0, a1, a2, LDUB);
        break;
    OP_32_64(ld8s):
        tcg_out_ldst(s, a0, a1, a2, LDSB);
        break;
    OP_32_64(ld16u):
        tcg_out_ldst(s, a0, a1, a2, LDUH);
        break;
    OP_32_64(ld16s):
        tcg_out_ldst(s, a0, a1, a2, LDSH);
        break;
    case INDEX_op_ld_i32:
    case INDEX_op_ld32u_i64:
        tcg_out_ldst(s, a0, a1, a2, LDUW);
        break;
    OP_32_64(st8):
        tcg_out_ldst(s, a0, a1, a2, STB);
        break;
    OP_32_64(st16):
        tcg_out_ldst(s, a0, a1, a2, STH);
        break;
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
        tcg_out_ldst(s, a0, a1, a2, STW);
        break;

    case INDEX_op_qemu_ld_i32:
        tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32);
        break;
    case INDEX_op_qemu_ld_i64:
        tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64);
        break;
    case INDEX_op_qemu_st_i32:
        tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32);
        break;
    case INDEX_op_qemu_st_i64:
        tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64);
        break;

    case INDEX_op_ld32s_i64:
        tcg_out_ldst(s, a0, a1, a2, LDSW);
        break;
    case INDEX_op_ld_i64:
        tcg_out_ldst(s, a0, a1, a2, LDX);
        break;
    case INDEX_op_st_i64:
        tcg_out_ldst(s, a0, a1, a2, STX);
        break;

    case INDEX_op_mb:
        tcg_out_mb(s, a0);
        break;

    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
    case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb.  */
    case INDEX_op_goto_tb:  /* Always emitted via tcg_out_goto_tb.  */
    default:
        g_assert_not_reached();
    }
}

static TCGConstraintSetIndex
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
{
    switch (op) {
    case INDEX_op_goto_ptr:
        return C_O0_I1(r);

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld_i32:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_qemu_ld_i32:
    case INDEX_op_qemu_ld_i64:
        return C_O1_I1(r, r);

    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
    case INDEX_op_qemu_st_i32:
    case INDEX_op_qemu_st_i64:
        return C_O0_I2(rz, r);

    default:
        return C_NotImplemented;
    }
}
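
/*
 * The stores take C_O0_I2(rz, r): the data operand may be a register
 * or the constant zero, in which case %g0 is supplied directly and no
 * temporary needs to be materialized.
 */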

static void tcg_target_init(TCGContext *s)
{
    /*
     * Only probe for the platform and capabilities if we haven't already
     * determined maximum values at compile time.
     */
#ifndef use_vis3_instructions
    {
        unsigned long hwcap = qemu_getauxval(AT_HWCAP);
        use_vis3_instructions = (hwcap & HWCAP_SPARC_VIS3) != 0;
    }
#endif
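
    /*
     * The #ifndef above works because, when the compiler itself targets
     * a VIS 3 capable CPU, use_vis3_instructions is presumably defined
     * as a constant macro elsewhere, so the runtime AT_HWCAP probe is
     * compiled out entirely.
     */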

    tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS;
    tcg_target_available_regs[TCG_TYPE_I64] = ALL_GENERAL_REGS;

    tcg_target_call_clobber_regs = 0;
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G1);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G2);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G3);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G4);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G5);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G6);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G7);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O0);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O1);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O2);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O3);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O4);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O5);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O6);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O7);

    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G0); /* zero */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G6); /* reserved for os */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G7); /* thread pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I6); /* frame pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I7); /* return address */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_O6); /* stack pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_T1); /* for internal use */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_T2); /* for internal use */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_T3); /* for internal use */
}

#define ELF_HOST_MACHINE  EM_SPARCV9

typedef struct {
    DebugFrameHeader h;
    uint8_t fde_def_cfa[4];
    uint8_t fde_win_save;
    uint8_t fde_ret_save[3];
} DebugFrame;

static const DebugFrame debug_frame = {
    .h.cie.len = sizeof(DebugFrameCIE) - 4, /* length after .len member */
    .h.cie.id = -1,
    .h.cie.version = 1,
    .h.cie.code_align = 1,
    .h.cie.data_align = -sizeof(void *) & 0x7f,
    .h.cie.return_column = 15,            /* o7 */

    /* Total FDE size does not include the "len" member.  */
    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),

    .fde_def_cfa = {
        12, 30,                         /* DW_CFA_def_cfa i6, 2047 */
        (2047 & 0x7f) | 0x80, (2047 >> 7)
    },
    .fde_win_save = 0x2d,               /* DW_CFA_GNU_window_save */
    .fde_ret_save = { 9, 15, 31 },      /* DW_CFA_register o7, i7 */
};
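
/*
 * The CFA offset above is a ULEB128 operand: 2047 (0x7ff) does not fit
 * in seven bits, so it is split into a first byte holding the low seven
 * bits with the continuation flag set ((2047 & 0x7f) | 0x80 == 0xff)
 * and a second byte holding the remaining bits (2047 >> 7 == 15).
 */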

void tcg_register_jit(const void *buf, size_t buf_size)
{
    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}
