xref: /openbmc/qemu/tcg/sparc64/tcg-target.c.inc (revision 51e47cf8)
/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* We only support generating code for 64-bit mode.  */
#ifndef __arch64__
#error "unsupported code generation mode"
#endif

#include "../tcg-pool.c.inc"

#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%g0",
    "%g1",
    "%g2",
    "%g3",
    "%g4",
    "%g5",
    "%g6",
    "%g7",
    "%o0",
    "%o1",
    "%o2",
    "%o3",
    "%o4",
    "%o5",
    "%o6",
    "%o7",
    "%l0",
    "%l1",
    "%l2",
    "%l3",
    "%l4",
    "%l5",
    "%l6",
    "%l7",
    "%i0",
    "%i1",
    "%i2",
    "%i3",
    "%i4",
    "%i5",
    "%i6",
    "%i7",
};
#endif

#define TCG_CT_CONST_S11  0x100
#define TCG_CT_CONST_S13  0x200
#define TCG_CT_CONST_ZERO 0x400

/*
 * For softmmu, we need to avoid conflicts with the first 3
 * argument registers to perform the tlb lookup, and to call
 * the helper function.
 */
#ifdef CONFIG_SOFTMMU
#define SOFTMMU_RESERVE_REGS MAKE_64BIT_MASK(TCG_REG_O0, 3)
#else
#define SOFTMMU_RESERVE_REGS 0
#endif
#define ALL_GENERAL_REGS     MAKE_64BIT_MASK(0, 32)
#define ALL_QLDST_REGS       (ALL_GENERAL_REGS & ~SOFTMMU_RESERVE_REGS)

/* Define some temporary registers.  T2 is used for constant generation.  */
#define TCG_REG_T1  TCG_REG_G1
#define TCG_REG_T2  TCG_REG_O7

#ifndef CONFIG_SOFTMMU
# define TCG_GUEST_BASE_REG TCG_REG_I5
#endif

#define TCG_REG_TB  TCG_REG_I1

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_L0,
    TCG_REG_L1,
    TCG_REG_L2,
    TCG_REG_L3,
    TCG_REG_L4,
    TCG_REG_L5,
    TCG_REG_L6,
    TCG_REG_L7,

    TCG_REG_I0,
    TCG_REG_I1,
    TCG_REG_I2,
    TCG_REG_I3,
    TCG_REG_I4,
    TCG_REG_I5,

    TCG_REG_G2,
    TCG_REG_G3,
    TCG_REG_G4,
    TCG_REG_G5,

    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
    TCG_REG_O4,
    TCG_REG_O5,
};

static const int tcg_target_call_iarg_regs[6] = {
    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
    TCG_REG_O4,
    TCG_REG_O5,
};

static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
{
    tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
    tcg_debug_assert(slot >= 0 && slot <= 3);
    return TCG_REG_O0 + slot;
}

#define INSN_OP(x)  ((x) << 30)
#define INSN_OP2(x) ((x) << 22)
#define INSN_OP3(x) ((x) << 19)
#define INSN_OPF(x) ((x) << 5)
#define INSN_RD(x)  ((x) << 25)
#define INSN_RS1(x) ((x) << 14)
#define INSN_RS2(x) (x)
#define INSN_ASI(x) ((x) << 5)

#define INSN_IMM10(x) ((1 << 13) | ((x) & 0x3ff))
#define INSN_IMM11(x) ((1 << 13) | ((x) & 0x7ff))
#define INSN_IMM13(x) ((1 << 13) | ((x) & 0x1fff))
#define INSN_OFF16(x) ((((x) >> 2) & 0x3fff) | ((((x) >> 16) & 3) << 20))
#define INSN_OFF19(x) (((x) >> 2) & 0x07ffff)
#define INSN_COND(x) ((x) << 25)
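/*
 * Illustrative example: the format-3 word for "add %o1, %o2, %o0" is
 * composed below as ARITH_ADD | INSN_RD(TCG_REG_O0) | INSN_RS1(TCG_REG_O1)
 * | INSN_RS2(TCG_REG_O2), i.e. op = 2 in bits 31:30, op3 = 0x00 in bits
 * 24:19, and rd/rs1/rs2 packed into bits 29:25, 18:14 and 4:0.
 */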

#define COND_N     0x0
#define COND_E     0x1
#define COND_LE    0x2
#define COND_L     0x3
#define COND_LEU   0x4
#define COND_CS    0x5
#define COND_NEG   0x6
#define COND_VS    0x7
#define COND_A     0x8
#define COND_NE    0x9
#define COND_G     0xa
#define COND_GE    0xb
#define COND_GU    0xc
#define COND_CC    0xd
#define COND_POS   0xe
#define COND_VC    0xf
#define BA         (INSN_OP(0) | INSN_COND(COND_A) | INSN_OP2(0x2))

#define RCOND_Z    1
#define RCOND_LEZ  2
#define RCOND_LZ   3
#define RCOND_NZ   5
#define RCOND_GZ   6
#define RCOND_GEZ  7

#define MOVCC_ICC  (1 << 18)
#define MOVCC_XCC  (1 << 18 | 1 << 12)

#define BPCC_ICC   0
#define BPCC_XCC   (2 << 20)
#define BPCC_PT    (1 << 19)
#define BPCC_PN    0
#define BPCC_A     (1 << 29)

#define BPR_PT     BPCC_PT

#define ARITH_ADD  (INSN_OP(2) | INSN_OP3(0x00))
#define ARITH_ADDCC (INSN_OP(2) | INSN_OP3(0x10))
#define ARITH_AND  (INSN_OP(2) | INSN_OP3(0x01))
#define ARITH_ANDCC (INSN_OP(2) | INSN_OP3(0x11))
#define ARITH_ANDN (INSN_OP(2) | INSN_OP3(0x05))
#define ARITH_OR   (INSN_OP(2) | INSN_OP3(0x02))
#define ARITH_ORCC (INSN_OP(2) | INSN_OP3(0x12))
#define ARITH_ORN  (INSN_OP(2) | INSN_OP3(0x06))
#define ARITH_XOR  (INSN_OP(2) | INSN_OP3(0x03))
#define ARITH_SUB  (INSN_OP(2) | INSN_OP3(0x04))
#define ARITH_SUBCC (INSN_OP(2) | INSN_OP3(0x14))
#define ARITH_ADDC (INSN_OP(2) | INSN_OP3(0x08))
#define ARITH_SUBC (INSN_OP(2) | INSN_OP3(0x0c))
#define ARITH_UMUL (INSN_OP(2) | INSN_OP3(0x0a))
#define ARITH_SMUL (INSN_OP(2) | INSN_OP3(0x0b))
#define ARITH_UDIV (INSN_OP(2) | INSN_OP3(0x0e))
#define ARITH_SDIV (INSN_OP(2) | INSN_OP3(0x0f))
#define ARITH_MULX (INSN_OP(2) | INSN_OP3(0x09))
#define ARITH_UDIVX (INSN_OP(2) | INSN_OP3(0x0d))
#define ARITH_SDIVX (INSN_OP(2) | INSN_OP3(0x2d))
#define ARITH_MOVCC (INSN_OP(2) | INSN_OP3(0x2c))
#define ARITH_MOVR (INSN_OP(2) | INSN_OP3(0x2f))

#define ARITH_ADDXC (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x11))
#define ARITH_UMULXHI (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x16))

#define SHIFT_SLL  (INSN_OP(2) | INSN_OP3(0x25))
#define SHIFT_SRL  (INSN_OP(2) | INSN_OP3(0x26))
#define SHIFT_SRA  (INSN_OP(2) | INSN_OP3(0x27))

#define SHIFT_SLLX (INSN_OP(2) | INSN_OP3(0x25) | (1 << 12))
#define SHIFT_SRLX (INSN_OP(2) | INSN_OP3(0x26) | (1 << 12))
#define SHIFT_SRAX (INSN_OP(2) | INSN_OP3(0x27) | (1 << 12))

#define RDY        (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(0))
#define WRY        (INSN_OP(2) | INSN_OP3(0x30) | INSN_RD(0))
#define JMPL       (INSN_OP(2) | INSN_OP3(0x38))
#define RETURN     (INSN_OP(2) | INSN_OP3(0x39))
#define SAVE       (INSN_OP(2) | INSN_OP3(0x3c))
#define RESTORE    (INSN_OP(2) | INSN_OP3(0x3d))
#define SETHI      (INSN_OP(0) | INSN_OP2(0x4))
#define CALL       INSN_OP(1)
#define LDUB       (INSN_OP(3) | INSN_OP3(0x01))
#define LDSB       (INSN_OP(3) | INSN_OP3(0x09))
#define LDUH       (INSN_OP(3) | INSN_OP3(0x02))
#define LDSH       (INSN_OP(3) | INSN_OP3(0x0a))
#define LDUW       (INSN_OP(3) | INSN_OP3(0x00))
#define LDSW       (INSN_OP(3) | INSN_OP3(0x08))
#define LDX        (INSN_OP(3) | INSN_OP3(0x0b))
#define STB        (INSN_OP(3) | INSN_OP3(0x05))
#define STH        (INSN_OP(3) | INSN_OP3(0x06))
#define STW        (INSN_OP(3) | INSN_OP3(0x04))
#define STX        (INSN_OP(3) | INSN_OP3(0x0e))
#define LDUBA      (INSN_OP(3) | INSN_OP3(0x11))
#define LDSBA      (INSN_OP(3) | INSN_OP3(0x19))
#define LDUHA      (INSN_OP(3) | INSN_OP3(0x12))
#define LDSHA      (INSN_OP(3) | INSN_OP3(0x1a))
#define LDUWA      (INSN_OP(3) | INSN_OP3(0x10))
#define LDSWA      (INSN_OP(3) | INSN_OP3(0x18))
#define LDXA       (INSN_OP(3) | INSN_OP3(0x1b))
#define STBA       (INSN_OP(3) | INSN_OP3(0x15))
#define STHA       (INSN_OP(3) | INSN_OP3(0x16))
#define STWA       (INSN_OP(3) | INSN_OP3(0x14))
#define STXA       (INSN_OP(3) | INSN_OP3(0x1e))

#define MEMBAR     (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(15) | (1 << 13))

#define NOP        (SETHI | INSN_RD(TCG_REG_G0) | 0)
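/* NOP is the canonical SPARC nop encoding, "sethi 0, %g0".  */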

#ifndef ASI_PRIMARY_LITTLE
#define ASI_PRIMARY_LITTLE 0x88
#endif

#define LDUH_LE    (LDUHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSH_LE    (LDSHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDUW_LE    (LDUWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSW_LE    (LDSWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDX_LE     (LDXA  | INSN_ASI(ASI_PRIMARY_LITTLE))

#define STH_LE     (STHA  | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STW_LE     (STWA  | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STX_LE     (STXA  | INSN_ASI(ASI_PRIMARY_LITTLE))

#ifndef use_vis3_instructions
bool use_vis3_instructions;
#endif

static bool check_fit_i64(int64_t val, unsigned int bits)
{
    return val == sextract64(val, 0, bits);
}

static bool check_fit_i32(int32_t val, unsigned int bits)
{
    return val == sextract32(val, 0, bits);
}

#define check_fit_tl    check_fit_i64
#define check_fit_ptr   check_fit_i64
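/*
 * For reference: a signed 13-bit immediate spans -4096..4095, so
 * check_fit_ptr(4095, 13) holds while check_fit_ptr(4096, 13) does not.
 */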

static bool patch_reloc(tcg_insn_unit *src_rw, int type,
                        intptr_t value, intptr_t addend)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    uint32_t insn = *src_rw;
    intptr_t pcrel;

    value += addend;
    pcrel = tcg_ptr_byte_diff((tcg_insn_unit *)value, src_rx);

    switch (type) {
    case R_SPARC_WDISP16:
        if (!check_fit_ptr(pcrel >> 2, 16)) {
            return false;
        }
        insn &= ~INSN_OFF16(-1);
        insn |= INSN_OFF16(pcrel);
        break;
    case R_SPARC_WDISP19:
        if (!check_fit_ptr(pcrel >> 2, 19)) {
            return false;
        }
        insn &= ~INSN_OFF19(-1);
        insn |= INSN_OFF19(pcrel);
        break;
    case R_SPARC_13:
        if (!check_fit_ptr(value, 13)) {
            return false;
        }
        insn &= ~INSN_IMM13(-1);
        insn |= INSN_IMM13(value);
        break;
    default:
        g_assert_not_reached();
    }

    *src_rw = insn;
    return true;
}

/* test if a constant matches the constraint */
static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
{
    if (ct & TCG_CT_CONST) {
        return 1;
    }

    if (type == TCG_TYPE_I32) {
        val = (int32_t)val;
    }

    if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return 1;
    } else if ((ct & TCG_CT_CONST_S11) && check_fit_tl(val, 11)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_S13) && check_fit_tl(val, 13)) {
        return 1;
    } else {
        return 0;
    }
}

static void tcg_out_nop(TCGContext *s)
{
    tcg_out32(s, NOP);
}

static void tcg_out_arith(TCGContext *s, TCGReg rd, TCGReg rs1,
                          TCGReg rs2, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_RS2(rs2));
}

static void tcg_out_arithi(TCGContext *s, TCGReg rd, TCGReg rs1,
                           int32_t offset, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_IMM13(offset));
}

static void tcg_out_arithc(TCGContext *s, TCGReg rd, TCGReg rs1,
                           int32_t val2, int val2const, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1)
              | (val2const ? INSN_IMM13(val2) : INSN_RS2(val2)));
}

static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    if (ret != arg) {
        tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR);
    }
    return true;
}

static void tcg_out_mov_delay(TCGContext *s, TCGReg ret, TCGReg arg)
{
    if (ret != arg) {
        tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR);
    } else {
        tcg_out_nop(s);
    }
}

static void tcg_out_sethi(TCGContext *s, TCGReg ret, uint32_t arg)
{
    tcg_out32(s, SETHI | INSN_RD(ret) | ((arg & 0xfffffc00) >> 10));
}

static void tcg_out_movi_imm13(TCGContext *s, TCGReg ret, int32_t arg)
{
    tcg_out_arithi(s, ret, TCG_REG_G0, arg, ARITH_OR);
}

static void tcg_out_movi_imm32(TCGContext *s, TCGReg ret, int32_t arg)
{
    if (check_fit_i32(arg, 13)) {
        /* A 13-bit constant sign-extended to 64-bits.  */
        tcg_out_movi_imm13(s, ret, arg);
    } else {
        /* A 32-bit constant zero-extended to 64 bits.  */
        tcg_out_sethi(s, ret, arg);
        if (arg & 0x3ff) {
            tcg_out_arithi(s, ret, ret, arg & 0x3ff, ARITH_OR);
        }
    }
}

static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
                             tcg_target_long arg, bool in_prologue,
                             TCGReg scratch)
{
    tcg_target_long hi, lo = (int32_t)arg;
    tcg_target_long test, lsb;

    /* A 32-bit constant, or 32-bit zero-extended to 64-bits.  */
    if (type == TCG_TYPE_I32 || arg == (uint32_t)arg) {
        tcg_out_movi_imm32(s, ret, arg);
        return;
    }

    /* A 13-bit constant sign-extended to 64-bits.  */
    if (check_fit_tl(arg, 13)) {
        tcg_out_movi_imm13(s, ret, arg);
        return;
    }

    /* A 13-bit constant relative to the TB.  */
    if (!in_prologue) {
        test = tcg_tbrel_diff(s, (void *)arg);
        if (check_fit_ptr(test, 13)) {
            tcg_out_arithi(s, ret, TCG_REG_TB, test, ARITH_ADD);
            return;
        }
    }

    /* A 32-bit constant sign-extended to 64-bits.  */
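    /*
     * Only negative values reach this case (non-negative 32-bit values
     * were handled above).  SETHI zero-extends its 22-bit payload, so we
     * load the complement of the high bits and flip everything back with
     * a sign-extended 13-bit XOR: its low 10 bits supply bits 9:0, and
     * its set upper bits invert bits 63:10 to the desired value.
     */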
    if (arg == lo) {
        tcg_out_sethi(s, ret, ~arg);
        tcg_out_arithi(s, ret, ret, (arg & 0x3ff) | -0x400, ARITH_XOR);
        return;
    }

    /* A 32-bit constant, shifted.  */
    lsb = ctz64(arg);
    test = (tcg_target_long)arg >> lsb;
    if (lsb > 10 && test == extract64(test, 0, 21)) {
        tcg_out_sethi(s, ret, test << 10);
        tcg_out_arithi(s, ret, ret, lsb - 10, SHIFT_SLLX);
        return;
    } else if (test == (uint32_t)test || test == (int32_t)test) {
        tcg_out_movi_int(s, TCG_TYPE_I64, ret, test, in_prologue, scratch);
        tcg_out_arithi(s, ret, ret, lsb, SHIFT_SLLX);
        return;
    }

    /* Use the constant pool, if possible. */
    if (!in_prologue) {
        new_pool_label(s, arg, R_SPARC_13, s->code_ptr,
                       tcg_tbrel_diff(s, NULL));
        tcg_out32(s, LDX | INSN_RD(ret) | INSN_RS1(TCG_REG_TB));
        return;
    }

    /* A 64-bit constant decomposed into 2 32-bit pieces.  */
    if (check_fit_i32(lo, 13)) {
        hi = (arg - lo) >> 32;
        tcg_out_movi_imm32(s, ret, hi);
        tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
        tcg_out_arithi(s, ret, ret, lo, ARITH_ADD);
    } else {
        hi = arg >> 32;
        tcg_out_movi_imm32(s, ret, hi);
        tcg_out_movi_imm32(s, scratch, lo);
        tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
        tcg_out_arith(s, ret, ret, scratch, ARITH_OR);
    }
}

static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg)
{
    tcg_debug_assert(ret != TCG_REG_T2);
    tcg_out_movi_int(s, type, ret, arg, false, TCG_REG_T2);
}

static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
{
    g_assert_not_reached();
}

static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
{
    g_assert_not_reached();
}

static void tcg_out_ext8u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_arithi(s, rd, rs, 0xff, ARITH_AND);
}

static void tcg_out_ext16u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_arithi(s, rd, rs, 16, SHIFT_SLL);
    tcg_out_arithi(s, rd, rd, 16, SHIFT_SRL);
}

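/*
 * The v9 32-bit shifts operate on the low 32 bits and zero-extend (SRL)
 * or sign-extend (SRA) the result to 64 bits, so a shift by zero is a
 * convenient 32->64-bit extension.
 */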
static void tcg_out_ext32s(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_arithi(s, rd, rs, 0, SHIFT_SRA);
}

static void tcg_out_ext32u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_arithi(s, rd, rs, 0, SHIFT_SRL);
}

static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_ext32s(s, rd, rs);
}

static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_ext32u(s, rd, rs);
}

static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_mov(s, TCG_TYPE_I32, rd, rs);
}

static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
{
    return false;
}

static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                             tcg_target_long imm)
{
    /* This function is only used for passing structs by reference. */
    g_assert_not_reached();
}

static void tcg_out_ldst_rr(TCGContext *s, TCGReg data, TCGReg a1,
                            TCGReg a2, int op)
{
    tcg_out32(s, op | INSN_RD(data) | INSN_RS1(a1) | INSN_RS2(a2));
}

static void tcg_out_ldst(TCGContext *s, TCGReg ret, TCGReg addr,
                         intptr_t offset, int op)
{
    if (check_fit_ptr(offset, 13)) {
        tcg_out32(s, op | INSN_RD(ret) | INSN_RS1(addr) |
                  INSN_IMM13(offset));
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, offset);
        tcg_out_ldst_rr(s, ret, addr, TCG_REG_T1, op);
    }
}

static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
                       TCGReg arg1, intptr_t arg2)
{
    tcg_out_ldst(s, ret, arg1, arg2, (type == TCG_TYPE_I32 ? LDUW : LDX));
}

static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg arg1, intptr_t arg2)
{
    tcg_out_ldst(s, arg, arg1, arg2, (type == TCG_TYPE_I32 ? STW : STX));
}

static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                        TCGReg base, intptr_t ofs)
{
    if (val == 0) {
        tcg_out_st(s, type, TCG_REG_G0, base, ofs);
        return true;
    }
    return false;
}

static void tcg_out_sety(TCGContext *s, TCGReg rs)
{
    tcg_out32(s, WRY | INSN_RS1(TCG_REG_G0) | INSN_RS2(rs));
}

static void tcg_out_div32(TCGContext *s, TCGReg rd, TCGReg rs1,
                          int32_t val2, int val2const, int uns)
{
    /* Load Y with the sign/zero extension of RS1 to 64-bits.  */
    if (uns) {
        tcg_out_sety(s, TCG_REG_G0);
    } else {
        tcg_out_arithi(s, TCG_REG_T1, rs1, 31, SHIFT_SRA);
        tcg_out_sety(s, TCG_REG_T1);
    }

    tcg_out_arithc(s, rd, rs1, val2, val2const,
                   uns ? ARITH_UDIV : ARITH_SDIV);
}

static const uint8_t tcg_cond_to_bcond[] = {
    [TCG_COND_EQ] = COND_E,
    [TCG_COND_NE] = COND_NE,
    [TCG_COND_LT] = COND_L,
    [TCG_COND_GE] = COND_GE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_G,
    [TCG_COND_LTU] = COND_CS,
    [TCG_COND_GEU] = COND_CC,
    [TCG_COND_LEU] = COND_LEU,
    [TCG_COND_GTU] = COND_GU,
};

static const uint8_t tcg_cond_to_rcond[] = {
    [TCG_COND_EQ] = RCOND_Z,
    [TCG_COND_NE] = RCOND_NZ,
    [TCG_COND_LT] = RCOND_LZ,
    [TCG_COND_GT] = RCOND_GZ,
    [TCG_COND_LE] = RCOND_LEZ,
    [TCG_COND_GE] = RCOND_GEZ
};

static void tcg_out_bpcc0(TCGContext *s, int scond, int flags, int off19)
{
    tcg_out32(s, INSN_OP(0) | INSN_OP2(1) | INSN_COND(scond) | flags | off19);
}

static void tcg_out_bpcc(TCGContext *s, int scond, int flags, TCGLabel *l)
{
    int off19 = 0;

    if (l->has_value) {
        off19 = INSN_OFF19(tcg_pcrel_diff(s, l->u.value_ptr));
    } else {
        tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP19, l, 0);
    }
    tcg_out_bpcc0(s, scond, flags, off19);
}

static void tcg_out_cmp(TCGContext *s, TCGReg c1, int32_t c2, int c2const)
{
    tcg_out_arithc(s, TCG_REG_G0, c1, c2, c2const, ARITH_SUBCC);
}

static void tcg_out_brcond_i32(TCGContext *s, TCGCond cond, TCGReg arg1,
                               int32_t arg2, int const_arg2, TCGLabel *l)
{
    tcg_out_cmp(s, arg1, arg2, const_arg2);
    tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_ICC | BPCC_PT, l);
    tcg_out_nop(s);
}

static void tcg_out_movcc(TCGContext *s, TCGCond cond, int cc, TCGReg ret,
                          int32_t v1, int v1const)
{
    tcg_out32(s, ARITH_MOVCC | cc | INSN_RD(ret)
              | INSN_RS1(tcg_cond_to_bcond[cond])
              | (v1const ? INSN_IMM11(v1) : INSN_RS2(v1)));
}

static void tcg_out_movcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const,
                                int32_t v1, int v1const)
{
    tcg_out_cmp(s, c1, c2, c2const);
    tcg_out_movcc(s, cond, MOVCC_ICC, ret, v1, v1const);
}

static void tcg_out_brcond_i64(TCGContext *s, TCGCond cond, TCGReg arg1,
                               int32_t arg2, int const_arg2, TCGLabel *l)
{
    /* For 64-bit signed comparisons vs zero, we can avoid the compare.  */
    if (arg2 == 0 && !is_unsigned_cond(cond)) {
        int off16 = 0;

        if (l->has_value) {
            off16 = INSN_OFF16(tcg_pcrel_diff(s, l->u.value_ptr));
        } else {
            tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP16, l, 0);
        }
        tcg_out32(s, INSN_OP(0) | INSN_OP2(3) | BPR_PT | INSN_RS1(arg1)
                  | INSN_COND(tcg_cond_to_rcond[cond]) | off16);
    } else {
        tcg_out_cmp(s, arg1, arg2, const_arg2);
        tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_XCC | BPCC_PT, l);
    }
    tcg_out_nop(s);
}

static void tcg_out_movr(TCGContext *s, TCGCond cond, TCGReg ret, TCGReg c1,
                         int32_t v1, int v1const)
{
    tcg_out32(s, ARITH_MOVR | INSN_RD(ret) | INSN_RS1(c1)
              | (tcg_cond_to_rcond[cond] << 10)
              | (v1const ? INSN_IMM10(v1) : INSN_RS2(v1)));
}

static void tcg_out_movcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const,
                                int32_t v1, int v1const)
{
    /* For 64-bit signed comparisons vs zero, we can avoid the compare.
       Note that the immediate range is one bit smaller, so we must check
       for that as well.  */
    if (c2 == 0 && !is_unsigned_cond(cond)
        && (!v1const || check_fit_i32(v1, 10))) {
        tcg_out_movr(s, cond, ret, c1, v1, v1const);
    } else {
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movcc(s, cond, MOVCC_XCC, ret, v1, v1const);
    }
}

static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const)
{
    /* For 32-bit comparisons, we can play games with ADDC/SUBC.  */
    switch (cond) {
    case TCG_COND_LTU:
    case TCG_COND_GEU:
        /* The result of the comparison is in the carry bit.  */
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For equality, we can transform to inequality vs zero.  */
        if (c2 != 0) {
            tcg_out_arithc(s, TCG_REG_T1, c1, c2, c2const, ARITH_XOR);
            c2 = TCG_REG_T1;
        } else {
            c2 = c1;
        }
        c1 = TCG_REG_G0, c2const = 0;
        cond = (cond == TCG_COND_EQ ? TCG_COND_GEU : TCG_COND_LTU);
        break;

    case TCG_COND_GTU:
    case TCG_COND_LEU:
        /* If we don't need to load a constant into a register, we can
           swap the operands on GTU/LEU.  There's no benefit to loading
           the constant into a temporary register.  */
        if (!c2const || c2 == 0) {
            TCGReg t = c1;
            c1 = c2;
            c2 = t;
            c2const = 0;
            cond = tcg_swap_cond(cond);
            break;
        }
        /* FALLTHRU */

    default:
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movi_imm13(s, ret, 0);
        tcg_out_movcc(s, cond, MOVCC_ICC, ret, 1, 1);
        return;
    }

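    /*
     * SUBCC sets the carry flag exactly when c1 < c2 as unsigned values,
     * so ADDC %g0, 0 materializes the carry bit itself (LTU), while
     * SUBC %g0, -1 computes 1 - C, its complement (GEU).
     */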
    tcg_out_cmp(s, c1, c2, c2const);
    if (cond == TCG_COND_LTU) {
        tcg_out_arithi(s, ret, TCG_REG_G0, 0, ARITH_ADDC);
    } else {
        tcg_out_arithi(s, ret, TCG_REG_G0, -1, ARITH_SUBC);
    }
}

static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const)
{
    if (use_vis3_instructions) {
        switch (cond) {
        case TCG_COND_NE:
            if (c2 != 0) {
                break;
            }
            c2 = c1, c2const = 0, c1 = TCG_REG_G0;
            /* FALLTHRU */
        case TCG_COND_LTU:
            tcg_out_cmp(s, c1, c2, c2const);
            tcg_out_arith(s, ret, TCG_REG_G0, TCG_REG_G0, ARITH_ADDXC);
            return;
        default:
            break;
        }
    }

    /* For 64-bit signed comparisons vs zero, we can avoid the compare
       if the input does not overlap the output.  */
    if (c2 == 0 && !is_unsigned_cond(cond) && c1 != ret) {
        tcg_out_movi_imm13(s, ret, 0);
        tcg_out_movr(s, cond, ret, c1, 1, 1);
    } else {
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movi_imm13(s, ret, 0);
        tcg_out_movcc(s, cond, MOVCC_XCC, ret, 1, 1);
    }
}

static void tcg_out_addsub2_i32(TCGContext *s, TCGReg rl, TCGReg rh,
                                TCGReg al, TCGReg ah, int32_t bl, int blconst,
                                int32_t bh, int bhconst, int opl, int oph)
{
    TCGReg tmp = TCG_REG_T1;

    /* Note that the low parts are fully consumed before tmp is set.  */
    if (rl != ah && (bhconst || rl != bh)) {
        tmp = rl;
    }

    tcg_out_arithc(s, tmp, al, bl, blconst, opl);
    tcg_out_arithc(s, rh, ah, bh, bhconst, oph);
    tcg_out_mov(s, TCG_TYPE_I32, rl, tmp);
}

static void tcg_out_addsub2_i64(TCGContext *s, TCGReg rl, TCGReg rh,
                                TCGReg al, TCGReg ah, int32_t bl, int blconst,
                                int32_t bh, int bhconst, bool is_sub)
{
    TCGReg tmp = TCG_REG_T1;

    /* Note that the low parts are fully consumed before tmp is set.  */
    if (rl != ah && (bhconst || rl != bh)) {
        tmp = rl;
    }

    tcg_out_arithc(s, tmp, al, bl, blconst, is_sub ? ARITH_SUBCC : ARITH_ADDCC);

    if (use_vis3_instructions && !is_sub) {
        /* Note that ADDXC doesn't accept immediates.  */
        if (bhconst && bh != 0) {
           tcg_out_movi_imm13(s, TCG_REG_T2, bh);
           bh = TCG_REG_T2;
        }
        tcg_out_arith(s, rh, ah, bh, ARITH_ADDXC);
    } else if (bh == TCG_REG_G0) {
        /* If we have a zero, we can perform the operation in two insns,
           with the arithmetic first, and a conditional move into place.  */
        if (rh == ah) {
            tcg_out_arithi(s, TCG_REG_T2, ah, 1,
                           is_sub ? ARITH_SUB : ARITH_ADD);
            tcg_out_movcc(s, TCG_COND_LTU, MOVCC_XCC, rh, TCG_REG_T2, 0);
        } else {
            tcg_out_arithi(s, rh, ah, 1, is_sub ? ARITH_SUB : ARITH_ADD);
            tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, rh, ah, 0);
        }
    } else {
        /*
         * Otherwise adjust BH as if there is carry into T2.
         * Note that constant BH is constrained to 11 bits for the MOVCC,
         * so the adjustment fits 12 bits.
         */
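        /*
         * For example, an add with constant BH = 5 sets T2 = 6; the MOVCC
         * drops T2 back to 5 when the low-part carry is clear, and the
         * final add then uses T2 as the high operand.
         */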
        if (bhconst) {
            tcg_out_movi_imm13(s, TCG_REG_T2, bh + (is_sub ? -1 : 1));
        } else {
            tcg_out_arithi(s, TCG_REG_T2, bh, 1,
                           is_sub ? ARITH_SUB : ARITH_ADD);
        }
        /* ... smoosh T2 back to original BH if carry is clear ... */
        tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, TCG_REG_T2, bh, bhconst);
        /* ... and finally perform the arithmetic with the new operand.  */
        tcg_out_arith(s, rh, ah, TCG_REG_T2, is_sub ? ARITH_SUB : ARITH_ADD);
    }

    tcg_out_mov(s, TCG_TYPE_I64, rl, tmp);
}

static void tcg_out_jmpl_const(TCGContext *s, const tcg_insn_unit *dest,
                               bool in_prologue, bool tail_call)
{
    uintptr_t desti = (uintptr_t)dest;

    /* Be careful not to clobber %o7 for a tail call. */
    tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_REG_T1,
                     desti & ~0xfff, in_prologue,
                     tail_call ? TCG_REG_G2 : TCG_REG_O7);
    tcg_out_arithi(s, tail_call ? TCG_REG_G0 : TCG_REG_O7,
                   TCG_REG_T1, desti & 0xfff, JMPL);
}

static void tcg_out_call_nodelay(TCGContext *s, const tcg_insn_unit *dest,
                                 bool in_prologue)
{
    ptrdiff_t disp = tcg_pcrel_diff(s, dest);

    if (disp == (int32_t)disp) {
        tcg_out32(s, CALL | (uint32_t)disp >> 2);
    } else {
        tcg_out_jmpl_const(s, dest, in_prologue, false);
    }
}

static void tcg_out_call(TCGContext *s, const tcg_insn_unit *dest,
                         const TCGHelperInfo *info)
{
    tcg_out_call_nodelay(s, dest, false);
    tcg_out_nop(s);
}

static void tcg_out_mb(TCGContext *s, TCGArg a0)
{
    /* Note that the TCG memory order constants mirror the Sparc MEMBAR.  */
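    /* TCG_MO_LD_LD/ST_LD/LD_ST/ST_ST occupy the same bit positions as the
       MEMBAR mmask bits #LoadLoad/#StoreLoad/#LoadStore/#StoreStore, so
       the mask is passed through unchanged.  */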
    tcg_out32(s, MEMBAR | (a0 & TCG_MO_ALL));
}

#ifdef CONFIG_SOFTMMU
static const tcg_insn_unit *qemu_ld_trampoline[(MO_SSIZE | MO_BSWAP) + 1];
static const tcg_insn_unit *qemu_st_trampoline[(MO_SIZE | MO_BSWAP) + 1];

static void build_trampolines(TCGContext *s)
{
    static void * const qemu_ld_helpers[] = {
        [MO_UB]   = helper_ret_ldub_mmu,
        [MO_SB]   = helper_ret_ldsb_mmu,
        [MO_LEUW] = helper_le_lduw_mmu,
        [MO_LESW] = helper_le_ldsw_mmu,
        [MO_LEUL] = helper_le_ldul_mmu,
        [MO_LEUQ] = helper_le_ldq_mmu,
        [MO_BEUW] = helper_be_lduw_mmu,
        [MO_BESW] = helper_be_ldsw_mmu,
        [MO_BEUL] = helper_be_ldul_mmu,
        [MO_BEUQ] = helper_be_ldq_mmu,
    };
    static void * const qemu_st_helpers[] = {
        [MO_UB]   = helper_ret_stb_mmu,
        [MO_LEUW] = helper_le_stw_mmu,
        [MO_LEUL] = helper_le_stl_mmu,
        [MO_LEUQ] = helper_le_stq_mmu,
        [MO_BEUW] = helper_be_stw_mmu,
        [MO_BEUL] = helper_be_stl_mmu,
        [MO_BEUQ] = helper_be_stq_mmu,
    };

    int i;

    for (i = 0; i < ARRAY_SIZE(qemu_ld_helpers); ++i) {
        if (qemu_ld_helpers[i] == NULL) {
            continue;
        }

        /* May as well align the trampoline.  */
        while ((uintptr_t)s->code_ptr & 15) {
            tcg_out_nop(s);
        }
        qemu_ld_trampoline[i] = tcg_splitwx_to_rx(s->code_ptr);

        /* Set the retaddr operand.  */
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O3, TCG_REG_O7);
        /* Tail call.  */
        tcg_out_jmpl_const(s, qemu_ld_helpers[i], true, true);
        /* delay slot -- set the env argument */
        tcg_out_mov_delay(s, TCG_REG_O0, TCG_AREG0);
    }

    for (i = 0; i < ARRAY_SIZE(qemu_st_helpers); ++i) {
        if (qemu_st_helpers[i] == NULL) {
            continue;
        }

        /* May as well align the trampoline.  */
        while ((uintptr_t)s->code_ptr & 15) {
            tcg_out_nop(s);
        }
        qemu_st_trampoline[i] = tcg_splitwx_to_rx(s->code_ptr);

        /* Set the retaddr operand.  */
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O4, TCG_REG_O7);

        /* Tail call.  */
        tcg_out_jmpl_const(s, qemu_st_helpers[i], true, true);
        /* delay slot -- set the env argument */
        tcg_out_mov_delay(s, TCG_REG_O0, TCG_AREG0);
    }
}
#else
static const tcg_insn_unit *qemu_unalign_ld_trampoline;
static const tcg_insn_unit *qemu_unalign_st_trampoline;

static void build_trampolines(TCGContext *s)
{
    for (int ld = 0; ld < 2; ++ld) {
        void *helper;

        while ((uintptr_t)s->code_ptr & 15) {
            tcg_out_nop(s);
        }

        if (ld) {
            helper = helper_unaligned_ld;
            qemu_unalign_ld_trampoline = tcg_splitwx_to_rx(s->code_ptr);
        } else {
            helper = helper_unaligned_st;
            qemu_unalign_st_trampoline = tcg_splitwx_to_rx(s->code_ptr);
        }

        /* Tail call.  */
        tcg_out_jmpl_const(s, helper, true, true);
        /* delay slot -- set the env argument */
        tcg_out_mov_delay(s, TCG_REG_O0, TCG_AREG0);
    }
}
#endif

/* Generate global QEMU prologue and epilogue code */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int tmp_buf_size, frame_size;

    /*
     * The TCG temp buffer is at the top of the frame, immediately
     * below the frame pointer.  Use the logical (aligned) offset here;
     * the stack bias is applied in temp_allocate_frame().
     */
    tmp_buf_size = CPU_TEMP_BUF_NLONGS * (int)sizeof(long);
    tcg_set_frame(s, TCG_REG_I6, -tmp_buf_size, tmp_buf_size);

    /*
     * TCG_TARGET_CALL_STACK_OFFSET includes the stack bias, but is
     * otherwise the minimal frame usable by callees.
     */
    frame_size = TCG_TARGET_CALL_STACK_OFFSET - TCG_TARGET_STACK_BIAS;
    frame_size += TCG_STATIC_CALL_ARGS_SIZE + tmp_buf_size;
    frame_size += TCG_TARGET_STACK_ALIGN - 1;
    frame_size &= -TCG_TARGET_STACK_ALIGN;
    tcg_out32(s, SAVE | INSN_RD(TCG_REG_O6) | INSN_RS1(TCG_REG_O6) |
              INSN_IMM13(-frame_size));

#ifndef CONFIG_SOFTMMU
    if (guest_base != 0) {
        tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG,
                         guest_base, true, TCG_REG_T1);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }
#endif

    /* We choose TCG_REG_TB such that no move is required.  */
    QEMU_BUILD_BUG_ON(TCG_REG_TB != TCG_REG_I1);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TB);

    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I1, 0, JMPL);
    /* delay slot */
    tcg_out_nop(s);

    /* Epilogue for goto_ptr.  */
    tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
    /* delay slot */
    tcg_out_movi_imm13(s, TCG_REG_O0, 0);

    build_trampolines(s);
}

static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
    int i;
    for (i = 0; i < count; ++i) {
        p[i] = NOP;
    }
}

#if defined(CONFIG_SOFTMMU)

/* We expect to use a 13-bit negative offset from ENV.  */
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 12));

/* Perform the TLB load and compare.

   Inputs:
   ADDRLO and ADDRHI contain the possible two parts of the address.

   MEM_INDEX and S_BITS are the memory context and log2 size of the load.

   WHICH is the offset into the CPUTLBEntry structure of the slot to read.
   This should be offsetof addr_read or addr_write.

   The result of the TLB comparison is in %[ix]cc.  The sanitized address
   is in the returned register, maybe %o0.  The TLB addend is in %o1.  */
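/* The mask/table pair for each mmu_idx sits at a small negative offset
   from ENV (see the build-time asserts above), so both loads below can
   use 13-bit immediate offsets.  */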

static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addr, int mem_index,
                               MemOp opc, int which)
{
    int fast_off = TLB_MASK_TABLE_OFS(mem_index);
    int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
    int table_off = fast_off + offsetof(CPUTLBDescFast, table);
    const TCGReg r0 = TCG_REG_O0;
    const TCGReg r1 = TCG_REG_O1;
    const TCGReg r2 = TCG_REG_O2;
    unsigned s_bits = opc & MO_SIZE;
    unsigned a_bits = get_alignment_bits(opc);
    tcg_target_long compare_mask;

    /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx].  */
    tcg_out_ld(s, TCG_TYPE_PTR, r0, TCG_AREG0, mask_off);
    tcg_out_ld(s, TCG_TYPE_PTR, r1, TCG_AREG0, table_off);

    /* Extract the page index, shifted into place for tlb index.  */
    tcg_out_arithi(s, r2, addr, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS,
                   SHIFT_SRL);
    tcg_out_arith(s, r2, r2, r0, ARITH_AND);

    /* Add the tlb_table pointer, creating the CPUTLBEntry address into R2.  */
    tcg_out_arith(s, r2, r2, r1, ARITH_ADD);

    /* Load the tlb comparator and the addend.  */
    tcg_out_ld(s, TCG_TYPE_TL, r0, r2, which);
    tcg_out_ld(s, TCG_TYPE_PTR, r1, r2, offsetof(CPUTLBEntry, addend));

    /* Mask out the page offset, except for the required alignment.
       We don't support unaligned accesses.  */
    if (a_bits < s_bits) {
        a_bits = s_bits;
    }
    compare_mask = (tcg_target_ulong)TARGET_PAGE_MASK | ((1 << a_bits) - 1);
    if (check_fit_tl(compare_mask, 13)) {
        tcg_out_arithi(s, r2, addr, compare_mask, ARITH_AND);
    } else {
        tcg_out_movi(s, TCG_TYPE_TL, r2, compare_mask);
        tcg_out_arith(s, r2, addr, r2, ARITH_AND);
    }
    tcg_out_cmp(s, r0, r2, 0);

    /* If the guest address must be zero-extended, do so now.  */
    if (TARGET_LONG_BITS == 32) {
        tcg_out_ext32u(s, r0, addr);
        return r0;
    }
    return addr;
}
#endif /* CONFIG_SOFTMMU */

static const int qemu_ld_opc[(MO_SSIZE | MO_BSWAP) + 1] = {
    [MO_UB]   = LDUB,
    [MO_SB]   = LDSB,
    [MO_UB | MO_LE] = LDUB,
    [MO_SB | MO_LE] = LDSB,

    [MO_BEUW] = LDUH,
    [MO_BESW] = LDSH,
    [MO_BEUL] = LDUW,
    [MO_BESL] = LDSW,
    [MO_BEUQ] = LDX,
    [MO_BESQ] = LDX,

    [MO_LEUW] = LDUH_LE,
    [MO_LESW] = LDSH_LE,
    [MO_LEUL] = LDUW_LE,
    [MO_LESL] = LDSW_LE,
    [MO_LEUQ] = LDX_LE,
    [MO_LESQ] = LDX_LE,
};

static const int qemu_st_opc[(MO_SIZE | MO_BSWAP) + 1] = {
    [MO_UB]   = STB,

    [MO_BEUW] = STH,
    [MO_BEUL] = STW,
    [MO_BEUQ] = STX,

    [MO_LEUW] = STH_LE,
    [MO_LEUL] = STW_LE,
    [MO_LEUQ] = STX_LE,
};

static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
                            MemOpIdx oi, TCGType data_type)
{
    MemOp memop = get_memop(oi);
    tcg_insn_unit *label_ptr;

#ifdef CONFIG_SOFTMMU
    unsigned memi = get_mmuidx(oi);
    TCGReg addrz;
    const tcg_insn_unit *func;

    addrz = tcg_out_tlb_load(s, addr, memi, memop,
                             offsetof(CPUTLBEntry, addr_read));

    /* The fast path is exactly one insn.  Thus we can perform the
       entire TLB Hit in the (annulled) delay slot of the branch
       over the TLB Miss case.  */

    /* beq,a,pt %[xi]cc, label0 */
    label_ptr = s->code_ptr;
    tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT
                  | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
    /* delay slot */
    tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1,
                    qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]);

    /* TLB Miss.  */

    tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_O1, addrz);

    /* We use the helpers to extend SB and SW data, leaving the case
       of SL needing explicit extending below.  */
    if ((memop & MO_SSIZE) == MO_SL) {
        func = qemu_ld_trampoline[memop & (MO_BSWAP | MO_SIZE)];
    } else {
        func = qemu_ld_trampoline[memop & (MO_BSWAP | MO_SSIZE)];
    }
    tcg_debug_assert(func != NULL);
    tcg_out_call_nodelay(s, func, false);
    /* delay slot */
    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_O2, oi);

    /* We let the helper sign-extend SB and SW, but leave SL for here.  */
    if ((memop & MO_SSIZE) == MO_SL) {
        tcg_out_ext32s(s, data, TCG_REG_O0);
    } else {
        tcg_out_mov(s, TCG_TYPE_REG, data, TCG_REG_O0);
    }

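    /* Back-patch the annulled branch above now that the length of the
       TLB-miss path is known; its 19-bit displacement was emitted as 0.  */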
    *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
#else
    TCGReg index = (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0);
    unsigned a_bits = get_alignment_bits(memop);
    unsigned s_bits = memop & MO_SIZE;
    unsigned t_bits;

    if (TARGET_LONG_BITS == 32) {
        tcg_out_ext32u(s, TCG_REG_T1, addr);
        addr = TCG_REG_T1;
    }

    /*
     * Normal case: alignment equal to access size.
     */
    if (a_bits == s_bits) {
        tcg_out_ldst_rr(s, data, addr, index,
                        qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]);
        return;
    }

    /*
     * Test for at least natural alignment, and assume most accesses
     * will be aligned -- perform a straight load in the delay slot.
     * This is required to preserve atomicity for aligned accesses.
     */
    t_bits = MAX(a_bits, s_bits);
    tcg_debug_assert(t_bits < 13);
    tcg_out_arithi(s, TCG_REG_G0, addr, (1u << t_bits) - 1, ARITH_ANDCC);

    /* beq,a,pt %icc, label */
    label_ptr = s->code_ptr;
    tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT | BPCC_ICC, 0);
    /* delay slot */
    tcg_out_ldst_rr(s, data, addr, index,
                    qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]);

    if (a_bits >= s_bits) {
        /*
         * Overalignment: A successful alignment test will perform the memory
         * operation in the delay slot, and failure need only invoke the
         * handler for SIGBUS.
         */
        tcg_out_call_nodelay(s, qemu_unalign_ld_trampoline, false);
        /* delay slot -- move to low part of argument reg */
        tcg_out_mov_delay(s, TCG_REG_O1, addr);
    } else {
        /* Underalignment: load by pieces of minimum alignment. */
        int ld_opc, a_size, s_size, i;

        /*
         * Force full address into T1 early; avoids problems with
         * overlap between @addr and @data.
         */
        tcg_out_arith(s, TCG_REG_T1, addr, index, ARITH_ADD);

        a_size = 1 << a_bits;
        s_size = 1 << s_bits;
        if ((memop & MO_BSWAP) == MO_BE) {
            ld_opc = qemu_ld_opc[a_bits | MO_BE | (memop & MO_SIGN)];
            tcg_out_ldst(s, data, TCG_REG_T1, 0, ld_opc);
            ld_opc = qemu_ld_opc[a_bits | MO_BE];
            for (i = a_size; i < s_size; i += a_size) {
                tcg_out_ldst(s, TCG_REG_T2, TCG_REG_T1, i, ld_opc);
                tcg_out_arithi(s, data, data, a_size, SHIFT_SLLX);
                tcg_out_arith(s, data, data, TCG_REG_T2, ARITH_OR);
            }
        } else if (a_bits == 0) {
            ld_opc = LDUB;
            tcg_out_ldst(s, data, TCG_REG_T1, 0, ld_opc);
            for (i = a_size; i < s_size; i += a_size) {
                if ((memop & MO_SIGN) && i == s_size - a_size) {
                    ld_opc = LDSB;
                }
                tcg_out_ldst(s, TCG_REG_T2, TCG_REG_T1, i, ld_opc);
                tcg_out_arithi(s, TCG_REG_T2, TCG_REG_T2, i * 8, SHIFT_SLLX);
                tcg_out_arith(s, data, data, TCG_REG_T2, ARITH_OR);
            }
        } else {
            ld_opc = qemu_ld_opc[a_bits | MO_LE];
            tcg_out_ldst_rr(s, data, TCG_REG_T1, TCG_REG_G0, ld_opc);
            for (i = a_size; i < s_size; i += a_size) {
                tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, a_size, ARITH_ADD);
                if ((memop & MO_SIGN) && i == s_size - a_size) {
                    ld_opc = qemu_ld_opc[a_bits | MO_LE | MO_SIGN];
                }
                tcg_out_ldst_rr(s, TCG_REG_T2, TCG_REG_T1, TCG_REG_G0, ld_opc);
                tcg_out_arithi(s, TCG_REG_T2, TCG_REG_T2, i * 8, SHIFT_SLLX);
                tcg_out_arith(s, data, data, TCG_REG_T2, ARITH_OR);
            }
        }
    }

    *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
#endif /* CONFIG_SOFTMMU */
}

static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr,
                            MemOpIdx oi, TCGType data_type)
{
    MemOp memop = get_memop(oi);
    tcg_insn_unit *label_ptr;

#ifdef CONFIG_SOFTMMU
    unsigned memi = get_mmuidx(oi);
    TCGReg addrz;
    const tcg_insn_unit *func;

    addrz = tcg_out_tlb_load(s, addr, memi, memop,
                             offsetof(CPUTLBEntry, addr_write));

    /* The fast path is exactly one insn.  Thus we can perform the entire
       TLB Hit in the (annulled) delay slot of the branch over TLB Miss.  */
    /* beq,a,pt %[xi]cc, label0 */
    label_ptr = s->code_ptr;
    tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT
                  | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
    /* delay slot */
    tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1,
                    qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]);

    /* TLB Miss.  */

    tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_O1, addrz);
    tcg_out_movext(s, (memop & MO_SIZE) == MO_64 ? TCG_TYPE_I64 : TCG_TYPE_I32,
                   TCG_REG_O2, data_type, memop & MO_SIZE, data);

    func = qemu_st_trampoline[memop & (MO_BSWAP | MO_SIZE)];
    tcg_debug_assert(func != NULL);
    tcg_out_call_nodelay(s, func, false);
    /* delay slot */
    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_O3, oi);

    *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
#else
    TCGReg index = (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0);
    unsigned a_bits = get_alignment_bits(memop);
    unsigned s_bits = memop & MO_SIZE;
    unsigned t_bits;

    if (TARGET_LONG_BITS == 32) {
        tcg_out_ext32u(s, TCG_REG_T1, addr);
        addr = TCG_REG_T1;
    }

    /*
     * Normal case: alignment equal to access size.
     */
    if (a_bits == s_bits) {
        tcg_out_ldst_rr(s, data, addr, index,
                        qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]);
        return;
    }

    /*
     * Test for at least natural alignment, and assume most accesses
     * will be aligned -- perform a straight store in the delay slot.
     * This is required to preserve atomicity for aligned accesses.
     */
    t_bits = MAX(a_bits, s_bits);
    tcg_debug_assert(t_bits < 13);
    tcg_out_arithi(s, TCG_REG_G0, addr, (1u << t_bits) - 1, ARITH_ANDCC);

    /* beq,a,pt %icc, label */
    label_ptr = s->code_ptr;
    tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT | BPCC_ICC, 0);
    /* delay slot */
    tcg_out_ldst_rr(s, data, addr, index,
                    qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]);

    if (a_bits >= s_bits) {
        /*
         * Overalignment: A successful alignment test will perform the memory
         * operation in the delay slot, and failure need only invoke the
         * handler for SIGBUS.
         */
        tcg_out_call_nodelay(s, qemu_unalign_st_trampoline, false);
        /* delay slot -- move to low part of argument reg */
        tcg_out_mov_delay(s, TCG_REG_O1, addr);
    } else {
        /* Underalignment: store by pieces of minimum alignment. */
        int st_opc, a_size, s_size, i;

        /*
         * Force full address into T1 early; avoids problems with
         * overlap between @addr and @data.
         */
        tcg_out_arith(s, TCG_REG_T1, addr, index, ARITH_ADD);

        a_size = 1 << a_bits;
        s_size = 1 << s_bits;
        if ((memop & MO_BSWAP) == MO_BE) {
            st_opc = qemu_st_opc[a_bits | MO_BE];
            for (i = 0; i < s_size; i += a_size) {
                TCGReg d = data;
                int shift = (s_size - a_size - i) * 8;
                if (shift) {
                    d = TCG_REG_T2;
                    tcg_out_arithi(s, d, data, shift, SHIFT_SRLX);
                }
                tcg_out_ldst(s, d, TCG_REG_T1, i, st_opc);
            }
        } else if (a_bits == 0) {
            tcg_out_ldst(s, data, TCG_REG_T1, 0, STB);
            for (i = 1; i < s_size; i++) {
                tcg_out_arithi(s, TCG_REG_T2, data, i * 8, SHIFT_SRLX);
                tcg_out_ldst(s, TCG_REG_T2, TCG_REG_T1, i, STB);
            }
        } else {
            /* Note that ST*A with immediate asi must use indexed address. */
            st_opc = qemu_st_opc[a_bits + MO_LE];
            tcg_out_ldst_rr(s, data, TCG_REG_T1, TCG_REG_G0, st_opc);
            for (i = a_size; i < s_size; i += a_size) {
                tcg_out_arithi(s, TCG_REG_T2, data, i * 8, SHIFT_SRLX);
                tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, a_size, ARITH_ADD);
                tcg_out_ldst_rr(s, TCG_REG_T2, TCG_REG_T1, TCG_REG_G0, st_opc);
            }
        }
    }

    *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
#endif /* CONFIG_SOFTMMU */
}

static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
{
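    /*
     * RETURN restores the caller's register window and its delay slot
     * executes in that restored window: this frame's %i0/%i1 are then
     * addressed as %o0/%o1, which is why the return value is built in O0
     * and TCG_REG_TB (%i1) reappears as O1 below.
     */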
1455    if (check_fit_ptr(a0, 13)) {
1456        tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
1457        tcg_out_movi_imm13(s, TCG_REG_O0, a0);
1458        return;
1459    } else {
1460        intptr_t tb_diff = tcg_tbrel_diff(s, (void *)a0);
1461        if (check_fit_ptr(tb_diff, 13)) {
1462            tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
1463            /* Note that TCG_REG_TB has been unwound to O1.  */
1464            tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O1, tb_diff, ARITH_ADD);
1465            return;
1466        }
1467    }
1468    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I0, a0 & ~0x3ff);
1469    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
1470    tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O0, a0 & 0x3ff, ARITH_OR);
1471}
1472
1473static void tcg_out_goto_tb(TCGContext *s, int which)
1474{
1475    ptrdiff_t off = tcg_tbrel_diff(s, (void *)get_jmp_target_addr(s, which));
1476
1477    /* Load link and indirect branch. */
1478    set_jmp_insn_offset(s, which);
1479    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TB, TCG_REG_TB, off);
1480    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_TB, 0, JMPL);
1481    /* delay slot */
1482    tcg_out_nop(s);
1483    set_jmp_reset_offset(s, which);
1484
1485    /*
1486     * For the unlinked path of goto_tb, we need to reset TCG_REG_TB
1487     * to the beginning of this TB.
1488     */
1489    off = -tcg_current_code_size(s);
1490    if (check_fit_i32(off, 13)) {
1491        tcg_out_arithi(s, TCG_REG_TB, TCG_REG_TB, off, ARITH_ADD);
1492    } else {
1493        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, off);
1494        tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB, TCG_REG_T1, ARITH_ADD);
1495    }
1496}
1497
1498void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
1499                              uintptr_t jmp_rx, uintptr_t jmp_rw)
1500{
1501}
1502
1503static void tcg_out_op(TCGContext *s, TCGOpcode opc,
1504                       const TCGArg args[TCG_MAX_OP_ARGS],
1505                       const int const_args[TCG_MAX_OP_ARGS])
1506{
1507    TCGArg a0, a1, a2;
1508    int c, c2;
1509
1510    /* Hoist the loads of the most common arguments.  */
1511    a0 = args[0];
1512    a1 = args[1];
1513    a2 = args[2];
1514    c2 = const_args[2];
1515
1516    switch (opc) {
1517    case INDEX_op_goto_ptr:
1518        tcg_out_arithi(s, TCG_REG_G0, a0, 0, JMPL);
1519        tcg_out_mov_delay(s, TCG_REG_TB, a0);
1520        break;
1521    case INDEX_op_br:
1522        tcg_out_bpcc(s, COND_A, BPCC_PT, arg_label(a0));
1523        tcg_out_nop(s);
1524        break;
1525
1526#define OP_32_64(x)                             \
1527        glue(glue(case INDEX_op_, x), _i32):    \
1528        glue(glue(case INDEX_op_, x), _i64)
1529
1530    OP_32_64(ld8u):
1531        tcg_out_ldst(s, a0, a1, a2, LDUB);
1532        break;
1533    OP_32_64(ld8s):
1534        tcg_out_ldst(s, a0, a1, a2, LDSB);
1535        break;
1536    OP_32_64(ld16u):
1537        tcg_out_ldst(s, a0, a1, a2, LDUH);
1538        break;
1539    OP_32_64(ld16s):
1540        tcg_out_ldst(s, a0, a1, a2, LDSH);
1541        break;
1542    case INDEX_op_ld_i32:
1543    case INDEX_op_ld32u_i64:
1544        tcg_out_ldst(s, a0, a1, a2, LDUW);
1545        break;
1546    OP_32_64(st8):
1547        tcg_out_ldst(s, a0, a1, a2, STB);
1548        break;
1549    OP_32_64(st16):
1550        tcg_out_ldst(s, a0, a1, a2, STH);
1551        break;
1552    case INDEX_op_st_i32:
1553    case INDEX_op_st32_i64:
1554        tcg_out_ldst(s, a0, a1, a2, STW);
1555        break;
1556    OP_32_64(add):
1557        c = ARITH_ADD;
1558        goto gen_arith;
1559    OP_32_64(sub):
1560        c = ARITH_SUB;
1561        goto gen_arith;
1562    OP_32_64(and):
1563        c = ARITH_AND;
1564        goto gen_arith;
1565    OP_32_64(andc):
1566        c = ARITH_ANDN;
1567        goto gen_arith;
1568    OP_32_64(or):
1569        c = ARITH_OR;
1570        goto gen_arith;
1571    OP_32_64(orc):
1572        c = ARITH_ORN;
1573        goto gen_arith;
1574    OP_32_64(xor):
1575        c = ARITH_XOR;
1576        goto gen_arith;
1577    case INDEX_op_shl_i32:
1578        c = SHIFT_SLL;
1579    do_shift32:
1580        /* Limit immediate shift count lest we create an illegal insn.  */
1581        tcg_out_arithc(s, a0, a1, a2 & 31, c2, c);
1582        break;
1583    case INDEX_op_shr_i32:
1584        c = SHIFT_SRL;
1585        goto do_shift32;
1586    case INDEX_op_sar_i32:
1587        c = SHIFT_SRA;
1588        goto do_shift32;
1589    case INDEX_op_mul_i32:
1590        c = ARITH_UMUL;
1591        goto gen_arith;
1592
1593    OP_32_64(neg):
1594	c = ARITH_SUB;
1595	goto gen_arith1;
1596    OP_32_64(not):
1597	c = ARITH_ORN;
1598	goto gen_arith1;
1599
1600    case INDEX_op_div_i32:
1601        tcg_out_div32(s, a0, a1, a2, c2, 0);
1602        break;
1603    case INDEX_op_divu_i32:
1604        tcg_out_div32(s, a0, a1, a2, c2, 1);
1605        break;
1606
1607    case INDEX_op_brcond_i32:
1608        tcg_out_brcond_i32(s, a2, a0, a1, const_args[1], arg_label(args[3]));
1609        break;
1610    case INDEX_op_setcond_i32:
1611        tcg_out_setcond_i32(s, args[3], a0, a1, a2, c2);
1612        break;
1613    case INDEX_op_movcond_i32:
1614        tcg_out_movcond_i32(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
1615        break;
1616
1617    case INDEX_op_add2_i32:
1618        tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3],
1619                            args[4], const_args[4], args[5], const_args[5],
1620                            ARITH_ADDCC, ARITH_ADDC);
1621        break;
1622    case INDEX_op_sub2_i32:
1623        tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3],
1624                            args[4], const_args[4], args[5], const_args[5],
1625                            ARITH_SUBCC, ARITH_SUBC);
1626        break;
1627    case INDEX_op_mulu2_i32:
1628        c = ARITH_UMUL;
1629        goto do_mul2;
1630    case INDEX_op_muls2_i32:
1631        c = ARITH_SMUL;
1632    do_mul2:
1633        /* The 32-bit multiply insns produce a full 64-bit result. */
1634        tcg_out_arithc(s, a0, a2, args[3], const_args[3], c);
1635        tcg_out_arithi(s, a1, a0, 32, SHIFT_SRLX);
1636        break;
1637
1638    case INDEX_op_qemu_ld_i32:
1639        tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32);
1640        break;
1641    case INDEX_op_qemu_ld_i64:
1642        tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64);
1643        break;
1644    case INDEX_op_qemu_st_i32:
1645        tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32);
1646        break;
1647    case INDEX_op_qemu_st_i64:
1648        tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64);
1649        break;
1650
1651    case INDEX_op_ld32s_i64:
1652        tcg_out_ldst(s, a0, a1, a2, LDSW);
1653        break;
1654    case INDEX_op_ld_i64:
1655        tcg_out_ldst(s, a0, a1, a2, LDX);
1656        break;
1657    case INDEX_op_st_i64:
1658        tcg_out_ldst(s, a0, a1, a2, STX);
1659        break;
1660    case INDEX_op_shl_i64:
1661        c = SHIFT_SLLX;
1662    do_shift64:
1663        /* Limit immediate shift count lest we create an illegal insn.  */
1664        tcg_out_arithc(s, a0, a1, a2 & 63, c2, c);
1665        break;
1666    case INDEX_op_shr_i64:
1667        c = SHIFT_SRLX;
1668        goto do_shift64;
1669    case INDEX_op_sar_i64:
1670        c = SHIFT_SRAX;
1671        goto do_shift64;
1672    case INDEX_op_mul_i64:
1673        c = ARITH_MULX;
1674        goto gen_arith;
1675    case INDEX_op_div_i64:
1676        c = ARITH_SDIVX;
1677        goto gen_arith;
1678    case INDEX_op_divu_i64:
1679        c = ARITH_UDIVX;
1680        goto gen_arith;
1681    case INDEX_op_extrh_i64_i32:
1682        tcg_out_arithi(s, a0, a1, 32, SHIFT_SRLX);
1683        break;
1684
1685    case INDEX_op_brcond_i64:
1686        tcg_out_brcond_i64(s, a2, a0, a1, const_args[1], arg_label(args[3]));
1687        break;
1688    case INDEX_op_setcond_i64:
1689        tcg_out_setcond_i64(s, args[3], a0, a1, a2, c2);
1690        break;
1691    case INDEX_op_movcond_i64:
1692        tcg_out_movcond_i64(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
1693        break;
1694    case INDEX_op_add2_i64:
1695        tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4],
1696                            const_args[4], args[5], const_args[5], false);
1697        break;
1698    case INDEX_op_sub2_i64:
1699        tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4],
1700                            const_args[4], args[5], const_args[5], true);
1701        break;
1702    case INDEX_op_muluh_i64:
1703        tcg_out_arith(s, args[0], args[1], args[2], ARITH_UMULXHI);
1704        break;
1705
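    /*
     * Shared tails: gen_arith emits one reg/reg-or-imm arithmetic insn
     * using the opcode selected in 'c' above; gen_arith1 does the same
     * with %g0 as the first source, giving the unary neg/not forms.
     */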
1706    gen_arith:
1707        tcg_out_arithc(s, a0, a1, a2, c2, c);
1708        break;
1709
1710    gen_arith1:
1711        tcg_out_arithc(s, a0, TCG_REG_G0, a1, const_args[1], c);
1712        break;
1713
1714    case INDEX_op_mb:
1715        tcg_out_mb(s, a0);
1716        break;
1717
1718    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
1719    case INDEX_op_mov_i64:
1720    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
1721    case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb.  */
1722    case INDEX_op_goto_tb:  /* Always emitted via tcg_out_goto_tb.  */
1723    case INDEX_op_ext8s_i32:  /* Always emitted via tcg_reg_alloc_op.  */
1724    case INDEX_op_ext8s_i64:
1725    case INDEX_op_ext8u_i32:
1726    case INDEX_op_ext8u_i64:
1727    case INDEX_op_ext16s_i32:
1728    case INDEX_op_ext16s_i64:
1729    case INDEX_op_ext16u_i32:
1730    case INDEX_op_ext16u_i64:
1731    case INDEX_op_ext32s_i64:
1732    case INDEX_op_ext32u_i64:
1733    case INDEX_op_ext_i32_i64:
1734    case INDEX_op_extu_i32_i64:
1735    case INDEX_op_extrl_i64_i32:
1736    default:
1737        g_assert_not_reached();
1738    }
1739}
1740
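/*
 * Operand constraints for each opcode.  The letters are presumably those
 * declared in tcg-target-con-str.h: 'r' any general register, 's' a
 * register allowed for qemu_ld/st addresses, 'Z' the constant zero,
 * 'I' a signed 11-bit immediate, and 'J' a signed 13-bit immediate.
 */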
1741static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
1742{
1743    switch (op) {
1744    case INDEX_op_goto_ptr:
1745        return C_O0_I1(r);
1746
1747    case INDEX_op_ld8u_i32:
1748    case INDEX_op_ld8u_i64:
1749    case INDEX_op_ld8s_i32:
1750    case INDEX_op_ld8s_i64:
1751    case INDEX_op_ld16u_i32:
1752    case INDEX_op_ld16u_i64:
1753    case INDEX_op_ld16s_i32:
1754    case INDEX_op_ld16s_i64:
1755    case INDEX_op_ld_i32:
1756    case INDEX_op_ld32u_i64:
1757    case INDEX_op_ld32s_i64:
1758    case INDEX_op_ld_i64:
1759    case INDEX_op_neg_i32:
1760    case INDEX_op_neg_i64:
1761    case INDEX_op_not_i32:
1762    case INDEX_op_not_i64:
1763    case INDEX_op_ext32s_i64:
1764    case INDEX_op_ext32u_i64:
1765    case INDEX_op_ext_i32_i64:
1766    case INDEX_op_extu_i32_i64:
1767    case INDEX_op_extrl_i64_i32:
1768    case INDEX_op_extrh_i64_i32:
1769        return C_O1_I1(r, r);
1770
1771    case INDEX_op_st8_i32:
1772    case INDEX_op_st8_i64:
1773    case INDEX_op_st16_i32:
1774    case INDEX_op_st16_i64:
1775    case INDEX_op_st_i32:
1776    case INDEX_op_st32_i64:
1777    case INDEX_op_st_i64:
1778        return C_O0_I2(rZ, r);
1779
1780    case INDEX_op_add_i32:
1781    case INDEX_op_add_i64:
1782    case INDEX_op_mul_i32:
1783    case INDEX_op_mul_i64:
1784    case INDEX_op_div_i32:
1785    case INDEX_op_div_i64:
1786    case INDEX_op_divu_i32:
1787    case INDEX_op_divu_i64:
1788    case INDEX_op_sub_i32:
1789    case INDEX_op_sub_i64:
1790    case INDEX_op_and_i32:
1791    case INDEX_op_and_i64:
1792    case INDEX_op_andc_i32:
1793    case INDEX_op_andc_i64:
1794    case INDEX_op_or_i32:
1795    case INDEX_op_or_i64:
1796    case INDEX_op_orc_i32:
1797    case INDEX_op_orc_i64:
1798    case INDEX_op_xor_i32:
1799    case INDEX_op_xor_i64:
1800    case INDEX_op_shl_i32:
1801    case INDEX_op_shl_i64:
1802    case INDEX_op_shr_i32:
1803    case INDEX_op_shr_i64:
1804    case INDEX_op_sar_i32:
1805    case INDEX_op_sar_i64:
1806    case INDEX_op_setcond_i32:
1807    case INDEX_op_setcond_i64:
1808        return C_O1_I2(r, rZ, rJ);
1809
1810    case INDEX_op_brcond_i32:
1811    case INDEX_op_brcond_i64:
1812        return C_O0_I2(rZ, rJ);
1813    case INDEX_op_movcond_i32:
1814    case INDEX_op_movcond_i64:
1815        return C_O1_I4(r, rZ, rJ, rI, 0);
1816    case INDEX_op_add2_i32:
1817    case INDEX_op_add2_i64:
1818    case INDEX_op_sub2_i32:
1819    case INDEX_op_sub2_i64:
1820        return C_O2_I4(r, r, rZ, rZ, rJ, rJ);
1821    case INDEX_op_mulu2_i32:
1822    case INDEX_op_muls2_i32:
1823        return C_O2_I2(r, r, rZ, rJ);
1824    case INDEX_op_muluh_i64:
1825        return C_O1_I2(r, r, r);
1826
1827    case INDEX_op_qemu_ld_i32:
1828    case INDEX_op_qemu_ld_i64:
1829        return C_O1_I1(r, s);
1830    case INDEX_op_qemu_st_i32:
1831    case INDEX_op_qemu_st_i64:
1832        return C_O0_I2(sZ, s);
1833
1834    default:
1835        g_assert_not_reached();
1836    }
1837}
1838
1839static void tcg_target_init(TCGContext *s)
1840{
1841    /*
1842     * Only probe the platform's capabilities at runtime when they were
1843     * not already fixed at compile time via use_vis3_instructions.
1844     */
1845#ifndef use_vis3_instructions
1846    {
1847        unsigned long hwcap = qemu_getauxval(AT_HWCAP);
1848        use_vis3_instructions = (hwcap & HWCAP_SPARC_VIS3) != 0;
1849    }
1850#endif
1851
1852    tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS;
1853    tcg_target_available_regs[TCG_TYPE_I64] = ALL_GENERAL_REGS;
1854
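    /*
     * The %g and %o registers are not preserved across calls under the
     * SPARC ABI; the %l and %i registers are saved by the callee's
     * register window and so are left out of the clobber set.
     */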
1855    tcg_target_call_clobber_regs = 0;
1856    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G1);
1857    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G2);
1858    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G3);
1859    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G4);
1860    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G5);
1861    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G6);
1862    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G7);
1863    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O0);
1864    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O1);
1865    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O2);
1866    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O3);
1867    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O4);
1868    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O5);
1869    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O6);
1870    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O7);
1871
1872    s->reserved_regs = 0;
1873    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G0); /* zero */
1874    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G6); /* reserved for OS */
1875    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G7); /* thread pointer */
1876    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I6); /* frame pointer */
1877    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I7); /* return address */
1878    tcg_regset_set_reg(s->reserved_regs, TCG_REG_O6); /* stack pointer */
1879    tcg_regset_set_reg(s->reserved_regs, TCG_REG_T1); /* for internal use */
1880    tcg_regset_set_reg(s->reserved_regs, TCG_REG_T2); /* for internal use */
1881}
1882
1883#define ELF_HOST_MACHINE  EM_SPARCV9
1884
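/*
 * A minimal DWARF unwind description (one CIE plus one FDE) for the
 * JIT-generated code, passed to tcg_register_jit() below.
 */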
1885typedef struct {
1886    DebugFrameHeader h;
1887    uint8_t fde_def_cfa[4];
1888    uint8_t fde_win_save;
1889    uint8_t fde_ret_save[3];
1890} DebugFrame;
1891
1892static const DebugFrame debug_frame = {
1893    .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
1894    .h.cie.id = -1,
1895    .h.cie.version = 1,
1896    .h.cie.code_align = 1,
1897    .h.cie.data_align = -sizeof(void *) & 0x7f,
1898    .h.cie.return_column = 15,            /* o7 */
1899
1900    /* Total FDE size does not include the "len" member.  */
1901    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),
1902
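    /*
     * DW_CFA_def_cfa (opcode 12) with register 30 (%i6) and an offset of
     * 2047 (the SPARC v9 stack bias) encoded as a two-byte ULEB128.
     */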
1903    .fde_def_cfa = {
1904        12, 30,                         /* DW_CFA_def_cfa i6, 2047 */
1905        (2047 & 0x7f) | 0x80, (2047 >> 7)
1906    },
1907    .fde_win_save = 0x2d,               /* DW_CFA_GNU_window_save */
1908    .fde_ret_save = { 9, 15, 31 },      /* DW_CFA_register o7, i7 */
1909};
1910
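/*
 * Hand the generated-code buffer and the unwind info above to the common
 * TCG code, which takes care of registering it with the debugger.
 */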
1911void tcg_register_jit(const void *buf, size_t buf_size)
1912{
1913    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
1914}
1915