xref: /openbmc/qemu/tcg/sparc64/tcg-target.c.inc (revision 2a8af382)
1/*
2 * Tiny Code Generator for QEMU
3 *
4 * Copyright (c) 2008 Fabrice Bellard
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24
25/* We only support generating code for 64-bit mode.  */
26#ifndef __arch64__
27#error "unsupported code generation mode"
28#endif
29
30#include "../tcg-pool.c.inc"
31
32#ifdef CONFIG_DEBUG_TCG
33static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
34    "%g0",
35    "%g1",
36    "%g2",
37    "%g3",
38    "%g4",
39    "%g5",
40    "%g6",
41    "%g7",
42    "%o0",
43    "%o1",
44    "%o2",
45    "%o3",
46    "%o4",
47    "%o5",
48    "%o6",
49    "%o7",
50    "%l0",
51    "%l1",
52    "%l2",
53    "%l3",
54    "%l4",
55    "%l5",
56    "%l6",
57    "%l7",
58    "%i0",
59    "%i1",
60    "%i2",
61    "%i3",
62    "%i4",
63    "%i5",
64    "%i6",
65    "%i7",
66};
67#endif
68
69#define TCG_CT_CONST_S11  0x100
70#define TCG_CT_CONST_S13  0x200
71#define TCG_CT_CONST_ZERO 0x400
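/*
 * These correspond to the immediate forms used below: TCG_CT_CONST_S13
 * matches the signed 13-bit immediate of the arithmetic and load/store
 * instructions (-4096 .. 4095), TCG_CT_CONST_S11 the signed 11-bit
 * immediate of MOVcc (-1024 .. 1023), and TCG_CT_CONST_ZERO accepts
 * only the constant zero.
 */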
72
73/*
74 * For softmmu, we need to avoid conflicts with the first 3
75 * argument registers to perform the tlb lookup, and to call
76 * the helper function.
77 */
78#ifdef CONFIG_SOFTMMU
79#define SOFTMMU_RESERVE_REGS MAKE_64BIT_MASK(TCG_REG_O0, 3)
80#else
81#define SOFTMMU_RESERVE_REGS 0
82#endif
83#define ALL_GENERAL_REGS     MAKE_64BIT_MASK(0, 32)
84#define ALL_QLDST_REGS       (ALL_GENERAL_REGS & ~SOFTMMU_RESERVE_REGS)
85
86/* Define some temporary registers.  T2 is used for constant generation.  */
87#define TCG_REG_T1  TCG_REG_G1
88#define TCG_REG_T2  TCG_REG_O7
89
90#ifndef CONFIG_SOFTMMU
91# define TCG_GUEST_BASE_REG TCG_REG_I5
92#endif
93
94#define TCG_REG_TB  TCG_REG_I1
95
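/*
 * Allocation preference: the %l and %i registers come first because the
 * register window created by SAVE in the prologue preserves them across
 * helper calls, whereas the %g and %o registers are call-clobbered.
 */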
96static const int tcg_target_reg_alloc_order[] = {
97    TCG_REG_L0,
98    TCG_REG_L1,
99    TCG_REG_L2,
100    TCG_REG_L3,
101    TCG_REG_L4,
102    TCG_REG_L5,
103    TCG_REG_L6,
104    TCG_REG_L7,
105
106    TCG_REG_I0,
107    TCG_REG_I1,
108    TCG_REG_I2,
109    TCG_REG_I3,
110    TCG_REG_I4,
111    TCG_REG_I5,
112
113    TCG_REG_G2,
114    TCG_REG_G3,
115    TCG_REG_G4,
116    TCG_REG_G5,
117
118    TCG_REG_O0,
119    TCG_REG_O1,
120    TCG_REG_O2,
121    TCG_REG_O3,
122    TCG_REG_O4,
123    TCG_REG_O5,
124};
125
126static const int tcg_target_call_iarg_regs[6] = {
127    TCG_REG_O0,
128    TCG_REG_O1,
129    TCG_REG_O2,
130    TCG_REG_O3,
131    TCG_REG_O4,
132    TCG_REG_O5,
133};
134
135static const int tcg_target_call_oarg_regs[] = {
136    TCG_REG_O0,
137    TCG_REG_O1,
138    TCG_REG_O2,
139    TCG_REG_O3,
140};
141
142#define INSN_OP(x)  ((x) << 30)
143#define INSN_OP2(x) ((x) << 22)
144#define INSN_OP3(x) ((x) << 19)
145#define INSN_OPF(x) ((x) << 5)
146#define INSN_RD(x)  ((x) << 25)
147#define INSN_RS1(x) ((x) << 14)
148#define INSN_RS2(x) (x)
149#define INSN_ASI(x) ((x) << 5)
150
151#define INSN_IMM10(x) ((1 << 13) | ((x) & 0x3ff))
152#define INSN_IMM11(x) ((1 << 13) | ((x) & 0x7ff))
153#define INSN_IMM13(x) ((1 << 13) | ((x) & 0x1fff))
154#define INSN_OFF16(x) ((((x) >> 2) & 0x3fff) | ((((x) >> 16) & 3) << 20))
155#define INSN_OFF19(x) (((x) >> 2) & 0x07ffff)
156#define INSN_COND(x) ((x) << 25)
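/*
 * An instruction word is assembled by OR-ing these fields together.
 * For example, tcg_out_arith(s, rd, rs1, rs2, ARITH_ADD) below produces
 * ARITH_ADD | INSN_RD(rd) | INSN_RS1(rs1) | INSN_RS2(rs2), i.e.
 * "add %rs1, %rs2, %rd".  The INSN_IMM* forms set bit 13 (the i bit)
 * to select the immediate variant instead of RS2.
 */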
157
158#define COND_N     0x0
159#define COND_E     0x1
160#define COND_LE    0x2
161#define COND_L     0x3
162#define COND_LEU   0x4
163#define COND_CS    0x5
164#define COND_NEG   0x6
165#define COND_VS    0x7
166#define COND_A     0x8
167#define COND_NE    0x9
168#define COND_G     0xa
169#define COND_GE    0xb
170#define COND_GU    0xc
171#define COND_CC    0xd
172#define COND_POS   0xe
173#define COND_VC    0xf
174#define BA         (INSN_OP(0) | INSN_COND(COND_A) | INSN_OP2(0x2))
175
176#define RCOND_Z    1
177#define RCOND_LEZ  2
178#define RCOND_LZ   3
179#define RCOND_NZ   5
180#define RCOND_GZ   6
181#define RCOND_GEZ  7
182
183#define MOVCC_ICC  (1 << 18)
184#define MOVCC_XCC  (1 << 18 | 1 << 12)
185
186#define BPCC_ICC   0
187#define BPCC_XCC   (2 << 20)
188#define BPCC_PT    (1 << 19)
189#define BPCC_PN    0
190#define BPCC_A     (1 << 29)
191
192#define BPR_PT     BPCC_PT
193
194#define ARITH_ADD  (INSN_OP(2) | INSN_OP3(0x00))
195#define ARITH_ADDCC (INSN_OP(2) | INSN_OP3(0x10))
196#define ARITH_AND  (INSN_OP(2) | INSN_OP3(0x01))
197#define ARITH_ANDCC (INSN_OP(2) | INSN_OP3(0x11))
198#define ARITH_ANDN (INSN_OP(2) | INSN_OP3(0x05))
199#define ARITH_OR   (INSN_OP(2) | INSN_OP3(0x02))
200#define ARITH_ORCC (INSN_OP(2) | INSN_OP3(0x12))
201#define ARITH_ORN  (INSN_OP(2) | INSN_OP3(0x06))
202#define ARITH_XOR  (INSN_OP(2) | INSN_OP3(0x03))
203#define ARITH_SUB  (INSN_OP(2) | INSN_OP3(0x04))
204#define ARITH_SUBCC (INSN_OP(2) | INSN_OP3(0x14))
205#define ARITH_ADDC (INSN_OP(2) | INSN_OP3(0x08))
206#define ARITH_SUBC (INSN_OP(2) | INSN_OP3(0x0c))
207#define ARITH_UMUL (INSN_OP(2) | INSN_OP3(0x0a))
208#define ARITH_SMUL (INSN_OP(2) | INSN_OP3(0x0b))
209#define ARITH_UDIV (INSN_OP(2) | INSN_OP3(0x0e))
210#define ARITH_SDIV (INSN_OP(2) | INSN_OP3(0x0f))
211#define ARITH_MULX (INSN_OP(2) | INSN_OP3(0x09))
212#define ARITH_UDIVX (INSN_OP(2) | INSN_OP3(0x0d))
213#define ARITH_SDIVX (INSN_OP(2) | INSN_OP3(0x2d))
214#define ARITH_MOVCC (INSN_OP(2) | INSN_OP3(0x2c))
215#define ARITH_MOVR (INSN_OP(2) | INSN_OP3(0x2f))
216
217#define ARITH_ADDXC (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x11))
218#define ARITH_UMULXHI (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x16))
219
220#define SHIFT_SLL  (INSN_OP(2) | INSN_OP3(0x25))
221#define SHIFT_SRL  (INSN_OP(2) | INSN_OP3(0x26))
222#define SHIFT_SRA  (INSN_OP(2) | INSN_OP3(0x27))
223
224#define SHIFT_SLLX (INSN_OP(2) | INSN_OP3(0x25) | (1 << 12))
225#define SHIFT_SRLX (INSN_OP(2) | INSN_OP3(0x26) | (1 << 12))
226#define SHIFT_SRAX (INSN_OP(2) | INSN_OP3(0x27) | (1 << 12))
227
228#define RDY        (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(0))
229#define WRY        (INSN_OP(2) | INSN_OP3(0x30) | INSN_RD(0))
230#define JMPL       (INSN_OP(2) | INSN_OP3(0x38))
231#define RETURN     (INSN_OP(2) | INSN_OP3(0x39))
232#define SAVE       (INSN_OP(2) | INSN_OP3(0x3c))
233#define RESTORE    (INSN_OP(2) | INSN_OP3(0x3d))
234#define SETHI      (INSN_OP(0) | INSN_OP2(0x4))
235#define CALL       INSN_OP(1)
236#define LDUB       (INSN_OP(3) | INSN_OP3(0x01))
237#define LDSB       (INSN_OP(3) | INSN_OP3(0x09))
238#define LDUH       (INSN_OP(3) | INSN_OP3(0x02))
239#define LDSH       (INSN_OP(3) | INSN_OP3(0x0a))
240#define LDUW       (INSN_OP(3) | INSN_OP3(0x00))
241#define LDSW       (INSN_OP(3) | INSN_OP3(0x08))
242#define LDX        (INSN_OP(3) | INSN_OP3(0x0b))
243#define STB        (INSN_OP(3) | INSN_OP3(0x05))
244#define STH        (INSN_OP(3) | INSN_OP3(0x06))
245#define STW        (INSN_OP(3) | INSN_OP3(0x04))
246#define STX        (INSN_OP(3) | INSN_OP3(0x0e))
247#define LDUBA      (INSN_OP(3) | INSN_OP3(0x11))
248#define LDSBA      (INSN_OP(3) | INSN_OP3(0x19))
249#define LDUHA      (INSN_OP(3) | INSN_OP3(0x12))
250#define LDSHA      (INSN_OP(3) | INSN_OP3(0x1a))
251#define LDUWA      (INSN_OP(3) | INSN_OP3(0x10))
252#define LDSWA      (INSN_OP(3) | INSN_OP3(0x18))
253#define LDXA       (INSN_OP(3) | INSN_OP3(0x1b))
254#define STBA       (INSN_OP(3) | INSN_OP3(0x15))
255#define STHA       (INSN_OP(3) | INSN_OP3(0x16))
256#define STWA       (INSN_OP(3) | INSN_OP3(0x14))
257#define STXA       (INSN_OP(3) | INSN_OP3(0x1e))
258
259#define MEMBAR     (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(15) | (1 << 13))
260
261#define NOP        (SETHI | INSN_RD(TCG_REG_G0) | 0)
262
263#ifndef ASI_PRIMARY_LITTLE
264#define ASI_PRIMARY_LITTLE 0x88
265#endif
266
267#define LDUH_LE    (LDUHA | INSN_ASI(ASI_PRIMARY_LITTLE))
268#define LDSH_LE    (LDSHA | INSN_ASI(ASI_PRIMARY_LITTLE))
269#define LDUW_LE    (LDUWA | INSN_ASI(ASI_PRIMARY_LITTLE))
270#define LDSW_LE    (LDSWA | INSN_ASI(ASI_PRIMARY_LITTLE))
271#define LDX_LE     (LDXA  | INSN_ASI(ASI_PRIMARY_LITTLE))
272
273#define STH_LE     (STHA  | INSN_ASI(ASI_PRIMARY_LITTLE))
274#define STW_LE     (STWA  | INSN_ASI(ASI_PRIMARY_LITTLE))
275#define STX_LE     (STXA  | INSN_ASI(ASI_PRIMARY_LITTLE))
276
277#ifndef use_vis3_instructions
278bool use_vis3_instructions;
279#endif
280
281static bool check_fit_i64(int64_t val, unsigned int bits)
282{
283    return val == sextract64(val, 0, bits);
284}
285
286static bool check_fit_i32(int32_t val, unsigned int bits)
287{
288    return val == sextract32(val, 0, bits);
289}
290
291#define check_fit_tl    check_fit_i64
292#define check_fit_ptr   check_fit_i64
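/*
 * Example: a signed 13-bit field holds -4096 .. 4095, so
 * check_fit_i32(4095, 13) and check_fit_i32(-4096, 13) are true,
 * while check_fit_i32(4096, 13) is false.
 */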
293
294static bool patch_reloc(tcg_insn_unit *src_rw, int type,
295                        intptr_t value, intptr_t addend)
296{
297    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
298    uint32_t insn = *src_rw;
299    intptr_t pcrel;
300
301    value += addend;
302    pcrel = tcg_ptr_byte_diff((tcg_insn_unit *)value, src_rx);
303
304    switch (type) {
305    case R_SPARC_WDISP16:
306        if (!check_fit_ptr(pcrel >> 2, 16)) {
307            return false;
308        }
309        insn &= ~INSN_OFF16(-1);
310        insn |= INSN_OFF16(pcrel);
311        break;
312    case R_SPARC_WDISP19:
313        if (!check_fit_ptr(pcrel >> 2, 19)) {
314            return false;
315        }
316        insn &= ~INSN_OFF19(-1);
317        insn |= INSN_OFF19(pcrel);
318        break;
319    case R_SPARC_13:
320        if (!check_fit_ptr(value, 13)) {
321            return false;
322        }
323        insn &= ~INSN_IMM13(-1);
324        insn |= INSN_IMM13(value);
325        break;
326    default:
327        g_assert_not_reached();
328    }
329
330    *src_rw = insn;
331    return true;
332}
333
334/* test if a constant matches the constraint */
335static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
336{
337    if (ct & TCG_CT_CONST) {
338        return 1;
339    }
340
341    if (type == TCG_TYPE_I32) {
342        val = (int32_t)val;
343    }
344
345    if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
346        return 1;
347    } else if ((ct & TCG_CT_CONST_S11) && check_fit_tl(val, 11)) {
348        return 1;
349    } else if ((ct & TCG_CT_CONST_S13) && check_fit_tl(val, 13)) {
350        return 1;
351    } else {
352        return 0;
353    }
354}
355
356static void tcg_out_nop(TCGContext *s)
357{
358    tcg_out32(s, NOP);
359}
360
361static void tcg_out_arith(TCGContext *s, TCGReg rd, TCGReg rs1,
362                          TCGReg rs2, int op)
363{
364    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_RS2(rs2));
365}
366
367static void tcg_out_arithi(TCGContext *s, TCGReg rd, TCGReg rs1,
368                           int32_t offset, int op)
369{
370    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_IMM13(offset));
371}
372
373static void tcg_out_arithc(TCGContext *s, TCGReg rd, TCGReg rs1,
374                           int32_t val2, int val2const, int op)
375{
376    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1)
377              | (val2const ? INSN_IMM13(val2) : INSN_RS2(val2)));
378}
379
380static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
381{
382    if (ret != arg) {
383        tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR);
384    }
385    return true;
386}
387
388static void tcg_out_mov_delay(TCGContext *s, TCGReg ret, TCGReg arg)
389{
390    if (ret != arg) {
391        tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR);
392    } else {
393        tcg_out_nop(s);
394    }
395}
396
397static void tcg_out_sethi(TCGContext *s, TCGReg ret, uint32_t arg)
398{
399    tcg_out32(s, SETHI | INSN_RD(ret) | ((arg & 0xfffffc00) >> 10));
400}
401
402static void tcg_out_movi_imm13(TCGContext *s, TCGReg ret, int32_t arg)
403{
404    tcg_out_arithi(s, ret, TCG_REG_G0, arg, ARITH_OR);
405}
406
407static void tcg_out_movi_imm32(TCGContext *s, TCGReg ret, int32_t arg)
408{
409    if (check_fit_i32(arg, 13)) {
410        /* A 13-bit constant sign-extended to 64-bits.  */
411        tcg_out_movi_imm13(s, ret, arg);
412    } else {
413        /* A 32-bit constant zero-extended to 64 bits.  */
414        tcg_out_sethi(s, ret, arg);
415        if (arg & 0x3ff) {
416            tcg_out_arithi(s, ret, ret, arg & 0x3ff, ARITH_OR);
417        }
418    }
419}
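/*
 * For example, 0x12345678 does not fit in 13 bits, so the code above
 * emits "sethi %hi(0x12345678), ret" (filling bits 31:10, 0x12345400)
 * followed by "or ret, 0x278, ret".
 */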
420
421static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
422                             tcg_target_long arg, bool in_prologue,
423                             TCGReg scratch)
424{
425    tcg_target_long hi, lo = (int32_t)arg;
426    tcg_target_long test, lsb;
427
428    /* A 32-bit constant, or 32-bit zero-extended to 64-bits.  */
429    if (type == TCG_TYPE_I32 || arg == (uint32_t)arg) {
430        tcg_out_movi_imm32(s, ret, arg);
431        return;
432    }
433
434    /* A 13-bit constant sign-extended to 64-bits.  */
435    if (check_fit_tl(arg, 13)) {
436        tcg_out_movi_imm13(s, ret, arg);
437        return;
438    }
439
440    /* A 13-bit constant relative to the TB.  */
441    if (!in_prologue) {
442        test = tcg_tbrel_diff(s, (void *)arg);
443        if (check_fit_ptr(test, 13)) {
444            tcg_out_arithi(s, ret, TCG_REG_TB, test, ARITH_ADD);
445            return;
446        }
447    }
448
449    /* A 32-bit constant sign-extended to 64-bits.  */
450    if (arg == lo) {
451        tcg_out_sethi(s, ret, ~arg);
452        tcg_out_arithi(s, ret, ret, (arg & 0x3ff) | -0x400, ARITH_XOR);
453        return;
454    }
455
456    /* A 32-bit constant, shifted.  */
457    lsb = ctz64(arg);
458    test = (tcg_target_long)arg >> lsb;
459    if (lsb > 10 && test == extract64(test, 0, 21)) {
460        tcg_out_sethi(s, ret, test << 10);
461        tcg_out_arithi(s, ret, ret, lsb - 10, SHIFT_SLLX);
462        return;
463    } else if (test == (uint32_t)test || test == (int32_t)test) {
464        tcg_out_movi_int(s, TCG_TYPE_I64, ret, test, in_prologue, scratch);
465        tcg_out_arithi(s, ret, ret, lsb, SHIFT_SLLX);
466        return;
467    }
468
469    /* Use the constant pool, if possible. */
470    if (!in_prologue) {
471        new_pool_label(s, arg, R_SPARC_13, s->code_ptr,
472                       tcg_tbrel_diff(s, NULL));
473        tcg_out32(s, LDX | INSN_RD(ret) | INSN_RS1(TCG_REG_TB));
474        return;
475    }
476
477    /* A 64-bit constant decomposed into 2 32-bit pieces.  */
478    if (check_fit_i32(lo, 13)) {
479        hi = (arg - lo) >> 32;
480        tcg_out_movi_imm32(s, ret, hi);
481        tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
482        tcg_out_arithi(s, ret, ret, lo, ARITH_ADD);
483    } else {
484        hi = arg >> 32;
485        tcg_out_movi_imm32(s, ret, hi);
486        tcg_out_movi_imm32(s, scratch, lo);
487        tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
488        tcg_out_arith(s, ret, ret, scratch, ARITH_OR);
489    }
490}
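/*
 * Illustration of the final fallback, which is only reached in the
 * prologue where the constant pool is unavailable: for the arbitrary
 * example arg = 0x123456789abcdef0 the low half does not fit in 13 bits,
 * so 0x12345678 is built in RET and 0x9abcdef0 in SCRATCH (two insns
 * each), then "sllx ret, 32, ret" and "or ret, scratch, ret".
 */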
491
492static void tcg_out_movi(TCGContext *s, TCGType type,
493                         TCGReg ret, tcg_target_long arg)
494{
495    tcg_debug_assert(ret != TCG_REG_T2);
496    tcg_out_movi_int(s, type, ret, arg, false, TCG_REG_T2);
497}
498
499static void tcg_out_ldst_rr(TCGContext *s, TCGReg data, TCGReg a1,
500                            TCGReg a2, int op)
501{
502    tcg_out32(s, op | INSN_RD(data) | INSN_RS1(a1) | INSN_RS2(a2));
503}
504
505static void tcg_out_ldst(TCGContext *s, TCGReg ret, TCGReg addr,
506                         intptr_t offset, int op)
507{
508    if (check_fit_ptr(offset, 13)) {
509        tcg_out32(s, op | INSN_RD(ret) | INSN_RS1(addr) |
510                  INSN_IMM13(offset));
511    } else {
512        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, offset);
513        tcg_out_ldst_rr(s, ret, addr, TCG_REG_T1, op);
514    }
515}
516
517static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
518                       TCGReg arg1, intptr_t arg2)
519{
520    tcg_out_ldst(s, ret, arg1, arg2, (type == TCG_TYPE_I32 ? LDUW : LDX));
521}
522
523static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
524                       TCGReg arg1, intptr_t arg2)
525{
526    tcg_out_ldst(s, arg, arg1, arg2, (type == TCG_TYPE_I32 ? STW : STX));
527}
528
529static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
530                        TCGReg base, intptr_t ofs)
531{
532    if (val == 0) {
533        tcg_out_st(s, type, TCG_REG_G0, base, ofs);
534        return true;
535    }
536    return false;
537}
538
539static void tcg_out_sety(TCGContext *s, TCGReg rs)
540{
541    tcg_out32(s, WRY | INSN_RS1(TCG_REG_G0) | INSN_RS2(rs));
542}
543
544static void tcg_out_div32(TCGContext *s, TCGReg rd, TCGReg rs1,
545                          int32_t val2, int val2const, int uns)
546{
547    /* Load Y with the sign/zero extension of RS1 to 64-bits.  */
548    if (uns) {
549        tcg_out_sety(s, TCG_REG_G0);
550    } else {
551        tcg_out_arithi(s, TCG_REG_T1, rs1, 31, SHIFT_SRA);
552        tcg_out_sety(s, TCG_REG_T1);
553    }
554
555    tcg_out_arithc(s, rd, rs1, val2, val2const,
556                   uns ? ARITH_UDIV : ARITH_SDIV);
557}
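/*
 * The 32-bit UDIV/SDIV instructions divide the 64-bit value formed by
 * Y (high 32 bits) and RS1 (low 32 bits) by the divisor, which is why
 * Y must first be set to zero or to the sign extension of RS1.
 */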
558
559static const uint8_t tcg_cond_to_bcond[] = {
560    [TCG_COND_EQ] = COND_E,
561    [TCG_COND_NE] = COND_NE,
562    [TCG_COND_LT] = COND_L,
563    [TCG_COND_GE] = COND_GE,
564    [TCG_COND_LE] = COND_LE,
565    [TCG_COND_GT] = COND_G,
566    [TCG_COND_LTU] = COND_CS,
567    [TCG_COND_GEU] = COND_CC,
568    [TCG_COND_LEU] = COND_LEU,
569    [TCG_COND_GTU] = COND_GU,
570};
571
572static const uint8_t tcg_cond_to_rcond[] = {
573    [TCG_COND_EQ] = RCOND_Z,
574    [TCG_COND_NE] = RCOND_NZ,
575    [TCG_COND_LT] = RCOND_LZ,
576    [TCG_COND_GT] = RCOND_GZ,
577    [TCG_COND_LE] = RCOND_LEZ,
578    [TCG_COND_GE] = RCOND_GEZ
579};
580
581static void tcg_out_bpcc0(TCGContext *s, int scond, int flags, int off19)
582{
583    tcg_out32(s, INSN_OP(0) | INSN_OP2(1) | INSN_COND(scond) | flags | off19);
584}
585
586static void tcg_out_bpcc(TCGContext *s, int scond, int flags, TCGLabel *l)
587{
588    int off19 = 0;
589
590    if (l->has_value) {
591        off19 = INSN_OFF19(tcg_pcrel_diff(s, l->u.value_ptr));
592    } else {
593        tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP19, l, 0);
594    }
595    tcg_out_bpcc0(s, scond, flags, off19);
596}
597
598static void tcg_out_cmp(TCGContext *s, TCGReg c1, int32_t c2, int c2const)
599{
600    tcg_out_arithc(s, TCG_REG_G0, c1, c2, c2const, ARITH_SUBCC);
601}
602
603static void tcg_out_brcond_i32(TCGContext *s, TCGCond cond, TCGReg arg1,
604                               int32_t arg2, int const_arg2, TCGLabel *l)
605{
606    tcg_out_cmp(s, arg1, arg2, const_arg2);
607    tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_ICC | BPCC_PT, l);
608    tcg_out_nop(s);
609}
610
611static void tcg_out_movcc(TCGContext *s, TCGCond cond, int cc, TCGReg ret,
612                          int32_t v1, int v1const)
613{
614    tcg_out32(s, ARITH_MOVCC | cc | INSN_RD(ret)
615              | INSN_RS1(tcg_cond_to_bcond[cond])
616              | (v1const ? INSN_IMM11(v1) : INSN_RS2(v1)));
617}
618
619static void tcg_out_movcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
620                                TCGReg c1, int32_t c2, int c2const,
621                                int32_t v1, int v1const)
622{
623    tcg_out_cmp(s, c1, c2, c2const);
624    tcg_out_movcc(s, cond, MOVCC_ICC, ret, v1, v1const);
625}
626
627static void tcg_out_brcond_i64(TCGContext *s, TCGCond cond, TCGReg arg1,
628                               int32_t arg2, int const_arg2, TCGLabel *l)
629{
630    /* For 64-bit signed comparisons vs zero, we can avoid the compare.  */
631    if (arg2 == 0 && !is_unsigned_cond(cond)) {
632        int off16 = 0;
633
634        if (l->has_value) {
635            off16 = INSN_OFF16(tcg_pcrel_diff(s, l->u.value_ptr));
636        } else {
637            tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP16, l, 0);
638        }
639        tcg_out32(s, INSN_OP(0) | INSN_OP2(3) | BPR_PT | INSN_RS1(arg1)
640                  | INSN_COND(tcg_cond_to_rcond[cond]) | off16);
641    } else {
642        tcg_out_cmp(s, arg1, arg2, const_arg2);
643        tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_XCC | BPCC_PT, l);
644    }
645    tcg_out_nop(s);
646}
647
648static void tcg_out_movr(TCGContext *s, TCGCond cond, TCGReg ret, TCGReg c1,
649                         int32_t v1, int v1const)
650{
651    tcg_out32(s, ARITH_MOVR | INSN_RD(ret) | INSN_RS1(c1)
652              | (tcg_cond_to_rcond[cond] << 10)
653              | (v1const ? INSN_IMM10(v1) : INSN_RS2(v1)));
654}
655
656static void tcg_out_movcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
657                                TCGReg c1, int32_t c2, int c2const,
658                                int32_t v1, int v1const)
659{
660    /* For 64-bit signed comparisons vs zero, we can avoid the compare.
661       Note that the immediate range is one bit smaller, so we must check
662       for that as well.  */
663    if (c2 == 0 && !is_unsigned_cond(cond)
664        && (!v1const || check_fit_i32(v1, 10))) {
665        tcg_out_movr(s, cond, ret, c1, v1, v1const);
666    } else {
667        tcg_out_cmp(s, c1, c2, c2const);
668        tcg_out_movcc(s, cond, MOVCC_XCC, ret, v1, v1const);
669    }
670}
671
672static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
673                                TCGReg c1, int32_t c2, int c2const)
674{
675    /* For 32-bit comparisons, we can play games with ADDC/SUBC.  */
676    switch (cond) {
677    case TCG_COND_LTU:
678    case TCG_COND_GEU:
679        /* The result of the comparison is in the carry bit.  */
680        break;
681
682    case TCG_COND_EQ:
683    case TCG_COND_NE:
684        /* For equality, we can transform to inequality vs zero.  */
685        if (c2 != 0) {
686            tcg_out_arithc(s, TCG_REG_T1, c1, c2, c2const, ARITH_XOR);
687            c2 = TCG_REG_T1;
688        } else {
689            c2 = c1;
690        }
691        c1 = TCG_REG_G0, c2const = 0;
692        cond = (cond == TCG_COND_EQ ? TCG_COND_GEU : TCG_COND_LTU);
693        break;
694
695    case TCG_COND_GTU:
696    case TCG_COND_LEU:
697        /* If we don't need to load a constant into a register, we can
698           swap the operands on GTU/LEU.  There's no benefit to loading
699           the constant into a temporary register.  */
700        if (!c2const || c2 == 0) {
701            TCGReg t = c1;
702            c1 = c2;
703            c2 = t;
704            c2const = 0;
705            cond = tcg_swap_cond(cond);
706            break;
707        }
708        /* FALLTHRU */
709
710    default:
711        tcg_out_cmp(s, c1, c2, c2const);
712        tcg_out_movi_imm13(s, ret, 0);
713        tcg_out_movcc(s, cond, MOVCC_ICC, ret, 1, 1);
714        return;
715    }
716
717    tcg_out_cmp(s, c1, c2, c2const);
718    if (cond == TCG_COND_LTU) {
719        tcg_out_arithi(s, ret, TCG_REG_G0, 0, ARITH_ADDC);
720    } else {
721        tcg_out_arithi(s, ret, TCG_REG_G0, -1, ARITH_SUBC);
722    }
723}
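/*
 * Worked example of the carry trick above, for setcond_i32 LTU:
 *   subcc  c1, c2, %g0    ! C := (c1 <u c2)
 *   addc   %g0, 0, ret    ! ret := 0 + 0 + C
 * For GEU, the SUBC form computes ret := 0 - (-1) - C = 1 - C instead.
 */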
724
725static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
726                                TCGReg c1, int32_t c2, int c2const)
727{
728    if (use_vis3_instructions) {
729        switch (cond) {
730        case TCG_COND_NE:
731            if (c2 != 0) {
732                break;
733            }
734            c2 = c1, c2const = 0, c1 = TCG_REG_G0;
735            /* FALLTHRU */
736        case TCG_COND_LTU:
737            tcg_out_cmp(s, c1, c2, c2const);
738            tcg_out_arith(s, ret, TCG_REG_G0, TCG_REG_G0, ARITH_ADDXC);
739            return;
740        default:
741            break;
742        }
743    }
744
745    /* For 64-bit signed comparisons vs zero, we can avoid the compare
746       if the input does not overlap the output.  */
747    if (c2 == 0 && !is_unsigned_cond(cond) && c1 != ret) {
748        tcg_out_movi_imm13(s, ret, 0);
749        tcg_out_movr(s, cond, ret, c1, 1, 1);
750    } else {
751        tcg_out_cmp(s, c1, c2, c2const);
752        tcg_out_movi_imm13(s, ret, 0);
753        tcg_out_movcc(s, cond, MOVCC_XCC, ret, 1, 1);
754    }
755}
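/*
 * With VIS3, ADDXC adds its operands plus the carry from %xcc, so after
 * the comparison "addxc %g0, %g0, ret" leaves exactly the LTU result
 * (the carry bit) in RET.
 */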
756
757static void tcg_out_addsub2_i32(TCGContext *s, TCGReg rl, TCGReg rh,
758                                TCGReg al, TCGReg ah, int32_t bl, int blconst,
759                                int32_t bh, int bhconst, int opl, int oph)
760{
761    TCGReg tmp = TCG_REG_T1;
762
763    /* Note that the low parts are fully consumed before tmp is set.  */
764    if (rl != ah && (bhconst || rl != bh)) {
765        tmp = rl;
766    }
767
768    tcg_out_arithc(s, tmp, al, bl, blconst, opl);
769    tcg_out_arithc(s, rh, ah, bh, bhconst, oph);
770    tcg_out_mov(s, TCG_TYPE_I32, rl, tmp);
771}
772
773static void tcg_out_addsub2_i64(TCGContext *s, TCGReg rl, TCGReg rh,
774                                TCGReg al, TCGReg ah, int32_t bl, int blconst,
775                                int32_t bh, int bhconst, bool is_sub)
776{
777    TCGReg tmp = TCG_REG_T1;
778
779    /* Note that the low parts are fully consumed before tmp is set.  */
780    if (rl != ah && (bhconst || rl != bh)) {
781        tmp = rl;
782    }
783
784    tcg_out_arithc(s, tmp, al, bl, blconst, is_sub ? ARITH_SUBCC : ARITH_ADDCC);
785
786    if (use_vis3_instructions && !is_sub) {
787        /* Note that ADDXC doesn't accept immediates.  */
788        if (bhconst && bh != 0) {
789            tcg_out_movi_imm13(s, TCG_REG_T2, bh);
790            bh = TCG_REG_T2;
791        }
792        tcg_out_arith(s, rh, ah, bh, ARITH_ADDXC);
793    } else if (bh == TCG_REG_G0) {
794        /* If we have a zero, we can perform the operation in two insns,
795           with the arithmetic first, and a conditional move into place.  */
796        if (rh == ah) {
797            tcg_out_arithi(s, TCG_REG_T2, ah, 1,
798                           is_sub ? ARITH_SUB : ARITH_ADD);
799            tcg_out_movcc(s, TCG_COND_LTU, MOVCC_XCC, rh, TCG_REG_T2, 0);
800        } else {
801            tcg_out_arithi(s, rh, ah, 1, is_sub ? ARITH_SUB : ARITH_ADD);
802            tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, rh, ah, 0);
803        }
804    } else {
805        /*
806         * Otherwise adjust BH as if there is carry into T2.
807         * Note that constant BH is constrained to 11 bits for the MOVCC,
808         * so the adjustment fits 12 bits.
809         */
810        if (bhconst) {
811            tcg_out_movi_imm13(s, TCG_REG_T2, bh + (is_sub ? -1 : 1));
812        } else {
813            tcg_out_arithi(s, TCG_REG_T2, bh, 1,
814                           is_sub ? ARITH_SUB : ARITH_ADD);
815        }
816        /* ... smoosh T2 back to original BH if carry is clear ... */
817        tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, TCG_REG_T2, bh, bhconst);
818        /* ... and finally perform the arithmetic with the new operand.  */
819        tcg_out_arith(s, rh, ah, TCG_REG_T2, is_sub ? ARITH_SUB : ARITH_ADD);
820    }
821
822    tcg_out_mov(s, TCG_TYPE_I64, rl, tmp);
823}
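/*
 * Example of the generic path above, for add2 with a constant high part:
 * "addcc al, bl, rl" sets the carry from the low half; T2 is loaded with
 * BH + 1, conditionally reset to BH when the carry is clear (GEU), and
 * the high half is completed with "add ah, %t2, rh".
 */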
824
825static void tcg_out_jmpl_const(TCGContext *s, const tcg_insn_unit *dest,
826                               bool in_prologue, bool tail_call)
827{
828    uintptr_t desti = (uintptr_t)dest;
829
830    /* Be careful not to clobber %o7 for a tail call. */
831    tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_REG_T1,
832                     desti & ~0xfff, in_prologue,
833                     tail_call ? TCG_REG_G2 : TCG_REG_O7);
834    tcg_out_arithi(s, tail_call ? TCG_REG_G0 : TCG_REG_O7,
835                   TCG_REG_T1, desti & 0xfff, JMPL);
836}
837
838static void tcg_out_call_nodelay(TCGContext *s, const tcg_insn_unit *dest,
839                                 bool in_prologue)
840{
841    ptrdiff_t disp = tcg_pcrel_diff(s, dest);
842
843    if (disp == (int32_t)disp) {
844        tcg_out32(s, CALL | (uint32_t)disp >> 2);
845    } else {
846        tcg_out_jmpl_const(s, dest, in_prologue, false);
847    }
848}
849
850static void tcg_out_call(TCGContext *s, const tcg_insn_unit *dest,
851                         const TCGHelperInfo *info)
852{
853    tcg_out_call_nodelay(s, dest, false);
854    tcg_out_nop(s);
855}
856
857static void tcg_out_mb(TCGContext *s, TCGArg a0)
858{
859    /* Note that the TCG memory order constants mirror the Sparc MEMBAR.  */
860    tcg_out32(s, MEMBAR | (a0 & TCG_MO_ALL));
861}
862
863#ifdef CONFIG_SOFTMMU
864static const tcg_insn_unit *qemu_ld_trampoline[(MO_SSIZE | MO_BSWAP) + 1];
865static const tcg_insn_unit *qemu_st_trampoline[(MO_SIZE | MO_BSWAP) + 1];
866
867static void emit_extend(TCGContext *s, TCGReg r, int op)
868{
869    /* Emit zero extend of 8, 16 or 32 bit data as
870     * required by the MO_* value op; do nothing for 64 bit.
871     */
872    switch (op & MO_SIZE) {
873    case MO_8:
874        tcg_out_arithi(s, r, r, 0xff, ARITH_AND);
875        break;
876    case MO_16:
877        tcg_out_arithi(s, r, r, 16, SHIFT_SLL);
878        tcg_out_arithi(s, r, r, 16, SHIFT_SRL);
879        break;
880    case MO_32:
881        tcg_out_arith(s, r, r, 0, SHIFT_SRL);
882        break;
883    case MO_64:
884        break;
885    }
886}
887
888static void build_trampolines(TCGContext *s)
889{
890    static void * const qemu_ld_helpers[] = {
891        [MO_UB]   = helper_ret_ldub_mmu,
892        [MO_SB]   = helper_ret_ldsb_mmu,
893        [MO_LEUW] = helper_le_lduw_mmu,
894        [MO_LESW] = helper_le_ldsw_mmu,
895        [MO_LEUL] = helper_le_ldul_mmu,
896        [MO_LEUQ] = helper_le_ldq_mmu,
897        [MO_BEUW] = helper_be_lduw_mmu,
898        [MO_BESW] = helper_be_ldsw_mmu,
899        [MO_BEUL] = helper_be_ldul_mmu,
900        [MO_BEUQ] = helper_be_ldq_mmu,
901    };
902    static void * const qemu_st_helpers[] = {
903        [MO_UB]   = helper_ret_stb_mmu,
904        [MO_LEUW] = helper_le_stw_mmu,
905        [MO_LEUL] = helper_le_stl_mmu,
906        [MO_LEUQ] = helper_le_stq_mmu,
907        [MO_BEUW] = helper_be_stw_mmu,
908        [MO_BEUL] = helper_be_stl_mmu,
909        [MO_BEUQ] = helper_be_stq_mmu,
910    };
911
912    int i;
913
914    for (i = 0; i < ARRAY_SIZE(qemu_ld_helpers); ++i) {
915        if (qemu_ld_helpers[i] == NULL) {
916            continue;
917        }
918
919        /* May as well align the trampoline.  */
920        while ((uintptr_t)s->code_ptr & 15) {
921            tcg_out_nop(s);
922        }
923        qemu_ld_trampoline[i] = tcg_splitwx_to_rx(s->code_ptr);
924
925        /* Set the retaddr operand.  */
926        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O3, TCG_REG_O7);
927        /* Tail call.  */
928        tcg_out_jmpl_const(s, qemu_ld_helpers[i], true, true);
929        /* delay slot -- set the env argument */
930        tcg_out_mov_delay(s, TCG_REG_O0, TCG_AREG0);
931    }
932
933    for (i = 0; i < ARRAY_SIZE(qemu_st_helpers); ++i) {
934        if (qemu_st_helpers[i] == NULL) {
935            continue;
936        }
937
938        /* May as well align the trampoline.  */
939        while ((uintptr_t)s->code_ptr & 15) {
940            tcg_out_nop(s);
941        }
942        qemu_st_trampoline[i] = tcg_splitwx_to_rx(s->code_ptr);
943
944        emit_extend(s, TCG_REG_O2, i);
945
946        /* Set the retaddr operand.  */
947        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O4, TCG_REG_O7);
948
949        /* Tail call.  */
950        tcg_out_jmpl_const(s, qemu_st_helpers[i], true, true);
951        /* delay slot -- set the env argument */
952        tcg_out_mov_delay(s, TCG_REG_O0, TCG_AREG0);
953    }
954}
955#else
956static const tcg_insn_unit *qemu_unalign_ld_trampoline;
957static const tcg_insn_unit *qemu_unalign_st_trampoline;
958
959static void build_trampolines(TCGContext *s)
960{
961    for (int ld = 0; ld < 2; ++ld) {
962        void *helper;
963
964        while ((uintptr_t)s->code_ptr & 15) {
965            tcg_out_nop(s);
966        }
967
968        if (ld) {
969            helper = helper_unaligned_ld;
970            qemu_unalign_ld_trampoline = tcg_splitwx_to_rx(s->code_ptr);
971        } else {
972            helper = helper_unaligned_st;
973            qemu_unalign_st_trampoline = tcg_splitwx_to_rx(s->code_ptr);
974        }
975
976        /* Tail call.  */
977        tcg_out_jmpl_const(s, helper, true, true);
978        /* delay slot -- set the env argument */
979        tcg_out_mov_delay(s, TCG_REG_O0, TCG_AREG0);
980    }
981}
982#endif
983
984/* Generate global QEMU prologue and epilogue code */
985static void tcg_target_qemu_prologue(TCGContext *s)
986{
987    int tmp_buf_size, frame_size;
988
989    /*
990     * The TCG temp buffer is at the top of the frame, immediately
991     * below the frame pointer.  Use the logical (aligned) offset here;
992     * the stack bias is applied in temp_allocate_frame().
993     */
994    tmp_buf_size = CPU_TEMP_BUF_NLONGS * (int)sizeof(long);
995    tcg_set_frame(s, TCG_REG_I6, -tmp_buf_size, tmp_buf_size);
996
997    /*
998     * TCG_TARGET_CALL_STACK_OFFSET includes the stack bias, but is
999     * otherwise the minimal frame usable by callees.
1000     */
1001    frame_size = TCG_TARGET_CALL_STACK_OFFSET - TCG_TARGET_STACK_BIAS;
1002    frame_size += TCG_STATIC_CALL_ARGS_SIZE + tmp_buf_size;
1003    frame_size += TCG_TARGET_STACK_ALIGN - 1;
1004    frame_size &= -TCG_TARGET_STACK_ALIGN;
1005    tcg_out32(s, SAVE | INSN_RD(TCG_REG_O6) | INSN_RS1(TCG_REG_O6) |
1006              INSN_IMM13(-frame_size));
1007
1008#ifndef CONFIG_SOFTMMU
1009    if (guest_base != 0) {
1010        tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG,
1011                         guest_base, true, TCG_REG_T1);
1012        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
1013    }
1014#endif
1015
1016    /* We choose TCG_REG_TB such that no move is required.  */
1017    QEMU_BUILD_BUG_ON(TCG_REG_TB != TCG_REG_I1);
1018    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TB);
1019
1020    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I1, 0, JMPL);
1021    /* delay slot */
1022    tcg_out_nop(s);
1023
1024    /* Epilogue for goto_ptr.  */
1025    tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
1026    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
1027    /* delay slot */
1028    tcg_out_movi_imm13(s, TCG_REG_O0, 0);
1029
1030    build_trampolines(s);
1031}
1032
1033static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
1034{
1035    int i;
1036    for (i = 0; i < count; ++i) {
1037        p[i] = NOP;
1038    }
1039}
1040
1041#if defined(CONFIG_SOFTMMU)
1042
1043/* We expect to use a 13-bit negative offset from ENV.  */
1044QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
1045QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 12));
1046
1047/* Perform the TLB load and compare.
1048
1049   Inputs:
1050   ADDR contains the guest address to be loaded or stored.
1051
1052   MEM_INDEX and OPC are the memory context and memop of the access.
1053
1054   WHICH is the offset into the CPUTLBEntry structure of the slot to read.
1055   This should be offsetof addr_read or addr_write.
1056
1057   The result of the TLB comparison is in %[ix]cc.  The sanitized address
1058   is in the returned register, maybe %o0.  The TLB addend is in %o1.  */
1059
1060static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addr, int mem_index,
1061                               MemOp opc, int which)
1062{
1063    int fast_off = TLB_MASK_TABLE_OFS(mem_index);
1064    int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
1065    int table_off = fast_off + offsetof(CPUTLBDescFast, table);
1066    const TCGReg r0 = TCG_REG_O0;
1067    const TCGReg r1 = TCG_REG_O1;
1068    const TCGReg r2 = TCG_REG_O2;
1069    unsigned s_bits = opc & MO_SIZE;
1070    unsigned a_bits = get_alignment_bits(opc);
1071    tcg_target_long compare_mask;
1072
1073    /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx].  */
1074    tcg_out_ld(s, TCG_TYPE_PTR, r0, TCG_AREG0, mask_off);
1075    tcg_out_ld(s, TCG_TYPE_PTR, r1, TCG_AREG0, table_off);
1076
1077    /* Extract the page index, shifted into place for tlb index.  */
1078    tcg_out_arithi(s, r2, addr, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS,
1079                   SHIFT_SRL);
1080    tcg_out_arith(s, r2, r2, r0, ARITH_AND);
1081
1082    /* Add the tlb_table pointer, creating the CPUTLBEntry address into R2.  */
1083    tcg_out_arith(s, r2, r2, r1, ARITH_ADD);
1084
1085    /* Load the tlb comparator and the addend.  */
1086    tcg_out_ld(s, TCG_TYPE_TL, r0, r2, which);
1087    tcg_out_ld(s, TCG_TYPE_PTR, r1, r2, offsetof(CPUTLBEntry, addend));
1088
1089    /* Mask out the page offset, except for the required alignment.
1090       We don't support unaligned accesses.  */
1091    if (a_bits < s_bits) {
1092        a_bits = s_bits;
1093    }
1094    compare_mask = (tcg_target_ulong)TARGET_PAGE_MASK | ((1 << a_bits) - 1);
1095    if (check_fit_tl(compare_mask, 13)) {
1096        tcg_out_arithi(s, r2, addr, compare_mask, ARITH_AND);
1097    } else {
1098        tcg_out_movi(s, TCG_TYPE_TL, r2, compare_mask);
1099        tcg_out_arith(s, r2, addr, r2, ARITH_AND);
1100    }
1101    tcg_out_cmp(s, r0, r2, 0);
1102
1103    /* If the guest address must be zero-extended, do so now.  */
1104    if (TARGET_LONG_BITS == 32) {
1105        tcg_out_arithi(s, r0, addr, 0, SHIFT_SRL);
1106        return r0;
1107    }
1108    return addr;
1109}
1110#endif /* CONFIG_SOFTMMU */
1111
1112static const int qemu_ld_opc[(MO_SSIZE | MO_BSWAP) + 1] = {
1113    [MO_UB]   = LDUB,
1114    [MO_SB]   = LDSB,
1115    [MO_UB | MO_LE] = LDUB,
1116    [MO_SB | MO_LE] = LDSB,
1117
1118    [MO_BEUW] = LDUH,
1119    [MO_BESW] = LDSH,
1120    [MO_BEUL] = LDUW,
1121    [MO_BESL] = LDSW,
1122    [MO_BEUQ] = LDX,
1123    [MO_BESQ] = LDX,
1124
1125    [MO_LEUW] = LDUH_LE,
1126    [MO_LESW] = LDSH_LE,
1127    [MO_LEUL] = LDUW_LE,
1128    [MO_LESL] = LDSW_LE,
1129    [MO_LEUQ] = LDX_LE,
1130    [MO_LESQ] = LDX_LE,
1131};
1132
1133static const int qemu_st_opc[(MO_SIZE | MO_BSWAP) + 1] = {
1134    [MO_UB]   = STB,
1135
1136    [MO_BEUW] = STH,
1137    [MO_BEUL] = STW,
1138    [MO_BEUQ] = STX,
1139
1140    [MO_LEUW] = STH_LE,
1141    [MO_LEUL] = STW_LE,
1142    [MO_LEUQ] = STX_LE,
1143};
1144
1145static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
1146                            MemOpIdx oi, bool is_64)
1147{
1148    MemOp memop = get_memop(oi);
1149    tcg_insn_unit *label_ptr;
1150
1151#ifdef CONFIG_SOFTMMU
1152    unsigned memi = get_mmuidx(oi);
1153    TCGReg addrz;
1154    const tcg_insn_unit *func;
1155
1156    addrz = tcg_out_tlb_load(s, addr, memi, memop,
1157                             offsetof(CPUTLBEntry, addr_read));
1158
1159    /* The fast path is exactly one insn.  Thus we can perform the
1160       entire TLB Hit in the (annulled) delay slot of the branch
1161       over the TLB Miss case.  */
1162
1163    /* beq,a,pt %[xi]cc, label0 */
1164    label_ptr = s->code_ptr;
1165    tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT
1166                  | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
1167    /* delay slot */
1168    tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1,
1169                    qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]);
1170
1171    /* TLB Miss.  */
1172
1173    tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_O1, addrz);
1174
1175    /* We use the helpers to extend SB and SW data, leaving the case
1176       of SL needing explicit extending below.  */
1177    if ((memop & MO_SSIZE) == MO_SL) {
1178        func = qemu_ld_trampoline[memop & (MO_BSWAP | MO_SIZE)];
1179    } else {
1180        func = qemu_ld_trampoline[memop & (MO_BSWAP | MO_SSIZE)];
1181    }
1182    tcg_debug_assert(func != NULL);
1183    tcg_out_call_nodelay(s, func, false);
1184    /* delay slot */
1185    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_O2, oi);
1186
1187    /* We let the helper sign-extend SB and SW, but leave SL for here.  */
1188    if (is_64 && (memop & MO_SSIZE) == MO_SL) {
1189        tcg_out_arithi(s, data, TCG_REG_O0, 0, SHIFT_SRA);
1190    } else {
1191        tcg_out_mov(s, TCG_TYPE_REG, data, TCG_REG_O0);
1192    }
1193
1194    *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
1195#else
1196    TCGReg index = (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0);
1197    unsigned a_bits = get_alignment_bits(memop);
1198    unsigned s_bits = memop & MO_SIZE;
1199    unsigned t_bits;
1200
1201    if (TARGET_LONG_BITS == 32) {
1202        tcg_out_arithi(s, TCG_REG_T1, addr, 0, SHIFT_SRL);
1203        addr = TCG_REG_T1;
1204    }
1205
1206    /*
1207     * Normal case: alignment equal to access size.
1208     */
1209    if (a_bits == s_bits) {
1210        tcg_out_ldst_rr(s, data, addr, index,
1211                        qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]);
1212        return;
1213    }
1214
1215    /*
1216     * Test for at least natural alignment, and assume most accesses
1217     * will be aligned -- perform a straight load in the delay slot.
1218     * This is required to preserve atomicity for aligned accesses.
1219     */
1220    t_bits = MAX(a_bits, s_bits);
1221    tcg_debug_assert(t_bits < 13);
1222    tcg_out_arithi(s, TCG_REG_G0, addr, (1u << t_bits) - 1, ARITH_ANDCC);
1223
1224    /* beq,a,pt %icc, label */
1225    label_ptr = s->code_ptr;
1226    tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT | BPCC_ICC, 0);
1227    /* delay slot */
1228    tcg_out_ldst_rr(s, data, addr, index,
1229                    qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]);
1230
1231    if (a_bits >= s_bits) {
1232        /*
1233         * Overalignment: A successful alignment test will perform the memory
1234         * operation in the delay slot, and failure need only invoke the
1235         * handler for SIGBUS.
1236         */
1237        tcg_out_call_nodelay(s, qemu_unalign_ld_trampoline, false);
1238        /* delay slot -- move to low part of argument reg */
1239        tcg_out_mov_delay(s, TCG_REG_O1, addr);
1240    } else {
1241        /* Underalignment: load by pieces of minimum alignment. */
1242        int ld_opc, a_size, s_size, i;
1243
1244        /*
1245         * Force full address into T1 early; avoids problems with
1246         * overlap between @addr and @data.
1247         */
1248        tcg_out_arith(s, TCG_REG_T1, addr, index, ARITH_ADD);
1249
1250        a_size = 1 << a_bits;
1251        s_size = 1 << s_bits;
1252        if ((memop & MO_BSWAP) == MO_BE) {
1253            ld_opc = qemu_ld_opc[a_bits | MO_BE | (memop & MO_SIGN)];
1254            tcg_out_ldst(s, data, TCG_REG_T1, 0, ld_opc);
1255            ld_opc = qemu_ld_opc[a_bits | MO_BE];
1256            for (i = a_size; i < s_size; i += a_size) {
1257                tcg_out_ldst(s, TCG_REG_T2, TCG_REG_T1, i, ld_opc);
1258                tcg_out_arithi(s, data, data, a_size, SHIFT_SLLX);
1259                tcg_out_arith(s, data, data, TCG_REG_T2, ARITH_OR);
1260            }
1261        } else if (a_bits == 0) {
1262            ld_opc = LDUB;
1263            tcg_out_ldst(s, data, TCG_REG_T1, 0, ld_opc);
1264            for (i = a_size; i < s_size; i += a_size) {
1265                if ((memop & MO_SIGN) && i == s_size - a_size) {
1266                    ld_opc = LDSB;
1267                }
1268                tcg_out_ldst(s, TCG_REG_T2, TCG_REG_T1, i, ld_opc);
1269                tcg_out_arithi(s, TCG_REG_T2, TCG_REG_T2, i * 8, SHIFT_SLLX);
1270                tcg_out_arith(s, data, data, TCG_REG_T2, ARITH_OR);
1271            }
1272        } else {
1273            ld_opc = qemu_ld_opc[a_bits | MO_LE];
1274            tcg_out_ldst_rr(s, data, TCG_REG_T1, TCG_REG_G0, ld_opc);
1275            for (i = a_size; i < s_size; i += a_size) {
1276                tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, a_size, ARITH_ADD);
1277                if ((memop & MO_SIGN) && i == s_size - a_size) {
1278                    ld_opc = qemu_ld_opc[a_bits | MO_LE | MO_SIGN];
1279                }
1280                tcg_out_ldst_rr(s, TCG_REG_T2, TCG_REG_T1, TCG_REG_G0, ld_opc);
1281                tcg_out_arithi(s, TCG_REG_T2, TCG_REG_T2, i * 8, SHIFT_SLLX);
1282                tcg_out_arith(s, data, data, TCG_REG_T2, ARITH_OR);
1283            }
1284        }
1285    }
1286
1287    *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
1288#endif /* CONFIG_SOFTMMU */
1289}
1290
1291static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr,
1292                            MemOpIdx oi)
1293{
1294    MemOp memop = get_memop(oi);
1295    tcg_insn_unit *label_ptr;
1296
1297#ifdef CONFIG_SOFTMMU
1298    unsigned memi = get_mmuidx(oi);
1299    TCGReg addrz;
1300    const tcg_insn_unit *func;
1301
1302    addrz = tcg_out_tlb_load(s, addr, memi, memop,
1303                             offsetof(CPUTLBEntry, addr_write));
1304
1305    /* The fast path is exactly one insn.  Thus we can perform the entire
1306       TLB Hit in the (annulled) delay slot of the branch over TLB Miss.  */
1307    /* beq,a,pt %[xi]cc, label0 */
1308    label_ptr = s->code_ptr;
1309    tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT
1310                  | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
1311    /* delay slot */
1312    tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1,
1313                    qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]);
1314
1315    /* TLB Miss.  */
1316
1317    tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_O1, addrz);
1318    tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_O2, data);
1319
1320    func = qemu_st_trampoline[memop & (MO_BSWAP | MO_SIZE)];
1321    tcg_debug_assert(func != NULL);
1322    tcg_out_call_nodelay(s, func, false);
1323    /* delay slot */
1324    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_O3, oi);
1325
1326    *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
1327#else
1328    TCGReg index = (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0);
1329    unsigned a_bits = get_alignment_bits(memop);
1330    unsigned s_bits = memop & MO_SIZE;
1331    unsigned t_bits;
1332
1333    if (TARGET_LONG_BITS == 32) {
1334        tcg_out_arithi(s, TCG_REG_T1, addr, 0, SHIFT_SRL);
1335        addr = TCG_REG_T1;
1336    }
1337
1338    /*
1339     * Normal case: alignment equal to access size.
1340     */
1341    if (a_bits == s_bits) {
1342        tcg_out_ldst_rr(s, data, addr, index,
1343                        qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]);
1344        return;
1345    }
1346
1347    /*
1348     * Test for at least natural alignment, and assume most accesses
1349     * will be aligned -- perform a straight store in the delay slot.
1350     * This is required to preserve atomicity for aligned accesses.
1351     */
1352    t_bits = MAX(a_bits, s_bits);
1353    tcg_debug_assert(t_bits < 13);
1354    tcg_out_arithi(s, TCG_REG_G0, addr, (1u << t_bits) - 1, ARITH_ANDCC);
1355
1356    /* beq,a,pt %icc, label */
1357    label_ptr = s->code_ptr;
1358    tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT | BPCC_ICC, 0);
1359    /* delay slot */
1360    tcg_out_ldst_rr(s, data, addr, index,
1361                    qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]);
1362
1363    if (a_bits >= s_bits) {
1364        /*
1365         * Overalignment: A successful alignment test will perform the memory
1366         * operation in the delay slot, and failure need only invoke the
1367         * handler for SIGBUS.
1368         */
1369        tcg_out_call_nodelay(s, qemu_unalign_st_trampoline, false);
1370        /* delay slot -- move to low part of argument reg */
1371        tcg_out_mov_delay(s, TCG_REG_O1, addr);
1372    } else {
1373        /* Underalignment: store by pieces of minimum alignment. */
1374        int st_opc, a_size, s_size, i;
1375
1376        /*
1377         * Force full address into T1 early; avoids problems with
1378         * overlap between @addr and @data.
1379         */
1380        tcg_out_arith(s, TCG_REG_T1, addr, index, ARITH_ADD);
1381
1382        a_size = 1 << a_bits;
1383        s_size = 1 << s_bits;
1384        if ((memop & MO_BSWAP) == MO_BE) {
1385            st_opc = qemu_st_opc[a_bits | MO_BE];
1386            for (i = 0; i < s_size; i += a_size) {
1387                TCGReg d = data;
1388                int shift = (s_size - a_size - i) * 8;
1389                if (shift) {
1390                    d = TCG_REG_T2;
1391                    tcg_out_arithi(s, d, data, shift, SHIFT_SRLX);
1392                }
1393                tcg_out_ldst(s, d, TCG_REG_T1, i, st_opc);
1394            }
1395        } else if (a_bits == 0) {
1396            tcg_out_ldst(s, data, TCG_REG_T1, 0, STB);
1397            for (i = 1; i < s_size; i++) {
1398                tcg_out_arithi(s, TCG_REG_T2, data, i * 8, SHIFT_SRLX);
1399                tcg_out_ldst(s, TCG_REG_T2, TCG_REG_T1, i, STB);
1400            }
1401        } else {
1402            /* Note that ST*A with immediate asi must use indexed address. */
1403            st_opc = qemu_st_opc[a_bits + MO_LE];
1404            tcg_out_ldst_rr(s, data, TCG_REG_T1, TCG_REG_G0, st_opc);
1405            for (i = a_size; i < s_size; i += a_size) {
1406                tcg_out_arithi(s, TCG_REG_T2, data, i * 8, SHIFT_SRLX);
1407                tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, a_size, ARITH_ADD);
1408                tcg_out_ldst_rr(s, TCG_REG_T2, TCG_REG_T1, TCG_REG_G0, st_opc);
1409            }
1410        }
1411    }
1412
1413    *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
1414#endif /* CONFIG_SOFTMMU */
1415}
1416
1417static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
1418{
1419    if (check_fit_ptr(a0, 13)) {
1420        tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
1421        tcg_out_movi_imm13(s, TCG_REG_O0, a0);
1422        return;
1423    } else {
1424        intptr_t tb_diff = tcg_tbrel_diff(s, (void *)a0);
1425        if (check_fit_ptr(tb_diff, 13)) {
1426            tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
1427            /* Note that TCG_REG_TB has been unwound to O1.  */
1428            tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O1, tb_diff, ARITH_ADD);
1429            return;
1430        }
1431    }
1432    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I0, a0 & ~0x3ff);
1433    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
1434    tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O0, a0 & 0x3ff, ARITH_OR);
1435}
1436
1437static void tcg_out_goto_tb(TCGContext *s, int which)
1438{
1439    ptrdiff_t off = tcg_tbrel_diff(s, (void *)get_jmp_target_addr(s, which));
1440
1441    /* Direct branch will be patched by tb_target_set_jmp_target. */
1442    set_jmp_insn_offset(s, which);
1443    tcg_out32(s, CALL);
1444    /* delay slot */
1445    tcg_debug_assert(check_fit_ptr(off, 13));
1446    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TB, TCG_REG_TB, off);
1447    set_jmp_reset_offset(s, which);
1448
1449    /*
1450     * For the unlinked path of goto_tb, we need to reset TCG_REG_TB
1451     * to the beginning of this TB.
1452     */
1453    off = -tcg_current_code_size(s);
1454    if (check_fit_i32(off, 13)) {
1455        tcg_out_arithi(s, TCG_REG_TB, TCG_REG_TB, off, ARITH_ADD);
1456    } else {
1457        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, off);
1458        tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB, TCG_REG_T1, ARITH_ADD);
1459    }
1460}
1461
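/*
 * Reach of the three encodings patched below: the 19-bit BPcc word
 * displacement covers +-1MiB, the 22-bit Bicc form +-8MiB, and the
 * 30-bit CALL form +-2GiB, which suffices for any code_gen_buffer.
 */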
1462void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
1463                              uintptr_t jmp_rx, uintptr_t jmp_rw)
1464{
1465    uintptr_t addr = tb->jmp_target_addr[n];
1466    intptr_t br_disp = (intptr_t)(addr - jmp_rx) >> 2;
1467    tcg_insn_unit insn;
1468
1470    if (check_fit_ptr(br_disp, 19)) {
1471        /* ba,pt %icc, addr */
1472        insn = deposit32(INSN_OP(0) | INSN_OP2(1) | INSN_COND(COND_A)
1473                         | BPCC_ICC | BPCC_PT, 0, 19, br_disp);
1474    } else if (check_fit_ptr(br_disp, 22)) {
1475        /* ba addr */
1476        insn = deposit32(INSN_OP(0) | INSN_OP2(2) | INSN_COND(COND_A),
1477                         0, 22, br_disp);
1478    } else {
1479        /* The code_gen_buffer can't be larger than 2GB.  */
1480        tcg_debug_assert(check_fit_ptr(br_disp, 30));
1481        /* call addr */
1482        insn = deposit32(CALL, 0, 30, br_disp);
1483    }
1484
1485    qatomic_set((uint32_t *)jmp_rw, insn);
1486    flush_idcache_range(jmp_rx, jmp_rw, 4);
1487}
1488
1489static void tcg_out_op(TCGContext *s, TCGOpcode opc,
1490                       const TCGArg args[TCG_MAX_OP_ARGS],
1491                       const int const_args[TCG_MAX_OP_ARGS])
1492{
1493    TCGArg a0, a1, a2;
1494    int c, c2;
1495
1496    /* Hoist the loads of the most common arguments.  */
1497    a0 = args[0];
1498    a1 = args[1];
1499    a2 = args[2];
1500    c2 = const_args[2];
1501
1502    switch (opc) {
1503    case INDEX_op_goto_ptr:
1504        tcg_out_arithi(s, TCG_REG_G0, a0, 0, JMPL);
1505        tcg_out_mov_delay(s, TCG_REG_TB, a0);
1506        break;
1507    case INDEX_op_br:
1508        tcg_out_bpcc(s, COND_A, BPCC_PT, arg_label(a0));
1509        tcg_out_nop(s);
1510        break;
1511
1512#define OP_32_64(x)                             \
1513        glue(glue(case INDEX_op_, x), _i32):    \
1514        glue(glue(case INDEX_op_, x), _i64)
1515
1516    OP_32_64(ld8u):
1517        tcg_out_ldst(s, a0, a1, a2, LDUB);
1518        break;
1519    OP_32_64(ld8s):
1520        tcg_out_ldst(s, a0, a1, a2, LDSB);
1521        break;
1522    OP_32_64(ld16u):
1523        tcg_out_ldst(s, a0, a1, a2, LDUH);
1524        break;
1525    OP_32_64(ld16s):
1526        tcg_out_ldst(s, a0, a1, a2, LDSH);
1527        break;
1528    case INDEX_op_ld_i32:
1529    case INDEX_op_ld32u_i64:
1530        tcg_out_ldst(s, a0, a1, a2, LDUW);
1531        break;
1532    OP_32_64(st8):
1533        tcg_out_ldst(s, a0, a1, a2, STB);
1534        break;
1535    OP_32_64(st16):
1536        tcg_out_ldst(s, a0, a1, a2, STH);
1537        break;
1538    case INDEX_op_st_i32:
1539    case INDEX_op_st32_i64:
1540        tcg_out_ldst(s, a0, a1, a2, STW);
1541        break;
1542    OP_32_64(add):
1543        c = ARITH_ADD;
1544        goto gen_arith;
1545    OP_32_64(sub):
1546        c = ARITH_SUB;
1547        goto gen_arith;
1548    OP_32_64(and):
1549        c = ARITH_AND;
1550        goto gen_arith;
1551    OP_32_64(andc):
1552        c = ARITH_ANDN;
1553        goto gen_arith;
1554    OP_32_64(or):
1555        c = ARITH_OR;
1556        goto gen_arith;
1557    OP_32_64(orc):
1558        c = ARITH_ORN;
1559        goto gen_arith;
1560    OP_32_64(xor):
1561        c = ARITH_XOR;
1562        goto gen_arith;
1563    case INDEX_op_shl_i32:
1564        c = SHIFT_SLL;
1565    do_shift32:
1566        /* Limit immediate shift count lest we create an illegal insn.  */
1567        tcg_out_arithc(s, a0, a1, a2 & 31, c2, c);
1568        break;
1569    case INDEX_op_shr_i32:
1570        c = SHIFT_SRL;
1571        goto do_shift32;
1572    case INDEX_op_sar_i32:
1573        c = SHIFT_SRA;
1574        goto do_shift32;
1575    case INDEX_op_mul_i32:
1576        c = ARITH_UMUL;
1577        goto gen_arith;
1578
1579    OP_32_64(neg):
1580        c = ARITH_SUB;
1581        goto gen_arith1;
1582    OP_32_64(not):
1583        c = ARITH_ORN;
1584        goto gen_arith1;
1585
1586    case INDEX_op_div_i32:
1587        tcg_out_div32(s, a0, a1, a2, c2, 0);
1588        break;
1589    case INDEX_op_divu_i32:
1590        tcg_out_div32(s, a0, a1, a2, c2, 1);
1591        break;
1592
1593    case INDEX_op_brcond_i32:
1594        tcg_out_brcond_i32(s, a2, a0, a1, const_args[1], arg_label(args[3]));
1595        break;
1596    case INDEX_op_setcond_i32:
1597        tcg_out_setcond_i32(s, args[3], a0, a1, a2, c2);
1598        break;
1599    case INDEX_op_movcond_i32:
1600        tcg_out_movcond_i32(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
1601        break;
1602
1603    case INDEX_op_add2_i32:
1604        tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3],
1605                            args[4], const_args[4], args[5], const_args[5],
1606                            ARITH_ADDCC, ARITH_ADDC);
1607        break;
1608    case INDEX_op_sub2_i32:
1609        tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3],
1610                            args[4], const_args[4], args[5], const_args[5],
1611                            ARITH_SUBCC, ARITH_SUBC);
1612        break;
1613    case INDEX_op_mulu2_i32:
1614        c = ARITH_UMUL;
1615        goto do_mul2;
1616    case INDEX_op_muls2_i32:
1617        c = ARITH_SMUL;
1618    do_mul2:
1619        /* The 32-bit multiply insns produce a full 64-bit result. */
1620        tcg_out_arithc(s, a0, a2, args[3], const_args[3], c);
1621        tcg_out_arithi(s, a1, a0, 32, SHIFT_SRLX);
1622        break;
1623
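    /*
     * Guest memory accesses: a0 is the data register, a1 the guest address,
     * and a2 the MemOpIdx encoding the access size, sign and mmu index; the
     * final flag distinguishes 32-bit from 64-bit data.
     */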
1624    case INDEX_op_qemu_ld_i32:
1625        tcg_out_qemu_ld(s, a0, a1, a2, false);
1626        break;
1627    case INDEX_op_qemu_ld_i64:
1628        tcg_out_qemu_ld(s, a0, a1, a2, true);
1629        break;
1630    case INDEX_op_qemu_st_i32:
1631    case INDEX_op_qemu_st_i64:
1632        tcg_out_qemu_st(s, a0, a1, a2);
1633        break;
1634
1635    case INDEX_op_ld32s_i64:
1636        tcg_out_ldst(s, a0, a1, a2, LDSW);
1637        break;
1638    case INDEX_op_ld_i64:
1639        tcg_out_ldst(s, a0, a1, a2, LDX);
1640        break;
1641    case INDEX_op_st_i64:
1642        tcg_out_ldst(s, a0, a1, a2, STX);
1643        break;
1644    case INDEX_op_shl_i64:
1645        c = SHIFT_SLLX;
1646    do_shift64:
1647        /* Limit immediate shift count lest we create an illegal insn.  */
1648        tcg_out_arithc(s, a0, a1, a2 & 63, c2, c);
1649        break;
1650    case INDEX_op_shr_i64:
1651        c = SHIFT_SRLX;
1652        goto do_shift64;
1653    case INDEX_op_sar_i64:
1654        c = SHIFT_SRAX;
1655        goto do_shift64;
1656    case INDEX_op_mul_i64:
1657        c = ARITH_MULX;
1658        goto gen_arith;
1659    case INDEX_op_div_i64:
1660        c = ARITH_SDIVX;
1661        goto gen_arith;
1662    case INDEX_op_divu_i64:
1663        c = ARITH_UDIVX;
1664        goto gen_arith;
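    /*
     * On SPARC v9 the 32-bit shift forms write a properly extended 64-bit
     * result, so "sra src, 0, dst" sign-extends and "srl src, 0, dst"
     * zero-extends a 32-bit value in a single instruction.
     */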
1665    case INDEX_op_ext_i32_i64:
1666    case INDEX_op_ext32s_i64:
1667        tcg_out_arithi(s, a0, a1, 0, SHIFT_SRA);
1668        break;
1669    case INDEX_op_extu_i32_i64:
1670    case INDEX_op_ext32u_i64:
1671        tcg_out_arithi(s, a0, a1, 0, SHIFT_SRL);
1672        break;
1673    case INDEX_op_extrl_i64_i32:
1674        tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
1675        break;
1676    case INDEX_op_extrh_i64_i32:
1677        tcg_out_arithi(s, a0, a1, 32, SHIFT_SRLX);
1678        break;
1679
1680    case INDEX_op_brcond_i64:
1681        tcg_out_brcond_i64(s, a2, a0, a1, const_args[1], arg_label(args[3]));
1682        break;
1683    case INDEX_op_setcond_i64:
1684        tcg_out_setcond_i64(s, args[3], a0, a1, a2, c2);
1685        break;
1686    case INDEX_op_movcond_i64:
1687        tcg_out_movcond_i64(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
1688        break;
1689    case INDEX_op_add2_i64:
1690        tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4],
1691                            const_args[4], args[5], const_args[5], false);
1692        break;
1693    case INDEX_op_sub2_i64:
1694        tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4],
1695                            const_args[4], args[5], const_args[5], true);
1696        break;
1697    case INDEX_op_muluh_i64:
1698        tcg_out_arith(s, args[0], args[1], args[2], ARITH_UMULXHI);
1699        break;
1700
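    /*
     * Common tails for the ALU cases above: "c" holds the SPARC opcode and
     * "c2" says whether the second source operand is a constant.  gen_arith1
     * handles the one-source ops (neg, not) by using %g0 as the first source.
     */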
1701    gen_arith:
1702        tcg_out_arithc(s, a0, a1, a2, c2, c);
1703        break;
1704
1705    gen_arith1:
1706        tcg_out_arithc(s, a0, TCG_REG_G0, a1, const_args[1], c);
1707        break;
1708
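    /* a0 carries the TCGBar flags selecting the ordering for the membar.  */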
1709    case INDEX_op_mb:
1710        tcg_out_mb(s, a0);
1711        break;
1712
1713    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
1714    case INDEX_op_mov_i64:
1715    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
1716    case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb.  */
1717    case INDEX_op_goto_tb:  /* Always emitted via tcg_out_goto_tb.  */
1718    default:
1719        tcg_abort();
1720    }
1721}
1722
1723static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
1724{
1725    switch (op) {
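    /*
     * Constraint-set names encode operand counts: C_Ox_Iy(...) has x outputs
     * and y inputs.  For the letters (defined in the target's constraint
     * headers): 'r' is any general register, 's' excludes the registers
     * reserved for the softmmu helper call, 'Z' accepts constant zero, and
     * 'I'/'J' accept signed 11-bit/13-bit immediates respectively.
     */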
1726    case INDEX_op_goto_ptr:
1727        return C_O0_I1(r);
1728
1729    case INDEX_op_ld8u_i32:
1730    case INDEX_op_ld8u_i64:
1731    case INDEX_op_ld8s_i32:
1732    case INDEX_op_ld8s_i64:
1733    case INDEX_op_ld16u_i32:
1734    case INDEX_op_ld16u_i64:
1735    case INDEX_op_ld16s_i32:
1736    case INDEX_op_ld16s_i64:
1737    case INDEX_op_ld_i32:
1738    case INDEX_op_ld32u_i64:
1739    case INDEX_op_ld32s_i64:
1740    case INDEX_op_ld_i64:
1741    case INDEX_op_neg_i32:
1742    case INDEX_op_neg_i64:
1743    case INDEX_op_not_i32:
1744    case INDEX_op_not_i64:
1745    case INDEX_op_ext32s_i64:
1746    case INDEX_op_ext32u_i64:
1747    case INDEX_op_ext_i32_i64:
1748    case INDEX_op_extu_i32_i64:
1749    case INDEX_op_extrl_i64_i32:
1750    case INDEX_op_extrh_i64_i32:
1751        return C_O1_I1(r, r);
1752
1753    case INDEX_op_st8_i32:
1754    case INDEX_op_st8_i64:
1755    case INDEX_op_st16_i32:
1756    case INDEX_op_st16_i64:
1757    case INDEX_op_st_i32:
1758    case INDEX_op_st32_i64:
1759    case INDEX_op_st_i64:
1760        return C_O0_I2(rZ, r);
1761
1762    case INDEX_op_add_i32:
1763    case INDEX_op_add_i64:
1764    case INDEX_op_mul_i32:
1765    case INDEX_op_mul_i64:
1766    case INDEX_op_div_i32:
1767    case INDEX_op_div_i64:
1768    case INDEX_op_divu_i32:
1769    case INDEX_op_divu_i64:
1770    case INDEX_op_sub_i32:
1771    case INDEX_op_sub_i64:
1772    case INDEX_op_and_i32:
1773    case INDEX_op_and_i64:
1774    case INDEX_op_andc_i32:
1775    case INDEX_op_andc_i64:
1776    case INDEX_op_or_i32:
1777    case INDEX_op_or_i64:
1778    case INDEX_op_orc_i32:
1779    case INDEX_op_orc_i64:
1780    case INDEX_op_xor_i32:
1781    case INDEX_op_xor_i64:
1782    case INDEX_op_shl_i32:
1783    case INDEX_op_shl_i64:
1784    case INDEX_op_shr_i32:
1785    case INDEX_op_shr_i64:
1786    case INDEX_op_sar_i32:
1787    case INDEX_op_sar_i64:
1788    case INDEX_op_setcond_i32:
1789    case INDEX_op_setcond_i64:
1790        return C_O1_I2(r, rZ, rJ);
1791
1792    case INDEX_op_brcond_i32:
1793    case INDEX_op_brcond_i64:
1794        return C_O0_I2(rZ, rJ);
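    /*
     * The trailing "0" ties the last input (the value used when the
     * condition is false) to the output register.
     */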
1795    case INDEX_op_movcond_i32:
1796    case INDEX_op_movcond_i64:
1797        return C_O1_I4(r, rZ, rJ, rI, 0);
1798    case INDEX_op_add2_i32:
1799    case INDEX_op_add2_i64:
1800    case INDEX_op_sub2_i32:
1801    case INDEX_op_sub2_i64:
1802        return C_O2_I4(r, r, rZ, rZ, rJ, rJ);
1803    case INDEX_op_mulu2_i32:
1804    case INDEX_op_muls2_i32:
1805        return C_O2_I2(r, r, rZ, rJ);
1806    case INDEX_op_muluh_i64:
1807        return C_O1_I2(r, r, r);
1808
1809    case INDEX_op_qemu_ld_i32:
1810    case INDEX_op_qemu_ld_i64:
1811        return C_O1_I1(r, s);
1812    case INDEX_op_qemu_st_i32:
1813    case INDEX_op_qemu_st_i64:
1814        return C_O0_I2(sZ, s);
1815
1816    default:
1817        g_assert_not_reached();
1818    }
1819}
1820
1821static void tcg_target_init(TCGContext *s)
1822{
1823    /*
1824     * Only probe for the platform and capabilities if we haven't already
1825     * determined maximum values at compile time.
1826     */
1827#ifndef use_vis3_instructions
1828    {
1829        unsigned long hwcap = qemu_getauxval(AT_HWCAP);
1830        use_vis3_instructions = (hwcap & HWCAP_SPARC_VIS3) != 0;
1831    }
1832#endif
1833
1834    tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS;
1835    tcg_target_available_regs[TCG_TYPE_I64] = ALL_GENERAL_REGS;
1836
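    /*
     * Treat all %g and %o registers as clobbered by a call: the callee's
     * register window renames our %o registers to its %i registers, and the
     * globals are not guaranteed to be preserved.
     */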
1837    tcg_target_call_clobber_regs = 0;
1838    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G1);
1839    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G2);
1840    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G3);
1841    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G4);
1842    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G5);
1843    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G6);
1844    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G7);
1845    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O0);
1846    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O1);
1847    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O2);
1848    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O3);
1849    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O4);
1850    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O5);
1851    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O6);
1852    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O7);
1853
1854    s->reserved_regs = 0;
1855    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G0); /* zero */
1856    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G6); /* reserved for os */
1857    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G7); /* thread pointer */
1858    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I6); /* frame pointer */
1859    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I7); /* return address */
1860    tcg_regset_set_reg(s->reserved_regs, TCG_REG_O6); /* stack pointer */
1861    tcg_regset_set_reg(s->reserved_regs, TCG_REG_T1); /* for internal use */
1862    tcg_regset_set_reg(s->reserved_regs, TCG_REG_T2); /* for internal use */
1863}
1864
1865#define ELF_HOST_MACHINE  EM_SPARCV9
1866
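/*
 * Unwind info registered for the JIT region: the CFA is %i6 plus the
 * 2047-byte v9 stack bias, DW_CFA_GNU_window_save describes the register
 * window, and the caller's return address (%o7) is recovered from %i7.
 */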
1867typedef struct {
1868    DebugFrameHeader h;
1869    uint8_t fde_def_cfa[4];
1870    uint8_t fde_win_save;
1871    uint8_t fde_ret_save[3];
1872} DebugFrame;
1873
1874static const DebugFrame debug_frame = {
1875    .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
1876    .h.cie.id = -1,
1877    .h.cie.version = 1,
1878    .h.cie.code_align = 1,
1879    .h.cie.data_align = -sizeof(void *) & 0x7f,
1880    .h.cie.return_column = 15,            /* o7 */
1881
1882    /* Total FDE size does not include the "len" member.  */
1883    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),
1884
1885    .fde_def_cfa = {
1886        12, 30,                         /* DW_CFA_def_cfa i6, 2047 */
1887        (2047 & 0x7f) | 0x80, (2047 >> 7)
1888    },
1889    .fde_win_save = 0x2d,               /* DW_CFA_GNU_window_save */
1890    .fde_ret_save = { 9, 15, 31 },      /* DW_CFA_register o7, i7 */
1891};
1892
1893void tcg_register_jit(const void *buf, size_t buf_size)
1894{
1895    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
1896}
1897