xref: /openbmc/qemu/tcg/sparc64/tcg-target.c.inc (revision 2e1cacfb)
1/*
2 * Tiny Code Generator for QEMU
3 *
4 * Copyright (c) 2008 Fabrice Bellard
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24
25/* We only support generating code for 64-bit mode.  */
26#ifndef __arch64__
27#error "unsupported code generation mode"
28#endif
29
30#include "../tcg-ldst.c.inc"
31#include "../tcg-pool.c.inc"
32
33#ifdef CONFIG_DEBUG_TCG
34static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
35    "%g0",
36    "%g1",
37    "%g2",
38    "%g3",
39    "%g4",
40    "%g5",
41    "%g6",
42    "%g7",
43    "%o0",
44    "%o1",
45    "%o2",
46    "%o3",
47    "%o4",
48    "%o5",
49    "%o6",
50    "%o7",
51    "%l0",
52    "%l1",
53    "%l2",
54    "%l3",
55    "%l4",
56    "%l5",
57    "%l6",
58    "%l7",
59    "%i0",
60    "%i1",
61    "%i2",
62    "%i3",
63    "%i4",
64    "%i5",
65    "%i6",
66    "%i7",
67};
68#endif
69
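/*
 * Constant constraints: S11 matches the signed 11-bit immediate field of
 * MOVcc, S13 the standard simm13 field used by arithmetic and memory
 * instructions, and ZERO the constant 0 (for which %g0 serves as the
 * source).  See tcg_target_const_match() below.
 */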
70#define TCG_CT_CONST_S11  0x100
71#define TCG_CT_CONST_S13  0x200
72#define TCG_CT_CONST_ZERO 0x400
73
74#define ALL_GENERAL_REGS  MAKE_64BIT_MASK(0, 32)
75
76/* Define some temporary registers.  T3 is used for constant generation.  */
77#define TCG_REG_T1  TCG_REG_G1
78#define TCG_REG_T2  TCG_REG_G2
79#define TCG_REG_T3  TCG_REG_O7
80
81#ifndef CONFIG_SOFTMMU
82# define TCG_GUEST_BASE_REG TCG_REG_I5
83#endif
84
85#define TCG_REG_TB  TCG_REG_I1
86
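/*
 * Allocation order: registers preserved across calls by the register
 * window (%l and %i) come first, then the remaining globals, then the
 * call-clobbered %o registers.  %g0 (zero), %g6/%g7 (OS and thread
 * pointers), %o6/%i6 (stack and frame pointers), %i7 (return address)
 * and the T1/T2/T3 temporaries (%g1, %g2, %o7) are deliberately absent.
 */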
87static const int tcg_target_reg_alloc_order[] = {
88    TCG_REG_L0,
89    TCG_REG_L1,
90    TCG_REG_L2,
91    TCG_REG_L3,
92    TCG_REG_L4,
93    TCG_REG_L5,
94    TCG_REG_L6,
95    TCG_REG_L7,
96
97    TCG_REG_I0,
98    TCG_REG_I1,
99    TCG_REG_I2,
100    TCG_REG_I3,
101    TCG_REG_I4,
102    TCG_REG_I5,
103
104    TCG_REG_G3,
105    TCG_REG_G4,
106    TCG_REG_G5,
107
108    TCG_REG_O0,
109    TCG_REG_O1,
110    TCG_REG_O2,
111    TCG_REG_O3,
112    TCG_REG_O4,
113    TCG_REG_O5,
114};
115
116static const int tcg_target_call_iarg_regs[6] = {
117    TCG_REG_O0,
118    TCG_REG_O1,
119    TCG_REG_O2,
120    TCG_REG_O3,
121    TCG_REG_O4,
122    TCG_REG_O5,
123};
124
125static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
126{
127    tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
128    tcg_debug_assert(slot >= 0 && slot <= 3);
129    return TCG_REG_O0 + slot;
130}
131
132#define INSN_OP(x)  ((x) << 30)
133#define INSN_OP2(x) ((x) << 22)
134#define INSN_OP3(x) ((x) << 19)
135#define INSN_OPF(x) ((x) << 5)
136#define INSN_RD(x)  ((x) << 25)
137#define INSN_RS1(x) ((x) << 14)
138#define INSN_RS2(x) (x)
139#define INSN_ASI(x) ((x) << 5)
140
141#define INSN_IMM10(x) ((1 << 13) | ((x) & 0x3ff))
142#define INSN_IMM11(x) ((1 << 13) | ((x) & 0x7ff))
143#define INSN_IMM13(x) ((1 << 13) | ((x) & 0x1fff))
144#define INSN_OFF16(x) ((((x) >> 2) & 0x3fff) | ((((x) >> 16) & 3) << 20))
145#define INSN_OFF19(x) (((x) >> 2) & 0x07ffff)
146#define INSN_COND(x) ((x) << 25)
147
148#define COND_N     0x0
149#define COND_E     0x1
150#define COND_LE    0x2
151#define COND_L     0x3
152#define COND_LEU   0x4
153#define COND_CS    0x5
154#define COND_NEG   0x6
155#define COND_VS    0x7
156#define COND_A     0x8
157#define COND_NE    0x9
158#define COND_G     0xa
159#define COND_GE    0xb
160#define COND_GU    0xc
161#define COND_CC    0xd
162#define COND_POS   0xe
163#define COND_VC    0xf
164#define BA         (INSN_OP(0) | INSN_COND(COND_A) | INSN_OP2(0x2))
165
166#define RCOND_Z    1
167#define RCOND_LEZ  2
168#define RCOND_LZ   3
169#define RCOND_NZ   5
170#define RCOND_GZ   6
171#define RCOND_GEZ  7
172
173#define MOVCC_ICC  (1 << 18)
174#define MOVCC_XCC  (1 << 18 | 1 << 12)
175
176#define BPCC_ICC   0
177#define BPCC_XCC   (2 << 20)
178#define BPCC_PT    (1 << 19)
179#define BPCC_PN    0
180#define BPCC_A     (1 << 29)
181
182#define BPR_PT     BPCC_PT
183
184#define ARITH_ADD  (INSN_OP(2) | INSN_OP3(0x00))
185#define ARITH_ADDCC (INSN_OP(2) | INSN_OP3(0x10))
186#define ARITH_AND  (INSN_OP(2) | INSN_OP3(0x01))
187#define ARITH_ANDCC (INSN_OP(2) | INSN_OP3(0x11))
188#define ARITH_ANDN (INSN_OP(2) | INSN_OP3(0x05))
189#define ARITH_OR   (INSN_OP(2) | INSN_OP3(0x02))
190#define ARITH_ORCC (INSN_OP(2) | INSN_OP3(0x12))
191#define ARITH_ORN  (INSN_OP(2) | INSN_OP3(0x06))
192#define ARITH_XOR  (INSN_OP(2) | INSN_OP3(0x03))
193#define ARITH_SUB  (INSN_OP(2) | INSN_OP3(0x04))
194#define ARITH_SUBCC (INSN_OP(2) | INSN_OP3(0x14))
195#define ARITH_ADDC (INSN_OP(2) | INSN_OP3(0x08))
196#define ARITH_SUBC (INSN_OP(2) | INSN_OP3(0x0c))
197#define ARITH_UMUL (INSN_OP(2) | INSN_OP3(0x0a))
198#define ARITH_SMUL (INSN_OP(2) | INSN_OP3(0x0b))
199#define ARITH_UDIV (INSN_OP(2) | INSN_OP3(0x0e))
200#define ARITH_SDIV (INSN_OP(2) | INSN_OP3(0x0f))
201#define ARITH_MULX (INSN_OP(2) | INSN_OP3(0x09))
202#define ARITH_UDIVX (INSN_OP(2) | INSN_OP3(0x0d))
203#define ARITH_SDIVX (INSN_OP(2) | INSN_OP3(0x2d))
204#define ARITH_MOVCC (INSN_OP(2) | INSN_OP3(0x2c))
205#define ARITH_MOVR (INSN_OP(2) | INSN_OP3(0x2f))
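/*
 * As an illustration of the encoding helpers above: tcg_out_arith() below
 * assembles "add %o1, %o2, %o0" as
 *   ARITH_ADD | INSN_RD(TCG_REG_O0) | INSN_RS1(TCG_REG_O1) | INSN_RS2(TCG_REG_O2)
 * while tcg_out_arithi() substitutes INSN_IMM13(imm), whose embedded i-bit
 * (bit 13) selects the immediate form of the instruction.
 */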
206
207#define ARITH_ADDXC (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x11))
208#define ARITH_UMULXHI (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x16))
209
210#define SHIFT_SLL  (INSN_OP(2) | INSN_OP3(0x25))
211#define SHIFT_SRL  (INSN_OP(2) | INSN_OP3(0x26))
212#define SHIFT_SRA  (INSN_OP(2) | INSN_OP3(0x27))
213
214#define SHIFT_SLLX (INSN_OP(2) | INSN_OP3(0x25) | (1 << 12))
215#define SHIFT_SRLX (INSN_OP(2) | INSN_OP3(0x26) | (1 << 12))
216#define SHIFT_SRAX (INSN_OP(2) | INSN_OP3(0x27) | (1 << 12))
217
218#define RDY        (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(0))
219#define WRY        (INSN_OP(2) | INSN_OP3(0x30) | INSN_RD(0))
220#define JMPL       (INSN_OP(2) | INSN_OP3(0x38))
221#define RETURN     (INSN_OP(2) | INSN_OP3(0x39))
222#define SAVE       (INSN_OP(2) | INSN_OP3(0x3c))
223#define RESTORE    (INSN_OP(2) | INSN_OP3(0x3d))
224#define SETHI      (INSN_OP(0) | INSN_OP2(0x4))
225#define CALL       INSN_OP(1)
226#define LDUB       (INSN_OP(3) | INSN_OP3(0x01))
227#define LDSB       (INSN_OP(3) | INSN_OP3(0x09))
228#define LDUH       (INSN_OP(3) | INSN_OP3(0x02))
229#define LDSH       (INSN_OP(3) | INSN_OP3(0x0a))
230#define LDUW       (INSN_OP(3) | INSN_OP3(0x00))
231#define LDSW       (INSN_OP(3) | INSN_OP3(0x08))
232#define LDX        (INSN_OP(3) | INSN_OP3(0x0b))
233#define STB        (INSN_OP(3) | INSN_OP3(0x05))
234#define STH        (INSN_OP(3) | INSN_OP3(0x06))
235#define STW        (INSN_OP(3) | INSN_OP3(0x04))
236#define STX        (INSN_OP(3) | INSN_OP3(0x0e))
237#define LDUBA      (INSN_OP(3) | INSN_OP3(0x11))
238#define LDSBA      (INSN_OP(3) | INSN_OP3(0x19))
239#define LDUHA      (INSN_OP(3) | INSN_OP3(0x12))
240#define LDSHA      (INSN_OP(3) | INSN_OP3(0x1a))
241#define LDUWA      (INSN_OP(3) | INSN_OP3(0x10))
242#define LDSWA      (INSN_OP(3) | INSN_OP3(0x18))
243#define LDXA       (INSN_OP(3) | INSN_OP3(0x1b))
244#define STBA       (INSN_OP(3) | INSN_OP3(0x15))
245#define STHA       (INSN_OP(3) | INSN_OP3(0x16))
246#define STWA       (INSN_OP(3) | INSN_OP3(0x14))
247#define STXA       (INSN_OP(3) | INSN_OP3(0x1e))
248
249#define MEMBAR     (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(15) | (1 << 13))
250
251#define NOP        (SETHI | INSN_RD(TCG_REG_G0) | 0)
252
253#ifndef ASI_PRIMARY_LITTLE
254#define ASI_PRIMARY_LITTLE 0x88
255#endif
256
257#define LDUH_LE    (LDUHA | INSN_ASI(ASI_PRIMARY_LITTLE))
258#define LDSH_LE    (LDSHA | INSN_ASI(ASI_PRIMARY_LITTLE))
259#define LDUW_LE    (LDUWA | INSN_ASI(ASI_PRIMARY_LITTLE))
260#define LDSW_LE    (LDSWA | INSN_ASI(ASI_PRIMARY_LITTLE))
261#define LDX_LE     (LDXA  | INSN_ASI(ASI_PRIMARY_LITTLE))
262
263#define STH_LE     (STHA  | INSN_ASI(ASI_PRIMARY_LITTLE))
264#define STW_LE     (STWA  | INSN_ASI(ASI_PRIMARY_LITTLE))
265#define STX_LE     (STXA  | INSN_ASI(ASI_PRIMARY_LITTLE))
266
267#ifndef use_vis3_instructions
268bool use_vis3_instructions;
269#endif
270
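/*
 * Return true if VAL fits in a signed immediate field of BITS bits,
 * e.g. check_fit_i64(val, 13) tests for a valid simm13 operand.
 */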
271static bool check_fit_i64(int64_t val, unsigned int bits)
272{
273    return val == sextract64(val, 0, bits);
274}
275
276static bool check_fit_i32(int32_t val, unsigned int bits)
277{
278    return val == sextract32(val, 0, bits);
279}
280
281#define check_fit_tl    check_fit_i64
282#define check_fit_ptr   check_fit_i64
283
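/*
 * Patch a branch displacement (WDISP16 for branch-on-register, WDISP19 for
 * BPcc) or a 13-bit immediate (R_SPARC_13, used by the constant pool load
 * in tcg_out_movi_int) in the instruction at SRC_RW.  Returns false if the
 * relocated value does not fit in the field.
 */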
284static bool patch_reloc(tcg_insn_unit *src_rw, int type,
285                        intptr_t value, intptr_t addend)
286{
287    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
288    uint32_t insn = *src_rw;
289    intptr_t pcrel;
290
291    value += addend;
292    pcrel = tcg_ptr_byte_diff((tcg_insn_unit *)value, src_rx);
293
294    switch (type) {
295    case R_SPARC_WDISP16:
296        if (!check_fit_ptr(pcrel >> 2, 16)) {
297            return false;
298        }
299        insn &= ~INSN_OFF16(-1);
300        insn |= INSN_OFF16(pcrel);
301        break;
302    case R_SPARC_WDISP19:
303        if (!check_fit_ptr(pcrel >> 2, 19)) {
304            return false;
305        }
306        insn &= ~INSN_OFF19(-1);
307        insn |= INSN_OFF19(pcrel);
308        break;
309    case R_SPARC_13:
310        if (!check_fit_ptr(value, 13)) {
311            return false;
312        }
313        insn &= ~INSN_IMM13(-1);
314        insn |= INSN_IMM13(value);
315        break;
316    default:
317        g_assert_not_reached();
318    }
319
320    *src_rw = insn;
321    return true;
322}
323
324/* test if a constant matches the constraint */
325static bool tcg_target_const_match(int64_t val, int ct,
326                                   TCGType type, TCGCond cond, int vece)
327{
328    if (ct & TCG_CT_CONST) {
329        return 1;
330    }
331
332    if (type == TCG_TYPE_I32) {
333        val = (int32_t)val;
334    }
335
336    if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
337        return 1;
338    } else if ((ct & TCG_CT_CONST_S11) && check_fit_tl(val, 11)) {
339        return 1;
340    } else if ((ct & TCG_CT_CONST_S13) && check_fit_tl(val, 13)) {
341        return 1;
342    } else {
343        return 0;
344    }
345}
346
347static void tcg_out_nop(TCGContext *s)
348{
349    tcg_out32(s, NOP);
350}
351
352static void tcg_out_arith(TCGContext *s, TCGReg rd, TCGReg rs1,
353                          TCGReg rs2, int op)
354{
355    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_RS2(rs2));
356}
357
358static void tcg_out_arithi(TCGContext *s, TCGReg rd, TCGReg rs1,
359                           int32_t offset, int op)
360{
361    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_IMM13(offset));
362}
363
364static void tcg_out_arithc(TCGContext *s, TCGReg rd, TCGReg rs1,
365                           int32_t val2, int val2const, int op)
366{
367    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1)
368              | (val2const ? INSN_IMM13(val2) : INSN_RS2(val2)));
369}
370
371static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
372{
373    if (ret != arg) {
374        tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR);
375    }
376    return true;
377}
378
379static void tcg_out_mov_delay(TCGContext *s, TCGReg ret, TCGReg arg)
380{
381    if (ret != arg) {
382        tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR);
383    } else {
384        tcg_out_nop(s);
385    }
386}
387
388static void tcg_out_sethi(TCGContext *s, TCGReg ret, uint32_t arg)
389{
390    tcg_out32(s, SETHI | INSN_RD(ret) | ((arg & 0xfffffc00) >> 10));
391}
392
393/* A 13-bit constant sign-extended to 64 bits.  */
394static void tcg_out_movi_s13(TCGContext *s, TCGReg ret, int32_t arg)
395{
396    tcg_out_arithi(s, ret, TCG_REG_G0, arg, ARITH_OR);
397}
398
399/* A 32-bit constant sign-extended to 64 bits.  */
400static void tcg_out_movi_s32(TCGContext *s, TCGReg ret, int32_t arg)
401{
402    tcg_out_sethi(s, ret, ~arg);
403    tcg_out_arithi(s, ret, ret, (arg & 0x3ff) | -0x400, ARITH_XOR);
404}
405
406/* A 32-bit constant zero-extended to 64 bits.  */
407static void tcg_out_movi_u32(TCGContext *s, TCGReg ret, uint32_t arg)
408{
409    tcg_out_sethi(s, ret, arg);
410    if (arg & 0x3ff) {
411        tcg_out_arithi(s, ret, ret, arg & 0x3ff, ARITH_OR);
412    }
413}
414
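/*
 * Build an arbitrary constant in RET, trying the cheapest encodings first:
 * simm13, 32-bit forms, a small offset from TCG_REG_TB, a shifted 32-bit
 * constant, a constant pool load, and finally a full 64-bit construction
 * in two halves using SCRATCH.
 */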
415static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
416                             tcg_target_long arg, bool in_prologue,
417                             TCGReg scratch)
418{
419    tcg_target_long hi, lo = (int32_t)arg;
420    tcg_target_long test, lsb;
421
422    /* A 13-bit constant sign-extended to 64-bits.  */
423    if (check_fit_tl(arg, 13)) {
424        tcg_out_movi_s13(s, ret, arg);
425        return;
426    }
427
428    /* A 32-bit constant, or 32-bit zero-extended to 64-bits.  */
429    if (type == TCG_TYPE_I32 || arg == (uint32_t)arg) {
430        tcg_out_movi_u32(s, ret, arg);
431        return;
432    }
433
434    /* A 13-bit constant relative to the TB.  */
435    if (!in_prologue) {
436        test = tcg_tbrel_diff(s, (void *)arg);
437        if (check_fit_ptr(test, 13)) {
438            tcg_out_arithi(s, ret, TCG_REG_TB, test, ARITH_ADD);
439            return;
440        }
441    }
442
443    /* A 32-bit constant sign-extended to 64-bits.  */
444    if (arg == lo) {
445        tcg_out_movi_s32(s, ret, arg);
446        return;
447    }
448
449    /* A 32-bit constant, shifted.  */
450    lsb = ctz64(arg);
451    test = (tcg_target_long)arg >> lsb;
452    if (lsb > 10 && test == extract64(test, 0, 21)) {
453        tcg_out_sethi(s, ret, test << 10);
454        tcg_out_arithi(s, ret, ret, lsb - 10, SHIFT_SLLX);
455        return;
456    } else if (test == (uint32_t)test || test == (int32_t)test) {
457        tcg_out_movi_int(s, TCG_TYPE_I64, ret, test, in_prologue, scratch);
458        tcg_out_arithi(s, ret, ret, lsb, SHIFT_SLLX);
459        return;
460    }
461
462    /* Use the constant pool, if possible. */
463    if (!in_prologue) {
464        new_pool_label(s, arg, R_SPARC_13, s->code_ptr,
465                       tcg_tbrel_diff(s, NULL));
466        tcg_out32(s, LDX | INSN_RD(ret) | INSN_RS1(TCG_REG_TB));
467        return;
468    }
469
470    /* A 64-bit constant decomposed into 2 32-bit pieces.  */
471    if (check_fit_i32(lo, 13)) {
472        hi = (arg - lo) >> 32;
473        tcg_out_movi_u32(s, ret, hi);
474        tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
475        tcg_out_arithi(s, ret, ret, lo, ARITH_ADD);
476    } else {
477        hi = arg >> 32;
478        tcg_out_movi_u32(s, ret, hi);
479        tcg_out_movi_u32(s, scratch, lo);
480        tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
481        tcg_out_arith(s, ret, ret, scratch, ARITH_OR);
482    }
483}
484
485static void tcg_out_movi(TCGContext *s, TCGType type,
486                         TCGReg ret, tcg_target_long arg)
487{
488    tcg_debug_assert(ret != TCG_REG_T3);
489    tcg_out_movi_int(s, type, ret, arg, false, TCG_REG_T3);
490}
491
492static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
493{
494    g_assert_not_reached();
495}
496
497static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
498{
499    g_assert_not_reached();
500}
501
502static void tcg_out_ext8u(TCGContext *s, TCGReg rd, TCGReg rs)
503{
504    tcg_out_arithi(s, rd, rs, 0xff, ARITH_AND);
505}
506
507static void tcg_out_ext16u(TCGContext *s, TCGReg rd, TCGReg rs)
508{
509    tcg_out_arithi(s, rd, rs, 16, SHIFT_SLL);
510    tcg_out_arithi(s, rd, rd, 16, SHIFT_SRL);
511}
512
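/*
 * The 32-bit shift forms operate on the low 32 bits of the source and
 * sign- (SRA) or zero- (SRL) extend the 32-bit result to 64 bits, so a
 * shift by 0 implements ext32s/ext32u.
 */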
513static void tcg_out_ext32s(TCGContext *s, TCGReg rd, TCGReg rs)
514{
515    tcg_out_arithi(s, rd, rs, 0, SHIFT_SRA);
516}
517
518static void tcg_out_ext32u(TCGContext *s, TCGReg rd, TCGReg rs)
519{
520    tcg_out_arithi(s, rd, rs, 0, SHIFT_SRL);
521}
522
523static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
524{
525    tcg_out_ext32s(s, rd, rs);
526}
527
528static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
529{
530    tcg_out_ext32u(s, rd, rs);
531}
532
533static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rs)
534{
535    tcg_out_ext32u(s, rd, rs);
536}
537
538static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
539{
540    return false;
541}
542
543static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
544                             tcg_target_long imm)
545{
546    /* This function is only used for passing structs by reference. */
547    g_assert_not_reached();
548}
549
550static void tcg_out_ldst_rr(TCGContext *s, TCGReg data, TCGReg a1,
551                            TCGReg a2, int op)
552{
553    tcg_out32(s, op | INSN_RD(data) | INSN_RS1(a1) | INSN_RS2(a2));
554}
555
556static void tcg_out_ldst(TCGContext *s, TCGReg ret, TCGReg addr,
557                         intptr_t offset, int op)
558{
559    if (check_fit_ptr(offset, 13)) {
560        tcg_out32(s, op | INSN_RD(ret) | INSN_RS1(addr) |
561                  INSN_IMM13(offset));
562    } else {
563        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, offset);
564        tcg_out_ldst_rr(s, ret, addr, TCG_REG_T1, op);
565    }
566}
567
568static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
569                       TCGReg arg1, intptr_t arg2)
570{
571    tcg_out_ldst(s, ret, arg1, arg2, (type == TCG_TYPE_I32 ? LDUW : LDX));
572}
573
574static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
575                       TCGReg arg1, intptr_t arg2)
576{
577    tcg_out_ldst(s, arg, arg1, arg2, (type == TCG_TYPE_I32 ? STW : STX));
578}
579
580static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
581                        TCGReg base, intptr_t ofs)
582{
583    if (val == 0) {
584        tcg_out_st(s, type, TCG_REG_G0, base, ofs);
585        return true;
586    }
587    return false;
588}
589
590static void tcg_out_sety(TCGContext *s, TCGReg rs)
591{
592    tcg_out32(s, WRY | INSN_RS1(TCG_REG_G0) | INSN_RS2(rs));
593}
594
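/*
 * The 32-bit UDIV/SDIV instructions divide the 64-bit value {Y, RS1} by
 * the second operand, so Y must first be loaded with the high half of the
 * dividend: zero for unsigned division, the sign of RS1 for signed.
 */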
595static void tcg_out_div32(TCGContext *s, TCGReg rd, TCGReg rs1,
596                          int32_t val2, int val2const, int uns)
597{
598    /* Load Y with the sign/zero extension of RS1 to 64-bits.  */
599    if (uns) {
600        tcg_out_sety(s, TCG_REG_G0);
601    } else {
602        tcg_out_arithi(s, TCG_REG_T1, rs1, 31, SHIFT_SRA);
603        tcg_out_sety(s, TCG_REG_T1);
604    }
605
606    tcg_out_arithc(s, rd, rs1, val2, val2const,
607                   uns ? ARITH_UDIV : ARITH_SDIV);
608}
609
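/*
 * Condition mappings.  tcg_cond_to_bcond selects the BPcc condition to use
 * after a SUBcc (or ANDcc for TSTEQ/TSTNE, see tcg_out_cmp), while
 * tcg_cond_to_rcond selects the branch/move-on-register condition used to
 * test a register directly against zero.
 */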
610static const uint8_t tcg_cond_to_bcond[16] = {
611    [TCG_COND_EQ] = COND_E,
612    [TCG_COND_NE] = COND_NE,
613    [TCG_COND_TSTEQ] = COND_E,
614    [TCG_COND_TSTNE] = COND_NE,
615    [TCG_COND_LT] = COND_L,
616    [TCG_COND_GE] = COND_GE,
617    [TCG_COND_LE] = COND_LE,
618    [TCG_COND_GT] = COND_G,
619    [TCG_COND_LTU] = COND_CS,
620    [TCG_COND_GEU] = COND_CC,
621    [TCG_COND_LEU] = COND_LEU,
622    [TCG_COND_GTU] = COND_GU,
623};
624
625static const uint8_t tcg_cond_to_rcond[16] = {
626    [TCG_COND_EQ] = RCOND_Z,
627    [TCG_COND_NE] = RCOND_NZ,
628    [TCG_COND_LT] = RCOND_LZ,
629    [TCG_COND_GT] = RCOND_GZ,
630    [TCG_COND_LE] = RCOND_LEZ,
631    [TCG_COND_GE] = RCOND_GEZ
632};
633
634static void tcg_out_bpcc0(TCGContext *s, int scond, int flags, int off19)
635{
636    tcg_out32(s, INSN_OP(0) | INSN_OP2(1) | INSN_COND(scond) | flags | off19);
637}
638
639static void tcg_out_bpcc(TCGContext *s, int scond, int flags, TCGLabel *l)
640{
641    int off19 = 0;
642
643    if (l->has_value) {
644        off19 = INSN_OFF19(tcg_pcrel_diff(s, l->u.value_ptr));
645    } else {
646        tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP19, l, 0);
647    }
648    tcg_out_bpcc0(s, scond, flags, off19);
649}
650
651static void tcg_out_cmp(TCGContext *s, TCGCond cond,
652                        TCGReg c1, int32_t c2, int c2const)
653{
654    tcg_out_arithc(s, TCG_REG_G0, c1, c2, c2const,
655                   is_tst_cond(cond) ? ARITH_ANDCC : ARITH_SUBCC);
656}
657
658static void tcg_out_brcond_i32(TCGContext *s, TCGCond cond, TCGReg arg1,
659                               int32_t arg2, int const_arg2, TCGLabel *l)
660{
661    tcg_out_cmp(s, cond, arg1, arg2, const_arg2);
662    tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_ICC | BPCC_PT, l);
663    tcg_out_nop(s);
664}
665
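/*
 * MOVcc accepts only a signed 11-bit immediate, hence TCG_CT_CONST_S11;
 * the register-based MOVR below is one bit smaller still, which
 * tcg_out_movcond_i64() must check explicitly.
 */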
666static void tcg_out_movcc(TCGContext *s, TCGCond cond, int cc, TCGReg ret,
667                          int32_t v1, int v1const)
668{
669    tcg_out32(s, ARITH_MOVCC | cc | INSN_RD(ret)
670              | INSN_RS1(tcg_cond_to_bcond[cond])
671              | (v1const ? INSN_IMM11(v1) : INSN_RS2(v1)));
672}
673
674static void tcg_out_movcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
675                                TCGReg c1, int32_t c2, int c2const,
676                                int32_t v1, int v1const)
677{
678    tcg_out_cmp(s, cond, c1, c2, c2const);
679    tcg_out_movcc(s, cond, MOVCC_ICC, ret, v1, v1const);
680}
681
682static void tcg_out_brcond_i64(TCGContext *s, TCGCond cond, TCGReg arg1,
683                               int32_t arg2, int const_arg2, TCGLabel *l)
684{
685    /* For 64-bit signed comparisons vs zero, we can avoid the compare.  */
686    int rcond = tcg_cond_to_rcond[cond];
687    if (arg2 == 0 && rcond) {
688        int off16 = 0;
689
690        if (l->has_value) {
691            off16 = INSN_OFF16(tcg_pcrel_diff(s, l->u.value_ptr));
692        } else {
693            tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP16, l, 0);
694        }
695        tcg_out32(s, INSN_OP(0) | INSN_OP2(3) | BPR_PT | INSN_RS1(arg1)
696                  | INSN_COND(rcond) | off16);
697    } else {
698        tcg_out_cmp(s, cond, arg1, arg2, const_arg2);
699        tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_XCC | BPCC_PT, l);
700    }
701    tcg_out_nop(s);
702}
703
704static void tcg_out_movr(TCGContext *s, int rcond, TCGReg ret, TCGReg c1,
705                         int32_t v1, int v1const)
706{
707    tcg_out32(s, ARITH_MOVR | INSN_RD(ret) | INSN_RS1(c1) | (rcond << 10)
708              | (v1const ? INSN_IMM10(v1) : INSN_RS2(v1)));
709}
710
711static void tcg_out_movcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
712                                TCGReg c1, int32_t c2, int c2const,
713                                int32_t v1, int v1const)
714{
715    /* For 64-bit signed comparisons vs zero, we can avoid the compare.
716       Note that the immediate range is one bit smaller, so we must check
717       for that as well.  */
718    int rcond = tcg_cond_to_rcond[cond];
719    if (c2 == 0 && rcond && (!v1const || check_fit_i32(v1, 10))) {
720        tcg_out_movr(s, rcond, ret, c1, v1, v1const);
721    } else {
722        tcg_out_cmp(s, cond, c1, c2, c2const);
723        tcg_out_movcc(s, cond, MOVCC_XCC, ret, v1, v1const);
724    }
725}
726
727static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
728                                TCGReg c1, int32_t c2, int c2const, bool neg)
729{
730    /* For 32-bit comparisons, we can play games with ADDC/SUBC.  */
731    switch (cond) {
732    case TCG_COND_LTU:
733    case TCG_COND_GEU:
734        /* The result of the comparison is in the carry bit.  */
735        break;
736
737    case TCG_COND_EQ:
738    case TCG_COND_NE:
739        /* For equality, we can transform to inequality vs zero.  */
740        if (c2 != 0) {
741            tcg_out_arithc(s, TCG_REG_T1, c1, c2, c2const, ARITH_XOR);
742            c2 = TCG_REG_T1;
743        } else {
744            c2 = c1;
745        }
746        c1 = TCG_REG_G0, c2const = 0;
747        cond = (cond == TCG_COND_EQ ? TCG_COND_GEU : TCG_COND_LTU);
748        break;
749
750    case TCG_COND_TSTEQ:
751    case TCG_COND_TSTNE:
752        /* Transform to inequality vs zero.  */
753        tcg_out_arithc(s, TCG_REG_T1, c1, c2, c2const, ARITH_AND);
754        c1 = TCG_REG_G0;
755        c2 = TCG_REG_T1, c2const = 0;
756        cond = (cond == TCG_COND_TSTEQ ? TCG_COND_GEU : TCG_COND_LTU);
757        break;
758
759    case TCG_COND_GTU:
760    case TCG_COND_LEU:
761        /* If we don't need to load a constant into a register, we can
762           swap the operands on GTU/LEU.  There's no benefit to loading
763           the constant into a temporary register.  */
764        if (!c2const || c2 == 0) {
765            TCGReg t = c1;
766            c1 = c2;
767            c2 = t;
768            c2const = 0;
769            cond = tcg_swap_cond(cond);
770            break;
771        }
772        /* FALLTHRU */
773
774    default:
775        tcg_out_cmp(s, cond, c1, c2, c2const);
776        tcg_out_movi_s13(s, ret, 0);
777        tcg_out_movcc(s, cond, MOVCC_ICC, ret, neg ? -1 : 1, 1);
778        return;
779    }
780
781    tcg_out_cmp(s, cond, c1, c2, c2const);
782    if (cond == TCG_COND_LTU) {
783        if (neg) {
784            /* 0 - 0 - C = -C = (C ? -1 : 0) */
785            tcg_out_arithi(s, ret, TCG_REG_G0, 0, ARITH_SUBC);
786        } else {
787            /* 0 + 0 + C =  C = (C ? 1 : 0) */
788            tcg_out_arithi(s, ret, TCG_REG_G0, 0, ARITH_ADDC);
789        }
790    } else {
791        if (neg) {
792            /* 0 + -1 + C = C - 1 = (C ? 0 : -1) */
793            tcg_out_arithi(s, ret, TCG_REG_G0, -1, ARITH_ADDC);
794        } else {
795            /* 0 - -1 - C = 1 - C = (C ? 0 : 1) */
796            tcg_out_arithi(s, ret, TCG_REG_G0, -1, ARITH_SUBC);
797        }
798    }
799}
800
801static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
802                                TCGReg c1, int32_t c2, int c2const, bool neg)
803{
804    int rcond;
805
806    if (use_vis3_instructions && !neg) {
807        switch (cond) {
808        case TCG_COND_NE:
809            if (c2 != 0) {
810                break;
811            }
812            c2 = c1, c2const = 0, c1 = TCG_REG_G0;
813            /* FALLTHRU */
814        case TCG_COND_LTU:
815            tcg_out_cmp(s, cond, c1, c2, c2const);
816            tcg_out_arith(s, ret, TCG_REG_G0, TCG_REG_G0, ARITH_ADDXC);
817            return;
818        default:
819            break;
820        }
821    }
822
823    /* For 64-bit signed comparisons vs zero, we can avoid the compare
824       if the input does not overlap the output.  */
825    rcond = tcg_cond_to_rcond[cond];
826    if (c2 == 0 && rcond && c1 != ret) {
827        tcg_out_movi_s13(s, ret, 0);
828        tcg_out_movr(s, rcond, ret, c1, neg ? -1 : 1, 1);
829    } else {
830        tcg_out_cmp(s, cond, c1, c2, c2const);
831        tcg_out_movi_s13(s, ret, 0);
832        tcg_out_movcc(s, cond, MOVCC_XCC, ret, neg ? -1 : 1, 1);
833    }
834}
835
836static void tcg_out_addsub2_i32(TCGContext *s, TCGReg rl, TCGReg rh,
837                                TCGReg al, TCGReg ah, int32_t bl, int blconst,
838                                int32_t bh, int bhconst, int opl, int oph)
839{
840    TCGReg tmp = TCG_REG_T1;
841
842    /* Note that the low parts are fully consumed before tmp is set.  */
843    if (rl != ah && (bhconst || rl != bh)) {
844        tmp = rl;
845    }
846
847    tcg_out_arithc(s, tmp, al, bl, blconst, opl);
848    tcg_out_arithc(s, rh, ah, bh, bhconst, oph);
849    tcg_out_mov(s, TCG_TYPE_I32, rl, tmp);
850}
851
852static void tcg_out_addsub2_i64(TCGContext *s, TCGReg rl, TCGReg rh,
853                                TCGReg al, TCGReg ah, int32_t bl, int blconst,
854                                int32_t bh, int bhconst, bool is_sub)
855{
856    TCGReg tmp = TCG_REG_T1;
857
858    /* Note that the low parts are fully consumed before tmp is set.  */
859    if (rl != ah && (bhconst || rl != bh)) {
860        tmp = rl;
861    }
862
863    tcg_out_arithc(s, tmp, al, bl, blconst, is_sub ? ARITH_SUBCC : ARITH_ADDCC);
864
865    if (use_vis3_instructions && !is_sub) {
866        /* Note that ADDXC doesn't accept immediates.  */
867        if (bhconst && bh != 0) {
868           tcg_out_movi_s13(s, TCG_REG_T2, bh);
869           bh = TCG_REG_T2;
870        }
871        tcg_out_arith(s, rh, ah, bh, ARITH_ADDXC);
872    } else if (bh == TCG_REG_G0) {
873        /* If we have a zero, we can perform the operation in two insns,
874           with the arithmetic first, and a conditional move into place.  */
875        if (rh == ah) {
876            tcg_out_arithi(s, TCG_REG_T2, ah, 1,
877                           is_sub ? ARITH_SUB : ARITH_ADD);
878            tcg_out_movcc(s, TCG_COND_LTU, MOVCC_XCC, rh, TCG_REG_T2, 0);
879        } else {
880            tcg_out_arithi(s, rh, ah, 1, is_sub ? ARITH_SUB : ARITH_ADD);
881            tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, rh, ah, 0);
882        }
883    } else {
884        /*
885         * Otherwise adjust BH as if there is carry into T2.
886         * Note that constant BH is constrained to 11 bits for the MOVCC,
887         * so the adjustment fits 12 bits.
888         */
889        if (bhconst) {
890            tcg_out_movi_s13(s, TCG_REG_T2, bh + (is_sub ? -1 : 1));
891        } else {
892            tcg_out_arithi(s, TCG_REG_T2, bh, 1,
893                           is_sub ? ARITH_SUB : ARITH_ADD);
894        }
895        /* ... smoosh T2 back to original BH if carry is clear ... */
896        tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, TCG_REG_T2, bh, bhconst);
897        /* ... and finally perform the arithmetic with the new operand.  */
898        tcg_out_arith(s, rh, ah, TCG_REG_T2, is_sub ? ARITH_SUB : ARITH_ADD);
899    }
900
901    tcg_out_mov(s, TCG_TYPE_I64, rl, tmp);
902}
903
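/*
 * Emit an absolute jump or call to DEST for targets that a pc-relative
 * CALL cannot reach: load all but the low 12 bits into T1, then JMPL with
 * the remainder as the immediate.  For a tail call the return address is
 * discarded into %g0.
 */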
904static void tcg_out_jmpl_const(TCGContext *s, const tcg_insn_unit *dest,
905                               bool in_prologue, bool tail_call)
906{
907    uintptr_t desti = (uintptr_t)dest;
908
909    tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_REG_T1,
910                     desti & ~0xfff, in_prologue, TCG_REG_T2);
911    tcg_out_arithi(s, tail_call ? TCG_REG_G0 : TCG_REG_O7,
912                   TCG_REG_T1, desti & 0xfff, JMPL);
913}
914
915static void tcg_out_call_nodelay(TCGContext *s, const tcg_insn_unit *dest,
916                                 bool in_prologue)
917{
918    ptrdiff_t disp = tcg_pcrel_diff(s, dest);
919
920    if (disp == (int32_t)disp) {
921        tcg_out32(s, CALL | (uint32_t)disp >> 2);
922    } else {
923        tcg_out_jmpl_const(s, dest, in_prologue, false);
924    }
925}
926
927static void tcg_out_call(TCGContext *s, const tcg_insn_unit *dest,
928                         const TCGHelperInfo *info)
929{
930    tcg_out_call_nodelay(s, dest, false);
931    tcg_out_nop(s);
932}
933
934static void tcg_out_mb(TCGContext *s, TCGArg a0)
935{
936    /* Note that the TCG memory order constants mirror the Sparc MEMBAR.  */
937    tcg_out32(s, MEMBAR | (a0 & TCG_MO_ALL));
938}
939
940/* Generate global QEMU prologue and epilogue code */
941static void tcg_target_qemu_prologue(TCGContext *s)
942{
943    int tmp_buf_size, frame_size;
944
945    /*
946     * The TCG temp buffer is at the top of the frame, immediately
947     * below the frame pointer.  Use the logical (aligned) offset here;
948     * the stack bias is applied in temp_allocate_frame().
949     */
950    tmp_buf_size = CPU_TEMP_BUF_NLONGS * (int)sizeof(long);
951    tcg_set_frame(s, TCG_REG_I6, -tmp_buf_size, tmp_buf_size);
952
953    /*
954     * TCG_TARGET_CALL_STACK_OFFSET includes the stack bias, but is
955     * otherwise the minimal frame usable by callees.
956     */
957    frame_size = TCG_TARGET_CALL_STACK_OFFSET - TCG_TARGET_STACK_BIAS;
958    frame_size += TCG_STATIC_CALL_ARGS_SIZE + tmp_buf_size;
959    frame_size += TCG_TARGET_STACK_ALIGN - 1;
960    frame_size &= -TCG_TARGET_STACK_ALIGN;
961    tcg_out32(s, SAVE | INSN_RD(TCG_REG_O6) | INSN_RS1(TCG_REG_O6) |
962              INSN_IMM13(-frame_size));
963
964#ifndef CONFIG_SOFTMMU
965    if (guest_base != 0) {
966        tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG,
967                         guest_base, true, TCG_REG_T1);
968        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
969    }
970#endif
971
972    /* We choose TCG_REG_TB such that no move is required.  */
973    QEMU_BUILD_BUG_ON(TCG_REG_TB != TCG_REG_I1);
974    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TB);
975
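    /*
     * Jump to the TB start address, which arrives as the second call
     * argument and is %i1 after the SAVE above.
     */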
976    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I1, 0, JMPL);
977    /* delay slot */
978    tcg_out_nop(s);
979
980    /* Epilogue for goto_ptr.  */
981    tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
982    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
983    /* delay slot */
984    tcg_out_movi_s13(s, TCG_REG_O0, 0);
985}
986
987static void tcg_out_tb_start(TCGContext *s)
988{
989    /* nothing to do */
990}
991
992static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
993{
994    int i;
995    for (i = 0; i < count; ++i) {
996        p[i] = NOP;
997    }
998}
999
1000static const TCGLdstHelperParam ldst_helper_param = {
1001    .ntmp = 1, .tmp = { TCG_REG_T1 }
1002};
1003
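/*
 * The slow paths are reached from the forward branch emitted by
 * prepare_host_addr(); patch that branch to point here, call the helper,
 * and finally branch back to the fast-path continuation at lb->raddr
 * (an annulled always-taken branch, so no delay-slot insn is needed).
 */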
1004static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
1005{
1006    MemOp opc = get_memop(lb->oi);
1007    MemOp sgn;
1008
1009    if (!patch_reloc(lb->label_ptr[0], R_SPARC_WDISP19,
1010                     (intptr_t)tcg_splitwx_to_rx(s->code_ptr), 0)) {
1011        return false;
1012    }
1013
1014    /* Use inline tcg_out_ext32s; otherwise let the helper sign-extend. */
1015    sgn = (opc & MO_SIZE) < MO_32 ? MO_SIGN : 0;
1016
1017    tcg_out_ld_helper_args(s, lb, &ldst_helper_param);
1018    tcg_out_call(s, qemu_ld_helpers[opc & (MO_SIZE | sgn)], NULL);
1019    tcg_out_ld_helper_ret(s, lb, sgn, &ldst_helper_param);
1020
1021    tcg_out_bpcc0(s, COND_A, BPCC_A | BPCC_PT, 0);
1022    return patch_reloc(s->code_ptr - 1, R_SPARC_WDISP19,
1023                       (intptr_t)lb->raddr, 0);
1024}
1025
1026static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
1027{
1028    MemOp opc = get_memop(lb->oi);
1029
1030    if (!patch_reloc(lb->label_ptr[0], R_SPARC_WDISP19,
1031                     (intptr_t)tcg_splitwx_to_rx(s->code_ptr), 0)) {
1032        return false;
1033    }
1034
1035    tcg_out_st_helper_args(s, lb, &ldst_helper_param);
1036    tcg_out_call(s, qemu_st_helpers[opc & MO_SIZE], NULL);
1037
1038    tcg_out_bpcc0(s, COND_A, BPCC_A | BPCC_PT, 0);
1039    return patch_reloc(s->code_ptr - 1, R_SPARC_WDISP19,
1040                       (intptr_t)lb->raddr, 0);
1041}
1042
1043typedef struct {
1044    TCGReg base;
1045    TCGReg index;
1046    TCGAtomAlign aa;
1047} HostAddress;
1048
1049bool tcg_target_has_memory_bswap(MemOp memop)
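/*
 * Byte-swapped guest accesses are performed directly with the
 * ASI_PRIMARY_LITTLE load/store variants defined above, so every MO_BSWAP
 * combination is supported without extra swap code.
 */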
1050{
1051    return true;
1052}
1053
1054/* We expect to use a 13-bit negative offset from ENV.  */
1055#define MIN_TLB_MASK_TABLE_OFS  -(1 << 12)
1056
1057/*
1058 * For system-mode, perform the TLB load and compare.
1059 * For user-mode, perform any required alignment tests.
1060 * In both cases, return a TCGLabelQemuLdst structure if the slow path
1061 * is required and fill in @h with the host address for the fast path.
1062 */
1063static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
1064                                           TCGReg addr_reg, MemOpIdx oi,
1065                                           bool is_ld)
1066{
1067    TCGType addr_type = s->addr_type;
1068    TCGLabelQemuLdst *ldst = NULL;
1069    MemOp opc = get_memop(oi);
1070    MemOp s_bits = opc & MO_SIZE;
1071    unsigned a_mask;
1072
1073    /* We don't support unaligned accesses. */
1074    h->aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, false);
1075    h->aa.align = MAX(h->aa.align, s_bits);
1076    a_mask = (1u << h->aa.align) - 1;
1077
1078#ifdef CONFIG_SOFTMMU
1079    int mem_index = get_mmuidx(oi);
1080    int fast_off = tlb_mask_table_ofs(s, mem_index);
1081    int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
1082    int table_off = fast_off + offsetof(CPUTLBDescFast, table);
1083    int cmp_off = is_ld ? offsetof(CPUTLBEntry, addr_read)
1084                        : offsetof(CPUTLBEntry, addr_write);
1085    int add_off = offsetof(CPUTLBEntry, addend);
1086    int compare_mask;
1087    int cc;
1088
1089    /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx].  */
1090    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_T2, TCG_AREG0, mask_off);
1091    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_T3, TCG_AREG0, table_off);
1092
1093    /* Extract the page index, shifted into place for tlb index.  */
1094    tcg_out_arithi(s, TCG_REG_T1, addr_reg,
1095                   s->page_bits - CPU_TLB_ENTRY_BITS, SHIFT_SRL);
1096    tcg_out_arith(s, TCG_REG_T1, TCG_REG_T1, TCG_REG_T2, ARITH_AND);
1097
1098    /* Add the tlb_table pointer, creating the CPUTLBEntry address in T1.  */
1099    tcg_out_arith(s, TCG_REG_T1, TCG_REG_T1, TCG_REG_T3, ARITH_ADD);
1100
1101    /*
1102     * Load the tlb comparator and the addend.
1103     * Always load the entire 64-bit comparator for simplicity.
1104     * We will ignore the high bits via BPCC_ICC below.
1105     */
1106    tcg_out_ld(s, TCG_TYPE_I64, TCG_REG_T2, TCG_REG_T1, cmp_off);
1107    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_T1, TCG_REG_T1, add_off);
1108    h->base = TCG_REG_T1;
1109
1110    /* Mask out the page offset, except for the required alignment. */
1111    compare_mask = s->page_mask | a_mask;
1112    if (check_fit_tl(compare_mask, 13)) {
1113        tcg_out_arithi(s, TCG_REG_T3, addr_reg, compare_mask, ARITH_AND);
1114    } else {
1115        tcg_out_movi_s32(s, TCG_REG_T3, compare_mask);
1116        tcg_out_arith(s, TCG_REG_T3, addr_reg, TCG_REG_T3, ARITH_AND);
1117    }
1118    tcg_out_cmp(s, TCG_COND_NE, TCG_REG_T2, TCG_REG_T3, 0);
1119
1120    ldst = new_ldst_label(s);
1121    ldst->is_ld = is_ld;
1122    ldst->oi = oi;
1123    ldst->addrlo_reg = addr_reg;
1124    ldst->label_ptr[0] = s->code_ptr;
1125
1126    /* bne,pn %[xi]cc, label0 */
1127    cc = addr_type == TCG_TYPE_I32 ? BPCC_ICC : BPCC_XCC;
1128    tcg_out_bpcc0(s, COND_NE, BPCC_PN | cc, 0);
1129#else
1130    /*
1131     * If the size equals the required alignment, we can skip the test
1132     * and allow host SIGBUS to deliver SIGBUS to the guest.
1133     * Otherwise, test for at least natural alignment and defer
1134     * everything else to the helper functions.
1135     */
1136    if (s_bits != memop_alignment_bits(opc)) {
1137        tcg_debug_assert(check_fit_tl(a_mask, 13));
1138        tcg_out_arithi(s, TCG_REG_G0, addr_reg, a_mask, ARITH_ANDCC);
1139
1140        ldst = new_ldst_label(s);
1141        ldst->is_ld = is_ld;
1142        ldst->oi = oi;
1143        ldst->addrlo_reg = addr_reg;
1144        ldst->label_ptr[0] = s->code_ptr;
1145
1146        /* bne,pn %icc, label0 */
1147        tcg_out_bpcc0(s, COND_NE, BPCC_PN | BPCC_ICC, 0);
1148    }
1149    h->base = guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0;
1150#endif
1151
1152    /* If the guest address must be zero-extended, do so in the delay slot.  */
1153    if (addr_type == TCG_TYPE_I32) {
1154        tcg_out_ext32u(s, TCG_REG_T2, addr_reg);
1155        h->index = TCG_REG_T2;
1156    } else {
1157        if (ldst) {
1158            tcg_out_nop(s);
1159        }
1160        h->index = addr_reg;
1161    }
1162    return ldst;
1163}
1164
1165static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
1166                            MemOpIdx oi, TCGType data_type)
1167{
1168    static const int ld_opc[(MO_SSIZE | MO_BSWAP) + 1] = {
1169        [MO_UB]   = LDUB,
1170        [MO_SB]   = LDSB,
1171        [MO_UB | MO_LE] = LDUB,
1172        [MO_SB | MO_LE] = LDSB,
1173
1174        [MO_BEUW] = LDUH,
1175        [MO_BESW] = LDSH,
1176        [MO_BEUL] = LDUW,
1177        [MO_BESL] = LDSW,
1178        [MO_BEUQ] = LDX,
1179        [MO_BESQ] = LDX,
1180
1181        [MO_LEUW] = LDUH_LE,
1182        [MO_LESW] = LDSH_LE,
1183        [MO_LEUL] = LDUW_LE,
1184        [MO_LESL] = LDSW_LE,
1185        [MO_LEUQ] = LDX_LE,
1186        [MO_LESQ] = LDX_LE,
1187    };
1188
1189    TCGLabelQemuLdst *ldst;
1190    HostAddress h;
1191
1192    ldst = prepare_host_addr(s, &h, addr, oi, true);
1193
1194    tcg_out_ldst_rr(s, data, h.base, h.index,
1195                    ld_opc[get_memop(oi) & (MO_BSWAP | MO_SSIZE)]);
1196
1197    if (ldst) {
1198        ldst->type = data_type;
1199        ldst->datalo_reg = data;
1200        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
1201    }
1202}
1203
1204static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr,
1205                            MemOpIdx oi, TCGType data_type)
1206{
1207    static const int st_opc[(MO_SIZE | MO_BSWAP) + 1] = {
1208        [MO_UB]   = STB,
1209
1210        [MO_BEUW] = STH,
1211        [MO_BEUL] = STW,
1212        [MO_BEUQ] = STX,
1213
1214        [MO_LEUW] = STH_LE,
1215        [MO_LEUL] = STW_LE,
1216        [MO_LEUQ] = STX_LE,
1217    };
1218
1219    TCGLabelQemuLdst *ldst;
1220    HostAddress h;
1221
1222    ldst = prepare_host_addr(s, &h, addr, oi, false);
1223
1224    tcg_out_ldst_rr(s, data, h.base, h.index,
1225                    st_opc[get_memop(oi) & (MO_BSWAP | MO_SIZE)]);
1226
1227    if (ldst) {
1228        ldst->type = data_type;
1229        ldst->datalo_reg = data;
1230        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
1231    }
1232}
1233
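/*
 * Return directly to the code that called into the translated blocks,
 * using "return %i7 + 8" with the exit value placed in %o0 via the delay
 * slot.  RETURN restores the register window, which is why the delay-slot
 * instructions name %o0/%o1 rather than %i0/%i1.
 */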
1234static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
1235{
1236    if (check_fit_ptr(a0, 13)) {
1237        tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
1238        tcg_out_movi_s13(s, TCG_REG_O0, a0);
1239        return;
1240    } else {
1241        intptr_t tb_diff = tcg_tbrel_diff(s, (void *)a0);
1242        if (check_fit_ptr(tb_diff, 13)) {
1243            tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
1244            /* Note that TCG_REG_TB has been unwound to O1.  */
1245            tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O1, tb_diff, ARITH_ADD);
1246            return;
1247        }
1248    }
1249    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I0, a0 & ~0x3ff);
1250    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
1251    tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O0, a0 & 0x3ff, ARITH_OR);
1252}
1253
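/*
 * goto_tb loads the current jump target from the TB's jump-target slot and
 * branches to it indirectly, so no instruction patching is needed when a
 * TB is relinked; tb_target_set_jmp_target() below is therefore empty.
 */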
1254static void tcg_out_goto_tb(TCGContext *s, int which)
1255{
1256    ptrdiff_t off = tcg_tbrel_diff(s, (void *)get_jmp_target_addr(s, which));
1257
1258    /* Load link and indirect branch. */
1259    set_jmp_insn_offset(s, which);
1260    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TB, TCG_REG_TB, off);
1261    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_TB, 0, JMPL);
1262    /* delay slot */
1263    tcg_out_nop(s);
1264    set_jmp_reset_offset(s, which);
1265
1266    /*
1267     * For the unlinked path of goto_tb, we need to reset TCG_REG_TB
1268     * to the beginning of this TB.
1269     */
1270    off = -tcg_current_code_size(s);
1271    if (check_fit_i32(off, 13)) {
1272        tcg_out_arithi(s, TCG_REG_TB, TCG_REG_TB, off, ARITH_ADD);
1273    } else {
1274        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, off);
1275        tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB, TCG_REG_T1, ARITH_ADD);
1276    }
1277}
1278
1279void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
1280                              uintptr_t jmp_rx, uintptr_t jmp_rw)
1281{
1282}
1283
1284static void tcg_out_op(TCGContext *s, TCGOpcode opc,
1285                       const TCGArg args[TCG_MAX_OP_ARGS],
1286                       const int const_args[TCG_MAX_OP_ARGS])
1287{
1288    TCGArg a0, a1, a2;
1289    int c, c2;
1290
1291    /* Hoist the loads of the most common arguments.  */
1292    a0 = args[0];
1293    a1 = args[1];
1294    a2 = args[2];
1295    c2 = const_args[2];
1296
1297    switch (opc) {
1298    case INDEX_op_goto_ptr:
1299        tcg_out_arithi(s, TCG_REG_G0, a0, 0, JMPL);
1300        tcg_out_mov_delay(s, TCG_REG_TB, a0);
1301        break;
1302    case INDEX_op_br:
1303        tcg_out_bpcc(s, COND_A, BPCC_PT, arg_label(a0));
1304        tcg_out_nop(s);
1305        break;
1306
1307#define OP_32_64(x)                             \
1308        glue(glue(case INDEX_op_, x), _i32):    \
1309        glue(glue(case INDEX_op_, x), _i64)
1310
1311    OP_32_64(ld8u):
1312        tcg_out_ldst(s, a0, a1, a2, LDUB);
1313        break;
1314    OP_32_64(ld8s):
1315        tcg_out_ldst(s, a0, a1, a2, LDSB);
1316        break;
1317    OP_32_64(ld16u):
1318        tcg_out_ldst(s, a0, a1, a2, LDUH);
1319        break;
1320    OP_32_64(ld16s):
1321        tcg_out_ldst(s, a0, a1, a2, LDSH);
1322        break;
1323    case INDEX_op_ld_i32:
1324    case INDEX_op_ld32u_i64:
1325        tcg_out_ldst(s, a0, a1, a2, LDUW);
1326        break;
1327    OP_32_64(st8):
1328        tcg_out_ldst(s, a0, a1, a2, STB);
1329        break;
1330    OP_32_64(st16):
1331        tcg_out_ldst(s, a0, a1, a2, STH);
1332        break;
1333    case INDEX_op_st_i32:
1334    case INDEX_op_st32_i64:
1335        tcg_out_ldst(s, a0, a1, a2, STW);
1336        break;
1337    OP_32_64(add):
1338        c = ARITH_ADD;
1339        goto gen_arith;
1340    OP_32_64(sub):
1341        c = ARITH_SUB;
1342        goto gen_arith;
1343    OP_32_64(and):
1344        c = ARITH_AND;
1345        goto gen_arith;
1346    OP_32_64(andc):
1347        c = ARITH_ANDN;
1348        goto gen_arith;
1349    OP_32_64(or):
1350        c = ARITH_OR;
1351        goto gen_arith;
1352    OP_32_64(orc):
1353        c = ARITH_ORN;
1354        goto gen_arith;
1355    OP_32_64(xor):
1356        c = ARITH_XOR;
1357        goto gen_arith;
1358    case INDEX_op_shl_i32:
1359        c = SHIFT_SLL;
1360    do_shift32:
1361        /* Limit immediate shift count lest we create an illegal insn.  */
1362        tcg_out_arithc(s, a0, a1, a2 & 31, c2, c);
1363        break;
1364    case INDEX_op_shr_i32:
1365        c = SHIFT_SRL;
1366        goto do_shift32;
1367    case INDEX_op_sar_i32:
1368        c = SHIFT_SRA;
1369        goto do_shift32;
1370    case INDEX_op_mul_i32:
1371        c = ARITH_UMUL;
1372        goto gen_arith;
1373
1374    OP_32_64(neg):
1375        c = ARITH_SUB;
1376        goto gen_arith1;
1377    OP_32_64(not):
1378        c = ARITH_ORN;
1379        goto gen_arith1;
1380
1381    case INDEX_op_div_i32:
1382        tcg_out_div32(s, a0, a1, a2, c2, 0);
1383        break;
1384    case INDEX_op_divu_i32:
1385        tcg_out_div32(s, a0, a1, a2, c2, 1);
1386        break;
1387
1388    case INDEX_op_brcond_i32:
1389        tcg_out_brcond_i32(s, a2, a0, a1, const_args[1], arg_label(args[3]));
1390        break;
1391    case INDEX_op_setcond_i32:
1392        tcg_out_setcond_i32(s, args[3], a0, a1, a2, c2, false);
1393        break;
1394    case INDEX_op_negsetcond_i32:
1395        tcg_out_setcond_i32(s, args[3], a0, a1, a2, c2, true);
1396        break;
1397    case INDEX_op_movcond_i32:
1398        tcg_out_movcond_i32(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
1399        break;
1400
1401    case INDEX_op_add2_i32:
1402        tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3],
1403                            args[4], const_args[4], args[5], const_args[5],
1404                            ARITH_ADDCC, ARITH_ADDC);
1405        break;
1406    case INDEX_op_sub2_i32:
1407        tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3],
1408                            args[4], const_args[4], args[5], const_args[5],
1409                            ARITH_SUBCC, ARITH_SUBC);
1410        break;
1411    case INDEX_op_mulu2_i32:
1412        c = ARITH_UMUL;
1413        goto do_mul2;
1414    case INDEX_op_muls2_i32:
1415        c = ARITH_SMUL;
1416    do_mul2:
1417        /* The 32-bit multiply insns produce a full 64-bit result. */
1418        tcg_out_arithc(s, a0, a2, args[3], const_args[3], c);
1419        tcg_out_arithi(s, a1, a0, 32, SHIFT_SRLX);
1420        break;
1421
1422    case INDEX_op_qemu_ld_a32_i32:
1423    case INDEX_op_qemu_ld_a64_i32:
1424        tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32);
1425        break;
1426    case INDEX_op_qemu_ld_a32_i64:
1427    case INDEX_op_qemu_ld_a64_i64:
1428        tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64);
1429        break;
1430    case INDEX_op_qemu_st_a32_i32:
1431    case INDEX_op_qemu_st_a64_i32:
1432        tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32);
1433        break;
1434    case INDEX_op_qemu_st_a32_i64:
1435    case INDEX_op_qemu_st_a64_i64:
1436        tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64);
1437        break;
1438
1439    case INDEX_op_ld32s_i64:
1440        tcg_out_ldst(s, a0, a1, a2, LDSW);
1441        break;
1442    case INDEX_op_ld_i64:
1443        tcg_out_ldst(s, a0, a1, a2, LDX);
1444        break;
1445    case INDEX_op_st_i64:
1446        tcg_out_ldst(s, a0, a1, a2, STX);
1447        break;
1448    case INDEX_op_shl_i64:
1449        c = SHIFT_SLLX;
1450    do_shift64:
1451        /* Limit immediate shift count lest we create an illegal insn.  */
1452        tcg_out_arithc(s, a0, a1, a2 & 63, c2, c);
1453        break;
1454    case INDEX_op_shr_i64:
1455        c = SHIFT_SRLX;
1456        goto do_shift64;
1457    case INDEX_op_sar_i64:
1458        c = SHIFT_SRAX;
1459        goto do_shift64;
1460    case INDEX_op_mul_i64:
1461        c = ARITH_MULX;
1462        goto gen_arith;
1463    case INDEX_op_div_i64:
1464        c = ARITH_SDIVX;
1465        goto gen_arith;
1466    case INDEX_op_divu_i64:
1467        c = ARITH_UDIVX;
1468        goto gen_arith;
1469
1470    case INDEX_op_brcond_i64:
1471        tcg_out_brcond_i64(s, a2, a0, a1, const_args[1], arg_label(args[3]));
1472        break;
1473    case INDEX_op_setcond_i64:
1474        tcg_out_setcond_i64(s, args[3], a0, a1, a2, c2, false);
1475        break;
1476    case INDEX_op_negsetcond_i64:
1477        tcg_out_setcond_i64(s, args[3], a0, a1, a2, c2, true);
1478        break;
1479    case INDEX_op_movcond_i64:
1480        tcg_out_movcond_i64(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
1481        break;
1482    case INDEX_op_add2_i64:
1483        tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4],
1484                            const_args[4], args[5], const_args[5], false);
1485        break;
1486    case INDEX_op_sub2_i64:
1487        tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4],
1488                            const_args[4], args[5], const_args[5], true);
1489        break;
1490    case INDEX_op_muluh_i64:
1491        tcg_out_arith(s, args[0], args[1], args[2], ARITH_UMULXHI);
1492        break;
1493
1494    gen_arith:
1495        tcg_out_arithc(s, a0, a1, a2, c2, c);
1496        break;
1497
1498    gen_arith1:
1499        tcg_out_arithc(s, a0, TCG_REG_G0, a1, const_args[1], c);
1500        break;
1501
1502    case INDEX_op_mb:
1503        tcg_out_mb(s, a0);
1504        break;
1505
1506    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
1507    case INDEX_op_mov_i64:
1508    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
1509    case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb.  */
1510    case INDEX_op_goto_tb:  /* Always emitted via tcg_out_goto_tb.  */
1511    case INDEX_op_ext8s_i32:  /* Always emitted via tcg_reg_alloc_op.  */
1512    case INDEX_op_ext8s_i64:
1513    case INDEX_op_ext8u_i32:
1514    case INDEX_op_ext8u_i64:
1515    case INDEX_op_ext16s_i32:
1516    case INDEX_op_ext16s_i64:
1517    case INDEX_op_ext16u_i32:
1518    case INDEX_op_ext16u_i64:
1519    case INDEX_op_ext32s_i64:
1520    case INDEX_op_ext32u_i64:
1521    case INDEX_op_ext_i32_i64:
1522    case INDEX_op_extu_i32_i64:
1523    default:
1524        g_assert_not_reached();
1525    }
1526}
1527
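/*
 * Constraint letters used below (defined in the target's
 * tcg-target-con-str.h): 'r' is any general register, 'Z' allows the
 * constant zero (for which %g0 stands in), 'J' a signed 13-bit immediate
 * and 'I' a signed 11-bit immediate for MOVcc.
 */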
1528static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
1529{
1530    switch (op) {
1531    case INDEX_op_goto_ptr:
1532        return C_O0_I1(r);
1533
1534    case INDEX_op_ld8u_i32:
1535    case INDEX_op_ld8u_i64:
1536    case INDEX_op_ld8s_i32:
1537    case INDEX_op_ld8s_i64:
1538    case INDEX_op_ld16u_i32:
1539    case INDEX_op_ld16u_i64:
1540    case INDEX_op_ld16s_i32:
1541    case INDEX_op_ld16s_i64:
1542    case INDEX_op_ld_i32:
1543    case INDEX_op_ld32u_i64:
1544    case INDEX_op_ld32s_i64:
1545    case INDEX_op_ld_i64:
1546    case INDEX_op_neg_i32:
1547    case INDEX_op_neg_i64:
1548    case INDEX_op_not_i32:
1549    case INDEX_op_not_i64:
1550    case INDEX_op_ext32s_i64:
1551    case INDEX_op_ext32u_i64:
1552    case INDEX_op_ext_i32_i64:
1553    case INDEX_op_extu_i32_i64:
1554    case INDEX_op_qemu_ld_a32_i32:
1555    case INDEX_op_qemu_ld_a64_i32:
1556    case INDEX_op_qemu_ld_a32_i64:
1557    case INDEX_op_qemu_ld_a64_i64:
1558        return C_O1_I1(r, r);
1559
1560    case INDEX_op_st8_i32:
1561    case INDEX_op_st8_i64:
1562    case INDEX_op_st16_i32:
1563    case INDEX_op_st16_i64:
1564    case INDEX_op_st_i32:
1565    case INDEX_op_st32_i64:
1566    case INDEX_op_st_i64:
1567    case INDEX_op_qemu_st_a32_i32:
1568    case INDEX_op_qemu_st_a64_i32:
1569    case INDEX_op_qemu_st_a32_i64:
1570    case INDEX_op_qemu_st_a64_i64:
1571        return C_O0_I2(rZ, r);
1572
1573    case INDEX_op_add_i32:
1574    case INDEX_op_add_i64:
1575    case INDEX_op_mul_i32:
1576    case INDEX_op_mul_i64:
1577    case INDEX_op_div_i32:
1578    case INDEX_op_div_i64:
1579    case INDEX_op_divu_i32:
1580    case INDEX_op_divu_i64:
1581    case INDEX_op_sub_i32:
1582    case INDEX_op_sub_i64:
1583    case INDEX_op_and_i32:
1584    case INDEX_op_and_i64:
1585    case INDEX_op_andc_i32:
1586    case INDEX_op_andc_i64:
1587    case INDEX_op_or_i32:
1588    case INDEX_op_or_i64:
1589    case INDEX_op_orc_i32:
1590    case INDEX_op_orc_i64:
1591    case INDEX_op_xor_i32:
1592    case INDEX_op_xor_i64:
1593    case INDEX_op_shl_i32:
1594    case INDEX_op_shl_i64:
1595    case INDEX_op_shr_i32:
1596    case INDEX_op_shr_i64:
1597    case INDEX_op_sar_i32:
1598    case INDEX_op_sar_i64:
1599    case INDEX_op_setcond_i32:
1600    case INDEX_op_setcond_i64:
1601    case INDEX_op_negsetcond_i32:
1602    case INDEX_op_negsetcond_i64:
1603        return C_O1_I2(r, rZ, rJ);
1604
1605    case INDEX_op_brcond_i32:
1606    case INDEX_op_brcond_i64:
1607        return C_O0_I2(rZ, rJ);
1608    case INDEX_op_movcond_i32:
1609    case INDEX_op_movcond_i64:
1610        return C_O1_I4(r, rZ, rJ, rI, 0);
1611    case INDEX_op_add2_i32:
1612    case INDEX_op_add2_i64:
1613    case INDEX_op_sub2_i32:
1614    case INDEX_op_sub2_i64:
1615        return C_O2_I4(r, r, rZ, rZ, rJ, rJ);
1616    case INDEX_op_mulu2_i32:
1617    case INDEX_op_muls2_i32:
1618        return C_O2_I2(r, r, rZ, rJ);
1619    case INDEX_op_muluh_i64:
1620        return C_O1_I2(r, r, r);
1621
1622    default:
1623        g_assert_not_reached();
1624    }
1625}
1626
1627static void tcg_target_init(TCGContext *s)
1628{
1629    /*
1630     * Only probe for the platform and capabilities if we haven't already
1631     * determined maximum values at compile time.
1632     */
1633#ifndef use_vis3_instructions
1634    {
1635        unsigned long hwcap = qemu_getauxval(AT_HWCAP);
1636        use_vis3_instructions = (hwcap & HWCAP_SPARC_VIS3) != 0;
1637    }
1638#endif
1639
1640    tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS;
1641    tcg_target_available_regs[TCG_TYPE_I64] = ALL_GENERAL_REGS;
1642
1643    tcg_target_call_clobber_regs = 0;
1644    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G1);
1645    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G2);
1646    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G3);
1647    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G4);
1648    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G5);
1649    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G6);
1650    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G7);
1651    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O0);
1652    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O1);
1653    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O2);
1654    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O3);
1655    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O4);
1656    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O5);
1657    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O6);
1658    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O7);
1659
1660    s->reserved_regs = 0;
1661    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G0); /* zero */
1662    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G6); /* reserved for os */
1663    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G7); /* thread pointer */
1664    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I6); /* frame pointer */
1665    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I7); /* return address */
1666    tcg_regset_set_reg(s->reserved_regs, TCG_REG_O6); /* stack pointer */
1667    tcg_regset_set_reg(s->reserved_regs, TCG_REG_T1); /* for internal use */
1668    tcg_regset_set_reg(s->reserved_regs, TCG_REG_T2); /* for internal use */
1669    tcg_regset_set_reg(s->reserved_regs, TCG_REG_T3); /* for internal use */
1670}
1671
1672#define ELF_HOST_MACHINE  EM_SPARCV9
1673
1674typedef struct {
1675    DebugFrameHeader h;
1676    uint8_t fde_def_cfa[4];
1677    uint8_t fde_win_save;
1678    uint8_t fde_ret_save[3];
1679} DebugFrame;
1680
1681static const DebugFrame debug_frame = {
1682    .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
1683    .h.cie.id = -1,
1684    .h.cie.version = 1,
1685    .h.cie.code_align = 1,
1686    .h.cie.data_align = -sizeof(void *) & 0x7f,
1687    .h.cie.return_column = 15,            /* o7 */
1688
1689    /* Total FDE size does not include the "len" member.  */
1690    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),
1691
1692    .fde_def_cfa = {
1693        12, 30,                         /* DW_CFA_def_cfa i6, 2047 */
1694        (2047 & 0x7f) | 0x80, (2047 >> 7)
1695    },
1696    .fde_win_save = 0x2d,               /* DW_CFA_GNU_window_save */
1697    .fde_ret_save = { 9, 15, 31 },      /* DW_CFA_register o7, i7 */
1698};
1699
1700void tcg_register_jit(const void *buf, size_t buf_size)
1701{
1702    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
1703}
1704