/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2009, 2011 Stefan Weil
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* Used for function call generation. */
#define TCG_TARGET_CALL_STACK_OFFSET    0
#define TCG_TARGET_STACK_ALIGN          8
#if TCG_TARGET_REG_BITS == 32
# define TCG_TARGET_CALL_ARG_I32        TCG_CALL_ARG_EVEN
# define TCG_TARGET_CALL_ARG_I64        TCG_CALL_ARG_EVEN
# define TCG_TARGET_CALL_ARG_I128       TCG_CALL_ARG_EVEN
#else
# define TCG_TARGET_CALL_ARG_I32        TCG_CALL_ARG_NORMAL
# define TCG_TARGET_CALL_ARG_I64        TCG_CALL_ARG_NORMAL
# define TCG_TARGET_CALL_ARG_I128       TCG_CALL_ARG_NORMAL
#endif
#define TCG_TARGET_CALL_RET_I128        TCG_CALL_RET_NORMAL
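
/*
 * Note: TCG_CALL_ARG_EVEN starts every argument on an even stack slot,
 * so a 32-bit host keeps each argument 8-byte aligned; e.g. an i64
 * passed after a single i32 leaves one slot of padding.
 */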

static TCGConstraintSetIndex
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
{
    switch (op) {
    case INDEX_op_goto_ptr:
        return C_O0_I1(r);

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap16_i64:
    case INDEX_op_bswap32_i32:
    case INDEX_op_bswap32_i64:
    case INDEX_op_bswap64_i64:
    case INDEX_op_extract_i32:
    case INDEX_op_extract_i64:
    case INDEX_op_sextract_i32:
    case INDEX_op_sextract_i64:
    case INDEX_op_ctpop_i32:
    case INDEX_op_ctpop_i64:
        return C_O1_I1(r, r);

    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
        return C_O0_I2(r, r);

    case INDEX_op_div_i32:
    case INDEX_op_div_i64:
    case INDEX_op_divu_i32:
    case INDEX_op_divu_i64:
    case INDEX_op_rem_i32:
    case INDEX_op_rem_i64:
    case INDEX_op_remu_i32:
    case INDEX_op_remu_i64:
    case INDEX_op_shl_i32:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i32:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i32:
    case INDEX_op_sar_i64:
    case INDEX_op_rotl_i32:
    case INDEX_op_rotl_i64:
    case INDEX_op_rotr_i32:
    case INDEX_op_rotr_i64:
    case INDEX_op_setcond_i32:
    case INDEX_op_setcond_i64:
    case INDEX_op_deposit_i32:
    case INDEX_op_deposit_i64:
    case INDEX_op_clz_i32:
    case INDEX_op_clz_i64:
    case INDEX_op_ctz_i32:
    case INDEX_op_ctz_i64:
        return C_O1_I2(r, r, r);

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        return C_O0_I2(r, r);

    case INDEX_op_add2_i32:
    case INDEX_op_add2_i64:
    case INDEX_op_sub2_i32:
    case INDEX_op_sub2_i64:
        return C_O2_I4(r, r, r, r, r, r);

#if TCG_TARGET_REG_BITS == 32
    case INDEX_op_brcond2_i32:
        return C_O0_I4(r, r, r, r);
#endif

    case INDEX_op_mulu2_i32:
    case INDEX_op_mulu2_i64:
    case INDEX_op_muls2_i32:
    case INDEX_op_muls2_i64:
        return C_O2_I2(r, r, r, r);

    case INDEX_op_movcond_i32:
    case INDEX_op_movcond_i64:
    case INDEX_op_setcond2_i32:
        return C_O1_I4(r, r, r, r, r);

    case INDEX_op_qemu_ld_i32:
        return C_O1_I1(r, r);
    case INDEX_op_qemu_ld_i64:
        return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O2_I1(r, r, r);
    case INDEX_op_qemu_st_i32:
        return C_O0_I2(r, r);
    case INDEX_op_qemu_st_i64:
        return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r) : C_O0_I3(r, r, r);

    default:
        return C_NotImplemented;
    }
}

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R12,
    TCG_REG_R13,
    TCG_REG_R14,
    TCG_REG_R15,
    /* Either 2 or 4 of these are call clobbered, so use them last. */
    TCG_REG_R3,
    TCG_REG_R2,
    TCG_REG_R1,
    TCG_REG_R0,
};

/* No call arguments via registers.  All will be stored on the "stack". */
static const int tcg_target_call_iarg_regs[] = { };

static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
{
    tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
    tcg_debug_assert(slot >= 0 && slot < 128 / TCG_TARGET_REG_BITS);
    return TCG_REG_R0 + slot;
}
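
/*
 * For illustration: a 128-bit return value occupies R0/R1 on a 64-bit
 * host and R0..R3 on a 32-bit host, matching the call-clobber set
 * computed in tcg_target_init() below.
 */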

#ifdef CONFIG_DEBUG_TCG
static const char *const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "r00",
    "r01",
    "r02",
    "r03",
    "r04",
    "r05",
    "r06",
    "r07",
    "r08",
    "r09",
    "r10",
    "r11",
    "r12",
    "r13",
    "r14",
    "r15",
};
#endif

static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    intptr_t diff = value - (intptr_t)(code_ptr + 1);

    tcg_debug_assert(addend == 0);
    tcg_debug_assert(type == 20);

    if (diff == sextract32(diff, 0, type)) {
        tcg_patch32(code_ptr, deposit32(*code_ptr, 32 - type, type, diff));
        return true;
    }
    return false;
}
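
/*
 * Example: a branch whose target lies 64 bytes past the end of this
 * insn has diff == 64, which fits in 20 signed bits and is deposited
 * into bits [31:12]; a target beyond +/-512KiB does not fit, and
 * patch_reloc reports failure.
 */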

static void stack_bounds_check(TCGReg base, intptr_t offset)
{
    if (base == TCG_REG_CALL_STACK) {
        tcg_debug_assert(offset >= 0);
        tcg_debug_assert(offset < (TCG_STATIC_CALL_ARGS_SIZE +
                                   TCG_STATIC_FRAME_SIZE));
    }
}

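/*
 * All insns are 32 bits wide.  The opcode lives in bits [7:0]; register
 * operands are packed as 4-bit fields from bit 8 upward, and immediates
 * use the remaining high bits: 20-bit signed values in [31:12], 16-bit
 * values in [31:16], and bitfield position/length pairs as two 6-bit
 * fields.  See the individual emitters below.
 */
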
static void tcg_out_op_l(TCGContext *s, TCGOpcode op, TCGLabel *l0)
{
    tcg_insn_unit insn = 0;

    tcg_out_reloc(s, s->code_ptr, 20, l0, 0);
    insn = deposit32(insn, 0, 8, op);
    tcg_out32(s, insn);
}

static void tcg_out_op_p(TCGContext *s, TCGOpcode op, void *p0)
{
    tcg_insn_unit insn = 0;
    intptr_t diff;

    /* Special case for exit_tb: map null -> 0. */
    if (p0 == NULL) {
        diff = 0;
    } else {
        diff = p0 - (void *)(s->code_ptr + 1);
        tcg_debug_assert(diff != 0);
        if (diff != sextract32(diff, 0, 20)) {
            tcg_raise_tb_overflow(s);
        }
    }
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 12, 20, diff);
    tcg_out32(s, insn);
}

static void tcg_out_op_r(TCGContext *s, TCGOpcode op, TCGReg r0)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    tcg_out32(s, insn);
}

static void tcg_out_op_v(TCGContext *s, TCGOpcode op)
{
    tcg_out32(s, (uint8_t)op);
}

static void tcg_out_op_ri(TCGContext *s, TCGOpcode op, TCGReg r0, int32_t i1)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(i1 == sextract32(i1, 0, 20));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 20, i1);
    tcg_out32(s, insn);
}

static void tcg_out_op_rl(TCGContext *s, TCGOpcode op, TCGReg r0, TCGLabel *l1)
{
    tcg_insn_unit insn = 0;

    tcg_out_reloc(s, s->code_ptr, 20, l1, 0);
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    tcg_out32(s, insn);
}

static void tcg_out_op_rr(TCGContext *s, TCGOpcode op, TCGReg r0, TCGReg r1)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrm(TCGContext *s, TCGOpcode op,
                           TCGReg r0, TCGReg r1, TCGArg m2)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(m2 == extract32(m2, 0, 16));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 16, m2);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrr(TCGContext *s, TCGOpcode op,
                           TCGReg r0, TCGReg r1, TCGReg r2)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrs(TCGContext *s, TCGOpcode op,
                           TCGReg r0, TCGReg r1, intptr_t i2)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(i2 == sextract32(i2, 0, 16));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 16, i2);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrbb(TCGContext *s, TCGOpcode op, TCGReg r0,
                            TCGReg r1, uint8_t b2, uint8_t b3)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(b2 == extract32(b2, 0, 6));
    tcg_debug_assert(b3 == extract32(b3, 0, 6));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 6, b2);
    insn = deposit32(insn, 22, 6, b3);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrrc(TCGContext *s, TCGOpcode op,
                            TCGReg r0, TCGReg r1, TCGReg r2, TCGCond c3)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 4, c3);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrrbb(TCGContext *s, TCGOpcode op, TCGReg r0,
                             TCGReg r1, TCGReg r2, uint8_t b3, uint8_t b4)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(b3 == extract32(b3, 0, 6));
    tcg_debug_assert(b4 == extract32(b4, 0, 6));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 6, b3);
    insn = deposit32(insn, 26, 6, b4);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrrr(TCGContext *s, TCGOpcode op,
                            TCGReg r0, TCGReg r1, TCGReg r2, TCGReg r3)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 4, r3);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrrrrc(TCGContext *s, TCGOpcode op,
                              TCGReg r0, TCGReg r1, TCGReg r2,
                              TCGReg r3, TCGReg r4, TCGCond c5)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 4, r3);
    insn = deposit32(insn, 24, 4, r4);
    insn = deposit32(insn, 28, 4, c5);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrrrrr(TCGContext *s, TCGOpcode op,
                              TCGReg r0, TCGReg r1, TCGReg r2,
                              TCGReg r3, TCGReg r4, TCGReg r5)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 4, r3);
    insn = deposit32(insn, 24, 4, r4);
    insn = deposit32(insn, 28, 4, r5);
    tcg_out32(s, insn);
}

static void tcg_out_ldst(TCGContext *s, TCGOpcode op, TCGReg val,
                         TCGReg base, intptr_t offset)
{
    stack_bounds_check(base, offset);
    if (offset != sextract32(offset, 0, 16)) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, offset);
        tcg_out_op_rrr(s, INDEX_op_add, TCG_REG_TMP, TCG_REG_TMP, base);
        base = TCG_REG_TMP;
        offset = 0;
    }
    tcg_out_op_rrs(s, op, val, base, offset);
}

static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg val, TCGReg base,
                       intptr_t offset)
{
    switch (type) {
    case TCG_TYPE_I32:
        tcg_out_ldst(s, INDEX_op_ld_i32, val, base, offset);
        break;
#if TCG_TARGET_REG_BITS == 64
    case TCG_TYPE_I64:
        tcg_out_ldst(s, INDEX_op_ld_i64, val, base, offset);
        break;
#endif
    default:
        g_assert_not_reached();
    }
}

static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    tcg_out_op_rr(s, INDEX_op_mov, ret, arg);
    return true;
}

static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg)
{
    switch (type) {
    case TCG_TYPE_I32:
#if TCG_TARGET_REG_BITS == 64
        arg = (int32_t)arg;
        /* fall through */
    case TCG_TYPE_I64:
#endif
        break;
    default:
        g_assert_not_reached();
    }

    if (arg == sextract32(arg, 0, 20)) {
        tcg_out_op_ri(s, INDEX_op_tci_movi, ret, arg);
    } else {
        tcg_insn_unit insn = 0;

        new_pool_label(s, arg, 20, s->code_ptr, 0);
        insn = deposit32(insn, 0, 8, INDEX_op_tci_movl);
        insn = deposit32(insn, 8, 4, ret);
        tcg_out32(s, insn);
    }
}
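
/*
 * Example: arg == 0x12345 (74565) fits in 20 signed bits and becomes a
 * single tci_movi; arg == 0x123456 does not, so it is placed in the
 * constant pool and loaded with tci_movl via the same 20-bit
 * relocation that patch_reloc resolves.
 */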

static void tcg_out_extract(TCGContext *s, TCGType type, TCGReg rd,
                            TCGReg rs, unsigned pos, unsigned len)
{
    TCGOpcode opc = type == TCG_TYPE_I32 ?
                    INDEX_op_extract_i32 :
                    INDEX_op_extract_i64;
    tcg_out_op_rrbb(s, opc, rd, rs, pos, len);
}

static void tcg_out_sextract(TCGContext *s, TCGType type, TCGReg rd,
                             TCGReg rs, unsigned pos, unsigned len)
{
    TCGOpcode opc = type == TCG_TYPE_I32 ?
                    INDEX_op_sextract_i32 :
                    INDEX_op_sextract_i64;
    tcg_out_op_rrbb(s, opc, rd, rs, pos, len);
}

static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
{
    tcg_out_sextract(s, type, rd, rs, 0, 8);
}

static void tcg_out_ext8u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_extract(s, TCG_TYPE_REG, rd, rs, 0, 8);
}

static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
{
    tcg_out_sextract(s, type, rd, rs, 0, 16);
}

static void tcg_out_ext16u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_extract(s, TCG_TYPE_REG, rd, rs, 0, 16);
}

static void tcg_out_ext32s(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
    tcg_out_sextract(s, TCG_TYPE_I64, rd, rs, 0, 32);
}

static void tcg_out_ext32u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
    tcg_out_extract(s, TCG_TYPE_I64, rd, rs, 0, 32);
}

static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_ext32s(s, rd, rs);
}

static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_ext32u(s, rd, rs);
}

static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
    tcg_out_mov(s, TCG_TYPE_I32, rd, rs);
}

static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
{
    return false;
}
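
/* TCI has no register-to-register swap insn; report failure and let
   the common code cope. */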

static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                             tcg_target_long imm)
{
    /* This function is only used for passing structs by reference. */
    g_assert_not_reached();
}

static void tcg_out_call(TCGContext *s, const tcg_insn_unit *func,
                         const TCGHelperInfo *info)
{
    ffi_cif *cif = info->cif;
    tcg_insn_unit insn = 0;
    uint8_t which;

    if (cif->rtype == &ffi_type_void) {
        which = 0;
    } else {
        tcg_debug_assert(cif->rtype->size == 4 ||
                         cif->rtype->size == 8 ||
                         cif->rtype->size == 16);
        which = ctz32(cif->rtype->size) - 1;
    }
    new_pool_l2(s, 20, s->code_ptr, 0, (uintptr_t)func, (uintptr_t)cif);
    insn = deposit32(insn, 0, 8, INDEX_op_call);
    insn = deposit32(insn, 8, 4, which);
    tcg_out32(s, insn);
}
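
/*
 * The 'which' nibble encodes the helper's return-value size for the
 * interpreter: 0 for void, then 1/2/3 for 4/8/16 bytes (ctz32(4) - 1
 * == 1, and so on).  The function pointer and its ffi_cif are emitted
 * into the constant pool as a pair, found via the usual 20-bit reloc.
 */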

#if TCG_TARGET_REG_BITS == 64
# define CASE_32_64(x) \
        case glue(glue(INDEX_op_, x), _i64): \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x) \
        case glue(glue(INDEX_op_, x), _i64):
#else
# define CASE_32_64(x) \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x)
#endif
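
/*
 * For example, on a 64-bit host CASE_32_64(ld8u) expands to
 * "case INDEX_op_ld8u_i64: case INDEX_op_ld8u_i32:" and CASE_64(ld)
 * to "case INDEX_op_ld_i64:"; on a 32-bit host the _i64 cases vanish.
 */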

static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg)
{
    tcg_out_op_p(s, INDEX_op_exit_tb, (void *)arg);
}

static void tcg_out_goto_tb(TCGContext *s, int which)
{
    /* Indirect jump method. */
    tcg_out_op_p(s, INDEX_op_goto_tb, (void *)get_jmp_target_addr(s, which));
    set_jmp_reset_offset(s, which);
}

void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
                              uintptr_t jmp_rx, uintptr_t jmp_rw)
{
    /* Always indirect, nothing to do. */
}

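/*
 * Binary and unary operations.  TCI takes every operand in a register,
 * so each descriptor below pairs the same all-register constraint with
 * a single three- or two-register emitter.
 */
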
static void tgen_add(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_add, a0, a1, a2);
}

static const TCGOutOpBinary outop_add = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_add,
};

static void tgen_and(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_and, a0, a1, a2);
}

static const TCGOutOpBinary outop_and = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_and,
};

static void tgen_andc(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_andc, a0, a1, a2);
}

static const TCGOutOpBinary outop_andc = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_andc,
};

static void tgen_eqv(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_eqv, a0, a1, a2);
}

static const TCGOutOpBinary outop_eqv = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_eqv,
};

static void tgen_mul(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_mul, a0, a1, a2);
}

static const TCGOutOpBinary outop_mul = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_mul,
};

static void tgen_nand(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_nand, a0, a1, a2);
}

static const TCGOutOpBinary outop_nand = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_nand,
};

static void tgen_nor(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_nor, a0, a1, a2);
}

static const TCGOutOpBinary outop_nor = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_nor,
};

static void tgen_or(TCGContext *s, TCGType type,
                    TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_or, a0, a1, a2);
}

static const TCGOutOpBinary outop_or = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_or,
};

static void tgen_orc(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_orc, a0, a1, a2);
}

static const TCGOutOpBinary outop_orc = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_orc,
};

static void tgen_sub(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_sub, a0, a1, a2);
}

static const TCGOutOpSubtract outop_sub = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_sub,
};

static void tgen_xor(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_xor, a0, a1, a2);
}

static const TCGOutOpBinary outop_xor = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_xor,
};

static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
    tcg_out_op_rr(s, INDEX_op_neg, a0, a1);
}

static const TCGOutOpUnary outop_neg = {
    .base.static_constraint = C_O1_I1(r, r),
    .out_rr = tgen_neg,
};

static void tgen_not(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
    tcg_out_op_rr(s, INDEX_op_not, a0, a1);
}

static const TCGOutOpUnary outop_not = {
    .base.static_constraint = C_O1_I1(r, r),
    .out_rr = tgen_not,
};

static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS])
{
    int width;

    switch (opc) {
    case INDEX_op_goto_ptr:
        tcg_out_op_r(s, opc, args[0]);
        break;

    case INDEX_op_br:
        tcg_out_op_l(s, opc, arg_label(args[0]));
        break;

    CASE_32_64(setcond)
        tcg_out_op_rrrc(s, opc, args[0], args[1], args[2], args[3]);
        break;

    CASE_32_64(movcond)
    case INDEX_op_setcond2_i32:
        tcg_out_op_rrrrrc(s, opc, args[0], args[1], args[2],
                          args[3], args[4], args[5]);
        break;

    CASE_32_64(ld8u)
    CASE_32_64(ld8s)
    CASE_32_64(ld16u)
    CASE_32_64(ld16s)
    case INDEX_op_ld_i32:
    CASE_64(ld32u)
    CASE_64(ld32s)
    CASE_64(ld)
    CASE_32_64(st8)
    CASE_32_64(st16)
    case INDEX_op_st_i32:
    CASE_64(st32)
    CASE_64(st)
        tcg_out_ldst(s, opc, args[0], args[1], args[2]);
        break;

    CASE_32_64(shl)
    CASE_32_64(shr)
    CASE_32_64(sar)
    CASE_32_64(rotl)     /* Optional (TCG_TARGET_HAS_rot_*). */
    CASE_32_64(rotr)     /* Optional (TCG_TARGET_HAS_rot_*). */
    CASE_32_64(div)      /* Optional (TCG_TARGET_HAS_div_*). */
    CASE_32_64(divu)     /* Optional (TCG_TARGET_HAS_div_*). */
    CASE_32_64(rem)      /* Optional (TCG_TARGET_HAS_div_*). */
    CASE_32_64(remu)     /* Optional (TCG_TARGET_HAS_div_*). */
    CASE_32_64(clz)      /* Optional (TCG_TARGET_HAS_clz_*). */
    CASE_32_64(ctz)      /* Optional (TCG_TARGET_HAS_ctz_*). */
        tcg_out_op_rrr(s, opc, args[0], args[1], args[2]);
        break;

    CASE_32_64(deposit)
        tcg_out_op_rrrbb(s, opc, args[0], args[1], args[2], args[3], args[4]);
        break;

    CASE_32_64(extract)  /* Optional (TCG_TARGET_HAS_extract_*). */
    CASE_32_64(sextract) /* Optional (TCG_TARGET_HAS_sextract_*). */
        tcg_out_op_rrbb(s, opc, args[0], args[1], args[2], args[3]);
        break;

    CASE_32_64(brcond)
        tcg_out_op_rrrc(s, (opc == INDEX_op_brcond_i32
                            ? INDEX_op_setcond_i32 : INDEX_op_setcond_i64),
                        TCG_REG_TMP, args[0], args[1], args[2]);
        tcg_out_op_rl(s, opc, TCG_REG_TMP, arg_label(args[3]));
        break;

    CASE_32_64(ctpop)    /* Optional (TCG_TARGET_HAS_ctpop_*). */
    case INDEX_op_bswap32_i32: /* Optional (TCG_TARGET_HAS_bswap32_i32). */
    case INDEX_op_bswap64_i64: /* Optional (TCG_TARGET_HAS_bswap64_i64). */
        tcg_out_op_rr(s, opc, args[0], args[1]);
        break;

    case INDEX_op_bswap16_i32: /* Optional (TCG_TARGET_HAS_bswap16_i32). */
    case INDEX_op_bswap16_i64: /* Optional (TCG_TARGET_HAS_bswap16_i64). */
        width = 16;
        goto do_bswap;
    case INDEX_op_bswap32_i64: /* Optional (TCG_TARGET_HAS_bswap32_i64). */
        width = 32;
    do_bswap:
        /* The base tci bswaps zero-extend and ignore high bits. */
        tcg_out_op_rr(s, opc, args[0], args[1]);
        if (args[2] & TCG_BSWAP_OS) {
            tcg_out_sextract(s, TCG_TYPE_REG, args[0], args[0], 0, width);
        }
        break;

    CASE_32_64(add2)
    CASE_32_64(sub2)
        tcg_out_op_rrrrrr(s, opc, args[0], args[1], args[2],
                          args[3], args[4], args[5]);
        break;

#if TCG_TARGET_REG_BITS == 32
    case INDEX_op_brcond2_i32:
        tcg_out_op_rrrrrc(s, INDEX_op_setcond2_i32, TCG_REG_TMP,
                          args[0], args[1], args[2], args[3], args[4]);
        tcg_out_op_rl(s, INDEX_op_brcond_i32, TCG_REG_TMP, arg_label(args[5]));
        break;
#endif

    CASE_32_64(mulu2)
    CASE_32_64(muls2)
        tcg_out_op_rrrr(s, opc, args[0], args[1], args[2], args[3]);
        break;

    case INDEX_op_qemu_ld_i64:
    case INDEX_op_qemu_st_i64:
        if (TCG_TARGET_REG_BITS == 32) {
            tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP, args[3]);
            tcg_out_op_rrrr(s, opc, args[0], args[1], args[2], TCG_REG_TMP);
            break;
        }
        /* fall through */
    case INDEX_op_qemu_ld_i32:
    case INDEX_op_qemu_st_i32:
        if (TCG_TARGET_REG_BITS == 64 && s->addr_type == TCG_TYPE_I32) {
            tcg_out_ext32u(s, TCG_REG_TMP, args[1]);
            tcg_out_op_rrm(s, opc, args[0], TCG_REG_TMP, args[2]);
        } else {
            tcg_out_op_rrm(s, opc, args[0], args[1], args[2]);
        }
        break;

    case INDEX_op_mb:
        tcg_out_op_v(s, opc);
        break;

    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
    case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb.  */
    case INDEX_op_goto_tb:  /* Always emitted via tcg_out_goto_tb.  */
    case INDEX_op_ext_i32_i64:  /* Always emitted via tcg_reg_alloc_op.  */
    case INDEX_op_extu_i32_i64:
    case INDEX_op_extrl_i64_i32:
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_st(TCGContext *s, TCGType type, TCGReg val, TCGReg base,
                       intptr_t offset)
{
    switch (type) {
    case TCG_TYPE_I32:
        tcg_out_ldst(s, INDEX_op_st_i32, val, base, offset);
        break;
#if TCG_TARGET_REG_BITS == 64
    case TCG_TYPE_I64:
        tcg_out_ldst(s, INDEX_op_st_i64, val, base, offset);
        break;
#endif
    default:
        g_assert_not_reached();
    }
}

static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                               TCGReg base, intptr_t ofs)
{
    return false;
}

/* Test if a constant matches the constraint. */
static bool tcg_target_const_match(int64_t val, int ct,
                                   TCGType type, TCGCond cond, int vece)
{
    return ct & TCG_CT_CONST;
}
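
/*
 * Only the generic TCG_CT_CONST constraint is recognized; since every
 * constraint set returned from tcg_target_op_def() uses only 'r',
 * operands are otherwise always forced into registers.
 */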

static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
    memset(p, 0, sizeof(*p) * count);
}

static void tcg_target_init(TCGContext *s)
{
    /* The current code uses uint8_t for tcg operations. */
    tcg_debug_assert(tcg_op_defs_max <= UINT8_MAX);

    /* Registers available for 32 bit operations. */
    tcg_target_available_regs[TCG_TYPE_I32] = BIT(TCG_TARGET_NB_REGS) - 1;
    /* Registers available for 64 bit operations. */
    tcg_target_available_regs[TCG_TYPE_I64] = BIT(TCG_TARGET_NB_REGS) - 1;
    /*
     * The interpreter "registers" are in the local stack frame and
     * cannot be clobbered by the called helper functions.  However,
     * the interpreter assumes a 128-bit return value and assigns to
     * the return value registers.
     */
    tcg_target_call_clobber_regs =
        MAKE_64BIT_MASK(TCG_REG_R0, 128 / TCG_TARGET_REG_BITS);

    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);

    /* The call arguments come first, followed by the temp storage. */
    tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE,
                  TCG_STATIC_FRAME_SIZE);
}

/* Generate global QEMU prologue and epilogue code. */
static inline void tcg_target_qemu_prologue(TCGContext *s)
{
}

static void tcg_out_tb_start(TCGContext *s)
{
    /* nothing to do */
}

bool tcg_target_has_memory_bswap(MemOp memop)
{
    return true;
}

static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    g_assert_not_reached();
}

static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    g_assert_not_reached();
}
989