xref: /openbmc/qemu/tcg/tci/tcg-target.c.inc (revision 3ad5d4ccb4bdebdff4e90957bb2b8a93e5e418e2)
1/*
2 * Tiny Code Generator for QEMU
3 *
4 * Copyright (c) 2009, 2011 Stefan Weil
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24
/* Used for function call generation. */
#define TCG_TARGET_CALL_STACK_OFFSET    0
#define TCG_TARGET_STACK_ALIGN          8
#if TCG_TARGET_REG_BITS == 32
/* On 32-bit hosts, wide arguments are passed in aligned (even) slot pairs. */
# define TCG_TARGET_CALL_ARG_I32        TCG_CALL_ARG_EVEN
# define TCG_TARGET_CALL_ARG_I64        TCG_CALL_ARG_EVEN
# define TCG_TARGET_CALL_ARG_I128       TCG_CALL_ARG_EVEN
#else
# define TCG_TARGET_CALL_ARG_I32        TCG_CALL_ARG_NORMAL
# define TCG_TARGET_CALL_ARG_I64        TCG_CALL_ARG_NORMAL
# define TCG_TARGET_CALL_ARG_I128       TCG_CALL_ARG_NORMAL
#endif
#define TCG_TARGET_CALL_RET_I128        TCG_CALL_RET_NORMAL
38
/*
 * Return the operand-constraint set for @op.  The TCI interpreter
 * accepts any register for any operand, so every constraint is a
 * plain "r"; only the operand counts differ between opcodes.
 */
static TCGConstraintSetIndex
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
{
    switch (op) {
    case INDEX_op_goto_ptr:
        return C_O0_I1(r);

    /* Loads and register-to-register conversions: one output, one input. */
    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_extract_i32:
    case INDEX_op_extract_i64:
    case INDEX_op_sextract_i32:
    case INDEX_op_sextract_i64:
        return C_O1_I1(r, r);

    /* Stores: value plus base address, no outputs. */
    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
        return C_O0_I2(r, r);

    case INDEX_op_deposit_i32:
    case INDEX_op_deposit_i64:
        return C_O1_I2(r, r, r);

    /* Double-word add/sub: two outputs, four inputs. */
    case INDEX_op_add2_i32:
    case INDEX_op_add2_i64:
    case INDEX_op_sub2_i32:
    case INDEX_op_sub2_i64:
        return C_O2_I4(r, r, r, r, r, r);

    /* Guest memory ops: 64-bit values need a register pair on 32-bit hosts. */
    case INDEX_op_qemu_ld_i32:
        return C_O1_I1(r, r);
    case INDEX_op_qemu_ld_i64:
        return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O2_I1(r, r, r);
    case INDEX_op_qemu_st_i32:
        return C_O0_I2(r, r);
    case INDEX_op_qemu_st_i64:
        return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r) : C_O0_I3(r, r, r);

    default:
        return C_NotImplemented;
    }
}
98
/* Register allocation preference order: high registers first. */
static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R12,
    TCG_REG_R13,
    TCG_REG_R14,
    TCG_REG_R15,
    /* Either 2 or 4 of these are call clobbered, so use them last. */
    TCG_REG_R3,
    TCG_REG_R2,
    TCG_REG_R1,
    TCG_REG_R0,
};

/* No call arguments via registers.  All will be stored on the "stack". */
static const int tcg_target_call_iarg_regs[] = { };
121
/* Call return values occupy consecutive registers starting at R0. */
static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
{
    tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
    /* At most 128 bits of return value, i.e. 2 or 4 registers. */
    tcg_debug_assert(slot >= 0 && slot < 128 / TCG_TARGET_REG_BITS);
    return TCG_REG_R0 + slot;
}
128
#ifdef CONFIG_DEBUG_TCG
/* Register names used only for debug dumps. */
static const char *const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "r00",
    "r01",
    "r02",
    "r03",
    "r04",
    "r05",
    "r06",
    "r07",
    "r08",
    "r09",
    "r10",
    "r11",
    "r12",
    "r13",
    "r14",
    "r15",
};
#endif
149
/*
 * Patch a pc-relative displacement into the instruction at @code_ptr.
 * The displacement is measured from the end of the instruction
 * (code_ptr + 1) and stored in the top @type bits, i.e. bits [31:12]
 * for the 20-bit relocation this backend emits.
 * Returns false if @value does not fit in the field.
 */
static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    intptr_t diff = value - (intptr_t)(code_ptr + 1);

    tcg_debug_assert(addend == 0);
    /* The only relocation type used by this backend is 20 bits wide. */
    tcg_debug_assert(type == 20);

    if (diff == sextract32(diff, 0, type)) {
        tcg_patch32(code_ptr, deposit32(*code_ptr, 32 - type, type, diff));
        return true;
    }
    return false;
}
164
165static void stack_bounds_check(TCGReg base, intptr_t offset)
166{
167    if (base == TCG_REG_CALL_STACK) {
168        tcg_debug_assert(offset >= 0);
169        tcg_debug_assert(offset < (TCG_STATIC_CALL_ARGS_SIZE +
170                                   TCG_STATIC_FRAME_SIZE));
171    }
172}
173
174static void tcg_out_op_l(TCGContext *s, TCGOpcode op, TCGLabel *l0)
175{
176    tcg_insn_unit insn = 0;
177
178    tcg_out_reloc(s, s->code_ptr, 20, l0, 0);
179    insn = deposit32(insn, 0, 8, op);
180    tcg_out32(s, insn);
181}
182
/*
 * Emit an opcode with a pointer operand, encoded as a 20-bit
 * pc-relative displacement in bits [31:12].
 */
static void tcg_out_op_p(TCGContext *s, TCGOpcode op, void *p0)
{
    tcg_insn_unit insn = 0;
    intptr_t diff;

    /* Special case for exit_tb: map null -> 0. */
    if (p0 == NULL) {
        diff = 0;
    } else {
        /* Displacement from the end of this instruction. */
        diff = p0 - (void *)(s->code_ptr + 1);
        /* A zero displacement would collide with the NULL encoding. */
        tcg_debug_assert(diff != 0);
        if (diff != sextract32(diff, 0, 20)) {
            /* Out of range of the 20-bit field. */
            tcg_raise_tb_overflow(s);
        }
    }
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 12, 20, diff);
    tcg_out32(s, insn);
}
202
203static void tcg_out_op_r(TCGContext *s, TCGOpcode op, TCGReg r0)
204{
205    tcg_insn_unit insn = 0;
206
207    insn = deposit32(insn, 0, 8, op);
208    insn = deposit32(insn, 8, 4, r0);
209    tcg_out32(s, insn);
210}
211
212static void tcg_out_op_v(TCGContext *s, TCGOpcode op)
213{
214    tcg_out32(s, (uint8_t)op);
215}
216
217static void tcg_out_op_ri(TCGContext *s, TCGOpcode op, TCGReg r0, int32_t i1)
218{
219    tcg_insn_unit insn = 0;
220
221    tcg_debug_assert(i1 == sextract32(i1, 0, 20));
222    insn = deposit32(insn, 0, 8, op);
223    insn = deposit32(insn, 8, 4, r0);
224    insn = deposit32(insn, 12, 20, i1);
225    tcg_out32(s, insn);
226}
227
228static void tcg_out_op_rl(TCGContext *s, TCGOpcode op, TCGReg r0, TCGLabel *l1)
229{
230    tcg_insn_unit insn = 0;
231
232    tcg_out_reloc(s, s->code_ptr, 20, l1, 0);
233    insn = deposit32(insn, 0, 8, op);
234    insn = deposit32(insn, 8, 4, r0);
235    tcg_out32(s, insn);
236}
237
238static void tcg_out_op_rr(TCGContext *s, TCGOpcode op, TCGReg r0, TCGReg r1)
239{
240    tcg_insn_unit insn = 0;
241
242    insn = deposit32(insn, 0, 8, op);
243    insn = deposit32(insn, 8, 4, r0);
244    insn = deposit32(insn, 12, 4, r1);
245    tcg_out32(s, insn);
246}
247
248static void tcg_out_op_rrm(TCGContext *s, TCGOpcode op,
249                           TCGReg r0, TCGReg r1, TCGArg m2)
250{
251    tcg_insn_unit insn = 0;
252
253    tcg_debug_assert(m2 == extract32(m2, 0, 16));
254    insn = deposit32(insn, 0, 8, op);
255    insn = deposit32(insn, 8, 4, r0);
256    insn = deposit32(insn, 12, 4, r1);
257    insn = deposit32(insn, 16, 16, m2);
258    tcg_out32(s, insn);
259}
260
261static void tcg_out_op_rrr(TCGContext *s, TCGOpcode op,
262                           TCGReg r0, TCGReg r1, TCGReg r2)
263{
264    tcg_insn_unit insn = 0;
265
266    insn = deposit32(insn, 0, 8, op);
267    insn = deposit32(insn, 8, 4, r0);
268    insn = deposit32(insn, 12, 4, r1);
269    insn = deposit32(insn, 16, 4, r2);
270    tcg_out32(s, insn);
271}
272
273static void tcg_out_op_rrs(TCGContext *s, TCGOpcode op,
274                           TCGReg r0, TCGReg r1, intptr_t i2)
275{
276    tcg_insn_unit insn = 0;
277
278    tcg_debug_assert(i2 == sextract32(i2, 0, 16));
279    insn = deposit32(insn, 0, 8, op);
280    insn = deposit32(insn, 8, 4, r0);
281    insn = deposit32(insn, 12, 4, r1);
282    insn = deposit32(insn, 16, 16, i2);
283    tcg_out32(s, insn);
284}
285
286static void tcg_out_op_rrbb(TCGContext *s, TCGOpcode op, TCGReg r0,
287                            TCGReg r1, uint8_t b2, uint8_t b3)
288{
289    tcg_insn_unit insn = 0;
290
291    tcg_debug_assert(b2 == extract32(b2, 0, 6));
292    tcg_debug_assert(b3 == extract32(b3, 0, 6));
293    insn = deposit32(insn, 0, 8, op);
294    insn = deposit32(insn, 8, 4, r0);
295    insn = deposit32(insn, 12, 4, r1);
296    insn = deposit32(insn, 16, 6, b2);
297    insn = deposit32(insn, 22, 6, b3);
298    tcg_out32(s, insn);
299}
300
301static void tcg_out_op_rrrc(TCGContext *s, TCGOpcode op,
302                            TCGReg r0, TCGReg r1, TCGReg r2, TCGCond c3)
303{
304    tcg_insn_unit insn = 0;
305
306    insn = deposit32(insn, 0, 8, op);
307    insn = deposit32(insn, 8, 4, r0);
308    insn = deposit32(insn, 12, 4, r1);
309    insn = deposit32(insn, 16, 4, r2);
310    insn = deposit32(insn, 20, 4, c3);
311    tcg_out32(s, insn);
312}
313
314static void tcg_out_op_rrrbb(TCGContext *s, TCGOpcode op, TCGReg r0,
315                             TCGReg r1, TCGReg r2, uint8_t b3, uint8_t b4)
316{
317    tcg_insn_unit insn = 0;
318
319    tcg_debug_assert(b3 == extract32(b3, 0, 6));
320    tcg_debug_assert(b4 == extract32(b4, 0, 6));
321    insn = deposit32(insn, 0, 8, op);
322    insn = deposit32(insn, 8, 4, r0);
323    insn = deposit32(insn, 12, 4, r1);
324    insn = deposit32(insn, 16, 4, r2);
325    insn = deposit32(insn, 20, 6, b3);
326    insn = deposit32(insn, 26, 6, b4);
327    tcg_out32(s, insn);
328}
329
330static void tcg_out_op_rrrr(TCGContext *s, TCGOpcode op,
331                            TCGReg r0, TCGReg r1, TCGReg r2, TCGReg r3)
332{
333    tcg_insn_unit insn = 0;
334
335    insn = deposit32(insn, 0, 8, op);
336    insn = deposit32(insn, 8, 4, r0);
337    insn = deposit32(insn, 12, 4, r1);
338    insn = deposit32(insn, 16, 4, r2);
339    insn = deposit32(insn, 20, 4, r3);
340    tcg_out32(s, insn);
341}
342
343static void tcg_out_op_rrrrrc(TCGContext *s, TCGOpcode op,
344                              TCGReg r0, TCGReg r1, TCGReg r2,
345                              TCGReg r3, TCGReg r4, TCGCond c5)
346{
347    tcg_insn_unit insn = 0;
348
349    insn = deposit32(insn, 0, 8, op);
350    insn = deposit32(insn, 8, 4, r0);
351    insn = deposit32(insn, 12, 4, r1);
352    insn = deposit32(insn, 16, 4, r2);
353    insn = deposit32(insn, 20, 4, r3);
354    insn = deposit32(insn, 24, 4, r4);
355    insn = deposit32(insn, 28, 4, c5);
356    tcg_out32(s, insn);
357}
358
359static void tcg_out_op_rrrrrr(TCGContext *s, TCGOpcode op,
360                              TCGReg r0, TCGReg r1, TCGReg r2,
361                              TCGReg r3, TCGReg r4, TCGReg r5)
362{
363    tcg_insn_unit insn = 0;
364
365    insn = deposit32(insn, 0, 8, op);
366    insn = deposit32(insn, 8, 4, r0);
367    insn = deposit32(insn, 12, 4, r1);
368    insn = deposit32(insn, 16, 4, r2);
369    insn = deposit32(insn, 20, 4, r3);
370    insn = deposit32(insn, 24, 4, r4);
371    insn = deposit32(insn, 28, 4, r5);
372    tcg_out32(s, insn);
373}
374
/*
 * Emit a load or store of @val at @base + @offset.  If the offset does
 * not fit the signed 16-bit displacement field, materialize the full
 * address in TCG_REG_TMP and use a zero displacement instead.
 */
static void tcg_out_ldst(TCGContext *s, TCGOpcode op, TCGReg val,
                         TCGReg base, intptr_t offset)
{
    stack_bounds_check(base, offset);
    if (offset != sextract32(offset, 0, 16)) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, offset);
        tcg_out_op_rrr(s, INDEX_op_add, TCG_REG_TMP, TCG_REG_TMP, base);
        base = TCG_REG_TMP;
        offset = 0;
    }
    tcg_out_op_rrs(s, op, val, base, offset);
}
387
/* Load a value of @type from @base + @offset into @val. */
static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg val, TCGReg base,
                       intptr_t offset)
{
    switch (type) {
    case TCG_TYPE_I32:
        tcg_out_ldst(s, INDEX_op_ld_i32, val, base, offset);
        break;
#if TCG_TARGET_REG_BITS == 64
    case TCG_TYPE_I64:
        tcg_out_ldst(s, INDEX_op_ld_i64, val, base, offset);
        break;
#endif
    default:
        g_assert_not_reached();
    }
}
404
/* Register-to-register move; always representable, so always true. */
static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    tcg_out_op_rr(s, INDEX_op_mov, ret, arg);
    return true;
}
410
/*
 * Load constant @arg into @ret.  Small constants use the inline signed
 * 20-bit immediate form (tci_movi); larger ones are stored in the
 * constant pool and loaded via tci_movl with a 20-bit relocation.
 */
static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg)
{
    switch (type) {
    case TCG_TYPE_I32:
#if TCG_TARGET_REG_BITS == 64
        /* Normalize 32-bit constants to their sign-extended 64-bit form. */
        arg = (int32_t)arg;
        /* fall through */
    case TCG_TYPE_I64:
#endif
        break;
    default:
        g_assert_not_reached();
    }

    if (arg == sextract32(arg, 0, 20)) {
        tcg_out_op_ri(s, INDEX_op_tci_movi, ret, arg);
    } else {
        tcg_insn_unit insn = 0;

        new_pool_label(s, arg, 20, s->code_ptr, 0);
        insn = deposit32(insn, 0, 8, INDEX_op_tci_movl);
        insn = deposit32(insn, 8, 4, ret);
        tcg_out32(s, insn);
    }
}
437
438static void tcg_out_extract(TCGContext *s, TCGType type, TCGReg rd,
439                            TCGReg rs, unsigned pos, unsigned len)
440{
441    TCGOpcode opc = type == TCG_TYPE_I32 ?
442                    INDEX_op_extract_i32 :
443                    INDEX_op_extract_i64;
444    tcg_out_op_rrbb(s, opc, rd, rs, pos, len);
445}
446
447static void tcg_out_sextract(TCGContext *s, TCGType type, TCGReg rd,
448                             TCGReg rs, unsigned pos, unsigned len)
449{
450    TCGOpcode opc = type == TCG_TYPE_I32 ?
451                    INDEX_op_sextract_i32 :
452                    INDEX_op_sextract_i64;
453    tcg_out_op_rrbb(s, opc, rd, rs, pos, len);
454}
455
/* Sign-extend the low 8 bits of @rs into @rd. */
static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
{
    tcg_out_sextract(s, type, rd, rs, 0, 8);
}

/* Zero-extend the low 8 bits of @rs into @rd. */
static void tcg_out_ext8u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_extract(s, TCG_TYPE_REG, rd, rs, 0, 8);
}

/* Sign-extend the low 16 bits of @rs into @rd. */
static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
{
    tcg_out_sextract(s, type, rd, rs, 0, 16);
}

/* Zero-extend the low 16 bits of @rs into @rd. */
static void tcg_out_ext16u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_extract(s, TCG_TYPE_REG, rd, rs, 0, 16);
}

/* Sign-extend the low 32 bits of @rs into @rd (64-bit hosts only). */
static void tcg_out_ext32s(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
    tcg_out_sextract(s, TCG_TYPE_I64, rd, rs, 0, 32);
}

/* Zero-extend the low 32 bits of @rs into @rd (64-bit hosts only). */
static void tcg_out_ext32u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
    tcg_out_extract(s, TCG_TYPE_I64, rd, rs, 0, 32);
}

/* Widen a 32-bit value to 64 bits with sign extension. */
static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_ext32s(s, rd, rs);
}

/* Widen a 32-bit value to 64 bits with zero extension. */
static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_ext32u(s, rd, rs);
}

/* Truncate a 64-bit value to 32 bits; a plain move suffices here. */
static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
    tcg_out_mov(s, TCG_TYPE_I32, rd, rs);
}
503
/* TCI has no register-exchange instruction; report that to the caller. */
static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
{
    return false;
}

static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                             tcg_target_long imm)
{
    /* This function is only used for passing structs by reference. */
    g_assert_not_reached();
}
515
/*
 * Emit a helper call.  The function pointer and its libffi ffi_cif
 * descriptor are stored as a pair in the constant pool (reached via a
 * 20-bit relocation).  The 4-bit "which" field encodes the return
 * size: 0 = void, else log2(size) - 1 for 4-, 8- or 16-byte returns.
 */
static void tcg_out_call(TCGContext *s, const tcg_insn_unit *func,
                         const TCGHelperInfo *info)
{
    ffi_cif *cif = info->cif;
    tcg_insn_unit insn = 0;
    uint8_t which;

    if (cif->rtype == &ffi_type_void) {
        which = 0;
    } else {
        tcg_debug_assert(cif->rtype->size == 4 ||
                         cif->rtype->size == 8 ||
                         cif->rtype->size == 16);
        which = ctz32(cif->rtype->size) - 1;
    }
    new_pool_l2(s, 20, s->code_ptr, 0, (uintptr_t)func, (uintptr_t)cif);
    insn = deposit32(insn, 0, 8, INDEX_op_call);
    insn = deposit32(insn, 8, 4, which);
    tcg_out32(s, insn);
}
536
/*
 * Case-label helpers: CASE_32_64(x) expands to the _i32 case and, on
 * 64-bit hosts, also the _i64 case; CASE_64(x) expands to the _i64
 * case only on 64-bit hosts and to nothing otherwise.
 */
#if TCG_TARGET_REG_BITS == 64
# define CASE_32_64(x) \
        case glue(glue(INDEX_op_, x), _i64): \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x) \
        case glue(glue(INDEX_op_, x), _i64):
#else
# define CASE_32_64(x) \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x)
#endif
548
/* Return from the translated code to the main loop with value @arg. */
static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg)
{
    tcg_out_op_p(s, INDEX_op_exit_tb, (void *)arg);
}

/* Chain to another TB through the per-TB jump-target slot. */
static void tcg_out_goto_tb(TCGContext *s, int which)
{
    /* indirect jump method. */
    tcg_out_op_p(s, INDEX_op_goto_tb, (void *)get_jmp_target_addr(s, which));
    set_jmp_reset_offset(s, which);
}

void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
                              uintptr_t jmp_rx, uintptr_t jmp_rw)
{
    /* Always indirect, nothing to do */
}
566
/* Three-register add. */
static void tgen_add(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_add, a0, a1, a2);
}

static const TCGOutOpBinary outop_add = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_add,
};

/* Three-register bitwise AND. */
static void tgen_and(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_and, a0, a1, a2);
}

static const TCGOutOpBinary outop_and = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_and,
};

/* Three-register AND-with-complement (a1 & ~a2). */
static void tgen_andc(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_andc, a0, a1, a2);
}

static const TCGOutOpBinary outop_andc = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_andc,
};
599
600static void tgen_clz(TCGContext *s, TCGType type,
601                      TCGReg a0, TCGReg a1, TCGReg a2)
602{
603    TCGOpcode opc = (type == TCG_TYPE_I32
604                     ? INDEX_op_tci_clz32
605                     : INDEX_op_clz);
606    tcg_out_op_rrr(s, opc, a0, a1, a2);
607}
608
609static const TCGOutOpBinary outop_clz = {
610    .base.static_constraint = C_O1_I2(r, r, r),
611    .out_rrr = tgen_clz,
612};
613
614static void tgen_ctz(TCGContext *s, TCGType type,
615                      TCGReg a0, TCGReg a1, TCGReg a2)
616{
617    TCGOpcode opc = (type == TCG_TYPE_I32
618                     ? INDEX_op_tci_ctz32
619                     : INDEX_op_ctz);
620    tcg_out_op_rrr(s, opc, a0, a1, a2);
621}
622
623static const TCGOutOpBinary outop_ctz = {
624    .base.static_constraint = C_O1_I2(r, r, r),
625    .out_rrr = tgen_ctz,
626};
627
/* Signed division; I32 uses the TCI-specific 32-bit opcode. */
static void tgen_divs(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    TCGOpcode opc = (type == TCG_TYPE_I32
                     ? INDEX_op_tci_divs32
                     : INDEX_op_divs);
    tcg_out_op_rrr(s, opc, a0, a1, a2);
}

static const TCGOutOpBinary outop_divs = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_divs,
};

/* No combined quotient/remainder instruction. */
static const TCGOutOpDivRem outop_divs2 = {
    .base.static_constraint = C_NotImplemented,
};

/* Unsigned division; I32 uses the TCI-specific 32-bit opcode. */
static void tgen_divu(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    TCGOpcode opc = (type == TCG_TYPE_I32
                     ? INDEX_op_tci_divu32
                     : INDEX_op_divu);
    tcg_out_op_rrr(s, opc, a0, a1, a2);
}

static const TCGOutOpBinary outop_divu = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_divu,
};

static const TCGOutOpDivRem outop_divu2 = {
    .base.static_constraint = C_NotImplemented,
};
663
/* Three-register equivalence (~(a1 ^ a2)). */
static void tgen_eqv(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_eqv, a0, a1, a2);
}

static const TCGOutOpBinary outop_eqv = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_eqv,
};

/* Three-register multiply. */
static void tgen_mul(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_mul, a0, a1, a2);
}

static const TCGOutOpBinary outop_mul = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_mul,
};
685
/* Double-word multiply is only provided at the native register width. */
static TCGConstraintSetIndex cset_mul2(TCGType type, unsigned flags)
{
    return type == TCG_TYPE_REG ? C_O2_I2(r, r, r, r) : C_NotImplemented;
}

/* Signed widening multiply: (a0, a1) = a2 * a3. */
static void tgen_muls2(TCGContext *s, TCGType type,
                       TCGReg a0, TCGReg a1, TCGReg a2, TCGReg a3)
{
    tcg_out_op_rrrr(s, INDEX_op_muls2, a0, a1, a2, a3);
}

static const TCGOutOpMul2 outop_muls2 = {
    .base.static_constraint = C_Dynamic,
    .base.dynamic_constraint = cset_mul2,
    .out_rrrr = tgen_muls2,
};

/* High-part multiply is not provided; muls2/mulu2 are used instead. */
static const TCGOutOpBinary outop_mulsh = {
    .base.static_constraint = C_NotImplemented,
};

/* Unsigned widening multiply: (a0, a1) = a2 * a3. */
static void tgen_mulu2(TCGContext *s, TCGType type,
                       TCGReg a0, TCGReg a1, TCGReg a2, TCGReg a3)
{
    tcg_out_op_rrrr(s, INDEX_op_mulu2, a0, a1, a2, a3);
}

static const TCGOutOpMul2 outop_mulu2 = {
    .base.static_constraint = C_Dynamic,
    .base.dynamic_constraint = cset_mul2,
    .out_rrrr = tgen_mulu2,
};

static const TCGOutOpBinary outop_muluh = {
    .base.static_constraint = C_NotImplemented,
};
722
/* Three-register NAND. */
static void tgen_nand(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_nand, a0, a1, a2);
}

static const TCGOutOpBinary outop_nand = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_nand,
};

/* Three-register NOR. */
static void tgen_nor(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_nor, a0, a1, a2);
}

static const TCGOutOpBinary outop_nor = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_nor,
};

/* Three-register bitwise OR. */
static void tgen_or(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_or, a0, a1, a2);
}

static const TCGOutOpBinary outop_or = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_or,
};

/* Three-register OR-with-complement (a1 | ~a2). */
static void tgen_orc(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_orc, a0, a1, a2);
}

static const TCGOutOpBinary outop_orc = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_orc,
};
766
/* Signed remainder; I32 uses the TCI-specific 32-bit opcode. */
static void tgen_rems(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    TCGOpcode opc = (type == TCG_TYPE_I32
                     ? INDEX_op_tci_rems32
                     : INDEX_op_rems);
    tcg_out_op_rrr(s, opc, a0, a1, a2);
}

static const TCGOutOpBinary outop_rems = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_rems,
};

/* Unsigned remainder; I32 uses the TCI-specific 32-bit opcode. */
static void tgen_remu(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    TCGOpcode opc = (type == TCG_TYPE_I32
                     ? INDEX_op_tci_remu32
                     : INDEX_op_remu);
    tcg_out_op_rrr(s, opc, a0, a1, a2);
}

static const TCGOutOpBinary outop_remu = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_remu,
};
794
/* Rotate left; I32 uses the TCI-specific 32-bit opcode. */
static void tgen_rotl(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    TCGOpcode opc = (type == TCG_TYPE_I32
                     ? INDEX_op_tci_rotl32
                     : INDEX_op_rotl);
    tcg_out_op_rrr(s, opc, a0, a1, a2);
}

static const TCGOutOpBinary outop_rotl = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_rotl,
};

/* Rotate right; I32 uses the TCI-specific 32-bit opcode. */
static void tgen_rotr(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    TCGOpcode opc = (type == TCG_TYPE_I32
                     ? INDEX_op_tci_rotr32
                     : INDEX_op_rotr);
    tcg_out_op_rrr(s, opc, a0, a1, a2);
}

static const TCGOutOpBinary outop_rotr = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_rotr,
};
822
/*
 * Arithmetic shift right.  For I32 on a wider host (type < TCG_TYPE_REG)
 * the input is first sign-extended into TCG_REG_TMP so the full-width
 * shift produces the correct 32-bit result.
 */
static void tgen_sar(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    if (type < TCG_TYPE_REG) {
        tcg_out_ext32s(s, TCG_REG_TMP, a1);
        a1 = TCG_REG_TMP;
    }
    tcg_out_op_rrr(s, INDEX_op_sar, a0, a1, a2);
}

static const TCGOutOpBinary outop_sar = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_sar,
};

/* Shift left; no extension needed, high garbage bits shift out. */
static void tgen_shl(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_shl, a0, a1, a2);
}

static const TCGOutOpBinary outop_shl = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_shl,
};

/*
 * Logical shift right.  As with sar, a 32-bit operand on a wider host
 * is zero-extended into TCG_REG_TMP before the full-width shift.
 */
static void tgen_shr(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    if (type < TCG_TYPE_REG) {
        tcg_out_ext32u(s, TCG_REG_TMP, a1);
        a1 = TCG_REG_TMP;
    }
    tcg_out_op_rrr(s, INDEX_op_shr, a0, a1, a2);
}

static const TCGOutOpBinary outop_shr = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_shr,
};
863
/* Three-register subtract. */
static void tgen_sub(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_sub, a0, a1, a2);
}

static const TCGOutOpSubtract outop_sub = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_sub,
};

/* Three-register bitwise XOR. */
static void tgen_xor(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_xor, a0, a1, a2);
}

static const TCGOutOpBinary outop_xor = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_xor,
};
885
/* Population count. */
static void tgen_ctpop(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
    tcg_out_op_rr(s, INDEX_op_ctpop, a0, a1);
}

/* ctpop is only provided at the native register width. */
static TCGConstraintSetIndex cset_ctpop(TCGType type, unsigned flags)
{
    return type == TCG_TYPE_REG ? C_O1_I1(r, r) : C_NotImplemented;
}

static const TCGOutOpUnary outop_ctpop = {
    .base.static_constraint = C_Dynamic,
    .base.dynamic_constraint = cset_ctpop,
    .out_rr = tgen_ctpop,
};
901
/*
 * Byte-swap the low 16 bits; if the caller asked for a sign-extended
 * result (TCG_BSWAP_OS), sign-extend the swapped halfword afterwards.
 */
static void tgen_bswap16(TCGContext *s, TCGType type,
                         TCGReg a0, TCGReg a1, unsigned flags)
{
    tcg_out_op_rr(s, INDEX_op_bswap16, a0, a1);
    if (flags & TCG_BSWAP_OS) {
        tcg_out_sextract(s, TCG_TYPE_REG, a0, a0, 0, 16);
    }
}

static const TCGOutOpBswap outop_bswap16 = {
    .base.static_constraint = C_O1_I1(r, r),
    .out_rr = tgen_bswap16,
};

/* Byte-swap the low 32 bits, with optional sign extension as above. */
static void tgen_bswap32(TCGContext *s, TCGType type,
                         TCGReg a0, TCGReg a1, unsigned flags)
{
    tcg_out_op_rr(s, INDEX_op_bswap32, a0, a1);
    if (flags & TCG_BSWAP_OS) {
        tcg_out_sextract(s, TCG_TYPE_REG, a0, a0, 0, 32);
    }
}

static const TCGOutOpBswap outop_bswap32 = {
    .base.static_constraint = C_O1_I1(r, r),
    .out_rr = tgen_bswap32,
};
929
#if TCG_TARGET_REG_BITS == 64
/* Full 64-bit byte swap (64-bit hosts only). */
static void tgen_bswap64(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
    tcg_out_op_rr(s, INDEX_op_bswap64, a0, a1);
}

static const TCGOutOpUnary outop_bswap64 = {
    .base.static_constraint = C_O1_I1(r, r),
    .out_rr = tgen_bswap64,
};
#endif
941
/* Two's-complement negation. */
static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
    tcg_out_op_rr(s, INDEX_op_neg, a0, a1);
}

static const TCGOutOpUnary outop_neg = {
    .base.static_constraint = C_O1_I1(r, r),
    .out_rr = tgen_neg,
};

/* Bitwise NOT. */
static void tgen_not(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
    tcg_out_op_rr(s, INDEX_op_not, a0, a1);
}

static const TCGOutOpUnary outop_not = {
    .base.static_constraint = C_O1_I1(r, r),
    .out_rr = tgen_not,
};
961
/* dest = (arg1 cond arg2) ? 1 : 0; I32 uses the TCI 32-bit opcode. */
static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond,
                         TCGReg dest, TCGReg arg1, TCGReg arg2)
{
    TCGOpcode opc = (type == TCG_TYPE_I32
                     ? INDEX_op_tci_setcond32
                     : INDEX_op_setcond);
    tcg_out_op_rrrc(s, opc, dest, arg1, arg2, cond);
}

static const TCGOutOpSetcond outop_setcond = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_setcond,
};

/* dest = (arg1 cond arg2) ? -1 : 0, via setcond followed by negate. */
static void tgen_negsetcond(TCGContext *s, TCGType type, TCGCond cond,
                            TCGReg dest, TCGReg arg1, TCGReg arg2)
{
    tgen_setcond(s, type, cond, dest, arg1, arg2);
    tgen_neg(s, type, dest, dest);
}

static const TCGOutOpSetcond outop_negsetcond = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_negsetcond,
};
987
/* Conditional branch: evaluate the condition into TMP, then branch on it. */
static void tgen_brcond(TCGContext *s, TCGType type, TCGCond cond,
                        TCGReg arg0, TCGReg arg1, TCGLabel *l)
{
    tgen_setcond(s, type, cond, TCG_REG_TMP, arg0, arg1);
    tcg_out_op_rl(s, INDEX_op_brcond, TCG_REG_TMP, l);
}

static const TCGOutOpBrcond outop_brcond = {
    .base.static_constraint = C_O0_I2(r, r),
    .out_rr = tgen_brcond,
};
998};
999
1000static void tgen_movcond(TCGContext *s, TCGType type, TCGCond cond,
1001                         TCGReg ret, TCGReg c1, TCGArg c2, bool const_c2,
1002                         TCGArg vt, bool const_vt, TCGArg vf, bool consf_vf)
1003{
1004    TCGOpcode opc = (type == TCG_TYPE_I32
1005                     ? INDEX_op_tci_movcond32
1006                     : INDEX_op_movcond);
1007    tcg_out_op_rrrrrc(s, opc, ret, c1, c2, vt, vf, cond);
1008}
1009
1010static const TCGOutOpMovcond outop_movcond = {
1011    .base.static_constraint = C_O1_I4(r, r, r, r, r),
1012    .out = tgen_movcond,
1013};
1014
/*
 * Double-word conditional branch (32-bit hosts): compute the 64-bit
 * comparison of (ah:al) against (bh:bl) into TMP, then branch on it.
 */
static void tgen_brcond2(TCGContext *s, TCGCond cond, TCGReg al, TCGReg ah,
                         TCGArg bl, bool const_bl,
                         TCGArg bh, bool const_bh, TCGLabel *l)
{
    tcg_out_op_rrrrrc(s, INDEX_op_setcond2_i32, TCG_REG_TMP,
                      al, ah, bl, bh, cond);
    tcg_out_op_rl(s, INDEX_op_brcond, TCG_REG_TMP, l);
}

/* Only referenced from the 32-bit-host paths of tcg.c. */
#if TCG_TARGET_REG_BITS != 32
__attribute__((unused))
#endif
static const TCGOutOpBrcond2 outop_brcond2 = {
    .base.static_constraint = C_O0_I4(r, r, r, r),
    .out = tgen_brcond2,
};
1031
1032static void tgen_setcond2(TCGContext *s, TCGCond cond, TCGReg ret,
1033                          TCGReg al, TCGReg ah,
1034                          TCGArg bl, bool const_bl,
1035                          TCGArg bh, bool const_bh)
1036{
1037    tcg_out_op_rrrrrc(s, INDEX_op_setcond2_i32, ret, al, ah, bl, bh, cond);
1038}
1039
/*
 * setcond2 is presumably only referenced when TCG_TARGET_REG_BITS == 32;
 * the attribute suppresses the unused-variable warning on 64-bit builds.
 */
#if TCG_TARGET_REG_BITS != 32
__attribute__((unused))
#endif
static const TCGOutOpSetcond2 outop_setcond2 = {
    .base.static_constraint = C_O1_I4(r, r, r, r, r),
    .out = tgen_setcond2,
};
1047
/*
 * Fallback emitter for opcodes that do not go through a dedicated
 * TCGOutOp* handler: encode one TCG op into the TCI bytecode stream
 * using the generic tcg_out_op_* format helpers.
 */
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS])
{
    switch (opc) {
    case INDEX_op_goto_ptr:
        /* Indirect branch: single register operand. */
        tcg_out_op_r(s, opc, args[0]);
        break;

    case INDEX_op_br:
        /* Unconditional branch to a label. */
        tcg_out_op_l(s, opc, arg_label(args[0]));
        break;

    /* All loads and stores share the reg, base-reg, offset encoding. */
    CASE_32_64(ld8u)
    CASE_32_64(ld8s)
    CASE_32_64(ld16u)
    CASE_32_64(ld16s)
    case INDEX_op_ld_i32:
    CASE_64(ld32u)
    CASE_64(ld32s)
    CASE_64(ld)
    CASE_32_64(st8)
    CASE_32_64(st16)
    case INDEX_op_st_i32:
    CASE_64(st32)
    CASE_64(st)
        tcg_out_ldst(s, opc, args[0], args[1], args[2]);
        break;

    CASE_32_64(deposit)
        /* dest, src1, src2 registers plus position/length byte fields. */
        tcg_out_op_rrrbb(s, opc, args[0], args[1], args[2], args[3], args[4]);
        break;

    CASE_32_64(extract)  /* Optional (TCG_TARGET_HAS_extract_*). */
    CASE_32_64(sextract) /* Optional (TCG_TARGET_HAS_sextract_*). */
        tcg_out_op_rrbb(s, opc, args[0], args[1], args[2], args[3]);
        break;

    CASE_32_64(add2)
    CASE_32_64(sub2)
        /* Double-word add/sub: {lo,hi} out, {lo,hi} in, {lo,hi} in. */
        tcg_out_op_rrrrrr(s, opc, args[0], args[1], args[2],
                          args[3], args[4], args[5]);
        break;

    case INDEX_op_qemu_ld_i64:
    case INDEX_op_qemu_st_i64:
        if (TCG_TARGET_REG_BITS == 32) {
            /*
             * 32-bit host: the value is a register pair and the address
             * another register, so the rrrr format has no slot for the
             * MemOpIdx constant; materialize it in the scratch register.
             */
            tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP, args[3]);
            tcg_out_op_rrrr(s, opc, args[0], args[1], args[2], TCG_REG_TMP);
            break;
        }
        /* fall through */
    case INDEX_op_qemu_ld_i32:
    case INDEX_op_qemu_st_i32:
        if (TCG_TARGET_REG_BITS == 64 && s->addr_type == TCG_TYPE_I32) {
            /* 32-bit guest address on a 64-bit host: zero-extend first. */
            tcg_out_ext32u(s, TCG_REG_TMP, args[1]);
            tcg_out_op_rrm(s, opc, args[0], TCG_REG_TMP, args[2]);
        } else {
            tcg_out_op_rrm(s, opc, args[0], args[1], args[2]);
        }
        break;

    case INDEX_op_mb:
        /* Memory barrier: no operands in the TCI encoding. */
        tcg_out_op_v(s, opc);
        break;

    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
    case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb.  */
    case INDEX_op_goto_tb:  /* Always emitted via tcg_out_goto_tb.  */
    case INDEX_op_ext_i32_i64:  /* Always emitted via tcg_reg_alloc_op.  */
    case INDEX_op_extu_i32_i64:
    case INDEX_op_extrl_i64_i32:
    default:
        g_assert_not_reached();
    }
}
1124
/*
 * Store register 'val' of the given type to memory at base + offset.
 * Only I32 (and I64 on 64-bit hosts) are valid here; anything else
 * indicates a bug in the caller.
 */
static void tcg_out_st(TCGContext *s, TCGType type, TCGReg val, TCGReg base,
                       intptr_t offset)
{
    switch (type) {
    case TCG_TYPE_I32:
        tcg_out_ldst(s, INDEX_op_st_i32, val, base, offset);
        break;
#if TCG_TARGET_REG_BITS == 64
    case TCG_TYPE_I64:
        tcg_out_ldst(s, INDEX_op_st_i64, val, base, offset);
        break;
#endif
    default:
        g_assert_not_reached();
    }
}
1141
/*
 * Optional store-immediate hook: returning false tells the caller that
 * TCI cannot store a constant directly and the value must be loaded
 * into a register first.
 */
static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                               TCGReg base, intptr_t ofs)
{
    return false;
}
1147
1148/* Test if a constant matches the constraint. */
1149static bool tcg_target_const_match(int64_t val, int ct,
1150                                   TCGType type, TCGCond cond, int vece)
1151{
1152    return ct & TCG_CT_CONST;
1153}
1154
1155static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
1156{
1157    memset(p, 0, sizeof(*p) * count);
1158}
1159
/*
 * One-time backend initialization: available register sets, the
 * call-clobbered mask, reserved registers, and the stack frame layout.
 */
static void tcg_target_init(TCGContext *s)
{
    /* The current code uses uint8_t for tcg operations. */
    tcg_debug_assert(tcg_op_defs_max <= UINT8_MAX);

    /* Registers available for 32 bit operations. */
    tcg_target_available_regs[TCG_TYPE_I32] = BIT(TCG_TARGET_NB_REGS) - 1;
    /* Registers available for 64 bit operations. */
    tcg_target_available_regs[TCG_TYPE_I64] = BIT(TCG_TARGET_NB_REGS) - 1;
    /*
     * The interpreter "registers" are in the local stack frame and
     * cannot be clobbered by the called helper functions.  However,
     * the interpreter assumes a 128-bit return value and assigns to
     * the return value registers.
     */
    tcg_target_call_clobber_regs =
        MAKE_64BIT_MASK(TCG_REG_R0, 128 / TCG_TARGET_REG_BITS);

    /* Keep the scratch and stack registers out of the allocator's hands. */
    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);

    /* The call arguments come first, followed by the temp storage. */
    tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE,
                  TCG_STATIC_FRAME_SIZE);
}
1186
/* Generate global QEMU prologue and epilogue code. */
static inline void tcg_target_qemu_prologue(TCGContext *s)
{
    /*
     * Intentionally empty: TCI bytecode is interpreted, so no native
     * prologue/epilogue code needs to be generated.
     */
}
1191
/* Hook called at the start of each translation block; TCI needs none. */
static void tcg_out_tb_start(TCGContext *s)
{
    /* nothing to do */
}
1196
/* TCI handles byte-swapped memory operations for every MemOp itself. */
bool tcg_target_has_memory_bswap(MemOp memop)
{
    return true;
}
1201
/* TCI emits no out-of-line load slow paths; reaching here is a bug. */
static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    g_assert_not_reached();
}
1206
/* TCI emits no out-of-line store slow paths; reaching here is a bug. */
static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    g_assert_not_reached();
}
1211