xref: /openbmc/qemu/tcg/tci/tcg-target.c.inc (revision 4d137ff819bae33d045f13bb9186e3a2c71cb7e4)
/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2009, 2011 Stefan Weil
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* Used for function call generation. */
#define TCG_TARGET_CALL_STACK_OFFSET    0
#define TCG_TARGET_STACK_ALIGN          8
#if TCG_TARGET_REG_BITS == 32
# define TCG_TARGET_CALL_ARG_I32        TCG_CALL_ARG_EVEN
# define TCG_TARGET_CALL_ARG_I64        TCG_CALL_ARG_EVEN
# define TCG_TARGET_CALL_ARG_I128       TCG_CALL_ARG_EVEN
#else
# define TCG_TARGET_CALL_ARG_I32        TCG_CALL_ARG_NORMAL
# define TCG_TARGET_CALL_ARG_I64        TCG_CALL_ARG_NORMAL
# define TCG_TARGET_CALL_ARG_I128       TCG_CALL_ARG_NORMAL
#endif
#define TCG_TARGET_CALL_RET_I128        TCG_CALL_RET_NORMAL

static TCGConstraintSetIndex
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
{
    switch (op) {
    case INDEX_op_goto_ptr:
        return C_O0_I1(r);

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
        return C_O1_I1(r, r);

    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
        return C_O0_I2(r, r);

    case INDEX_op_add2_i32:
    case INDEX_op_add2_i64:
    case INDEX_op_sub2_i32:
    case INDEX_op_sub2_i64:
        return C_O2_I4(r, r, r, r, r, r);

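    /*
     * On a 32-bit host a 64-bit guest value occupies two registers,
     * hence the extra output/input for qemu_ld_i64 and qemu_st_i64.
     */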
    case INDEX_op_qemu_ld_i32:
        return C_O1_I1(r, r);
    case INDEX_op_qemu_ld_i64:
        return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O2_I1(r, r, r);
    case INDEX_op_qemu_st_i32:
        return C_O0_I2(r, r);
    case INDEX_op_qemu_st_i64:
        return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r) : C_O0_I3(r, r, r);

    default:
        return C_NotImplemented;
    }
}

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R12,
    TCG_REG_R13,
    TCG_REG_R14,
    TCG_REG_R15,
    /* Either 2 or 4 of these are call clobbered, so use them last. */
    TCG_REG_R3,
    TCG_REG_R2,
    TCG_REG_R1,
    TCG_REG_R0,
};

/* No call arguments via registers.  All will be stored on the "stack". */
static const int tcg_target_call_iarg_regs[] = { };

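/*
 * Call return values are delivered in R0 upward, one host register per
 * TCG_TARGET_REG_BITS, allowing for up to a 128-bit return value.
 */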
static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
{
    tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
    tcg_debug_assert(slot >= 0 && slot < 128 / TCG_TARGET_REG_BITS);
    return TCG_REG_R0 + slot;
}

#ifdef CONFIG_DEBUG_TCG
static const char *const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "r00",
    "r01",
    "r02",
    "r03",
    "r04",
    "r05",
    "r06",
    "r07",
    "r08",
    "r09",
    "r10",
    "r11",
    "r12",
    "r13",
    "r14",
    "r15",
};
#endif

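/*
 * Branch and call targets are 20-bit pc-relative displacements, measured
 * from the instruction following the branch and stored in the top 20 bits
 * of the 32-bit instruction word.
 */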
static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    intptr_t diff = value - (intptr_t)(code_ptr + 1);

    tcg_debug_assert(addend == 0);
    tcg_debug_assert(type == 20);

    if (diff == sextract32(diff, 0, type)) {
        tcg_patch32(code_ptr, deposit32(*code_ptr, 32 - type, type, diff));
        return true;
    }
    return false;
}

static void stack_bounds_check(TCGReg base, intptr_t offset)
{
    if (base == TCG_REG_CALL_STACK) {
        tcg_debug_assert(offset >= 0);
        tcg_debug_assert(offset < (TCG_STATIC_CALL_ARGS_SIZE +
                                   TCG_STATIC_FRAME_SIZE));
    }
}

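/*
 * Each emitter below packs one 32-bit TCI instruction word.  The opcode
 * always occupies bits [0,8); the remaining operands are packed as 4-bit
 * register numbers and conditions, 6-bit bitfield positions/lengths,
 * 16-bit offsets or memop indices, and 20-bit immediates or displacements,
 * as indicated by the letters in each function name (r = register,
 * i = 20-bit immediate, s = 16-bit offset, m = memop index, b = bitfield,
 * c = condition, l = label, p = pointer, v = no operands).
 */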
static void tcg_out_op_l(TCGContext *s, TCGOpcode op, TCGLabel *l0)
{
    tcg_insn_unit insn = 0;

    tcg_out_reloc(s, s->code_ptr, 20, l0, 0);
    insn = deposit32(insn, 0, 8, op);
    tcg_out32(s, insn);
}

static void tcg_out_op_p(TCGContext *s, TCGOpcode op, void *p0)
{
    tcg_insn_unit insn = 0;
    intptr_t diff;

    /* Special case for exit_tb: map null -> 0. */
    if (p0 == NULL) {
        diff = 0;
    } else {
        diff = p0 - (void *)(s->code_ptr + 1);
        tcg_debug_assert(diff != 0);
        if (diff != sextract32(diff, 0, 20)) {
            tcg_raise_tb_overflow(s);
        }
    }
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 12, 20, diff);
    tcg_out32(s, insn);
}

static void tcg_out_op_r(TCGContext *s, TCGOpcode op, TCGReg r0)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    tcg_out32(s, insn);
}

static void tcg_out_op_v(TCGContext *s, TCGOpcode op)
{
    tcg_out32(s, (uint8_t)op);
}

static void tcg_out_op_ri(TCGContext *s, TCGOpcode op, TCGReg r0, int32_t i1)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(i1 == sextract32(i1, 0, 20));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 20, i1);
    tcg_out32(s, insn);
}

static void tcg_out_op_rl(TCGContext *s, TCGOpcode op, TCGReg r0, TCGLabel *l1)
{
    tcg_insn_unit insn = 0;

    tcg_out_reloc(s, s->code_ptr, 20, l1, 0);
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    tcg_out32(s, insn);
}

static void tcg_out_op_rr(TCGContext *s, TCGOpcode op, TCGReg r0, TCGReg r1)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrm(TCGContext *s, TCGOpcode op,
                           TCGReg r0, TCGReg r1, TCGArg m2)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(m2 == extract32(m2, 0, 16));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 16, m2);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrr(TCGContext *s, TCGOpcode op,
                           TCGReg r0, TCGReg r1, TCGReg r2)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrs(TCGContext *s, TCGOpcode op,
                           TCGReg r0, TCGReg r1, intptr_t i2)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(i2 == sextract32(i2, 0, 16));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 16, i2);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrbb(TCGContext *s, TCGOpcode op, TCGReg r0,
                            TCGReg r1, uint8_t b2, uint8_t b3)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(b2 == extract32(b2, 0, 6));
    tcg_debug_assert(b3 == extract32(b3, 0, 6));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 6, b2);
    insn = deposit32(insn, 22, 6, b3);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrrc(TCGContext *s, TCGOpcode op,
                            TCGReg r0, TCGReg r1, TCGReg r2, TCGCond c3)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 4, c3);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrrbb(TCGContext *s, TCGOpcode op, TCGReg r0,
                             TCGReg r1, TCGReg r2, uint8_t b3, uint8_t b4)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(b3 == extract32(b3, 0, 6));
    tcg_debug_assert(b4 == extract32(b4, 0, 6));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 6, b3);
    insn = deposit32(insn, 26, 6, b4);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrrr(TCGContext *s, TCGOpcode op,
                            TCGReg r0, TCGReg r1, TCGReg r2, TCGReg r3)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 4, r3);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrrrrc(TCGContext *s, TCGOpcode op,
                              TCGReg r0, TCGReg r1, TCGReg r2,
                              TCGReg r3, TCGReg r4, TCGCond c5)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 4, r3);
    insn = deposit32(insn, 24, 4, r4);
    insn = deposit32(insn, 28, 4, c5);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrrrrr(TCGContext *s, TCGOpcode op,
                              TCGReg r0, TCGReg r1, TCGReg r2,
                              TCGReg r3, TCGReg r4, TCGReg r5)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 4, r3);
    insn = deposit32(insn, 24, 4, r4);
    insn = deposit32(insn, 28, 4, r5);
    tcg_out32(s, insn);
}

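/*
 * Load/store offsets are limited to the signed 16-bit field of the
 * instruction word; a larger offset is first materialized in TCG_REG_TMP
 * and added to the base register.
 */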
static void tcg_out_ldst(TCGContext *s, TCGOpcode op, TCGReg val,
                         TCGReg base, intptr_t offset)
{
    stack_bounds_check(base, offset);
    if (offset != sextract32(offset, 0, 16)) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, offset);
        tcg_out_op_rrr(s, INDEX_op_add, TCG_REG_TMP, TCG_REG_TMP, base);
        base = TCG_REG_TMP;
        offset = 0;
    }
    tcg_out_op_rrs(s, op, val, base, offset);
}

static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg val, TCGReg base,
                       intptr_t offset)
{
    switch (type) {
    case TCG_TYPE_I32:
        tcg_out_ldst(s, INDEX_op_ld_i32, val, base, offset);
        break;
#if TCG_TARGET_REG_BITS == 64
    case TCG_TYPE_I64:
        tcg_out_ldst(s, INDEX_op_ld_i64, val, base, offset);
        break;
#endif
    default:
        g_assert_not_reached();
    }
}

static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    tcg_out_op_rr(s, INDEX_op_mov, ret, arg);
    return true;
}

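/*
 * Constants that fit in the signed 20-bit immediate field are emitted
 * inline with tci_movi; anything wider goes into the constant pool and
 * is loaded via tci_movl through a 20-bit relocation.
 */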
static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg)
{
    switch (type) {
    case TCG_TYPE_I32:
#if TCG_TARGET_REG_BITS == 64
        arg = (int32_t)arg;
        /* fall through */
    case TCG_TYPE_I64:
#endif
        break;
    default:
        g_assert_not_reached();
    }

    if (arg == sextract32(arg, 0, 20)) {
        tcg_out_op_ri(s, INDEX_op_tci_movi, ret, arg);
    } else {
        tcg_insn_unit insn = 0;

        new_pool_label(s, arg, 20, s->code_ptr, 0);
        insn = deposit32(insn, 0, 8, INDEX_op_tci_movl);
        insn = deposit32(insn, 8, 4, ret);
        tcg_out32(s, insn);
    }
}

static void tcg_out_extract(TCGContext *s, TCGType type, TCGReg rd,
                            TCGReg rs, unsigned pos, unsigned len)
{
    tcg_out_op_rrbb(s, INDEX_op_extract, rd, rs, pos, len);
}

static const TCGOutOpExtract outop_extract = {
    .base.static_constraint = C_O1_I1(r, r),
    .out_rr = tcg_out_extract,
};

static void tcg_out_sextract(TCGContext *s, TCGType type, TCGReg rd,
                             TCGReg rs, unsigned pos, unsigned len)
{
    tcg_out_op_rrbb(s, INDEX_op_sextract, rd, rs, pos, len);
}

static const TCGOutOpExtract outop_sextract = {
    .base.static_constraint = C_O1_I1(r, r),
    .out_rr = tcg_out_sextract,
};

static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
{
    tcg_out_sextract(s, type, rd, rs, 0, 8);
}

static void tcg_out_ext8u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_extract(s, TCG_TYPE_REG, rd, rs, 0, 8);
}

static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
{
    tcg_out_sextract(s, type, rd, rs, 0, 16);
}

static void tcg_out_ext16u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_extract(s, TCG_TYPE_REG, rd, rs, 0, 16);
}

static void tcg_out_ext32s(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
    tcg_out_sextract(s, TCG_TYPE_I64, rd, rs, 0, 32);
}

static void tcg_out_ext32u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
    tcg_out_extract(s, TCG_TYPE_I64, rd, rs, 0, 32);
}

static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_ext32s(s, rd, rs);
}

static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_ext32u(s, rd, rs);
}

static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
    tcg_out_mov(s, TCG_TYPE_I32, rd, rs);
}

static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
{
    return false;
}

static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                             tcg_target_long imm)
{
    /* This function is only used for passing structs by reference. */
    g_assert_not_reached();
}

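/*
 * A call instruction carries a 20-bit displacement to a constant-pool
 * pair holding the helper address and its ffi_cif, plus a 4-bit code
 * for the return size: 0 for void, otherwise log2(size) - 1 for the
 * 4-, 8- and 16-byte cases.
 */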
static void tcg_out_call(TCGContext *s, const tcg_insn_unit *func,
                         const TCGHelperInfo *info)
{
    ffi_cif *cif = info->cif;
    tcg_insn_unit insn = 0;
    uint8_t which;

    if (cif->rtype == &ffi_type_void) {
        which = 0;
    } else {
        tcg_debug_assert(cif->rtype->size == 4 ||
                         cif->rtype->size == 8 ||
                         cif->rtype->size == 16);
        which = ctz32(cif->rtype->size) - 1;
    }
    new_pool_l2(s, 20, s->code_ptr, 0, (uintptr_t)func, (uintptr_t)cif);
    insn = deposit32(insn, 0, 8, INDEX_op_call);
    insn = deposit32(insn, 8, 4, which);
    tcg_out32(s, insn);
}

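/*
 * Expand case labels for both the _i32 and _i64 variants of an opcode
 * on 64-bit hosts, and for the _i32 variant only on 32-bit hosts.
 */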
#if TCG_TARGET_REG_BITS == 64
# define CASE_32_64(x) \
        case glue(glue(INDEX_op_, x), _i64): \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x) \
        case glue(glue(INDEX_op_, x), _i64):
#else
# define CASE_32_64(x) \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x)
#endif

static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg)
{
    tcg_out_op_p(s, INDEX_op_exit_tb, (void *)arg);
}

static void tcg_out_goto_tb(TCGContext *s, int which)
{
    /* indirect jump method. */
    tcg_out_op_p(s, INDEX_op_goto_tb, (void *)get_jmp_target_addr(s, which));
    set_jmp_reset_offset(s, which);
}

void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
                              uintptr_t jmp_rx, uintptr_t jmp_rw)
{
    /* Always indirect, nothing to do */
}

static void tgen_add(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_add, a0, a1, a2);
}

static const TCGOutOpBinary outop_add = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_add,
};

static void tgen_and(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_and, a0, a1, a2);
}

static const TCGOutOpBinary outop_and = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_and,
};

static void tgen_andc(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_andc, a0, a1, a2);
}

static const TCGOutOpBinary outop_andc = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_andc,
};

static void tgen_clz(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    TCGOpcode opc = (type == TCG_TYPE_I32
                     ? INDEX_op_tci_clz32
                     : INDEX_op_clz);
    tcg_out_op_rrr(s, opc, a0, a1, a2);
}

static const TCGOutOpBinary outop_clz = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_clz,
};

static void tgen_ctz(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    TCGOpcode opc = (type == TCG_TYPE_I32
                     ? INDEX_op_tci_ctz32
                     : INDEX_op_ctz);
    tcg_out_op_rrr(s, opc, a0, a1, a2);
}

static const TCGOutOpBinary outop_ctz = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_ctz,
};

static void tgen_deposit(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
                         TCGReg a2, unsigned ofs, unsigned len)
{
    tcg_out_op_rrrbb(s, INDEX_op_deposit, a0, a1, a2, ofs, len);
}

static const TCGOutOpDeposit outop_deposit = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_deposit,
};

static void tgen_divs(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    TCGOpcode opc = (type == TCG_TYPE_I32
                     ? INDEX_op_tci_divs32
                     : INDEX_op_divs);
    tcg_out_op_rrr(s, opc, a0, a1, a2);
}

static const TCGOutOpBinary outop_divs = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_divs,
};

static const TCGOutOpDivRem outop_divs2 = {
    .base.static_constraint = C_NotImplemented,
};

static void tgen_divu(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    TCGOpcode opc = (type == TCG_TYPE_I32
                     ? INDEX_op_tci_divu32
                     : INDEX_op_divu);
    tcg_out_op_rrr(s, opc, a0, a1, a2);
}

static const TCGOutOpBinary outop_divu = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_divu,
};

static const TCGOutOpDivRem outop_divu2 = {
    .base.static_constraint = C_NotImplemented,
};

static void tgen_eqv(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_eqv, a0, a1, a2);
}

static const TCGOutOpBinary outop_eqv = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_eqv,
};

#if TCG_TARGET_REG_BITS == 64
static void tgen_extrh_i64_i32(TCGContext *s, TCGType t, TCGReg a0, TCGReg a1)
{
    tcg_out_extract(s, TCG_TYPE_I64, a0, a1, 32, 32);
}

static const TCGOutOpUnary outop_extrh_i64_i32 = {
    .base.static_constraint = C_O1_I1(r, r),
    .out_rr = tgen_extrh_i64_i32,
};
#endif

static void tgen_mul(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_mul, a0, a1, a2);
}

static const TCGOutOpBinary outop_mul = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_mul,
};

static TCGConstraintSetIndex cset_mul2(TCGType type, unsigned flags)
{
    return type == TCG_TYPE_REG ? C_O2_I2(r, r, r, r) : C_NotImplemented;
}

static void tgen_muls2(TCGContext *s, TCGType type,
                       TCGReg a0, TCGReg a1, TCGReg a2, TCGReg a3)
{
    tcg_out_op_rrrr(s, INDEX_op_muls2, a0, a1, a2, a3);
}

static const TCGOutOpMul2 outop_muls2 = {
    .base.static_constraint = C_Dynamic,
    .base.dynamic_constraint = cset_mul2,
    .out_rrrr = tgen_muls2,
};

static const TCGOutOpBinary outop_mulsh = {
    .base.static_constraint = C_NotImplemented,
};

static void tgen_mulu2(TCGContext *s, TCGType type,
                       TCGReg a0, TCGReg a1, TCGReg a2, TCGReg a3)
{
    tcg_out_op_rrrr(s, INDEX_op_mulu2, a0, a1, a2, a3);
}

static const TCGOutOpMul2 outop_mulu2 = {
    .base.static_constraint = C_Dynamic,
    .base.dynamic_constraint = cset_mul2,
    .out_rrrr = tgen_mulu2,
};

static const TCGOutOpBinary outop_muluh = {
    .base.static_constraint = C_NotImplemented,
};

static void tgen_nand(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_nand, a0, a1, a2);
}

static const TCGOutOpBinary outop_nand = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_nand,
};

static void tgen_nor(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_nor, a0, a1, a2);
}

static const TCGOutOpBinary outop_nor = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_nor,
};

static void tgen_or(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_or, a0, a1, a2);
}

static const TCGOutOpBinary outop_or = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_or,
};

static void tgen_orc(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_orc, a0, a1, a2);
}

static const TCGOutOpBinary outop_orc = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_orc,
};

static void tgen_rems(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    TCGOpcode opc = (type == TCG_TYPE_I32
                     ? INDEX_op_tci_rems32
                     : INDEX_op_rems);
    tcg_out_op_rrr(s, opc, a0, a1, a2);
}

static const TCGOutOpBinary outop_rems = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_rems,
};

static void tgen_remu(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    TCGOpcode opc = (type == TCG_TYPE_I32
                     ? INDEX_op_tci_remu32
                     : INDEX_op_remu);
    tcg_out_op_rrr(s, opc, a0, a1, a2);
}

static const TCGOutOpBinary outop_remu = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_remu,
};

static void tgen_rotl(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    TCGOpcode opc = (type == TCG_TYPE_I32
                     ? INDEX_op_tci_rotl32
                     : INDEX_op_rotl);
    tcg_out_op_rrr(s, opc, a0, a1, a2);
}

static const TCGOutOpBinary outop_rotl = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_rotl,
};

static void tgen_rotr(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    TCGOpcode opc = (type == TCG_TYPE_I32
                     ? INDEX_op_tci_rotr32
                     : INDEX_op_rotr);
    tcg_out_op_rrr(s, opc, a0, a1, a2);
}

static const TCGOutOpBinary outop_rotr = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_rotr,
};

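/*
 * The interpreter's shr/sar operate on the full register width; for a
 * 32-bit operation on a 64-bit host the input is first zero- or
 * sign-extended into TCG_REG_TMP so that the shifted-in bits are correct.
 */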
static void tgen_sar(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    if (type < TCG_TYPE_REG) {
        tcg_out_ext32s(s, TCG_REG_TMP, a1);
        a1 = TCG_REG_TMP;
    }
    tcg_out_op_rrr(s, INDEX_op_sar, a0, a1, a2);
}

static const TCGOutOpBinary outop_sar = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_sar,
};

static void tgen_shl(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_shl, a0, a1, a2);
}

static const TCGOutOpBinary outop_shl = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_shl,
};

static void tgen_shr(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    if (type < TCG_TYPE_REG) {
        tcg_out_ext32u(s, TCG_REG_TMP, a1);
        a1 = TCG_REG_TMP;
    }
    tcg_out_op_rrr(s, INDEX_op_shr, a0, a1, a2);
}

static const TCGOutOpBinary outop_shr = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_shr,
};

static void tgen_sub(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_sub, a0, a1, a2);
}

static const TCGOutOpSubtract outop_sub = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_sub,
};

static void tgen_xor(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_xor, a0, a1, a2);
}

static const TCGOutOpBinary outop_xor = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_xor,
};

static void tgen_ctpop(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
    tcg_out_op_rr(s, INDEX_op_ctpop, a0, a1);
}

static TCGConstraintSetIndex cset_ctpop(TCGType type, unsigned flags)
{
    return type == TCG_TYPE_REG ? C_O1_I1(r, r) : C_NotImplemented;
}

static const TCGOutOpUnary outop_ctpop = {
    .base.static_constraint = C_Dynamic,
    .base.dynamic_constraint = cset_ctpop,
    .out_rr = tgen_ctpop,
};

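/*
 * The bswap opcodes swap within the low 16 or 32 bits; when the TCG op
 * requests a sign-extended result (TCG_BSWAP_OS), sign-extend from the
 * swapped width afterwards.
 */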
static void tgen_bswap16(TCGContext *s, TCGType type,
                         TCGReg a0, TCGReg a1, unsigned flags)
{
    tcg_out_op_rr(s, INDEX_op_bswap16, a0, a1);
    if (flags & TCG_BSWAP_OS) {
        tcg_out_sextract(s, TCG_TYPE_REG, a0, a0, 0, 16);
    }
}

static const TCGOutOpBswap outop_bswap16 = {
    .base.static_constraint = C_O1_I1(r, r),
    .out_rr = tgen_bswap16,
};

static void tgen_bswap32(TCGContext *s, TCGType type,
                         TCGReg a0, TCGReg a1, unsigned flags)
{
    tcg_out_op_rr(s, INDEX_op_bswap32, a0, a1);
    if (flags & TCG_BSWAP_OS) {
        tcg_out_sextract(s, TCG_TYPE_REG, a0, a0, 0, 32);
    }
}

static const TCGOutOpBswap outop_bswap32 = {
    .base.static_constraint = C_O1_I1(r, r),
    .out_rr = tgen_bswap32,
};

#if TCG_TARGET_REG_BITS == 64
static void tgen_bswap64(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
    tcg_out_op_rr(s, INDEX_op_bswap64, a0, a1);
}

static const TCGOutOpUnary outop_bswap64 = {
    .base.static_constraint = C_O1_I1(r, r),
    .out_rr = tgen_bswap64,
};
#endif

static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
    tcg_out_op_rr(s, INDEX_op_neg, a0, a1);
}

static const TCGOutOpUnary outop_neg = {
    .base.static_constraint = C_O1_I1(r, r),
    .out_rr = tgen_neg,
};

static void tgen_not(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
    tcg_out_op_rr(s, INDEX_op_not, a0, a1);
}

static const TCGOutOpUnary outop_not = {
    .base.static_constraint = C_O1_I1(r, r),
    .out_rr = tgen_not,
};

static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond,
                         TCGReg dest, TCGReg arg1, TCGReg arg2)
{
    TCGOpcode opc = (type == TCG_TYPE_I32
                     ? INDEX_op_tci_setcond32
                     : INDEX_op_setcond);
    tcg_out_op_rrrc(s, opc, dest, arg1, arg2, cond);
}

static const TCGOutOpSetcond outop_setcond = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_setcond,
};

static void tgen_negsetcond(TCGContext *s, TCGType type, TCGCond cond,
                            TCGReg dest, TCGReg arg1, TCGReg arg2)
{
    tgen_setcond(s, type, cond, dest, arg1, arg2);
    tgen_neg(s, type, dest, dest);
}

static const TCGOutOpSetcond outop_negsetcond = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_negsetcond,
};

static void tgen_brcond(TCGContext *s, TCGType type, TCGCond cond,
                        TCGReg arg0, TCGReg arg1, TCGLabel *l)
{
    tgen_setcond(s, type, cond, TCG_REG_TMP, arg0, arg1);
    tcg_out_op_rl(s, INDEX_op_brcond, TCG_REG_TMP, l);
}

static const TCGOutOpBrcond outop_brcond = {
    .base.static_constraint = C_O0_I2(r, r),
    .out_rr = tgen_brcond,
};

static void tgen_movcond(TCGContext *s, TCGType type, TCGCond cond,
                         TCGReg ret, TCGReg c1, TCGArg c2, bool const_c2,
                         TCGArg vt, bool const_vt, TCGArg vf, bool const_vf)
{
    TCGOpcode opc = (type == TCG_TYPE_I32
                     ? INDEX_op_tci_movcond32
                     : INDEX_op_movcond);
    tcg_out_op_rrrrrc(s, opc, ret, c1, c2, vt, vf, cond);
}

static const TCGOutOpMovcond outop_movcond = {
    .base.static_constraint = C_O1_I4(r, r, r, r, r),
    .out = tgen_movcond,
};

static void tgen_brcond2(TCGContext *s, TCGCond cond, TCGReg al, TCGReg ah,
                         TCGArg bl, bool const_bl,
                         TCGArg bh, bool const_bh, TCGLabel *l)
{
    tcg_out_op_rrrrrc(s, INDEX_op_setcond2_i32, TCG_REG_TMP,
                      al, ah, bl, bh, cond);
    tcg_out_op_rl(s, INDEX_op_brcond, TCG_REG_TMP, l);
}

#if TCG_TARGET_REG_BITS != 32
__attribute__((unused))
#endif
static const TCGOutOpBrcond2 outop_brcond2 = {
    .base.static_constraint = C_O0_I4(r, r, r, r),
    .out = tgen_brcond2,
};

static void tgen_setcond2(TCGContext *s, TCGCond cond, TCGReg ret,
                          TCGReg al, TCGReg ah,
                          TCGArg bl, bool const_bl,
                          TCGArg bh, bool const_bh)
{
    tcg_out_op_rrrrrc(s, INDEX_op_setcond2_i32, ret, al, ah, bl, bh, cond);
}

#if TCG_TARGET_REG_BITS != 32
__attribute__((unused))
#endif
static const TCGOutOpSetcond2 outop_setcond2 = {
    .base.static_constraint = C_O1_I4(r, r, r, r, r),
    .out = tgen_setcond2,
};

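/*
 * Emit the remaining opcodes that are not routed through the TCGOutOp
 * structures above: branches, guest loads/stores, double-word add/sub,
 * the qemu_ld/st operations and memory barriers.
 */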
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS])
{
    switch (opc) {
    case INDEX_op_goto_ptr:
        tcg_out_op_r(s, opc, args[0]);
        break;

    case INDEX_op_br:
        tcg_out_op_l(s, opc, arg_label(args[0]));
        break;

    CASE_32_64(ld8u)
    CASE_32_64(ld8s)
    CASE_32_64(ld16u)
    CASE_32_64(ld16s)
    case INDEX_op_ld_i32:
    CASE_64(ld32u)
    CASE_64(ld32s)
    CASE_64(ld)
    CASE_32_64(st8)
    CASE_32_64(st16)
    case INDEX_op_st_i32:
    CASE_64(st32)
    CASE_64(st)
        tcg_out_ldst(s, opc, args[0], args[1], args[2]);
        break;

    CASE_32_64(add2)
    CASE_32_64(sub2)
        tcg_out_op_rrrrrr(s, opc, args[0], args[1], args[2],
                          args[3], args[4], args[5]);
        break;

    case INDEX_op_qemu_ld_i64:
    case INDEX_op_qemu_st_i64:
        if (TCG_TARGET_REG_BITS == 32) {
            tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP, args[3]);
            tcg_out_op_rrrr(s, opc, args[0], args[1], args[2], TCG_REG_TMP);
            break;
        }
        /* fall through */
    case INDEX_op_qemu_ld_i32:
    case INDEX_op_qemu_st_i32:
        if (TCG_TARGET_REG_BITS == 64 && s->addr_type == TCG_TYPE_I32) {
            tcg_out_ext32u(s, TCG_REG_TMP, args[1]);
            tcg_out_op_rrm(s, opc, args[0], TCG_REG_TMP, args[2]);
        } else {
            tcg_out_op_rrm(s, opc, args[0], args[1], args[2]);
        }
        break;

    case INDEX_op_mb:
        tcg_out_op_v(s, opc);
        break;

    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
    case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb.  */
    case INDEX_op_goto_tb:  /* Always emitted via tcg_out_goto_tb.  */
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_st(TCGContext *s, TCGType type, TCGReg val, TCGReg base,
                       intptr_t offset)
{
    switch (type) {
    case TCG_TYPE_I32:
        tcg_out_ldst(s, INDEX_op_st_i32, val, base, offset);
        break;
#if TCG_TARGET_REG_BITS == 64
    case TCG_TYPE_I64:
        tcg_out_ldst(s, INDEX_op_st_i64, val, base, offset);
        break;
#endif
    default:
        g_assert_not_reached();
    }
}

static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                               TCGReg base, intptr_t ofs)
{
    return false;
}

/* Test if a constant matches the constraint. */
static bool tcg_target_const_match(int64_t val, int ct,
                                   TCGType type, TCGCond cond, int vece)
{
    return ct & TCG_CT_CONST;
}

static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
    memset(p, 0, sizeof(*p) * count);
}

static void tcg_target_init(TCGContext *s)
{
    /* The current code uses uint8_t for tcg operations. */
    tcg_debug_assert(tcg_op_defs_max <= UINT8_MAX);

    /* Registers available for 32 bit operations. */
    tcg_target_available_regs[TCG_TYPE_I32] = BIT(TCG_TARGET_NB_REGS) - 1;
    /* Registers available for 64 bit operations. */
    tcg_target_available_regs[TCG_TYPE_I64] = BIT(TCG_TARGET_NB_REGS) - 1;
    /*
     * The interpreter "registers" are in the local stack frame and
     * cannot be clobbered by the called helper functions.  However,
     * the interpreter assumes a 128-bit return value and assigns to
     * the return value registers.
     */
    tcg_target_call_clobber_regs =
        MAKE_64BIT_MASK(TCG_REG_R0, 128 / TCG_TARGET_REG_BITS);

    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);

    /* The call arguments come first, followed by the temp storage. */
    tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE,
                  TCG_STATIC_FRAME_SIZE);
}

/* Generate global QEMU prologue and epilogue code. */
static inline void tcg_target_qemu_prologue(TCGContext *s)
{
}

static void tcg_out_tb_start(TCGContext *s)
{
    /* nothing to do */
}

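/*
 * Byte-swapped guest accesses are handled by the interpreter's own
 * load/store helpers rather than by extra generated code, so all
 * MemOp orderings are accepted here.
 */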
bool tcg_target_has_memory_bswap(MemOp memop)
{
    return true;
}

static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    g_assert_not_reached();
}

static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    g_assert_not_reached();
}
