/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2009, 2011 Stefan Weil
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* Used for function call generation. */
#define TCG_TARGET_CALL_STACK_OFFSET    0
#define TCG_TARGET_STACK_ALIGN          8
#if TCG_TARGET_REG_BITS == 32
# define TCG_TARGET_CALL_ARG_I32        TCG_CALL_ARG_EVEN
# define TCG_TARGET_CALL_ARG_I64        TCG_CALL_ARG_EVEN
# define TCG_TARGET_CALL_ARG_I128       TCG_CALL_ARG_EVEN
#else
# define TCG_TARGET_CALL_ARG_I32        TCG_CALL_ARG_NORMAL
# define TCG_TARGET_CALL_ARG_I64        TCG_CALL_ARG_NORMAL
# define TCG_TARGET_CALL_ARG_I128       TCG_CALL_ARG_NORMAL
#endif
#define TCG_TARGET_CALL_RET_I128        TCG_CALL_RET_NORMAL

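/*
 * Return the constraint set for an opcode.  TCI can use any register
 * for any operand, so every entry below uses plain "r" constraints and
 * the sets differ only in the number of outputs and inputs.
 */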
static TCGConstraintSetIndex
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
{
    switch (op) {
    case INDEX_op_goto_ptr:
        return C_O0_I1(r);

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_sextract_i32:
    case INDEX_op_sextract_i64:
        return C_O1_I1(r, r);

    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
        return C_O0_I2(r, r);

    case INDEX_op_deposit_i32:
    case INDEX_op_deposit_i64:
        return C_O1_I2(r, r, r);

    case INDEX_op_add2_i32:
    case INDEX_op_add2_i64:
    case INDEX_op_sub2_i32:
    case INDEX_op_sub2_i64:
        return C_O2_I4(r, r, r, r, r, r);

    case INDEX_op_qemu_ld_i32:
        return C_O1_I1(r, r);
    case INDEX_op_qemu_ld_i64:
        return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O2_I1(r, r, r);
    case INDEX_op_qemu_st_i32:
        return C_O0_I2(r, r);
    case INDEX_op_qemu_st_i64:
        return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r) : C_O0_I3(r, r, r);

    default:
        return C_NotImplemented;
    }
}

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R12,
    TCG_REG_R13,
    TCG_REG_R14,
    TCG_REG_R15,
    /* Either 2 or 4 of these are call clobbered, so use them last. */
    TCG_REG_R3,
    TCG_REG_R2,
    TCG_REG_R1,
    TCG_REG_R0,
};

/* No call arguments via registers.  All will be stored on the "stack". */
static const int tcg_target_call_iarg_regs[] = { };

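/*
 * Helper return values are passed back in consecutive registers
 * starting at R0: up to 128 bits, i.e. two registers on a 64-bit host
 * or four registers on a 32-bit host.
 */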
static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
{
    tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
    tcg_debug_assert(slot >= 0 && slot < 128 / TCG_TARGET_REG_BITS);
    return TCG_REG_R0 + slot;
}

#ifdef CONFIG_DEBUG_TCG
static const char *const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "r00",
    "r01",
    "r02",
    "r03",
    "r04",
    "r05",
    "r06",
    "r07",
    "r08",
    "r09",
    "r10",
    "r11",
    "r12",
    "r13",
    "r14",
    "r15",
};
#endif

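/*
 * Relocations are 20-bit displacements, measured from the end of the
 * 32-bit instruction word being patched and stored in its top 20 bits.
 * Return false if the displacement does not fit.
 */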
static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    intptr_t diff = value - (intptr_t)(code_ptr + 1);

    tcg_debug_assert(addend == 0);
    tcg_debug_assert(type == 20);

    if (diff == sextract32(diff, 0, type)) {
        tcg_patch32(code_ptr, deposit32(*code_ptr, 32 - type, type, diff));
        return true;
    }
    return false;
}

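/*
 * Accesses through TCG_REG_CALL_STACK must fall within the static
 * call-argument and frame area set up by tcg_target_init().
 */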
static void stack_bounds_check(TCGReg base, intptr_t offset)
{
    if (base == TCG_REG_CALL_STACK) {
        tcg_debug_assert(offset >= 0);
        tcg_debug_assert(offset < (TCG_STATIC_CALL_ARGS_SIZE +
                                   TCG_STATIC_FRAME_SIZE));
    }
}

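/*
 * Encoders for the 32-bit TCI instruction word.  Bits 0-7 always hold
 * the opcode; register operands are packed as 4-bit fields starting at
 * bit 8, followed (depending on the format) by 16-bit or 20-bit
 * immediates, 6-bit bitfield position/length pairs, or a condition.
 */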
static void tcg_out_op_l(TCGContext *s, TCGOpcode op, TCGLabel *l0)
{
    tcg_insn_unit insn = 0;

    tcg_out_reloc(s, s->code_ptr, 20, l0, 0);
    insn = deposit32(insn, 0, 8, op);
    tcg_out32(s, insn);
}

static void tcg_out_op_p(TCGContext *s, TCGOpcode op, void *p0)
{
    tcg_insn_unit insn = 0;
    intptr_t diff;

    /* Special case for exit_tb: map null -> 0. */
    if (p0 == NULL) {
        diff = 0;
    } else {
        diff = p0 - (void *)(s->code_ptr + 1);
        tcg_debug_assert(diff != 0);
        if (diff != sextract32(diff, 0, 20)) {
            tcg_raise_tb_overflow(s);
        }
    }
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 12, 20, diff);
    tcg_out32(s, insn);
}

static void tcg_out_op_r(TCGContext *s, TCGOpcode op, TCGReg r0)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    tcg_out32(s, insn);
}

static void tcg_out_op_v(TCGContext *s, TCGOpcode op)
{
    tcg_out32(s, (uint8_t)op);
}

static void tcg_out_op_ri(TCGContext *s, TCGOpcode op, TCGReg r0, int32_t i1)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(i1 == sextract32(i1, 0, 20));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 20, i1);
    tcg_out32(s, insn);
}

static void tcg_out_op_rl(TCGContext *s, TCGOpcode op, TCGReg r0, TCGLabel *l1)
{
    tcg_insn_unit insn = 0;

    tcg_out_reloc(s, s->code_ptr, 20, l1, 0);
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    tcg_out32(s, insn);
}

static void tcg_out_op_rr(TCGContext *s, TCGOpcode op, TCGReg r0, TCGReg r1)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrm(TCGContext *s, TCGOpcode op,
                           TCGReg r0, TCGReg r1, TCGArg m2)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(m2 == extract32(m2, 0, 16));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 16, m2);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrr(TCGContext *s, TCGOpcode op,
                           TCGReg r0, TCGReg r1, TCGReg r2)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrs(TCGContext *s, TCGOpcode op,
                           TCGReg r0, TCGReg r1, intptr_t i2)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(i2 == sextract32(i2, 0, 16));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 16, i2);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrbb(TCGContext *s, TCGOpcode op, TCGReg r0,
                            TCGReg r1, uint8_t b2, uint8_t b3)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(b2 == extract32(b2, 0, 6));
    tcg_debug_assert(b3 == extract32(b3, 0, 6));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 6, b2);
    insn = deposit32(insn, 22, 6, b3);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrrc(TCGContext *s, TCGOpcode op,
                            TCGReg r0, TCGReg r1, TCGReg r2, TCGCond c3)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 4, c3);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrrbb(TCGContext *s, TCGOpcode op, TCGReg r0,
                             TCGReg r1, TCGReg r2, uint8_t b3, uint8_t b4)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(b3 == extract32(b3, 0, 6));
    tcg_debug_assert(b4 == extract32(b4, 0, 6));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 6, b3);
    insn = deposit32(insn, 26, 6, b4);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrrr(TCGContext *s, TCGOpcode op,
                            TCGReg r0, TCGReg r1, TCGReg r2, TCGReg r3)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 4, r3);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrrrrc(TCGContext *s, TCGOpcode op,
                              TCGReg r0, TCGReg r1, TCGReg r2,
                              TCGReg r3, TCGReg r4, TCGCond c5)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 4, r3);
    insn = deposit32(insn, 24, 4, r4);
    insn = deposit32(insn, 28, 4, c5);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrrrrr(TCGContext *s, TCGOpcode op,
                              TCGReg r0, TCGReg r1, TCGReg r2,
                              TCGReg r3, TCGReg r4, TCGReg r5)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 4, r3);
    insn = deposit32(insn, 24, 4, r4);
    insn = deposit32(insn, 28, 4, r5);
    tcg_out32(s, insn);
}

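/*
 * Emit a load or store.  Offsets that do not fit the signed 16-bit
 * immediate field are first added to the base register via TCG_REG_TMP.
 */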
static void tcg_out_ldst(TCGContext *s, TCGOpcode op, TCGReg val,
                         TCGReg base, intptr_t offset)
{
    stack_bounds_check(base, offset);
    if (offset != sextract32(offset, 0, 16)) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, offset);
        tcg_out_op_rrr(s, INDEX_op_add, TCG_REG_TMP, TCG_REG_TMP, base);
        base = TCG_REG_TMP;
        offset = 0;
    }
    tcg_out_op_rrs(s, op, val, base, offset);
}

static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg val, TCGReg base,
                       intptr_t offset)
{
    switch (type) {
    case TCG_TYPE_I32:
        tcg_out_ldst(s, INDEX_op_ld_i32, val, base, offset);
        break;
#if TCG_TARGET_REG_BITS == 64
    case TCG_TYPE_I64:
        tcg_out_ldst(s, INDEX_op_ld_i64, val, base, offset);
        break;
#endif
    default:
        g_assert_not_reached();
    }
}

static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    tcg_out_op_rr(s, INDEX_op_mov, ret, arg);
    return true;
}

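/*
 * Load a constant.  Values that fit in the signed 20-bit immediate are
 * encoded inline with tci_movi; anything larger is placed in the
 * constant pool and loaded with tci_movl.
 */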
static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg)
{
    switch (type) {
    case TCG_TYPE_I32:
#if TCG_TARGET_REG_BITS == 64
        arg = (int32_t)arg;
        /* fall through */
    case TCG_TYPE_I64:
#endif
        break;
    default:
        g_assert_not_reached();
    }

    if (arg == sextract32(arg, 0, 20)) {
        tcg_out_op_ri(s, INDEX_op_tci_movi, ret, arg);
    } else {
        tcg_insn_unit insn = 0;

        new_pool_label(s, arg, 20, s->code_ptr, 0);
        insn = deposit32(insn, 0, 8, INDEX_op_tci_movl);
        insn = deposit32(insn, 8, 4, ret);
        tcg_out32(s, insn);
    }
}

static void tcg_out_extract(TCGContext *s, TCGType type, TCGReg rd,
                            TCGReg rs, unsigned pos, unsigned len)
{
    tcg_out_op_rrbb(s, INDEX_op_extract, rd, rs, pos, len);
}

static const TCGOutOpExtract outop_extract = {
    .base.static_constraint = C_O1_I1(r, r),
    .out_rr = tcg_out_extract,
};

static void tcg_out_sextract(TCGContext *s, TCGType type, TCGReg rd,
                             TCGReg rs, unsigned pos, unsigned len)
{
    TCGOpcode opc = type == TCG_TYPE_I32 ?
                    INDEX_op_sextract_i32 :
                    INDEX_op_sextract_i64;
    tcg_out_op_rrbb(s, opc, rd, rs, pos, len);
}

static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
{
    tcg_out_sextract(s, type, rd, rs, 0, 8);
}

static void tcg_out_ext8u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_extract(s, TCG_TYPE_REG, rd, rs, 0, 8);
}

static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
{
    tcg_out_sextract(s, type, rd, rs, 0, 16);
}

static void tcg_out_ext16u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_extract(s, TCG_TYPE_REG, rd, rs, 0, 16);
}

static void tcg_out_ext32s(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
    tcg_out_sextract(s, TCG_TYPE_I64, rd, rs, 0, 32);
}

static void tcg_out_ext32u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
    tcg_out_extract(s, TCG_TYPE_I64, rd, rs, 0, 32);
}

static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_ext32s(s, rd, rs);
}

static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_ext32u(s, rd, rs);
}

static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
    tcg_out_mov(s, TCG_TYPE_I32, rd, rs);
}

static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
{
    return false;
}

static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                             tcg_target_long imm)
{
    /* This function is only used for passing structs by reference. */
    g_assert_not_reached();
}

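/*
 * Emit a helper call through libffi.  The instruction encodes only the
 * return-value size (0 = void, 1/2/3 = 32/64/128 bits); the function
 * pointer and the ffi_cif are placed in the constant pool as a pair.
 */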
static void tcg_out_call(TCGContext *s, const tcg_insn_unit *func,
                         const TCGHelperInfo *info)
{
    ffi_cif *cif = info->cif;
    tcg_insn_unit insn = 0;
    uint8_t which;

    if (cif->rtype == &ffi_type_void) {
        which = 0;
    } else {
        tcg_debug_assert(cif->rtype->size == 4 ||
                         cif->rtype->size == 8 ||
                         cif->rtype->size == 16);
        which = ctz32(cif->rtype->size) - 1;
    }
    new_pool_l2(s, 20, s->code_ptr, 0, (uintptr_t)func, (uintptr_t)cif);
    insn = deposit32(insn, 0, 8, INDEX_op_call);
    insn = deposit32(insn, 8, 4, which);
    tcg_out32(s, insn);
}

#if TCG_TARGET_REG_BITS == 64
# define CASE_32_64(x) \
        case glue(glue(INDEX_op_, x), _i64): \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x) \
        case glue(glue(INDEX_op_, x), _i64):
#else
# define CASE_32_64(x) \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x)
#endif

static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg)
{
    tcg_out_op_p(s, INDEX_op_exit_tb, (void *)arg);
}

static void tcg_out_goto_tb(TCGContext *s, int which)
{
    /* indirect jump method. */
    tcg_out_op_p(s, INDEX_op_goto_tb, (void *)get_jmp_target_addr(s, which));
    set_jmp_reset_offset(s, which);
}

void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
                              uintptr_t jmp_rx, uintptr_t jmp_rw)
{
    /* Always indirect, nothing to do */
}

static void tgen_add(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_add, a0, a1, a2);
}

static const TCGOutOpBinary outop_add = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_add,
};

static void tgen_and(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_and, a0, a1, a2);
}

static const TCGOutOpBinary outop_and = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_and,
};

static void tgen_andc(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_andc, a0, a1, a2);
}

static const TCGOutOpBinary outop_andc = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_andc,
};

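/*
 * For several of the operations below, TCG_TYPE_I32 selects a dedicated
 * tci_*32 opcode, while the generic opcode operates on the full
 * register width.
 */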
static void tgen_clz(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    TCGOpcode opc = (type == TCG_TYPE_I32
                     ? INDEX_op_tci_clz32
                     : INDEX_op_clz);
    tcg_out_op_rrr(s, opc, a0, a1, a2);
}

static const TCGOutOpBinary outop_clz = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_clz,
};

static void tgen_ctz(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    TCGOpcode opc = (type == TCG_TYPE_I32
                     ? INDEX_op_tci_ctz32
                     : INDEX_op_ctz);
    tcg_out_op_rrr(s, opc, a0, a1, a2);
}

static const TCGOutOpBinary outop_ctz = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_ctz,
};

static void tgen_divs(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    TCGOpcode opc = (type == TCG_TYPE_I32
                     ? INDEX_op_tci_divs32
                     : INDEX_op_divs);
    tcg_out_op_rrr(s, opc, a0, a1, a2);
}

static const TCGOutOpBinary outop_divs = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_divs,
};

static const TCGOutOpDivRem outop_divs2 = {
    .base.static_constraint = C_NotImplemented,
};

static void tgen_divu(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    TCGOpcode opc = (type == TCG_TYPE_I32
                     ? INDEX_op_tci_divu32
                     : INDEX_op_divu);
    tcg_out_op_rrr(s, opc, a0, a1, a2);
}

static const TCGOutOpBinary outop_divu = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_divu,
};

static const TCGOutOpDivRem outop_divu2 = {
    .base.static_constraint = C_NotImplemented,
};

static void tgen_eqv(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_eqv, a0, a1, a2);
}

static const TCGOutOpBinary outop_eqv = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_eqv,
};

static void tgen_mul(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_mul, a0, a1, a2);
}

static const TCGOutOpBinary outop_mul = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_mul,
};

static TCGConstraintSetIndex cset_mul2(TCGType type, unsigned flags)
{
    return type == TCG_TYPE_REG ? C_O2_I2(r, r, r, r) : C_NotImplemented;
}

static void tgen_muls2(TCGContext *s, TCGType type,
                       TCGReg a0, TCGReg a1, TCGReg a2, TCGReg a3)
{
    tcg_out_op_rrrr(s, INDEX_op_muls2, a0, a1, a2, a3);
}

static const TCGOutOpMul2 outop_muls2 = {
    .base.static_constraint = C_Dynamic,
    .base.dynamic_constraint = cset_mul2,
    .out_rrrr = tgen_muls2,
};

static const TCGOutOpBinary outop_mulsh = {
    .base.static_constraint = C_NotImplemented,
};

static void tgen_mulu2(TCGContext *s, TCGType type,
                       TCGReg a0, TCGReg a1, TCGReg a2, TCGReg a3)
{
    tcg_out_op_rrrr(s, INDEX_op_mulu2, a0, a1, a2, a3);
}

static const TCGOutOpMul2 outop_mulu2 = {
    .base.static_constraint = C_Dynamic,
    .base.dynamic_constraint = cset_mul2,
    .out_rrrr = tgen_mulu2,
};

static const TCGOutOpBinary outop_muluh = {
    .base.static_constraint = C_NotImplemented,
};

static void tgen_nand(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_nand, a0, a1, a2);
}

static const TCGOutOpBinary outop_nand = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_nand,
};

static void tgen_nor(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_nor, a0, a1, a2);
}

static const TCGOutOpBinary outop_nor = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_nor,
};

static void tgen_or(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_or, a0, a1, a2);
}

static const TCGOutOpBinary outop_or = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_or,
};

static void tgen_orc(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_orc, a0, a1, a2);
}

static const TCGOutOpBinary outop_orc = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_orc,
};

static void tgen_rems(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    TCGOpcode opc = (type == TCG_TYPE_I32
                     ? INDEX_op_tci_rems32
                     : INDEX_op_rems);
    tcg_out_op_rrr(s, opc, a0, a1, a2);
}

static const TCGOutOpBinary outop_rems = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_rems,
};

static void tgen_remu(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    TCGOpcode opc = (type == TCG_TYPE_I32
                     ? INDEX_op_tci_remu32
                     : INDEX_op_remu);
    tcg_out_op_rrr(s, opc, a0, a1, a2);
}

static const TCGOutOpBinary outop_remu = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_remu,
};

static void tgen_rotl(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    TCGOpcode opc = (type == TCG_TYPE_I32
                     ? INDEX_op_tci_rotl32
                     : INDEX_op_rotl);
    tcg_out_op_rrr(s, opc, a0, a1, a2);
}

static const TCGOutOpBinary outop_rotl = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_rotl,
};

static void tgen_rotr(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    TCGOpcode opc = (type == TCG_TYPE_I32
                     ? INDEX_op_tci_rotr32
                     : INDEX_op_rotr);
    tcg_out_op_rrr(s, opc, a0, a1, a2);
}

static const TCGOutOpBinary outop_rotr = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_rotr,
};

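/*
 * Right shifts of a 32-bit value on a 64-bit host extend the input into
 * TCG_REG_TMP first (sign-extend for sar, zero-extend for shr), so that
 * the full-register shift produces the correct 32-bit result.
 */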
static void tgen_sar(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    if (type < TCG_TYPE_REG) {
        tcg_out_ext32s(s, TCG_REG_TMP, a1);
        a1 = TCG_REG_TMP;
    }
    tcg_out_op_rrr(s, INDEX_op_sar, a0, a1, a2);
}

static const TCGOutOpBinary outop_sar = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_sar,
};

static void tgen_shl(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_shl, a0, a1, a2);
}

static const TCGOutOpBinary outop_shl = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_shl,
};

static void tgen_shr(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    if (type < TCG_TYPE_REG) {
        tcg_out_ext32u(s, TCG_REG_TMP, a1);
        a1 = TCG_REG_TMP;
    }
    tcg_out_op_rrr(s, INDEX_op_shr, a0, a1, a2);
}

static const TCGOutOpBinary outop_shr = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_shr,
};

static void tgen_sub(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_sub, a0, a1, a2);
}

static const TCGOutOpSubtract outop_sub = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_sub,
};

static void tgen_xor(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_xor, a0, a1, a2);
}

static const TCGOutOpBinary outop_xor = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_xor,
};

static void tgen_ctpop(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
    tcg_out_op_rr(s, INDEX_op_ctpop, a0, a1);
}

static TCGConstraintSetIndex cset_ctpop(TCGType type, unsigned flags)
{
    return type == TCG_TYPE_REG ? C_O1_I1(r, r) : C_NotImplemented;
}

static const TCGOutOpUnary outop_ctpop = {
    .base.static_constraint = C_Dynamic,
    .base.dynamic_constraint = cset_ctpop,
    .out_rr = tgen_ctpop,
};

static void tgen_bswap16(TCGContext *s, TCGType type,
                         TCGReg a0, TCGReg a1, unsigned flags)
{
    tcg_out_op_rr(s, INDEX_op_bswap16, a0, a1);
    if (flags & TCG_BSWAP_OS) {
        tcg_out_sextract(s, TCG_TYPE_REG, a0, a0, 0, 16);
    }
}

static const TCGOutOpBswap outop_bswap16 = {
    .base.static_constraint = C_O1_I1(r, r),
    .out_rr = tgen_bswap16,
};

static void tgen_bswap32(TCGContext *s, TCGType type,
                         TCGReg a0, TCGReg a1, unsigned flags)
{
    tcg_out_op_rr(s, INDEX_op_bswap32, a0, a1);
    if (flags & TCG_BSWAP_OS) {
        tcg_out_sextract(s, TCG_TYPE_REG, a0, a0, 0, 32);
    }
}

static const TCGOutOpBswap outop_bswap32 = {
    .base.static_constraint = C_O1_I1(r, r),
    .out_rr = tgen_bswap32,
};

#if TCG_TARGET_REG_BITS == 64
static void tgen_bswap64(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
    tcg_out_op_rr(s, INDEX_op_bswap64, a0, a1);
}

static const TCGOutOpUnary outop_bswap64 = {
    .base.static_constraint = C_O1_I1(r, r),
    .out_rr = tgen_bswap64,
};
#endif

static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
    tcg_out_op_rr(s, INDEX_op_neg, a0, a1);
}

static const TCGOutOpUnary outop_neg = {
    .base.static_constraint = C_O1_I1(r, r),
    .out_rr = tgen_neg,
};

static void tgen_not(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
    tcg_out_op_rr(s, INDEX_op_not, a0, a1);
}

static const TCGOutOpUnary outop_not = {
    .base.static_constraint = C_O1_I1(r, r),
    .out_rr = tgen_not,
};

static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond,
                         TCGReg dest, TCGReg arg1, TCGReg arg2)
{
    TCGOpcode opc = (type == TCG_TYPE_I32
                     ? INDEX_op_tci_setcond32
                     : INDEX_op_setcond);
    tcg_out_op_rrrc(s, opc, dest, arg1, arg2, cond);
}

static const TCGOutOpSetcond outop_setcond = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_setcond,
};

static void tgen_negsetcond(TCGContext *s, TCGType type, TCGCond cond,
                            TCGReg dest, TCGReg arg1, TCGReg arg2)
{
    tgen_setcond(s, type, cond, dest, arg1, arg2);
    tgen_neg(s, type, dest, dest);
}

static const TCGOutOpSetcond outop_negsetcond = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_negsetcond,
};

static void tgen_brcond(TCGContext *s, TCGType type, TCGCond cond,
                        TCGReg arg0, TCGReg arg1, TCGLabel *l)
{
    tgen_setcond(s, type, cond, TCG_REG_TMP, arg0, arg1);
    tcg_out_op_rl(s, INDEX_op_brcond, TCG_REG_TMP, l);
}

static const TCGOutOpBrcond outop_brcond = {
    .base.static_constraint = C_O0_I2(r, r),
    .out_rr = tgen_brcond,
};

static void tgen_movcond(TCGContext *s, TCGType type, TCGCond cond,
                         TCGReg ret, TCGReg c1, TCGArg c2, bool const_c2,
                         TCGArg vt, bool const_vt, TCGArg vf, bool const_vf)
{
    TCGOpcode opc = (type == TCG_TYPE_I32
                     ? INDEX_op_tci_movcond32
                     : INDEX_op_movcond);
    tcg_out_op_rrrrrc(s, opc, ret, c1, c2, vt, vf, cond);
}

static const TCGOutOpMovcond outop_movcond = {
    .base.static_constraint = C_O1_I4(r, r, r, r, r),
    .out = tgen_movcond,
};

static void tgen_brcond2(TCGContext *s, TCGCond cond, TCGReg al, TCGReg ah,
                         TCGArg bl, bool const_bl,
                         TCGArg bh, bool const_bh, TCGLabel *l)
{
    tcg_out_op_rrrrrc(s, INDEX_op_setcond2_i32, TCG_REG_TMP,
                      al, ah, bl, bh, cond);
    tcg_out_op_rl(s, INDEX_op_brcond, TCG_REG_TMP, l);
}

#if TCG_TARGET_REG_BITS != 32
__attribute__((unused))
#endif
static const TCGOutOpBrcond2 outop_brcond2 = {
    .base.static_constraint = C_O0_I4(r, r, r, r),
    .out = tgen_brcond2,
};

static void tgen_setcond2(TCGContext *s, TCGCond cond, TCGReg ret,
                          TCGReg al, TCGReg ah,
                          TCGArg bl, bool const_bl,
                          TCGArg bh, bool const_bh)
{
    tcg_out_op_rrrrrc(s, INDEX_op_setcond2_i32, ret, al, ah, bl, bh, cond);
}

#if TCG_TARGET_REG_BITS != 32
__attribute__((unused))
#endif
static const TCGOutOpSetcond2 outop_setcond2 = {
    .base.static_constraint = C_O1_I4(r, r, r, r, r),
    .out = tgen_setcond2,
};

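/*
 * Emit the opcodes not covered by the TCGOutOp structures above:
 * branches, loads/stores, deposit/sextract, double-word add/sub,
 * guest memory access and the memory barrier.
 */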
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS])
{
    switch (opc) {
    case INDEX_op_goto_ptr:
        tcg_out_op_r(s, opc, args[0]);
        break;

    case INDEX_op_br:
        tcg_out_op_l(s, opc, arg_label(args[0]));
        break;

    CASE_32_64(ld8u)
    CASE_32_64(ld8s)
    CASE_32_64(ld16u)
    CASE_32_64(ld16s)
    case INDEX_op_ld_i32:
    CASE_64(ld32u)
    CASE_64(ld32s)
    CASE_64(ld)
    CASE_32_64(st8)
    CASE_32_64(st16)
    case INDEX_op_st_i32:
    CASE_64(st32)
    CASE_64(st)
        tcg_out_ldst(s, opc, args[0], args[1], args[2]);
        break;

    CASE_32_64(deposit)
        tcg_out_op_rrrbb(s, opc, args[0], args[1], args[2], args[3], args[4]);
        break;

    CASE_32_64(sextract) /* Optional (TCG_TARGET_HAS_sextract_*). */
        tcg_out_op_rrbb(s, opc, args[0], args[1], args[2], args[3]);
        break;

    CASE_32_64(add2)
    CASE_32_64(sub2)
        tcg_out_op_rrrrrr(s, opc, args[0], args[1], args[2],
                          args[3], args[4], args[5]);
        break;

    case INDEX_op_qemu_ld_i64:
    case INDEX_op_qemu_st_i64:
        if (TCG_TARGET_REG_BITS == 32) {
            tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP, args[3]);
            tcg_out_op_rrrr(s, opc, args[0], args[1], args[2], TCG_REG_TMP);
            break;
        }
        /* fall through */
    case INDEX_op_qemu_ld_i32:
    case INDEX_op_qemu_st_i32:
        if (TCG_TARGET_REG_BITS == 64 && s->addr_type == TCG_TYPE_I32) {
            tcg_out_ext32u(s, TCG_REG_TMP, args[1]);
            tcg_out_op_rrm(s, opc, args[0], TCG_REG_TMP, args[2]);
        } else {
            tcg_out_op_rrm(s, opc, args[0], args[1], args[2]);
        }
        break;

    case INDEX_op_mb:
        tcg_out_op_v(s, opc);
        break;

    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
    case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb.  */
    case INDEX_op_goto_tb:  /* Always emitted via tcg_out_goto_tb.  */
    case INDEX_op_ext_i32_i64:  /* Always emitted via tcg_reg_alloc_op.  */
    case INDEX_op_extu_i32_i64:
    case INDEX_op_extrl_i64_i32:
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_st(TCGContext *s, TCGType type, TCGReg val, TCGReg base,
                       intptr_t offset)
{
    switch (type) {
    case TCG_TYPE_I32:
        tcg_out_ldst(s, INDEX_op_st_i32, val, base, offset);
        break;
#if TCG_TARGET_REG_BITS == 64
    case TCG_TYPE_I64:
        tcg_out_ldst(s, INDEX_op_st_i64, val, base, offset);
        break;
#endif
    default:
        g_assert_not_reached();
    }
}

static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                               TCGReg base, intptr_t ofs)
{
    return false;
}

/* Test if a constant matches the constraint. */
static bool tcg_target_const_match(int64_t val, int ct,
                                   TCGType type, TCGCond cond, int vece)
{
    return ct & TCG_CT_CONST;
}

static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
    memset(p, 0, sizeof(*p) * count);
}

static void tcg_target_init(TCGContext *s)
{
    /* The current code uses uint8_t for tcg operations. */
    tcg_debug_assert(tcg_op_defs_max <= UINT8_MAX);

    /* Registers available for 32 bit operations. */
    tcg_target_available_regs[TCG_TYPE_I32] = BIT(TCG_TARGET_NB_REGS) - 1;
    /* Registers available for 64 bit operations. */
    tcg_target_available_regs[TCG_TYPE_I64] = BIT(TCG_TARGET_NB_REGS) - 1;
    /*
     * The interpreter "registers" are in the local stack frame and
     * cannot be clobbered by the called helper functions.  However,
     * the interpreter assumes a 128-bit return value and assigns to
     * the return value registers.
     */
    tcg_target_call_clobber_regs =
        MAKE_64BIT_MASK(TCG_REG_R0, 128 / TCG_TARGET_REG_BITS);

    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);

    /* The call arguments come first, followed by the temp storage. */
    tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE,
                  TCG_STATIC_FRAME_SIZE);
}

/* Generate global QEMU prologue and epilogue code. */
static inline void tcg_target_qemu_prologue(TCGContext *s)
{
}

static void tcg_out_tb_start(TCGContext *s)
{
    /* nothing to do */
}

bool tcg_target_has_memory_bswap(MemOp memop)
{
    return true;
}

static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    g_assert_not_reached();
}

static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    g_assert_not_reached();
}
