xref: /openbmc/qemu/tcg/tci/tcg-target.c.inc (revision d776198cd31d1578c4b0239dc80cb2841e86f2f8)
1/*
2 * Tiny Code Generator for QEMU
3 *
4 * Copyright (c) 2009, 2011 Stefan Weil
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24
/* Used for function call generation. */
#define TCG_TARGET_CALL_STACK_OFFSET    0
#define TCG_TARGET_STACK_ALIGN          8
#if TCG_TARGET_REG_BITS == 32
/* 32-bit host: arguments are aligned to even stack slots. */
# define TCG_TARGET_CALL_ARG_I32        TCG_CALL_ARG_EVEN
# define TCG_TARGET_CALL_ARG_I64        TCG_CALL_ARG_EVEN
# define TCG_TARGET_CALL_ARG_I128       TCG_CALL_ARG_EVEN
#else
/* 64-bit host: no special slot alignment required. */
# define TCG_TARGET_CALL_ARG_I32        TCG_CALL_ARG_NORMAL
# define TCG_TARGET_CALL_ARG_I64        TCG_CALL_ARG_NORMAL
# define TCG_TARGET_CALL_ARG_I128       TCG_CALL_ARG_NORMAL
#endif
#define TCG_TARGET_CALL_RET_I128        TCG_CALL_RET_NORMAL
38
/*
 * Return the register-constraint set for opcode @op.
 * For tci every operand lives in a plain register ("r"); the only
 * variation is the operand counts, and 32-bit hosts needing register
 * pairs for 64-bit qemu_ld/st values.
 */
static TCGConstraintSetIndex
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
{
    switch (op) {
    case INDEX_op_goto_ptr:
        return C_O0_I1(r);

    /* Loads, extensions, bswaps, extracts: one output, one input. */
    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap16_i64:
    case INDEX_op_bswap32_i32:
    case INDEX_op_bswap32_i64:
    case INDEX_op_bswap64_i64:
    case INDEX_op_extract_i32:
    case INDEX_op_extract_i64:
    case INDEX_op_sextract_i32:
    case INDEX_op_sextract_i64:
        return C_O1_I1(r, r);

    /* Stores: no output, value + base address. */
    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
        return C_O0_I2(r, r);

    case INDEX_op_setcond_i32:
    case INDEX_op_setcond_i64:
    case INDEX_op_deposit_i32:
    case INDEX_op_deposit_i64:
        return C_O1_I2(r, r, r);

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        return C_O0_I2(r, r);

    /* Double-word add/sub: two outputs, four inputs. */
    case INDEX_op_add2_i32:
    case INDEX_op_add2_i64:
    case INDEX_op_sub2_i32:
    case INDEX_op_sub2_i64:
        return C_O2_I4(r, r, r, r, r, r);

#if TCG_TARGET_REG_BITS == 32
    case INDEX_op_brcond2_i32:
        return C_O0_I4(r, r, r, r);
#endif

    case INDEX_op_movcond_i32:
    case INDEX_op_movcond_i64:
    case INDEX_op_setcond2_i32:
        return C_O1_I4(r, r, r, r, r);

    /* 32-bit hosts pass/return 64-bit qemu values as register pairs. */
    case INDEX_op_qemu_ld_i32:
        return C_O1_I1(r, r);
    case INDEX_op_qemu_ld_i64:
        return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O2_I1(r, r, r);
    case INDEX_op_qemu_st_i32:
        return C_O0_I2(r, r);
    case INDEX_op_qemu_st_i64:
        return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r) : C_O0_I3(r, r, r);

    default:
        return C_NotImplemented;
    }
}
119
/* Register allocation preference order: high registers first. */
static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R12,
    TCG_REG_R13,
    TCG_REG_R14,
    TCG_REG_R15,
    /* Either 2 or 4 of these are call clobbered, so use them last. */
    TCG_REG_R3,
    TCG_REG_R2,
    TCG_REG_R1,
    TCG_REG_R0,
};
139
/* No call arguments via registers.  All will be stored on the "stack". */
static const int tcg_target_call_iarg_regs[] = { };
142
/* Call return values occupy consecutive registers starting at R0. */
static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
{
    tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
    /* At most 128 bits of return value, split across register-sized slots. */
    tcg_debug_assert(slot >= 0 && slot < 128 / TCG_TARGET_REG_BITS);
    return TCG_REG_R0 + slot;
}
149
#ifdef CONFIG_DEBUG_TCG
/* Register names for debug dumps only. */
static const char *const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "r00",
    "r01",
    "r02",
    "r03",
    "r04",
    "r05",
    "r06",
    "r07",
    "r08",
    "r09",
    "r10",
    "r11",
    "r12",
    "r13",
    "r14",
    "r15",
};
#endif
170
171static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
172                        intptr_t value, intptr_t addend)
173{
174    intptr_t diff = value - (intptr_t)(code_ptr + 1);
175
176    tcg_debug_assert(addend == 0);
177    tcg_debug_assert(type == 20);
178
179    if (diff == sextract32(diff, 0, type)) {
180        tcg_patch32(code_ptr, deposit32(*code_ptr, 32 - type, type, diff));
181        return true;
182    }
183    return false;
184}
185
186static void stack_bounds_check(TCGReg base, intptr_t offset)
187{
188    if (base == TCG_REG_CALL_STACK) {
189        tcg_debug_assert(offset >= 0);
190        tcg_debug_assert(offset < (TCG_STATIC_CALL_ARGS_SIZE +
191                                   TCG_STATIC_FRAME_SIZE));
192    }
193}
194
195static void tcg_out_op_l(TCGContext *s, TCGOpcode op, TCGLabel *l0)
196{
197    tcg_insn_unit insn = 0;
198
199    tcg_out_reloc(s, s->code_ptr, 20, l0, 0);
200    insn = deposit32(insn, 0, 8, op);
201    tcg_out32(s, insn);
202}
203
/*
 * Emit <op, pointer> as a 20-bit pc-relative displacement.
 * A NULL pointer is encoded as displacement 0.
 */
static void tcg_out_op_p(TCGContext *s, TCGOpcode op, void *p0)
{
    tcg_insn_unit insn = 0;
    intptr_t diff;

    /* Special case for exit_tb: map null -> 0. */
    if (p0 == NULL) {
        diff = 0;
    } else {
        /* Displacement is relative to the end of this insn word. */
        diff = p0 - (void *)(s->code_ptr + 1);
        tcg_debug_assert(diff != 0);
        if (diff != sextract32(diff, 0, 20)) {
            /* Out of range: restart code generation with a smaller TB. */
            tcg_raise_tb_overflow(s);
        }
    }
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 12, 20, diff);
    tcg_out32(s, insn);
}
223
224static void tcg_out_op_r(TCGContext *s, TCGOpcode op, TCGReg r0)
225{
226    tcg_insn_unit insn = 0;
227
228    insn = deposit32(insn, 0, 8, op);
229    insn = deposit32(insn, 8, 4, r0);
230    tcg_out32(s, insn);
231}
232
233static void tcg_out_op_v(TCGContext *s, TCGOpcode op)
234{
235    tcg_out32(s, (uint8_t)op);
236}
237
238static void tcg_out_op_ri(TCGContext *s, TCGOpcode op, TCGReg r0, int32_t i1)
239{
240    tcg_insn_unit insn = 0;
241
242    tcg_debug_assert(i1 == sextract32(i1, 0, 20));
243    insn = deposit32(insn, 0, 8, op);
244    insn = deposit32(insn, 8, 4, r0);
245    insn = deposit32(insn, 12, 20, i1);
246    tcg_out32(s, insn);
247}
248
249static void tcg_out_op_rl(TCGContext *s, TCGOpcode op, TCGReg r0, TCGLabel *l1)
250{
251    tcg_insn_unit insn = 0;
252
253    tcg_out_reloc(s, s->code_ptr, 20, l1, 0);
254    insn = deposit32(insn, 0, 8, op);
255    insn = deposit32(insn, 8, 4, r0);
256    tcg_out32(s, insn);
257}
258
259static void tcg_out_op_rr(TCGContext *s, TCGOpcode op, TCGReg r0, TCGReg r1)
260{
261    tcg_insn_unit insn = 0;
262
263    insn = deposit32(insn, 0, 8, op);
264    insn = deposit32(insn, 8, 4, r0);
265    insn = deposit32(insn, 12, 4, r1);
266    tcg_out32(s, insn);
267}
268
269static void tcg_out_op_rrm(TCGContext *s, TCGOpcode op,
270                           TCGReg r0, TCGReg r1, TCGArg m2)
271{
272    tcg_insn_unit insn = 0;
273
274    tcg_debug_assert(m2 == extract32(m2, 0, 16));
275    insn = deposit32(insn, 0, 8, op);
276    insn = deposit32(insn, 8, 4, r0);
277    insn = deposit32(insn, 12, 4, r1);
278    insn = deposit32(insn, 16, 16, m2);
279    tcg_out32(s, insn);
280}
281
282static void tcg_out_op_rrr(TCGContext *s, TCGOpcode op,
283                           TCGReg r0, TCGReg r1, TCGReg r2)
284{
285    tcg_insn_unit insn = 0;
286
287    insn = deposit32(insn, 0, 8, op);
288    insn = deposit32(insn, 8, 4, r0);
289    insn = deposit32(insn, 12, 4, r1);
290    insn = deposit32(insn, 16, 4, r2);
291    tcg_out32(s, insn);
292}
293
294static void tcg_out_op_rrs(TCGContext *s, TCGOpcode op,
295                           TCGReg r0, TCGReg r1, intptr_t i2)
296{
297    tcg_insn_unit insn = 0;
298
299    tcg_debug_assert(i2 == sextract32(i2, 0, 16));
300    insn = deposit32(insn, 0, 8, op);
301    insn = deposit32(insn, 8, 4, r0);
302    insn = deposit32(insn, 12, 4, r1);
303    insn = deposit32(insn, 16, 16, i2);
304    tcg_out32(s, insn);
305}
306
307static void tcg_out_op_rrbb(TCGContext *s, TCGOpcode op, TCGReg r0,
308                            TCGReg r1, uint8_t b2, uint8_t b3)
309{
310    tcg_insn_unit insn = 0;
311
312    tcg_debug_assert(b2 == extract32(b2, 0, 6));
313    tcg_debug_assert(b3 == extract32(b3, 0, 6));
314    insn = deposit32(insn, 0, 8, op);
315    insn = deposit32(insn, 8, 4, r0);
316    insn = deposit32(insn, 12, 4, r1);
317    insn = deposit32(insn, 16, 6, b2);
318    insn = deposit32(insn, 22, 6, b3);
319    tcg_out32(s, insn);
320}
321
322static void tcg_out_op_rrrc(TCGContext *s, TCGOpcode op,
323                            TCGReg r0, TCGReg r1, TCGReg r2, TCGCond c3)
324{
325    tcg_insn_unit insn = 0;
326
327    insn = deposit32(insn, 0, 8, op);
328    insn = deposit32(insn, 8, 4, r0);
329    insn = deposit32(insn, 12, 4, r1);
330    insn = deposit32(insn, 16, 4, r2);
331    insn = deposit32(insn, 20, 4, c3);
332    tcg_out32(s, insn);
333}
334
335static void tcg_out_op_rrrbb(TCGContext *s, TCGOpcode op, TCGReg r0,
336                             TCGReg r1, TCGReg r2, uint8_t b3, uint8_t b4)
337{
338    tcg_insn_unit insn = 0;
339
340    tcg_debug_assert(b3 == extract32(b3, 0, 6));
341    tcg_debug_assert(b4 == extract32(b4, 0, 6));
342    insn = deposit32(insn, 0, 8, op);
343    insn = deposit32(insn, 8, 4, r0);
344    insn = deposit32(insn, 12, 4, r1);
345    insn = deposit32(insn, 16, 4, r2);
346    insn = deposit32(insn, 20, 6, b3);
347    insn = deposit32(insn, 26, 6, b4);
348    tcg_out32(s, insn);
349}
350
351static void tcg_out_op_rrrr(TCGContext *s, TCGOpcode op,
352                            TCGReg r0, TCGReg r1, TCGReg r2, TCGReg r3)
353{
354    tcg_insn_unit insn = 0;
355
356    insn = deposit32(insn, 0, 8, op);
357    insn = deposit32(insn, 8, 4, r0);
358    insn = deposit32(insn, 12, 4, r1);
359    insn = deposit32(insn, 16, 4, r2);
360    insn = deposit32(insn, 20, 4, r3);
361    tcg_out32(s, insn);
362}
363
364static void tcg_out_op_rrrrrc(TCGContext *s, TCGOpcode op,
365                              TCGReg r0, TCGReg r1, TCGReg r2,
366                              TCGReg r3, TCGReg r4, TCGCond c5)
367{
368    tcg_insn_unit insn = 0;
369
370    insn = deposit32(insn, 0, 8, op);
371    insn = deposit32(insn, 8, 4, r0);
372    insn = deposit32(insn, 12, 4, r1);
373    insn = deposit32(insn, 16, 4, r2);
374    insn = deposit32(insn, 20, 4, r3);
375    insn = deposit32(insn, 24, 4, r4);
376    insn = deposit32(insn, 28, 4, c5);
377    tcg_out32(s, insn);
378}
379
380static void tcg_out_op_rrrrrr(TCGContext *s, TCGOpcode op,
381                              TCGReg r0, TCGReg r1, TCGReg r2,
382                              TCGReg r3, TCGReg r4, TCGReg r5)
383{
384    tcg_insn_unit insn = 0;
385
386    insn = deposit32(insn, 0, 8, op);
387    insn = deposit32(insn, 8, 4, r0);
388    insn = deposit32(insn, 12, 4, r1);
389    insn = deposit32(insn, 16, 4, r2);
390    insn = deposit32(insn, 20, 4, r3);
391    insn = deposit32(insn, 24, 4, r4);
392    insn = deposit32(insn, 28, 4, r5);
393    tcg_out32(s, insn);
394}
395
/*
 * Emit a load/store of @val at @base + @offset.  Offsets that do not
 * fit the 16-bit insn field are materialized into TCG_REG_TMP first;
 * note this clobbers TCG_REG_TMP in that case.
 */
static void tcg_out_ldst(TCGContext *s, TCGOpcode op, TCGReg val,
                         TCGReg base, intptr_t offset)
{
    stack_bounds_check(base, offset);
    if (offset != sextract32(offset, 0, 16)) {
        /* TMP = offset + base; then access TMP with offset 0. */
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, offset);
        tcg_out_op_rrr(s, INDEX_op_add, TCG_REG_TMP, TCG_REG_TMP, base);
        base = TCG_REG_TMP;
        offset = 0;
    }
    tcg_out_op_rrs(s, op, val, base, offset);
}
408
/* Load a register-sized value of @type from @base + @offset. */
static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg val, TCGReg base,
                       intptr_t offset)
{
    switch (type) {
    case TCG_TYPE_I32:
        tcg_out_ldst(s, INDEX_op_ld_i32, val, base, offset);
        break;
#if TCG_TARGET_REG_BITS == 64
    case TCG_TYPE_I64:
        tcg_out_ldst(s, INDEX_op_ld_i64, val, base, offset);
        break;
#endif
    default:
        g_assert_not_reached();
    }
}
425
426static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
427{
428    tcg_out_op_rr(s, INDEX_op_mov, ret, arg);
429    return true;
430}
431
/*
 * Load constant @arg into @ret.  Small constants use the 20-bit
 * immediate form; larger ones are loaded from the constant pool.
 */
static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg)
{
    switch (type) {
    case TCG_TYPE_I32:
#if TCG_TARGET_REG_BITS == 64
        /* Truncate to the 32-bit value; high bits are ignored. */
        arg = (int32_t)arg;
        /* fall through */
    case TCG_TYPE_I64:
#endif
        break;
    default:
        g_assert_not_reached();
    }

    if (arg == sextract32(arg, 0, 20)) {
        tcg_out_op_ri(s, INDEX_op_tci_movi, ret, arg);
    } else {
        tcg_insn_unit insn = 0;

        /* Constant-pool load: the 20-bit field is patched via reloc. */
        new_pool_label(s, arg, 20, s->code_ptr, 0);
        insn = deposit32(insn, 0, 8, INDEX_op_tci_movl);
        insn = deposit32(insn, 8, 4, ret);
        tcg_out32(s, insn);
    }
}
458
459static void tcg_out_extract(TCGContext *s, TCGType type, TCGReg rd,
460                            TCGReg rs, unsigned pos, unsigned len)
461{
462    TCGOpcode opc = type == TCG_TYPE_I32 ?
463                    INDEX_op_extract_i32 :
464                    INDEX_op_extract_i64;
465    tcg_out_op_rrbb(s, opc, rd, rs, pos, len);
466}
467
468static void tcg_out_sextract(TCGContext *s, TCGType type, TCGReg rd,
469                             TCGReg rs, unsigned pos, unsigned len)
470{
471    TCGOpcode opc = type == TCG_TYPE_I32 ?
472                    INDEX_op_sextract_i32 :
473                    INDEX_op_sextract_i64;
474    tcg_out_op_rrbb(s, opc, rd, rs, pos, len);
475}
476
/* Sign-extend the low 8 bits of @rs into @rd. */
static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
{
    tcg_out_sextract(s, type, rd, rs, 0, 8);
}
481
/* Zero-extend the low 8 bits of @rs into @rd. */
static void tcg_out_ext8u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_extract(s, TCG_TYPE_REG, rd, rs, 0, 8);
}
486
/* Sign-extend the low 16 bits of @rs into @rd. */
static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
{
    tcg_out_sextract(s, type, rd, rs, 0, 16);
}
491
/* Zero-extend the low 16 bits of @rs into @rd. */
static void tcg_out_ext16u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_extract(s, TCG_TYPE_REG, rd, rs, 0, 16);
}
496
/* Sign-extend the low 32 bits of @rs into @rd (64-bit hosts only). */
static void tcg_out_ext32s(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
    tcg_out_sextract(s, TCG_TYPE_I64, rd, rs, 0, 32);
}
502
/* Zero-extend the low 32 bits of @rs into @rd (64-bit hosts only). */
static void tcg_out_ext32u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
    tcg_out_extract(s, TCG_TYPE_I64, rd, rs, 0, 32);
}
508
/* i32 -> i64 sign extension. */
static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_ext32s(s, rd, rs);
}
513
/* i32 -> i64 zero extension. */
static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_ext32u(s, rd, rs);
}
518
/* i64 -> i32 truncation: a plain move suffices for tci. */
static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
    tcg_out_mov(s, TCG_TYPE_I32, rd, rs);
}
524
525static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
526{
527    return false;
528}
529
static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                             tcg_target_long imm)
{
    /* This function is only used for passing structs by reference. */
    g_assert_not_reached();
}
536
/*
 * Emit a helper call.  The function pointer and its ffi_cif go into
 * the constant pool; the insn carries a return-size selector:
 * 0 = void, 1/2/3 = 4/8/16-byte return.
 */
static void tcg_out_call(TCGContext *s, const tcg_insn_unit *func,
                         const TCGHelperInfo *info)
{
    ffi_cif *cif = info->cif;
    tcg_insn_unit insn = 0;
    uint8_t which;

    if (cif->rtype == &ffi_type_void) {
        which = 0;
    } else {
        tcg_debug_assert(cif->rtype->size == 4 ||
                         cif->rtype->size == 8 ||
                         cif->rtype->size == 16);
        /* size 4 -> 1, size 8 -> 2, size 16 -> 3. */
        which = ctz32(cif->rtype->size) - 1;
    }
    /* Pool entry holds <func, cif>; 20-bit field patched via reloc. */
    new_pool_l2(s, 20, s->code_ptr, 0, (uintptr_t)func, (uintptr_t)cif);
    insn = deposit32(insn, 0, 8, INDEX_op_call);
    insn = deposit32(insn, 8, 4, which);
    tcg_out32(s, insn);
}
557
/* Case-label helpers: expand to the _i32 (and, on 64-bit, _i64) cases. */
#if TCG_TARGET_REG_BITS == 64
# define CASE_32_64(x) \
        case glue(glue(INDEX_op_, x), _i64): \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x) \
        case glue(glue(INDEX_op_, x), _i64):
#else
# define CASE_32_64(x) \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x)
#endif
569
/* Emit exit_tb; the return value is encoded as a pc-relative pointer. */
static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg)
{
    tcg_out_op_p(s, INDEX_op_exit_tb, (void *)arg);
}
574
static void tcg_out_goto_tb(TCGContext *s, int which)
{
    /* indirect jump method. */
    tcg_out_op_p(s, INDEX_op_goto_tb, (void *)get_jmp_target_addr(s, which));
    set_jmp_reset_offset(s, which);
}
581
void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
                              uintptr_t jmp_rx, uintptr_t jmp_rw)
{
    /* Always indirect, nothing to do */
}
587
/* add: one tci opcode serves both 32- and 64-bit. */
static void tgen_add(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_add, a0, a1, a2);
}

static const TCGOutOpBinary outop_add = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_add,
};
598
/* and: one tci opcode serves both 32- and 64-bit. */
static void tgen_and(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_and, a0, a1, a2);
}

static const TCGOutOpBinary outop_and = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_and,
};
609
/* andc (and-with-complement): one tci opcode for both widths. */
static void tgen_andc(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_andc, a0, a1, a2);
}

static const TCGOutOpBinary outop_andc = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_andc,
};
620
621static void tgen_clz(TCGContext *s, TCGType type,
622                      TCGReg a0, TCGReg a1, TCGReg a2)
623{
624    TCGOpcode opc = (type == TCG_TYPE_I32
625                     ? INDEX_op_tci_clz32
626                     : INDEX_op_clz);
627    tcg_out_op_rrr(s, opc, a0, a1, a2);
628}
629
630static const TCGOutOpBinary outop_clz = {
631    .base.static_constraint = C_O1_I2(r, r, r),
632    .out_rrr = tgen_clz,
633};
634
635static void tgen_ctz(TCGContext *s, TCGType type,
636                      TCGReg a0, TCGReg a1, TCGReg a2)
637{
638    TCGOpcode opc = (type == TCG_TYPE_I32
639                     ? INDEX_op_tci_ctz32
640                     : INDEX_op_ctz);
641    tcg_out_op_rrr(s, opc, a0, a1, a2);
642}
643
644static const TCGOutOpBinary outop_ctz = {
645    .base.static_constraint = C_O1_I2(r, r, r),
646    .out_rrr = tgen_ctz,
647};
648
649static void tgen_divs(TCGContext *s, TCGType type,
650                      TCGReg a0, TCGReg a1, TCGReg a2)
651{
652    TCGOpcode opc = (type == TCG_TYPE_I32
653                     ? INDEX_op_tci_divs32
654                     : INDEX_op_divs);
655    tcg_out_op_rrr(s, opc, a0, a1, a2);
656}
657
658static const TCGOutOpBinary outop_divs = {
659    .base.static_constraint = C_O1_I2(r, r, r),
660    .out_rrr = tgen_divs,
661};
662
663static const TCGOutOpDivRem outop_divs2 = {
664    .base.static_constraint = C_NotImplemented,
665};
666
667static void tgen_divu(TCGContext *s, TCGType type,
668                      TCGReg a0, TCGReg a1, TCGReg a2)
669{
670    TCGOpcode opc = (type == TCG_TYPE_I32
671                     ? INDEX_op_tci_divu32
672                     : INDEX_op_divu);
673    tcg_out_op_rrr(s, opc, a0, a1, a2);
674}
675
676static const TCGOutOpBinary outop_divu = {
677    .base.static_constraint = C_O1_I2(r, r, r),
678    .out_rrr = tgen_divu,
679};
680
681static const TCGOutOpDivRem outop_divu2 = {
682    .base.static_constraint = C_NotImplemented,
683};
684
/* eqv (xnor): one tci opcode for both widths. */
static void tgen_eqv(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_eqv, a0, a1, a2);
}

static const TCGOutOpBinary outop_eqv = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_eqv,
};
695
/* mul: one tci opcode for both widths. */
static void tgen_mul(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_mul, a0, a1, a2);
}

static const TCGOutOpBinary outop_mul = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_mul,
};
706
707static TCGConstraintSetIndex cset_mul2(TCGType type, unsigned flags)
708{
709    return type == TCG_TYPE_REG ? C_O2_I2(r, r, r, r) : C_NotImplemented;
710}
711
712static void tgen_muls2(TCGContext *s, TCGType type,
713                       TCGReg a0, TCGReg a1, TCGReg a2, TCGReg a3)
714{
715    tcg_out_op_rrrr(s, INDEX_op_muls2, a0, a1, a2, a3);
716}
717
718static const TCGOutOpMul2 outop_muls2 = {
719    .base.static_constraint = C_Dynamic,
720    .base.dynamic_constraint = cset_mul2,
721    .out_rrrr = tgen_muls2,
722};
723
/* High-half signed multiply is not supported; muls2 covers it. */
static const TCGOutOpBinary outop_mulsh = {
    .base.static_constraint = C_NotImplemented,
};
727
/* Unsigned widening multiply producing a double-word result. */
static void tgen_mulu2(TCGContext *s, TCGType type,
                       TCGReg a0, TCGReg a1, TCGReg a2, TCGReg a3)
{
    tcg_out_op_rrrr(s, INDEX_op_mulu2, a0, a1, a2, a3);
}

static const TCGOutOpMul2 outop_mulu2 = {
    .base.static_constraint = C_Dynamic,
    .base.dynamic_constraint = cset_mul2,
    .out_rrrr = tgen_mulu2,
};

/* High-half unsigned multiply is not supported; mulu2 covers it. */
static const TCGOutOpBinary outop_muluh = {
    .base.static_constraint = C_NotImplemented,
};
743
/* nand: one tci opcode for both widths. */
static void tgen_nand(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_nand, a0, a1, a2);
}

static const TCGOutOpBinary outop_nand = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_nand,
};
754
/* nor: one tci opcode for both widths. */
static void tgen_nor(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_nor, a0, a1, a2);
}

static const TCGOutOpBinary outop_nor = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_nor,
};
765
/* or: one tci opcode for both widths. */
static void tgen_or(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_or, a0, a1, a2);
}

static const TCGOutOpBinary outop_or = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_or,
};
776
/* orc (or-with-complement): one tci opcode for both widths. */
static void tgen_orc(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_orc, a0, a1, a2);
}

static const TCGOutOpBinary outop_orc = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_orc,
};
787
788static void tgen_rems(TCGContext *s, TCGType type,
789                      TCGReg a0, TCGReg a1, TCGReg a2)
790{
791    TCGOpcode opc = (type == TCG_TYPE_I32
792                     ? INDEX_op_tci_rems32
793                     : INDEX_op_rems);
794    tcg_out_op_rrr(s, opc, a0, a1, a2);
795}
796
797static const TCGOutOpBinary outop_rems = {
798    .base.static_constraint = C_O1_I2(r, r, r),
799    .out_rrr = tgen_rems,
800};
801
802static void tgen_remu(TCGContext *s, TCGType type,
803                      TCGReg a0, TCGReg a1, TCGReg a2)
804{
805    TCGOpcode opc = (type == TCG_TYPE_I32
806                     ? INDEX_op_tci_remu32
807                     : INDEX_op_remu);
808    tcg_out_op_rrr(s, opc, a0, a1, a2);
809}
810
811static const TCGOutOpBinary outop_remu = {
812    .base.static_constraint = C_O1_I2(r, r, r),
813    .out_rrr = tgen_remu,
814};
815
816static void tgen_rotl(TCGContext *s, TCGType type,
817                     TCGReg a0, TCGReg a1, TCGReg a2)
818{
819    TCGOpcode opc = (type == TCG_TYPE_I32
820                     ? INDEX_op_tci_rotl32
821                     : INDEX_op_rotl);
822    tcg_out_op_rrr(s, opc, a0, a1, a2);
823}
824
825static const TCGOutOpBinary outop_rotl = {
826    .base.static_constraint = C_O1_I2(r, r, r),
827    .out_rrr = tgen_rotl,
828};
829
830static void tgen_rotr(TCGContext *s, TCGType type,
831                     TCGReg a0, TCGReg a1, TCGReg a2)
832{
833    TCGOpcode opc = (type == TCG_TYPE_I32
834                     ? INDEX_op_tci_rotr32
835                     : INDEX_op_rotr);
836    tcg_out_op_rrr(s, opc, a0, a1, a2);
837}
838
839static const TCGOutOpBinary outop_rotr = {
840    .base.static_constraint = C_O1_I2(r, r, r),
841    .out_rrr = tgen_rotr,
842};
843
/*
 * Arithmetic shift right.  For sub-register types the input is first
 * sign-extended into TCG_REG_TMP (clobbering it) so the register-wide
 * shift produces the correct result.
 */
static void tgen_sar(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    if (type < TCG_TYPE_REG) {
        tcg_out_ext32s(s, TCG_REG_TMP, a1);
        a1 = TCG_REG_TMP;
    }
    tcg_out_op_rrr(s, INDEX_op_sar, a0, a1, a2);
}

static const TCGOutOpBinary outop_sar = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_sar,
};
858
/* Shift left: no extension needed, low bits are unaffected by high bits. */
static void tgen_shl(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_shl, a0, a1, a2);
}

static const TCGOutOpBinary outop_shl = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_shl,
};
869
/*
 * Logical shift right.  For sub-register types the input is first
 * zero-extended into TCG_REG_TMP (clobbering it) so stale high bits
 * do not shift into the result.
 */
static void tgen_shr(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    if (type < TCG_TYPE_REG) {
        tcg_out_ext32u(s, TCG_REG_TMP, a1);
        a1 = TCG_REG_TMP;
    }
    tcg_out_op_rrr(s, INDEX_op_shr, a0, a1, a2);
}

static const TCGOutOpBinary outop_shr = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_shr,
};
884
/* sub: one tci opcode for both widths. */
static void tgen_sub(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_sub, a0, a1, a2);
}

static const TCGOutOpSubtract outop_sub = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_sub,
};
895
/* xor: one tci opcode for both widths. */
static void tgen_xor(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_xor, a0, a1, a2);
}

static const TCGOutOpBinary outop_xor = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_xor,
};
906
907static void tgen_ctpop(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
908{
909    tcg_out_op_rr(s, INDEX_op_ctpop, a0, a1);
910}
911
912static TCGConstraintSetIndex cset_ctpop(TCGType type, unsigned flags)
913{
914    return type == TCG_TYPE_REG ? C_O1_I1(r, r) : C_NotImplemented;
915}
916
917static const TCGOutOpUnary outop_ctpop = {
918    .base.static_constraint = C_Dynamic,
919    .base.dynamic_constraint = cset_ctpop,
920    .out_rr = tgen_ctpop,
921};
922
/* neg: one tci opcode for both widths. */
static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
    tcg_out_op_rr(s, INDEX_op_neg, a0, a1);
}

static const TCGOutOpUnary outop_neg = {
    .base.static_constraint = C_O1_I1(r, r),
    .out_rr = tgen_neg,
};
932
/* not: one tci opcode for both widths. */
static void tgen_not(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
    tcg_out_op_rr(s, INDEX_op_not, a0, a1);
}

static const TCGOutOpUnary outop_not = {
    .base.static_constraint = C_O1_I1(r, r),
    .out_rr = tgen_not,
};
942
943
/*
 * Main opcode dispatcher: encode one TCG op into tci bytecode.
 * Ops with dedicated emitters (call/exit_tb/goto_tb and the tgen_*
 * table entries) must never reach this function.
 */
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS])
{
    int width;

    switch (opc) {
    case INDEX_op_goto_ptr:
        tcg_out_op_r(s, opc, args[0]);
        break;

    case INDEX_op_br:
        tcg_out_op_l(s, opc, arg_label(args[0]));
        break;

    CASE_32_64(setcond)
        tcg_out_op_rrrc(s, opc, args[0], args[1], args[2], args[3]);
        break;

    CASE_32_64(movcond)
    case INDEX_op_setcond2_i32:
        tcg_out_op_rrrrrc(s, opc, args[0], args[1], args[2],
                          args[3], args[4], args[5]);
        break;

    CASE_32_64(ld8u)
    CASE_32_64(ld8s)
    CASE_32_64(ld16u)
    CASE_32_64(ld16s)
    case INDEX_op_ld_i32:
    CASE_64(ld32u)
    CASE_64(ld32s)
    CASE_64(ld)
    CASE_32_64(st8)
    CASE_32_64(st16)
    case INDEX_op_st_i32:
    CASE_64(st32)
    CASE_64(st)
        tcg_out_ldst(s, opc, args[0], args[1], args[2]);
        break;

    CASE_32_64(deposit)
        tcg_out_op_rrrbb(s, opc, args[0], args[1], args[2], args[3], args[4]);
        break;

    CASE_32_64(extract)  /* Optional (TCG_TARGET_HAS_extract_*). */
    CASE_32_64(sextract) /* Optional (TCG_TARGET_HAS_sextract_*). */
        tcg_out_op_rrbb(s, opc, args[0], args[1], args[2], args[3]);
        break;

    CASE_32_64(brcond)
        /* Synthesize brcond as setcond into TMP + branch on TMP. */
        tcg_out_op_rrrc(s, (opc == INDEX_op_brcond_i32
                            ? INDEX_op_setcond_i32 : INDEX_op_setcond_i64),
                        TCG_REG_TMP, args[0], args[1], args[2]);
        tcg_out_op_rl(s, opc, TCG_REG_TMP, arg_label(args[3]));
        break;

    case INDEX_op_bswap32_i32: /* Optional (TCG_TARGET_HAS_bswap32_i32). */
    case INDEX_op_bswap64_i64: /* Optional (TCG_TARGET_HAS_bswap64_i64). */
        tcg_out_op_rr(s, opc, args[0], args[1]);
        break;

    case INDEX_op_bswap16_i32: /* Optional (TCG_TARGET_HAS_bswap16_i32). */
    case INDEX_op_bswap16_i64: /* Optional (TCG_TARGET_HAS_bswap16_i64). */
        width = 16;
        goto do_bswap;
    case INDEX_op_bswap32_i64: /* Optional (TCG_TARGET_HAS_bswap32_i64). */
        width = 32;
    do_bswap:
        /* The base tci bswaps zero-extend, and ignore high bits. */
        tcg_out_op_rr(s, opc, args[0], args[1]);
        if (args[2] & TCG_BSWAP_OS) {
            /* Caller requested sign extension of the swapped value. */
            tcg_out_sextract(s, TCG_TYPE_REG, args[0], args[0], 0, width);
        }
        break;

    CASE_32_64(add2)
    CASE_32_64(sub2)
        tcg_out_op_rrrrrr(s, opc, args[0], args[1], args[2],
                          args[3], args[4], args[5]);
        break;

#if TCG_TARGET_REG_BITS == 32
    case INDEX_op_brcond2_i32:
        /* Synthesize as setcond2 into TMP + branch on TMP. */
        tcg_out_op_rrrrrc(s, INDEX_op_setcond2_i32, TCG_REG_TMP,
                          args[0], args[1], args[2], args[3], args[4]);
        tcg_out_op_rl(s, INDEX_op_brcond_i32, TCG_REG_TMP, arg_label(args[5]));
        break;
#endif

    case INDEX_op_qemu_ld_i64:
    case INDEX_op_qemu_st_i64:
        if (TCG_TARGET_REG_BITS == 32) {
            /* MemOpIdx does not fit the rrrr form; pass it via TMP. */
            tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP, args[3]);
            tcg_out_op_rrrr(s, opc, args[0], args[1], args[2], TCG_REG_TMP);
            break;
        }
        /* fall through */
    case INDEX_op_qemu_ld_i32:
    case INDEX_op_qemu_st_i32:
        if (TCG_TARGET_REG_BITS == 64 && s->addr_type == TCG_TYPE_I32) {
            /* Zero-extend the 32-bit guest address before use. */
            tcg_out_ext32u(s, TCG_REG_TMP, args[1]);
            tcg_out_op_rrm(s, opc, args[0], TCG_REG_TMP, args[2]);
        } else {
            tcg_out_op_rrm(s, opc, args[0], args[1], args[2]);
        }
        break;

    case INDEX_op_mb:
        tcg_out_op_v(s, opc);
        break;

    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
    case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb.  */
    case INDEX_op_goto_tb:  /* Always emitted via tcg_out_goto_tb.  */
    case INDEX_op_ext_i32_i64:  /* Always emitted via tcg_reg_alloc_op.  */
    case INDEX_op_extu_i32_i64:
    case INDEX_op_extrl_i64_i32:
    default:
        g_assert_not_reached();
    }
}
1066
1067static void tcg_out_st(TCGContext *s, TCGType type, TCGReg val, TCGReg base,
1068                       intptr_t offset)
1069{
1070    switch (type) {
1071    case TCG_TYPE_I32:
1072        tcg_out_ldst(s, INDEX_op_st_i32, val, base, offset);
1073        break;
1074#if TCG_TARGET_REG_BITS == 64
1075    case TCG_TYPE_I64:
1076        tcg_out_ldst(s, INDEX_op_st_i64, val, base, offset);
1077        break;
1078#endif
1079    default:
1080        g_assert_not_reached();
1081    }
1082}
1083
/*
 * Attempt to store the constant VAL directly to BASE + OFS.
 * TCI has no store-immediate form, so always report failure; the
 * caller must materialize the constant in a register and use
 * tcg_out_st instead.
 */
static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                               TCGReg base, intptr_t ofs)
{
    return false;
}
1089
1090/* Test if a constant matches the constraint. */
1091static bool tcg_target_const_match(int64_t val, int ct,
1092                                   TCGType type, TCGCond cond, int vece)
1093{
1094    return ct & TCG_CT_CONST;
1095}
1096
1097static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
1098{
1099    memset(p, 0, sizeof(*p) * count);
1100}
1101
1102static void tcg_target_init(TCGContext *s)
1103{
1104    /* The current code uses uint8_t for tcg operations. */
1105    tcg_debug_assert(tcg_op_defs_max <= UINT8_MAX);
1106
1107    /* Registers available for 32 bit operations. */
1108    tcg_target_available_regs[TCG_TYPE_I32] = BIT(TCG_TARGET_NB_REGS) - 1;
1109    /* Registers available for 64 bit operations. */
1110    tcg_target_available_regs[TCG_TYPE_I64] = BIT(TCG_TARGET_NB_REGS) - 1;
1111    /*
1112     * The interpreter "registers" are in the local stack frame and
1113     * cannot be clobbered by the called helper functions.  However,
1114     * the interpreter assumes a 128-bit return value and assigns to
1115     * the return value registers.
1116     */
1117    tcg_target_call_clobber_regs =
1118        MAKE_64BIT_MASK(TCG_REG_R0, 128 / TCG_TARGET_REG_BITS);
1119
1120    s->reserved_regs = 0;
1121    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP);
1122    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
1123
1124    /* The call arguments come first, followed by the temp storage. */
1125    tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE,
1126                  TCG_STATIC_FRAME_SIZE);
1127}
1128
/* Generate global QEMU prologue and epilogue code. */
static inline void tcg_target_qemu_prologue(TCGContext *s)
{
    /*
     * Nothing to emit: the TCI backend produces bytecode rather than
     * native code, so entry/exit handling lives in the interpreter
     * loop (presumably tci.c — confirm) instead of generated prologue
     * and epilogue code.
     */
}
1133
/* Hook invoked at the start of each translation block; TCI emits no
 * per-TB preamble, so this is intentionally empty. */
static void tcg_out_tb_start(TCGContext *s)
{
    /* nothing to do */
}
1138
/*
 * Report whether byte-swapped memory operations are supported.
 * Always true for TCI: the byte swap is performed by the interpreter
 * itself, so every MemOp size/endian combination is accepted.
 */
bool tcg_target_has_memory_bswap(MemOp memop)
{
    return true;
}
1143
/*
 * Out-of-line slow path for guest loads.  TCI never registers ldst
 * labels, so this must be unreachable.
 */
static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    g_assert_not_reached();
}
1148
/*
 * Out-of-line slow path for guest stores.  TCI never registers ldst
 * labels, so this must be unreachable.
 */
static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    g_assert_not_reached();
}
1153