xref: /openbmc/qemu/tcg/tci/tcg-target.c.inc (revision 5a5bb0a5a0b879c8f110c6a9bde9146181ef840c)
1/*
2 * Tiny Code Generator for QEMU
3 *
4 * Copyright (c) 2009, 2011 Stefan Weil
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24
25/* Used for function call generation. */
26#define TCG_TARGET_CALL_STACK_OFFSET    0
27#define TCG_TARGET_STACK_ALIGN          8
28#if TCG_TARGET_REG_BITS == 32
29# define TCG_TARGET_CALL_ARG_I32        TCG_CALL_ARG_EVEN
30# define TCG_TARGET_CALL_ARG_I64        TCG_CALL_ARG_EVEN
31# define TCG_TARGET_CALL_ARG_I128       TCG_CALL_ARG_EVEN
32#else
33# define TCG_TARGET_CALL_ARG_I32        TCG_CALL_ARG_NORMAL
34# define TCG_TARGET_CALL_ARG_I64        TCG_CALL_ARG_NORMAL
35# define TCG_TARGET_CALL_ARG_I128       TCG_CALL_ARG_NORMAL
36#endif
37#define TCG_TARGET_CALL_RET_I128        TCG_CALL_RET_NORMAL
38
/*
 * Return the operand-constraint set for @op.
 *
 * All TCI operands live in the interpreter's virtual registers, so
 * every constraint here is a plain "r"; there are no immediate or
 * fixed-register constraints in this backend.
 */
static TCGConstraintSetIndex
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
{
    switch (op) {
    case INDEX_op_goto_ptr:
        return C_O0_I1(r);

    /* Loads and single-operand ALU ops: one output, one input. */
    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap16_i64:
    case INDEX_op_bswap32_i32:
    case INDEX_op_bswap32_i64:
    case INDEX_op_bswap64_i64:
    case INDEX_op_extract_i32:
    case INDEX_op_extract_i64:
    case INDEX_op_sextract_i32:
    case INDEX_op_sextract_i64:
    case INDEX_op_ctpop_i32:
    case INDEX_op_ctpop_i64:
        return C_O1_I1(r, r);

    /* Stores: no output, value + base register. */
    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
        return C_O0_I2(r, r);

    case INDEX_op_setcond_i32:
    case INDEX_op_setcond_i64:
    case INDEX_op_deposit_i32:
    case INDEX_op_deposit_i64:
    case INDEX_op_ctz_i32:
    case INDEX_op_ctz_i64:
        return C_O1_I2(r, r, r);

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        return C_O0_I2(r, r);

    /* Double-word arithmetic: two outputs, two double-word inputs. */
    case INDEX_op_add2_i32:
    case INDEX_op_add2_i64:
    case INDEX_op_sub2_i32:
    case INDEX_op_sub2_i64:
        return C_O2_I4(r, r, r, r, r, r);

#if TCG_TARGET_REG_BITS == 32
    case INDEX_op_brcond2_i32:
        return C_O0_I4(r, r, r, r);
#endif

    case INDEX_op_mulu2_i32:
    case INDEX_op_mulu2_i64:
    case INDEX_op_muls2_i32:
    case INDEX_op_muls2_i64:
        return C_O2_I2(r, r, r, r);

    case INDEX_op_movcond_i32:
    case INDEX_op_movcond_i64:
    case INDEX_op_setcond2_i32:
        return C_O1_I4(r, r, r, r, r);

    /*
     * Guest memory access: on a 32-bit host a 64-bit value needs a
     * register pair, hence the extra output/input.
     */
    case INDEX_op_qemu_ld_i32:
        return C_O1_I1(r, r);
    case INDEX_op_qemu_ld_i64:
        return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O2_I1(r, r, r);
    case INDEX_op_qemu_st_i32:
        return C_O0_I2(r, r);
    case INDEX_op_qemu_st_i64:
        return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r) : C_O0_I3(r, r, r);

    default:
        return C_NotImplemented;
    }
}
129
/*
 * Register allocation order.  R0-R3 hold helper-call return values
 * (up to 128 bits worth), so prefer the higher registers and hand
 * out R3..R0 only when everything else is in use.
 */
static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R12,
    TCG_REG_R13,
    TCG_REG_R14,
    TCG_REG_R15,
    /* Either 2 or 4 of these are call clobbered, so use them last. */
    TCG_REG_R3,
    TCG_REG_R2,
    TCG_REG_R1,
    TCG_REG_R0,
};
149
/* No call arguments via registers.  All will be stored on the "stack". */
static const int tcg_target_call_iarg_regs[] = { };
152
/*
 * Map helper-call return-value slot @slot to a host register.
 * Return values occupy R0 upward, one register per slot, for up to
 * 128 bits of return data.
 */
static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
{
    tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
    tcg_debug_assert(slot >= 0 && slot < 128 / TCG_TARGET_REG_BITS);
    return TCG_REG_R0 + slot;
}
159
#ifdef CONFIG_DEBUG_TCG
/* Register names, for TCG dump output only. */
static const char *const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "r00",
    "r01",
    "r02",
    "r03",
    "r04",
    "r05",
    "r06",
    "r07",
    "r08",
    "r09",
    "r10",
    "r11",
    "r12",
    "r13",
    "r14",
    "r15",
};
#endif
180
/*
 * Resolve a relocation at @code_ptr to point at @value.
 *
 * The only relocation type in this backend is the 20-bit pc-relative
 * displacement stored in the top 20 bits of the instruction word
 * (bits 12..31).  The displacement is relative to the *next*
 * instruction, hence code_ptr + 1.  Returns false if the target is
 * out of range, so the caller can restart with a longer form.
 */
static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    intptr_t diff = value - (intptr_t)(code_ptr + 1);

    tcg_debug_assert(addend == 0);
    tcg_debug_assert(type == 20);

    if (diff == sextract32(diff, 0, type)) {
        /* Deposit the 20-bit displacement into bits [31:12]. */
        tcg_patch32(code_ptr, deposit32(*code_ptr, 32 - type, type, diff));
        return true;
    }
    return false;
}
195
196static void stack_bounds_check(TCGReg base, intptr_t offset)
197{
198    if (base == TCG_REG_CALL_STACK) {
199        tcg_debug_assert(offset >= 0);
200        tcg_debug_assert(offset < (TCG_STATIC_CALL_ARGS_SIZE +
201                                   TCG_STATIC_FRAME_SIZE));
202    }
203}
204
/*
 * Instruction encoders.  Every TCI instruction is one 32-bit word:
 * the opcode in bits [7:0], then 4-bit register fields and/or an
 * immediate in the remaining bits, as deposited below.
 */

/* Emit op with a label operand; bits [31:12] are filled by patch_reloc. */
static void tcg_out_op_l(TCGContext *s, TCGOpcode op, TCGLabel *l0)
{
    tcg_insn_unit insn = 0;

    tcg_out_reloc(s, s->code_ptr, 20, l0, 0);
    insn = deposit32(insn, 0, 8, op);
    tcg_out32(s, insn);
}

/* Emit op with a code pointer, encoded as a 20-bit pc-relative offset. */
static void tcg_out_op_p(TCGContext *s, TCGOpcode op, void *p0)
{
    tcg_insn_unit insn = 0;
    intptr_t diff;

    /* Special case for exit_tb: map null -> 0. */
    if (p0 == NULL) {
        diff = 0;
    } else {
        diff = p0 - (void *)(s->code_ptr + 1);
        tcg_debug_assert(diff != 0);
        if (diff != sextract32(diff, 0, 20)) {
            /* Target out of range: force the TB to be retranslated. */
            tcg_raise_tb_overflow(s);
        }
    }
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 12, 20, diff);
    tcg_out32(s, insn);
}

/* Emit op with a single register operand. */
static void tcg_out_op_r(TCGContext *s, TCGOpcode op, TCGReg r0)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    tcg_out32(s, insn);
}

/* Emit op with no operands. */
static void tcg_out_op_v(TCGContext *s, TCGOpcode op)
{
    tcg_out32(s, (uint8_t)op);
}
247
/* Emit op with a register and a 20-bit signed immediate. */
static void tcg_out_op_ri(TCGContext *s, TCGOpcode op, TCGReg r0, int32_t i1)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(i1 == sextract32(i1, 0, 20));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 20, i1);
    tcg_out32(s, insn);
}

/* Emit op with a register and a label; label offset patched later. */
static void tcg_out_op_rl(TCGContext *s, TCGOpcode op, TCGReg r0, TCGLabel *l1)
{
    tcg_insn_unit insn = 0;

    tcg_out_reloc(s, s->code_ptr, 20, l1, 0);
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    tcg_out32(s, insn);
}

/* Emit op with two register operands. */
static void tcg_out_op_rr(TCGContext *s, TCGOpcode op, TCGReg r0, TCGReg r1)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    tcg_out32(s, insn);
}

/* Emit op with two registers and a 16-bit MemOpIdx. */
static void tcg_out_op_rrm(TCGContext *s, TCGOpcode op,
                           TCGReg r0, TCGReg r1, TCGArg m2)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(m2 == extract32(m2, 0, 16));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 16, m2);
    tcg_out32(s, insn);
}

/* Emit op with three register operands. */
static void tcg_out_op_rrr(TCGContext *s, TCGOpcode op,
                           TCGReg r0, TCGReg r1, TCGReg r2)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    tcg_out32(s, insn);
}
303
/* Emit op with two registers and a 16-bit signed offset (load/store form). */
static void tcg_out_op_rrs(TCGContext *s, TCGOpcode op,
                           TCGReg r0, TCGReg r1, intptr_t i2)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(i2 == sextract32(i2, 0, 16));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 16, i2);
    tcg_out32(s, insn);
}

/* Emit op with two registers and two 6-bit fields (extract pos/len). */
static void tcg_out_op_rrbb(TCGContext *s, TCGOpcode op, TCGReg r0,
                            TCGReg r1, uint8_t b2, uint8_t b3)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(b2 == extract32(b2, 0, 6));
    tcg_debug_assert(b3 == extract32(b3, 0, 6));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 6, b2);
    insn = deposit32(insn, 22, 6, b3);
    tcg_out32(s, insn);
}

/* Emit op with three registers and a condition code (setcond form). */
static void tcg_out_op_rrrc(TCGContext *s, TCGOpcode op,
                            TCGReg r0, TCGReg r1, TCGReg r2, TCGCond c3)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 4, c3);
    tcg_out32(s, insn);
}

/* Emit op with three registers and two 6-bit fields (deposit pos/len). */
static void tcg_out_op_rrrbb(TCGContext *s, TCGOpcode op, TCGReg r0,
                             TCGReg r1, TCGReg r2, uint8_t b3, uint8_t b4)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(b3 == extract32(b3, 0, 6));
    tcg_debug_assert(b4 == extract32(b4, 0, 6));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 6, b3);
    insn = deposit32(insn, 26, 6, b4);
    tcg_out32(s, insn);
}

/* Emit op with four register operands. */
static void tcg_out_op_rrrr(TCGContext *s, TCGOpcode op,
                            TCGReg r0, TCGReg r1, TCGReg r2, TCGReg r3)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 4, r3);
    tcg_out32(s, insn);
}

/* Emit op with five registers and a condition code (movcond/setcond2). */
static void tcg_out_op_rrrrrc(TCGContext *s, TCGOpcode op,
                              TCGReg r0, TCGReg r1, TCGReg r2,
                              TCGReg r3, TCGReg r4, TCGCond c5)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 4, r3);
    insn = deposit32(insn, 24, 4, r4);
    insn = deposit32(insn, 28, 4, c5);
    tcg_out32(s, insn);
}

/* Emit op with six register operands (add2/sub2). */
static void tcg_out_op_rrrrrr(TCGContext *s, TCGOpcode op,
                              TCGReg r0, TCGReg r1, TCGReg r2,
                              TCGReg r3, TCGReg r4, TCGReg r5)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 4, r3);
    insn = deposit32(insn, 24, 4, r4);
    insn = deposit32(insn, 28, 4, r5);
    tcg_out32(s, insn);
}
405
406static void tcg_out_ldst(TCGContext *s, TCGOpcode op, TCGReg val,
407                         TCGReg base, intptr_t offset)
408{
409    stack_bounds_check(base, offset);
410    if (offset != sextract32(offset, 0, 16)) {
411        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, offset);
412        tcg_out_op_rrr(s, INDEX_op_add, TCG_REG_TMP, TCG_REG_TMP, base);
413        base = TCG_REG_TMP;
414        offset = 0;
415    }
416    tcg_out_op_rrs(s, op, val, base, offset);
417}
418
419static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg val, TCGReg base,
420                       intptr_t offset)
421{
422    switch (type) {
423    case TCG_TYPE_I32:
424        tcg_out_ldst(s, INDEX_op_ld_i32, val, base, offset);
425        break;
426#if TCG_TARGET_REG_BITS == 64
427    case TCG_TYPE_I64:
428        tcg_out_ldst(s, INDEX_op_ld_i64, val, base, offset);
429        break;
430#endif
431    default:
432        g_assert_not_reached();
433    }
434}
435
/* Register-to-register move; always representable, so always true. */
static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    tcg_out_op_rr(s, INDEX_op_mov, ret, arg);
    return true;
}
441
/*
 * Load constant @arg into @ret.  Values that fit in the 20-bit
 * immediate use tci_movi; anything larger goes through the constant
 * pool via tci_movl, with the pool displacement patched into
 * bits [31:12] of the instruction.
 */
static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg)
{
    switch (type) {
    case TCG_TYPE_I32:
#if TCG_TARGET_REG_BITS == 64
        /* Normalize: a 32-bit value is stored sign-extended. */
        arg = (int32_t)arg;
        /* fall through */
    case TCG_TYPE_I64:
#endif
        break;
    default:
        g_assert_not_reached();
    }

    if (arg == sextract32(arg, 0, 20)) {
        tcg_out_op_ri(s, INDEX_op_tci_movi, ret, arg);
    } else {
        tcg_insn_unit insn = 0;

        new_pool_label(s, arg, 20, s->code_ptr, 0);
        insn = deposit32(insn, 0, 8, INDEX_op_tci_movl);
        insn = deposit32(insn, 8, 4, ret);
        tcg_out32(s, insn);
    }
}
468
469static void tcg_out_extract(TCGContext *s, TCGType type, TCGReg rd,
470                            TCGReg rs, unsigned pos, unsigned len)
471{
472    TCGOpcode opc = type == TCG_TYPE_I32 ?
473                    INDEX_op_extract_i32 :
474                    INDEX_op_extract_i64;
475    tcg_out_op_rrbb(s, opc, rd, rs, pos, len);
476}
477
478static void tcg_out_sextract(TCGContext *s, TCGType type, TCGReg rd,
479                             TCGReg rs, unsigned pos, unsigned len)
480{
481    TCGOpcode opc = type == TCG_TYPE_I32 ?
482                    INDEX_op_sextract_i32 :
483                    INDEX_op_sextract_i64;
484    tcg_out_op_rrbb(s, opc, rd, rs, pos, len);
485}
486
/*
 * Sign/zero extension helpers, all implemented via (s)extract
 * with pos 0 and the appropriate length.
 */

/* Sign-extend the low 8 bits of rs into rd. */
static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
{
    tcg_out_sextract(s, type, rd, rs, 0, 8);
}

/* Zero-extend the low 8 bits of rs into rd. */
static void tcg_out_ext8u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_extract(s, TCG_TYPE_REG, rd, rs, 0, 8);
}

/* Sign-extend the low 16 bits of rs into rd. */
static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
{
    tcg_out_sextract(s, type, rd, rs, 0, 16);
}

/* Zero-extend the low 16 bits of rs into rd. */
static void tcg_out_ext16u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_extract(s, TCG_TYPE_REG, rd, rs, 0, 16);
}

/* Sign-extend the low 32 bits of rs into rd (64-bit hosts only). */
static void tcg_out_ext32s(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
    tcg_out_sextract(s, TCG_TYPE_I64, rd, rs, 0, 32);
}

/* Zero-extend the low 32 bits of rs into rd (64-bit hosts only). */
static void tcg_out_ext32u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
    tcg_out_extract(s, TCG_TYPE_I64, rd, rs, 0, 32);
}

/* i32 -> i64 sign extension. */
static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_ext32s(s, rd, rs);
}

/* i32 -> i64 zero extension. */
static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_ext32u(s, rd, rs);
}

/* i64 -> i32 truncation: a plain move suffices for the interpreter. */
static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
    tcg_out_mov(s, TCG_TYPE_I32, rd, rs);
}
534
/* No exchange instruction; let the register allocator spill instead. */
static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
{
    return false;
}

static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                             tcg_target_long imm)
{
    /* This function is only used for passing structs by reference. */
    g_assert_not_reached();
}
546
/*
 * Emit a helper call.  The function pointer and its libffi cif are
 * stored as a pair in the constant pool; the 4-bit "which" field
 * encodes log2(return size in words): 0 = void, 1 = 32-bit,
 * 2 = 64-bit, 3 = 128-bit.
 */
static void tcg_out_call(TCGContext *s, const tcg_insn_unit *func,
                         const TCGHelperInfo *info)
{
    ffi_cif *cif = info->cif;
    tcg_insn_unit insn = 0;
    uint8_t which;

    if (cif->rtype == &ffi_type_void) {
        which = 0;
    } else {
        tcg_debug_assert(cif->rtype->size == 4 ||
                         cif->rtype->size == 8 ||
                         cif->rtype->size == 16);
        which = ctz32(cif->rtype->size) - 1;
    }
    new_pool_l2(s, 20, s->code_ptr, 0, (uintptr_t)func, (uintptr_t)cif);
    insn = deposit32(insn, 0, 8, INDEX_op_call);
    insn = deposit32(insn, 8, 4, which);
    tcg_out32(s, insn);
}
567
568#if TCG_TARGET_REG_BITS == 64
569# define CASE_32_64(x) \
570        case glue(glue(INDEX_op_, x), _i64): \
571        case glue(glue(INDEX_op_, x), _i32):
572# define CASE_64(x) \
573        case glue(glue(INDEX_op_, x), _i64):
574#else
575# define CASE_32_64(x) \
576        case glue(glue(INDEX_op_, x), _i32):
577# define CASE_64(x)
578#endif
579
/* Emit exit_tb; @arg is the return value for the execution loop. */
static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg)
{
    tcg_out_op_p(s, INDEX_op_exit_tb, (void *)arg);
}

/* Emit a chained-TB jump.  TCI only supports the indirect method. */
static void tcg_out_goto_tb(TCGContext *s, int which)
{
    /* indirect jump method. */
    tcg_out_op_p(s, INDEX_op_goto_tb, (void *)get_jmp_target_addr(s, which));
    set_jmp_reset_offset(s, which);
}

void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
                              uintptr_t jmp_rx, uintptr_t jmp_rw)
{
    /* Always indirect, nothing to do */
}
597
/*
 * Binary ALU emitters.  Each tgen_* maps directly onto the matching
 * TCI opcode; the outop_* descriptors expose them to the register
 * allocator with plain register constraints.
 */

static void tgen_add(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_add, a0, a1, a2);
}

static const TCGOutOpBinary outop_add = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_add,
};

static void tgen_and(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_and, a0, a1, a2);
}

static const TCGOutOpBinary outop_and = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_and,
};

static void tgen_andc(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_andc, a0, a1, a2);
}

static const TCGOutOpBinary outop_andc = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_andc,
};
630
631static void tgen_clz(TCGContext *s, TCGType type,
632                      TCGReg a0, TCGReg a1, TCGReg a2)
633{
634    TCGOpcode opc = (type == TCG_TYPE_I32
635                     ? INDEX_op_tci_clz32
636                     : INDEX_op_clz);
637    tcg_out_op_rrr(s, opc, a0, a1, a2);
638}
639
640static const TCGOutOpBinary outop_clz = {
641    .base.static_constraint = C_O1_I2(r, r, r),
642    .out_rrr = tgen_clz,
643};
644
645static void tgen_divs(TCGContext *s, TCGType type,
646                      TCGReg a0, TCGReg a1, TCGReg a2)
647{
648    TCGOpcode opc = (type == TCG_TYPE_I32
649                     ? INDEX_op_tci_divs32
650                     : INDEX_op_divs);
651    tcg_out_op_rrr(s, opc, a0, a1, a2);
652}
653
654static const TCGOutOpBinary outop_divs = {
655    .base.static_constraint = C_O1_I2(r, r, r),
656    .out_rrr = tgen_divs,
657};
658
659static const TCGOutOpDivRem outop_divs2 = {
660    .base.static_constraint = C_NotImplemented,
661};
662
663static void tgen_divu(TCGContext *s, TCGType type,
664                      TCGReg a0, TCGReg a1, TCGReg a2)
665{
666    TCGOpcode opc = (type == TCG_TYPE_I32
667                     ? INDEX_op_tci_divu32
668                     : INDEX_op_divu);
669    tcg_out_op_rrr(s, opc, a0, a1, a2);
670}
671
672static const TCGOutOpBinary outop_divu = {
673    .base.static_constraint = C_O1_I2(r, r, r),
674    .out_rrr = tgen_divu,
675};
676
677static const TCGOutOpDivRem outop_divu2 = {
678    .base.static_constraint = C_NotImplemented,
679};
680
static void tgen_eqv(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_eqv, a0, a1, a2);
}

static const TCGOutOpBinary outop_eqv = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_eqv,
};

static void tgen_mul(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_mul, a0, a1, a2);
}

static const TCGOutOpBinary outop_mul = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_mul,
};

/* High-part multiply is not provided; expansion uses mul[su]2 instead. */
static const TCGOutOpBinary outop_mulsh = {
    .base.static_constraint = C_NotImplemented,
};

static const TCGOutOpBinary outop_muluh = {
    .base.static_constraint = C_NotImplemented,
};
710
static void tgen_nand(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_nand, a0, a1, a2);
}

static const TCGOutOpBinary outop_nand = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_nand,
};

static void tgen_nor(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_nor, a0, a1, a2);
}

static const TCGOutOpBinary outop_nor = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_nor,
};

static void tgen_or(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_or, a0, a1, a2);
}

static const TCGOutOpBinary outop_or = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_or,
};

static void tgen_orc(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_orc, a0, a1, a2);
}

static const TCGOutOpBinary outop_orc = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_orc,
};
753};
754
755static void tgen_rems(TCGContext *s, TCGType type,
756                      TCGReg a0, TCGReg a1, TCGReg a2)
757{
758    TCGOpcode opc = (type == TCG_TYPE_I32
759                     ? INDEX_op_tci_rems32
760                     : INDEX_op_rems);
761    tcg_out_op_rrr(s, opc, a0, a1, a2);
762}
763
764static const TCGOutOpBinary outop_rems = {
765    .base.static_constraint = C_O1_I2(r, r, r),
766    .out_rrr = tgen_rems,
767};
768
769static void tgen_remu(TCGContext *s, TCGType type,
770                      TCGReg a0, TCGReg a1, TCGReg a2)
771{
772    TCGOpcode opc = (type == TCG_TYPE_I32
773                     ? INDEX_op_tci_remu32
774                     : INDEX_op_remu);
775    tcg_out_op_rrr(s, opc, a0, a1, a2);
776}
777
778static const TCGOutOpBinary outop_remu = {
779    .base.static_constraint = C_O1_I2(r, r, r),
780    .out_rrr = tgen_remu,
781};
782
783static void tgen_rotl(TCGContext *s, TCGType type,
784                     TCGReg a0, TCGReg a1, TCGReg a2)
785{
786    TCGOpcode opc = (type == TCG_TYPE_I32
787                     ? INDEX_op_tci_rotl32
788                     : INDEX_op_rotl);
789    tcg_out_op_rrr(s, opc, a0, a1, a2);
790}
791
792static const TCGOutOpBinary outop_rotl = {
793    .base.static_constraint = C_O1_I2(r, r, r),
794    .out_rrr = tgen_rotl,
795};
796
797static void tgen_rotr(TCGContext *s, TCGType type,
798                     TCGReg a0, TCGReg a1, TCGReg a2)
799{
800    TCGOpcode opc = (type == TCG_TYPE_I32
801                     ? INDEX_op_tci_rotr32
802                     : INDEX_op_rotr);
803    tcg_out_op_rrr(s, opc, a0, a1, a2);
804}
805
806static const TCGOutOpBinary outop_rotr = {
807    .base.static_constraint = C_O1_I2(r, r, r),
808    .out_rrr = tgen_rotr,
809};
810
811static void tgen_sar(TCGContext *s, TCGType type,
812                     TCGReg a0, TCGReg a1, TCGReg a2)
813{
814    if (type < TCG_TYPE_REG) {
815        tcg_out_ext32s(s, TCG_REG_TMP, a1);
816        a1 = TCG_REG_TMP;
817    }
818    tcg_out_op_rrr(s, INDEX_op_sar, a0, a1, a2);
819}
820
821static const TCGOutOpBinary outop_sar = {
822    .base.static_constraint = C_O1_I2(r, r, r),
823    .out_rrr = tgen_sar,
824};
825
826static void tgen_shl(TCGContext *s, TCGType type,
827                     TCGReg a0, TCGReg a1, TCGReg a2)
828{
829    tcg_out_op_rrr(s, INDEX_op_shl, a0, a1, a2);
830}
831
832static const TCGOutOpBinary outop_shl = {
833    .base.static_constraint = C_O1_I2(r, r, r),
834    .out_rrr = tgen_shl,
835};
836
837static void tgen_shr(TCGContext *s, TCGType type,
838                     TCGReg a0, TCGReg a1, TCGReg a2)
839{
840    if (type < TCG_TYPE_REG) {
841        tcg_out_ext32u(s, TCG_REG_TMP, a1);
842        a1 = TCG_REG_TMP;
843    }
844    tcg_out_op_rrr(s, INDEX_op_shr, a0, a1, a2);
845}
846
847static const TCGOutOpBinary outop_shr = {
848    .base.static_constraint = C_O1_I2(r, r, r),
849    .out_rrr = tgen_shr,
850};
851
static void tgen_sub(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_sub, a0, a1, a2);
}

static const TCGOutOpSubtract outop_sub = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_sub,
};

static void tgen_xor(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_xor, a0, a1, a2);
}

static const TCGOutOpBinary outop_xor = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_xor,
};
873
/* Unary negation. */
static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
    tcg_out_op_rr(s, INDEX_op_neg, a0, a1);
}

static const TCGOutOpUnary outop_neg = {
    .base.static_constraint = C_O1_I1(r, r),
    .out_rr = tgen_neg,
};

/* Bitwise complement. */
static void tgen_not(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
    tcg_out_op_rr(s, INDEX_op_not, a0, a1);
}

static const TCGOutOpUnary outop_not = {
    .base.static_constraint = C_O1_I1(r, r),
    .out_rr = tgen_not,
};
893
894
/*
 * Main opcode dispatcher: encode one TCG op into its TCI instruction
 * form.  Ops handled by the outop_* tables or the dedicated emitters
 * (call/exit_tb/goto_tb/extensions) never reach this function.
 */
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS])
{
    int width;

    switch (opc) {
    case INDEX_op_goto_ptr:
        tcg_out_op_r(s, opc, args[0]);
        break;

    case INDEX_op_br:
        tcg_out_op_l(s, opc, arg_label(args[0]));
        break;

    CASE_32_64(setcond)
        tcg_out_op_rrrc(s, opc, args[0], args[1], args[2], args[3]);
        break;

    CASE_32_64(movcond)
    case INDEX_op_setcond2_i32:
        tcg_out_op_rrrrrc(s, opc, args[0], args[1], args[2],
                          args[3], args[4], args[5]);
        break;

    CASE_32_64(ld8u)
    CASE_32_64(ld8s)
    CASE_32_64(ld16u)
    CASE_32_64(ld16s)
    case INDEX_op_ld_i32:
    CASE_64(ld32u)
    CASE_64(ld32s)
    CASE_64(ld)
    CASE_32_64(st8)
    CASE_32_64(st16)
    case INDEX_op_st_i32:
    CASE_64(st32)
    CASE_64(st)
        tcg_out_ldst(s, opc, args[0], args[1], args[2]);
        break;

    CASE_32_64(ctz)      /* Optional (TCG_TARGET_HAS_ctz_*). */
        tcg_out_op_rrr(s, opc, args[0], args[1], args[2]);
        break;

    CASE_32_64(deposit)
        tcg_out_op_rrrbb(s, opc, args[0], args[1], args[2], args[3], args[4]);
        break;

    CASE_32_64(extract)  /* Optional (TCG_TARGET_HAS_extract_*). */
    CASE_32_64(sextract) /* Optional (TCG_TARGET_HAS_sextract_*). */
        tcg_out_op_rrbb(s, opc, args[0], args[1], args[2], args[3]);
        break;

    CASE_32_64(brcond)
        /* No direct brcond insn: materialize the comparison, then branch. */
        tcg_out_op_rrrc(s, (opc == INDEX_op_brcond_i32
                            ? INDEX_op_setcond_i32 : INDEX_op_setcond_i64),
                        TCG_REG_TMP, args[0], args[1], args[2]);
        tcg_out_op_rl(s, opc, TCG_REG_TMP, arg_label(args[3]));
        break;

    CASE_32_64(ctpop)    /* Optional (TCG_TARGET_HAS_ctpop_*). */
    case INDEX_op_bswap32_i32: /* Optional (TCG_TARGET_HAS_bswap32_i32). */
    case INDEX_op_bswap64_i64: /* Optional (TCG_TARGET_HAS_bswap64_i64). */
        tcg_out_op_rr(s, opc, args[0], args[1]);
        break;

    case INDEX_op_bswap16_i32: /* Optional (TCG_TARGET_HAS_bswap16_i32). */
    case INDEX_op_bswap16_i64: /* Optional (TCG_TARGET_HAS_bswap16_i64). */
        width = 16;
        goto do_bswap;
    case INDEX_op_bswap32_i64: /* Optional (TCG_TARGET_HAS_bswap32_i64). */
        width = 32;
    do_bswap:
        /* The base tci bswaps zero-extend, and ignore high bits. */
        tcg_out_op_rr(s, opc, args[0], args[1]);
        if (args[2] & TCG_BSWAP_OS) {
            /* Output sign-extension was requested; fix it up. */
            tcg_out_sextract(s, TCG_TYPE_REG, args[0], args[0], 0, width);
        }
        break;

    CASE_32_64(add2)
    CASE_32_64(sub2)
        tcg_out_op_rrrrrr(s, opc, args[0], args[1], args[2],
                          args[3], args[4], args[5]);
        break;

#if TCG_TARGET_REG_BITS == 32
    case INDEX_op_brcond2_i32:
        /* As for brcond: setcond2 into TMP, then branch on TMP. */
        tcg_out_op_rrrrrc(s, INDEX_op_setcond2_i32, TCG_REG_TMP,
                          args[0], args[1], args[2], args[3], args[4]);
        tcg_out_op_rl(s, INDEX_op_brcond_i32, TCG_REG_TMP, arg_label(args[5]));
        break;
#endif

    CASE_32_64(mulu2)
    CASE_32_64(muls2)
        tcg_out_op_rrrr(s, opc, args[0], args[1], args[2], args[3]);
        break;

    case INDEX_op_qemu_ld_i64:
    case INDEX_op_qemu_st_i64:
        if (TCG_TARGET_REG_BITS == 32) {
            /* 64-bit value is a register pair; MemOpIdx goes via TMP. */
            tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP, args[3]);
            tcg_out_op_rrrr(s, opc, args[0], args[1], args[2], TCG_REG_TMP);
            break;
        }
        /* fall through */
    case INDEX_op_qemu_ld_i32:
    case INDEX_op_qemu_st_i32:
        if (TCG_TARGET_REG_BITS == 64 && s->addr_type == TCG_TYPE_I32) {
            /* Zero-extend a 32-bit guest address on a 64-bit host. */
            tcg_out_ext32u(s, TCG_REG_TMP, args[1]);
            tcg_out_op_rrm(s, opc, args[0], TCG_REG_TMP, args[2]);
        } else {
            tcg_out_op_rrm(s, opc, args[0], args[1], args[2]);
        }
        break;

    case INDEX_op_mb:
        tcg_out_op_v(s, opc);
        break;

    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
    case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb.  */
    case INDEX_op_goto_tb:  /* Always emitted via tcg_out_goto_tb.  */
    case INDEX_op_ext_i32_i64:  /* Always emitted via tcg_reg_alloc_op.  */
    case INDEX_op_extu_i32_i64:
    case INDEX_op_extrl_i64_i32:
    default:
        g_assert_not_reached();
    }
}
1027
1028static void tcg_out_st(TCGContext *s, TCGType type, TCGReg val, TCGReg base,
1029                       intptr_t offset)
1030{
1031    switch (type) {
1032    case TCG_TYPE_I32:
1033        tcg_out_ldst(s, INDEX_op_st_i32, val, base, offset);
1034        break;
1035#if TCG_TARGET_REG_BITS == 64
1036    case TCG_TYPE_I64:
1037        tcg_out_ldst(s, INDEX_op_st_i64, val, base, offset);
1038        break;
1039#endif
1040    default:
1041        g_assert_not_reached();
1042    }
1043}
1044
/* No store-immediate form; the caller will load into a register first. */
static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                               TCGReg base, intptr_t ofs)
{
    return false;
}

/* Test if a constant matches the constraint. */
static bool tcg_target_const_match(int64_t val, int ct,
                                   TCGType type, TCGCond cond, int vece)
{
    /* Only the generic "any constant" constraint exists here. */
    return ct & TCG_CT_CONST;
}

/* Pad the code buffer with zero words (opcode 0 is never executed). */
static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
    memset(p, 0, sizeof(*p) * count);
}
1062
/* One-time initialization of the TCI backend's register state. */
static void tcg_target_init(TCGContext *s)
{
    /* The current code uses uint8_t for tcg operations. */
    tcg_debug_assert(tcg_op_defs_max <= UINT8_MAX);

    /* Registers available for 32 bit operations. */
    tcg_target_available_regs[TCG_TYPE_I32] = BIT(TCG_TARGET_NB_REGS) - 1;
    /* Registers available for 64 bit operations. */
    tcg_target_available_regs[TCG_TYPE_I64] = BIT(TCG_TARGET_NB_REGS) - 1;
    /*
     * The interpreter "registers" are in the local stack frame and
     * cannot be clobbered by the called helper functions.  However,
     * the interpreter assumes a 128-bit return value and assigns to
     * the return value registers.
     */
    tcg_target_call_clobber_regs =
        MAKE_64BIT_MASK(TCG_REG_R0, 128 / TCG_TARGET_REG_BITS);

    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);

    /* The call arguments come first, followed by the temp storage. */
    tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE,
                  TCG_STATIC_FRAME_SIZE);
}
1089
/* Generate global QEMU prologue and epilogue code. */
static inline void tcg_target_qemu_prologue(TCGContext *s)
{
    /* The interpreter has no native prologue; entry is via tcg_qemu_tb_exec. */
}

static void tcg_out_tb_start(TCGContext *s)
{
    /* nothing to do */
}

/* The interpreter can byte-swap any access itself. */
bool tcg_target_has_memory_bswap(MemOp memop)
{
    return true;
}

/* No out-of-line slow paths: TCI handles guest memory access inline. */
static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    g_assert_not_reached();
}

static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    g_assert_not_reached();
}
1114