xref: /openbmc/qemu/tcg/tci/tcg-target.c.inc (revision 005a87e148dc20f59835b328336240759703d63d)
1/*
2 * Tiny Code Generator for QEMU
3 *
4 * Copyright (c) 2009, 2011 Stefan Weil
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24
/* Used for function call generation. */
#define TCG_TARGET_CALL_STACK_OFFSET    0
#define TCG_TARGET_STACK_ALIGN          8
#if TCG_TARGET_REG_BITS == 32
/* 32-bit host: arguments are aligned to even "stack" slots. */
# define TCG_TARGET_CALL_ARG_I32        TCG_CALL_ARG_EVEN
# define TCG_TARGET_CALL_ARG_I64        TCG_CALL_ARG_EVEN
# define TCG_TARGET_CALL_ARG_I128       TCG_CALL_ARG_EVEN
#else
/* 64-bit host: no special argument slot alignment. */
# define TCG_TARGET_CALL_ARG_I32        TCG_CALL_ARG_NORMAL
# define TCG_TARGET_CALL_ARG_I64        TCG_CALL_ARG_NORMAL
# define TCG_TARGET_CALL_ARG_I128       TCG_CALL_ARG_NORMAL
#endif
#define TCG_TARGET_CALL_RET_I128        TCG_CALL_RET_NORMAL
38
/*
 * Return the operand constraint set for @op.  The interpreter places no
 * restrictions on which registers may be used, so every operand is a
 * plain "r"; the cases differ only in the number of inputs and outputs.
 */
static TCGConstraintSetIndex
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
{
    switch (op) {
    case INDEX_op_goto_ptr:
        return C_O0_I1(r);

    /* Loads and unary register-to-register operations. */
    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap16_i64:
    case INDEX_op_bswap32_i32:
    case INDEX_op_bswap32_i64:
    case INDEX_op_bswap64_i64:
    case INDEX_op_extract_i32:
    case INDEX_op_extract_i64:
    case INDEX_op_sextract_i32:
    case INDEX_op_sextract_i64:
    case INDEX_op_ctpop_i32:
    case INDEX_op_ctpop_i64:
        return C_O1_I1(r, r);

    /* Stores: value plus base address, no outputs. */
    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
        return C_O0_I2(r, r);

    case INDEX_op_setcond_i32:
    case INDEX_op_setcond_i64:
    case INDEX_op_deposit_i32:
    case INDEX_op_deposit_i64:
    case INDEX_op_clz_i32:
    case INDEX_op_clz_i64:
    case INDEX_op_ctz_i32:
    case INDEX_op_ctz_i64:
        return C_O1_I2(r, r, r);

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        return C_O0_I2(r, r);

    /* Double-word arithmetic: two outputs, four inputs. */
    case INDEX_op_add2_i32:
    case INDEX_op_add2_i64:
    case INDEX_op_sub2_i32:
    case INDEX_op_sub2_i64:
        return C_O2_I4(r, r, r, r, r, r);

#if TCG_TARGET_REG_BITS == 32
    case INDEX_op_brcond2_i32:
        return C_O0_I4(r, r, r, r);
#endif

    case INDEX_op_mulu2_i32:
    case INDEX_op_mulu2_i64:
    case INDEX_op_muls2_i32:
    case INDEX_op_muls2_i64:
        return C_O2_I2(r, r, r, r);

    case INDEX_op_movcond_i32:
    case INDEX_op_movcond_i64:
    case INDEX_op_setcond2_i32:
        return C_O1_I4(r, r, r, r, r);

    /* Guest memory access: 64-bit values need two regs on 32-bit hosts. */
    case INDEX_op_qemu_ld_i32:
        return C_O1_I1(r, r);
    case INDEX_op_qemu_ld_i64:
        return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O2_I1(r, r, r);
    case INDEX_op_qemu_st_i32:
        return C_O0_I2(r, r);
    case INDEX_op_qemu_st_i64:
        return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r) : C_O0_I3(r, r, r);

    default:
        return C_NotImplemented;
    }
}
131
/*
 * Register allocation preference order.  R0..R3 may hold call return
 * values (see tcg_target_call_clobber_regs in tcg_target_init), so
 * prefer the higher registers first.
 */
static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R12,
    TCG_REG_R13,
    TCG_REG_R14,
    TCG_REG_R15,
    /* Either 2 or 4 of these are call clobbered, so use them last. */
    TCG_REG_R3,
    TCG_REG_R2,
    TCG_REG_R1,
    TCG_REG_R0,
};
151
/* No call arguments via registers.  All will be stored on the "stack". */
static const int tcg_target_call_iarg_regs[] = { };

/*
 * Return the register holding slot @slot of a call return value.
 * Return values occupy consecutive registers starting at R0, up to
 * 128 bits total.
 */
static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
{
    tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
    tcg_debug_assert(slot >= 0 && slot < 128 / TCG_TARGET_REG_BITS);
    return TCG_REG_R0 + slot;
}
161
#ifdef CONFIG_DEBUG_TCG
/* Register names for TCG debug dumps. */
static const char *const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "r00",
    "r01",
    "r02",
    "r03",
    "r04",
    "r05",
    "r06",
    "r07",
    "r08",
    "r09",
    "r10",
    "r11",
    "r12",
    "r13",
    "r14",
    "r15",
};
#endif
182
/*
 * Patch a pc-relative displacement into the instruction at @code_ptr.
 * @value is the target address; the displacement is measured from the
 * end of the 32-bit instruction word.  Only 20-bit relocations exist
 * for this target.  Returns false if the displacement does not fit.
 */
static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    intptr_t diff = value - (intptr_t)(code_ptr + 1);

    tcg_debug_assert(addend == 0);
    tcg_debug_assert(type == 20);

    if (diff == sextract32(diff, 0, type)) {
        /* The displacement lives in the top 20 bits of the word. */
        tcg_patch32(code_ptr, deposit32(*code_ptr, 32 - type, type, diff));
        return true;
    }
    return false;
}
197
198static void stack_bounds_check(TCGReg base, intptr_t offset)
199{
200    if (base == TCG_REG_CALL_STACK) {
201        tcg_debug_assert(offset >= 0);
202        tcg_debug_assert(offset < (TCG_STATIC_CALL_ARGS_SIZE +
203                                   TCG_STATIC_FRAME_SIZE));
204    }
205}
206
/* Emit opcode + label operand; the 20-bit displacement is relocated. */
static void tcg_out_op_l(TCGContext *s, TCGOpcode op, TCGLabel *l0)
{
    tcg_insn_unit insn = 0;

    tcg_out_reloc(s, s->code_ptr, 20, l0, 0);
    insn = deposit32(insn, 0, 8, op);
    tcg_out32(s, insn);
}
215
/*
 * Emit opcode + code pointer, encoded as a 20-bit displacement
 * relative to the end of the instruction word.
 */
static void tcg_out_op_p(TCGContext *s, TCGOpcode op, void *p0)
{
    tcg_insn_unit insn = 0;
    intptr_t diff;

    /* Special case for exit_tb: map null -> 0. */
    if (p0 == NULL) {
        diff = 0;
    } else {
        diff = p0 - (void *)(s->code_ptr + 1);
        tcg_debug_assert(diff != 0);
        if (diff != sextract32(diff, 0, 20)) {
            /* Out of displacement range; restart with a smaller TB. */
            tcg_raise_tb_overflow(s);
        }
    }
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 12, 20, diff);
    tcg_out32(s, insn);
}
235
/* Emit opcode + one register operand. */
static void tcg_out_op_r(TCGContext *s, TCGOpcode op, TCGReg r0)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    tcg_out32(s, insn);
}
244
/* Emit opcode with no operands. */
static void tcg_out_op_v(TCGContext *s, TCGOpcode op)
{
    tcg_out32(s, (uint8_t)op);
}
249
/* Emit opcode + register + 20-bit signed immediate. */
static void tcg_out_op_ri(TCGContext *s, TCGOpcode op, TCGReg r0, int32_t i1)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(i1 == sextract32(i1, 0, 20));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 20, i1);
    tcg_out32(s, insn);
}
260
/* Emit opcode + register + label; the 20-bit displacement is relocated. */
static void tcg_out_op_rl(TCGContext *s, TCGOpcode op, TCGReg r0, TCGLabel *l1)
{
    tcg_insn_unit insn = 0;

    tcg_out_reloc(s, s->code_ptr, 20, l1, 0);
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    tcg_out32(s, insn);
}
270
/* Emit opcode + two register operands. */
static void tcg_out_op_rr(TCGContext *s, TCGOpcode op, TCGReg r0, TCGReg r1)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    tcg_out32(s, insn);
}
280
/*
 * Emit opcode + two registers + 16-bit unsigned immediate
 * (used for the qemu_ld/st memory-operation index).
 */
static void tcg_out_op_rrm(TCGContext *s, TCGOpcode op,
                           TCGReg r0, TCGReg r1, TCGArg m2)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(m2 == extract32(m2, 0, 16));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 16, m2);
    tcg_out32(s, insn);
}
293
/* Emit opcode + three register operands. */
static void tcg_out_op_rrr(TCGContext *s, TCGOpcode op,
                           TCGReg r0, TCGReg r1, TCGReg r2)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    tcg_out32(s, insn);
}
305
/* Emit opcode + two registers + 16-bit signed displacement. */
static void tcg_out_op_rrs(TCGContext *s, TCGOpcode op,
                           TCGReg r0, TCGReg r1, intptr_t i2)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(i2 == sextract32(i2, 0, 16));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 16, i2);
    tcg_out32(s, insn);
}
318
/*
 * Emit opcode + two registers + two 6-bit fields
 * (position/length for extract-type operations).
 */
static void tcg_out_op_rrbb(TCGContext *s, TCGOpcode op, TCGReg r0,
                            TCGReg r1, uint8_t b2, uint8_t b3)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(b2 == extract32(b2, 0, 6));
    tcg_debug_assert(b3 == extract32(b3, 0, 6));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 6, b2);
    insn = deposit32(insn, 22, 6, b3);
    tcg_out32(s, insn);
}
333
/* Emit opcode + three registers + comparison condition. */
static void tcg_out_op_rrrc(TCGContext *s, TCGOpcode op,
                            TCGReg r0, TCGReg r1, TCGReg r2, TCGCond c3)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 4, c3);
    tcg_out32(s, insn);
}
346
/*
 * Emit opcode + three registers + two 6-bit fields
 * (position/length for deposit).
 */
static void tcg_out_op_rrrbb(TCGContext *s, TCGOpcode op, TCGReg r0,
                             TCGReg r1, TCGReg r2, uint8_t b3, uint8_t b4)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(b3 == extract32(b3, 0, 6));
    tcg_debug_assert(b4 == extract32(b4, 0, 6));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 6, b3);
    insn = deposit32(insn, 26, 6, b4);
    tcg_out32(s, insn);
}
362
/* Emit opcode + four register operands. */
static void tcg_out_op_rrrr(TCGContext *s, TCGOpcode op,
                            TCGReg r0, TCGReg r1, TCGReg r2, TCGReg r3)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 4, r3);
    tcg_out32(s, insn);
}
375
/* Emit opcode + five registers + comparison condition. */
static void tcg_out_op_rrrrrc(TCGContext *s, TCGOpcode op,
                              TCGReg r0, TCGReg r1, TCGReg r2,
                              TCGReg r3, TCGReg r4, TCGCond c5)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 4, r3);
    insn = deposit32(insn, 24, 4, r4);
    insn = deposit32(insn, 28, 4, c5);
    tcg_out32(s, insn);
}
391
/* Emit opcode + six register operands. */
static void tcg_out_op_rrrrrr(TCGContext *s, TCGOpcode op,
                              TCGReg r0, TCGReg r1, TCGReg r2,
                              TCGReg r3, TCGReg r4, TCGReg r5)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 4, r3);
    insn = deposit32(insn, 24, 4, r4);
    insn = deposit32(insn, 28, 4, r5);
    tcg_out32(s, insn);
}
407
408static void tcg_out_ldst(TCGContext *s, TCGOpcode op, TCGReg val,
409                         TCGReg base, intptr_t offset)
410{
411    stack_bounds_check(base, offset);
412    if (offset != sextract32(offset, 0, 16)) {
413        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, offset);
414        tcg_out_op_rrr(s, INDEX_op_add, TCG_REG_TMP, TCG_REG_TMP, base);
415        base = TCG_REG_TMP;
416        offset = 0;
417    }
418    tcg_out_op_rrs(s, op, val, base, offset);
419}
420
421static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg val, TCGReg base,
422                       intptr_t offset)
423{
424    switch (type) {
425    case TCG_TYPE_I32:
426        tcg_out_ldst(s, INDEX_op_ld_i32, val, base, offset);
427        break;
428#if TCG_TARGET_REG_BITS == 64
429    case TCG_TYPE_I64:
430        tcg_out_ldst(s, INDEX_op_ld_i64, val, base, offset);
431        break;
432#endif
433    default:
434        g_assert_not_reached();
435    }
436}
437
/* Emit a register-to-register move; always succeeds for TCI. */
static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    tcg_out_op_rr(s, INDEX_op_mov, ret, arg);
    return true;
}
443
/*
 * Load constant @arg into @ret.  Values fitting the 20-bit signed
 * immediate use tci_movi; anything larger goes through the constant
 * pool via tci_movl (the pool entry is relocated into the insn).
 */
static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg)
{
    switch (type) {
    case TCG_TYPE_I32:
#if TCG_TARGET_REG_BITS == 64
        arg = (int32_t)arg;
        /* fall through */
    case TCG_TYPE_I64:
#endif
        break;
    default:
        g_assert_not_reached();
    }

    if (arg == sextract32(arg, 0, 20)) {
        tcg_out_op_ri(s, INDEX_op_tci_movi, ret, arg);
    } else {
        tcg_insn_unit insn = 0;

        new_pool_label(s, arg, 20, s->code_ptr, 0);
        insn = deposit32(insn, 0, 8, INDEX_op_tci_movl);
        insn = deposit32(insn, 8, 4, ret);
        tcg_out32(s, insn);
    }
}
470
471static void tcg_out_extract(TCGContext *s, TCGType type, TCGReg rd,
472                            TCGReg rs, unsigned pos, unsigned len)
473{
474    TCGOpcode opc = type == TCG_TYPE_I32 ?
475                    INDEX_op_extract_i32 :
476                    INDEX_op_extract_i64;
477    tcg_out_op_rrbb(s, opc, rd, rs, pos, len);
478}
479
480static void tcg_out_sextract(TCGContext *s, TCGType type, TCGReg rd,
481                             TCGReg rs, unsigned pos, unsigned len)
482{
483    TCGOpcode opc = type == TCG_TYPE_I32 ?
484                    INDEX_op_sextract_i32 :
485                    INDEX_op_sextract_i64;
486    tcg_out_op_rrbb(s, opc, rd, rs, pos, len);
487}
488
/* Sign-extend the low 8 bits of @rs into @rd. */
static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
{
    tcg_out_sextract(s, type, rd, rs, 0, 8);
}

/* Zero-extend the low 8 bits of @rs into @rd. */
static void tcg_out_ext8u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_extract(s, TCG_TYPE_REG, rd, rs, 0, 8);
}

/* Sign-extend the low 16 bits of @rs into @rd. */
static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
{
    tcg_out_sextract(s, type, rd, rs, 0, 16);
}

/* Zero-extend the low 16 bits of @rs into @rd. */
static void tcg_out_ext16u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_extract(s, TCG_TYPE_REG, rd, rs, 0, 16);
}

/* Sign-extend the low 32 bits of @rs into @rd (64-bit hosts only). */
static void tcg_out_ext32s(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
    tcg_out_sextract(s, TCG_TYPE_I64, rd, rs, 0, 32);
}

/* Zero-extend the low 32 bits of @rs into @rd (64-bit hosts only). */
static void tcg_out_ext32u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
    tcg_out_extract(s, TCG_TYPE_I64, rd, rs, 0, 32);
}

/* i32 -> i64 sign extension. */
static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_ext32s(s, rd, rs);
}

/* i32 -> i64 zero extension. */
static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_ext32u(s, rd, rs);
}

/* Truncate i64 -> i32: a plain move suffices. */
static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
    tcg_out_mov(s, TCG_TYPE_I32, rd, rs);
}
536
/* In-place register exchange is not supported by this backend. */
static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
{
    return false;
}
541
/* Add an immediate to a pointer register. */
static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                             tcg_target_long imm)
{
    /* This function is only used for passing structs by reference. */
    g_assert_not_reached();
}
548
/*
 * Emit a helper call.  The function pointer and its ffi_cif descriptor
 * go into the constant pool (referenced via a 20-bit relocation); the
 * instruction itself encodes only the return-value size class:
 * 0 = void, 1 = 4 bytes, 2 = 8 bytes, 3 = 16 bytes.
 */
static void tcg_out_call(TCGContext *s, const tcg_insn_unit *func,
                         const TCGHelperInfo *info)
{
    ffi_cif *cif = info->cif;
    tcg_insn_unit insn = 0;
    uint8_t which;

    if (cif->rtype == &ffi_type_void) {
        which = 0;
    } else {
        tcg_debug_assert(cif->rtype->size == 4 ||
                         cif->rtype->size == 8 ||
                         cif->rtype->size == 16);
        which = ctz32(cif->rtype->size) - 1;
    }
    new_pool_l2(s, 20, s->code_ptr, 0, (uintptr_t)func, (uintptr_t)cif);
    insn = deposit32(insn, 0, 8, INDEX_op_call);
    insn = deposit32(insn, 8, 4, which);
    tcg_out32(s, insn);
}
569
/*
 * CASE_32_64 expands to the _i32 case label, plus the _i64 label on
 * 64-bit hosts; CASE_64 expands only on 64-bit hosts.
 */
#if TCG_TARGET_REG_BITS == 64
# define CASE_32_64(x) \
        case glue(glue(INDEX_op_, x), _i64): \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x) \
        case glue(glue(INDEX_op_, x), _i64):
#else
# define CASE_32_64(x) \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x)
#endif
581
/* Emit the end-of-TB opcode carrying the return value @arg. */
static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg)
{
    tcg_out_op_p(s, INDEX_op_exit_tb, (void *)arg);
}
586
/* Emit a chained jump to TB slot @which. */
static void tcg_out_goto_tb(TCGContext *s, int which)
{
    /* indirect jump method. */
    tcg_out_op_p(s, INDEX_op_goto_tb, (void *)get_jmp_target_addr(s, which));
    set_jmp_reset_offset(s, which);
}
593
/* Retarget an existing TB jump.  */
void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
                              uintptr_t jmp_rx, uintptr_t jmp_rw)
{
    /* Always indirect, nothing to do */
}
599
/* Emit integer addition. */
static void tgen_add(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_add, a0, a1, a2);
}

static const TCGOutOpBinary outop_add = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_add,
};
610
/* Emit bitwise AND. */
static void tgen_and(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_and, a0, a1, a2);
}

static const TCGOutOpBinary outop_and = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_and,
};
621
/* Emit AND-with-complement (a1 & ~a2). */
static void tgen_andc(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_andc, a0, a1, a2);
}

static const TCGOutOpBinary outop_andc = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_andc,
};
632
633static void tgen_divs(TCGContext *s, TCGType type,
634                      TCGReg a0, TCGReg a1, TCGReg a2)
635{
636    TCGOpcode opc = (type == TCG_TYPE_I32
637                     ? INDEX_op_tci_divs32
638                     : INDEX_op_divs);
639    tcg_out_op_rrr(s, opc, a0, a1, a2);
640}
641
642static const TCGOutOpBinary outop_divs = {
643    .base.static_constraint = C_O1_I2(r, r, r),
644    .out_rrr = tgen_divs,
645};
646
647static const TCGOutOpDivRem outop_divs2 = {
648    .base.static_constraint = C_NotImplemented,
649};
650
651static void tgen_divu(TCGContext *s, TCGType type,
652                      TCGReg a0, TCGReg a1, TCGReg a2)
653{
654    TCGOpcode opc = (type == TCG_TYPE_I32
655                     ? INDEX_op_tci_divu32
656                     : INDEX_op_divu);
657    tcg_out_op_rrr(s, opc, a0, a1, a2);
658}
659
660static const TCGOutOpBinary outop_divu = {
661    .base.static_constraint = C_O1_I2(r, r, r),
662    .out_rrr = tgen_divu,
663};
664
665static const TCGOutOpDivRem outop_divu2 = {
666    .base.static_constraint = C_NotImplemented,
667};
668
/* Emit equivalence (~(a1 ^ a2)). */
static void tgen_eqv(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_eqv, a0, a1, a2);
}

static const TCGOutOpBinary outop_eqv = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_eqv,
};
679
/* Emit integer multiplication. */
static void tgen_mul(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_mul, a0, a1, a2);
}

static const TCGOutOpBinary outop_mul = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_mul,
};

/* High-part multiplications are not provided; mulu2/muls2 are used. */
static const TCGOutOpBinary outop_mulsh = {
    .base.static_constraint = C_NotImplemented,
};

static const TCGOutOpBinary outop_muluh = {
    .base.static_constraint = C_NotImplemented,
};
698
/* Emit NAND (~(a1 & a2)). */
static void tgen_nand(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_nand, a0, a1, a2);
}

static const TCGOutOpBinary outop_nand = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_nand,
};
709
/* Emit NOR (~(a1 | a2)). */
static void tgen_nor(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_nor, a0, a1, a2);
}

static const TCGOutOpBinary outop_nor = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_nor,
};
720
/* Emit bitwise OR. */
static void tgen_or(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_or, a0, a1, a2);
}

static const TCGOutOpBinary outop_or = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_or,
};
731
/* Emit OR-with-complement (a1 | ~a2). */
static void tgen_orc(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_orc, a0, a1, a2);
}

static const TCGOutOpBinary outop_orc = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_orc,
};
742
743static void tgen_rems(TCGContext *s, TCGType type,
744                      TCGReg a0, TCGReg a1, TCGReg a2)
745{
746    TCGOpcode opc = (type == TCG_TYPE_I32
747                     ? INDEX_op_tci_rems32
748                     : INDEX_op_rems);
749    tcg_out_op_rrr(s, opc, a0, a1, a2);
750}
751
752static const TCGOutOpBinary outop_rems = {
753    .base.static_constraint = C_O1_I2(r, r, r),
754    .out_rrr = tgen_rems,
755};
756
757static void tgen_remu(TCGContext *s, TCGType type,
758                      TCGReg a0, TCGReg a1, TCGReg a2)
759{
760    TCGOpcode opc = (type == TCG_TYPE_I32
761                     ? INDEX_op_tci_remu32
762                     : INDEX_op_remu);
763    tcg_out_op_rrr(s, opc, a0, a1, a2);
764}
765
766static const TCGOutOpBinary outop_remu = {
767    .base.static_constraint = C_O1_I2(r, r, r),
768    .out_rrr = tgen_remu,
769};
770
771static void tgen_rotl(TCGContext *s, TCGType type,
772                     TCGReg a0, TCGReg a1, TCGReg a2)
773{
774    TCGOpcode opc = (type == TCG_TYPE_I32
775                     ? INDEX_op_tci_rotl32
776                     : INDEX_op_rotl);
777    tcg_out_op_rrr(s, opc, a0, a1, a2);
778}
779
780static const TCGOutOpBinary outop_rotl = {
781    .base.static_constraint = C_O1_I2(r, r, r),
782    .out_rrr = tgen_rotl,
783};
784
785static void tgen_rotr(TCGContext *s, TCGType type,
786                     TCGReg a0, TCGReg a1, TCGReg a2)
787{
788    TCGOpcode opc = (type == TCG_TYPE_I32
789                     ? INDEX_op_tci_rotr32
790                     : INDEX_op_rotr);
791    tcg_out_op_rrr(s, opc, a0, a1, a2);
792}
793
794static const TCGOutOpBinary outop_rotr = {
795    .base.static_constraint = C_O1_I2(r, r, r),
796    .out_rrr = tgen_rotr,
797};
798
/*
 * Emit arithmetic shift right.  For a 32-bit operation on a wider
 * host register, sign-extend the input into TMP first so the shift
 * sees a properly extended value.
 */
static void tgen_sar(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    if (type < TCG_TYPE_REG) {
        tcg_out_ext32s(s, TCG_REG_TMP, a1);
        a1 = TCG_REG_TMP;
    }
    tcg_out_op_rrr(s, INDEX_op_sar, a0, a1, a2);
}

static const TCGOutOpBinary outop_sar = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_sar,
};
813
/* Emit logical shift left. */
static void tgen_shl(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_shl, a0, a1, a2);
}

static const TCGOutOpBinary outop_shl = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_shl,
};
824
/*
 * Emit logical shift right.  For a 32-bit operation on a wider host
 * register, zero-extend the input into TMP first so no stale high
 * bits shift in.
 */
static void tgen_shr(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    if (type < TCG_TYPE_REG) {
        tcg_out_ext32u(s, TCG_REG_TMP, a1);
        a1 = TCG_REG_TMP;
    }
    tcg_out_op_rrr(s, INDEX_op_shr, a0, a1, a2);
}

static const TCGOutOpBinary outop_shr = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_shr,
};
839
/* Emit integer subtraction. */
static void tgen_sub(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_sub, a0, a1, a2);
}

static const TCGOutOpSubtract outop_sub = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_sub,
};
850
/* Emit bitwise XOR. */
static void tgen_xor(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_xor, a0, a1, a2);
}

static const TCGOutOpBinary outop_xor = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_xor,
};
861
/* Emit arithmetic negation. */
static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
    tcg_out_op_rr(s, INDEX_op_neg, a0, a1);
}

static const TCGOutOpUnary outop_neg = {
    .base.static_constraint = C_O1_I1(r, r),
    .out_rr = tgen_neg,
};
871
/* Emit bitwise NOT. */
static void tgen_not(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
    tcg_out_op_rr(s, INDEX_op_not, a0, a1);
}

static const TCGOutOpUnary outop_not = {
    .base.static_constraint = C_O1_I1(r, r),
    .out_rr = tgen_not,
};
881
882
/*
 * Emit one TCG opcode.  Most cases select the encoding helper matching
 * the operand arity; a few lower the operation onto simpler TCI
 * primitives (brcond via setcond + conditional branch, bswap with an
 * explicit sign-extension).
 */
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS])
{
    int width;

    switch (opc) {
    case INDEX_op_goto_ptr:
        tcg_out_op_r(s, opc, args[0]);
        break;

    case INDEX_op_br:
        tcg_out_op_l(s, opc, arg_label(args[0]));
        break;

    CASE_32_64(setcond)
        tcg_out_op_rrrc(s, opc, args[0], args[1], args[2], args[3]);
        break;

    CASE_32_64(movcond)
    case INDEX_op_setcond2_i32:
        tcg_out_op_rrrrrc(s, opc, args[0], args[1], args[2],
                          args[3], args[4], args[5]);
        break;

    CASE_32_64(ld8u)
    CASE_32_64(ld8s)
    CASE_32_64(ld16u)
    CASE_32_64(ld16s)
    case INDEX_op_ld_i32:
    CASE_64(ld32u)
    CASE_64(ld32s)
    CASE_64(ld)
    CASE_32_64(st8)
    CASE_32_64(st16)
    case INDEX_op_st_i32:
    CASE_64(st32)
    CASE_64(st)
        tcg_out_ldst(s, opc, args[0], args[1], args[2]);
        break;

    CASE_32_64(clz)      /* Optional (TCG_TARGET_HAS_clz_*). */
    CASE_32_64(ctz)      /* Optional (TCG_TARGET_HAS_ctz_*). */
        tcg_out_op_rrr(s, opc, args[0], args[1], args[2]);
        break;

    CASE_32_64(deposit)
        tcg_out_op_rrrbb(s, opc, args[0], args[1], args[2], args[3], args[4]);
        break;

    CASE_32_64(extract)  /* Optional (TCG_TARGET_HAS_extract_*). */
    CASE_32_64(sextract) /* Optional (TCG_TARGET_HAS_sextract_*). */
        tcg_out_op_rrbb(s, opc, args[0], args[1], args[2], args[3]);
        break;

    CASE_32_64(brcond)
        /* Lowered as: setcond into TMP, then branch if TMP non-zero. */
        tcg_out_op_rrrc(s, (opc == INDEX_op_brcond_i32
                            ? INDEX_op_setcond_i32 : INDEX_op_setcond_i64),
                        TCG_REG_TMP, args[0], args[1], args[2]);
        tcg_out_op_rl(s, opc, TCG_REG_TMP, arg_label(args[3]));
        break;

    CASE_32_64(ctpop)    /* Optional (TCG_TARGET_HAS_ctpop_*). */
    case INDEX_op_bswap32_i32: /* Optional (TCG_TARGET_HAS_bswap32_i32). */
    case INDEX_op_bswap64_i64: /* Optional (TCG_TARGET_HAS_bswap64_i64). */
        tcg_out_op_rr(s, opc, args[0], args[1]);
        break;

    case INDEX_op_bswap16_i32: /* Optional (TCG_TARGET_HAS_bswap16_i32). */
    case INDEX_op_bswap16_i64: /* Optional (TCG_TARGET_HAS_bswap16_i64). */
        width = 16;
        goto do_bswap;
    case INDEX_op_bswap32_i64: /* Optional (TCG_TARGET_HAS_bswap32_i64). */
        width = 32;
    do_bswap:
        /* The base tci bswaps zero-extend, and ignore high bits. */
        tcg_out_op_rr(s, opc, args[0], args[1]);
        if (args[2] & TCG_BSWAP_OS) {
            /* Sign-extend the result when the caller asked for it. */
            tcg_out_sextract(s, TCG_TYPE_REG, args[0], args[0], 0, width);
        }
        break;

    CASE_32_64(add2)
    CASE_32_64(sub2)
        tcg_out_op_rrrrrr(s, opc, args[0], args[1], args[2],
                          args[3], args[4], args[5]);
        break;

#if TCG_TARGET_REG_BITS == 32
    case INDEX_op_brcond2_i32:
        /* Lowered as: setcond2 into TMP, then branch if TMP non-zero. */
        tcg_out_op_rrrrrc(s, INDEX_op_setcond2_i32, TCG_REG_TMP,
                          args[0], args[1], args[2], args[3], args[4]);
        tcg_out_op_rl(s, INDEX_op_brcond_i32, TCG_REG_TMP, arg_label(args[5]));
        break;
#endif

    CASE_32_64(mulu2)
    CASE_32_64(muls2)
        tcg_out_op_rrrr(s, opc, args[0], args[1], args[2], args[3]);
        break;

    case INDEX_op_qemu_ld_i64:
    case INDEX_op_qemu_st_i64:
        if (TCG_TARGET_REG_BITS == 32) {
            /* 32-bit host: memop index does not fit; pass it in TMP. */
            tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP, args[3]);
            tcg_out_op_rrrr(s, opc, args[0], args[1], args[2], TCG_REG_TMP);
            break;
        }
        /* fall through */
    case INDEX_op_qemu_ld_i32:
    case INDEX_op_qemu_st_i32:
        if (TCG_TARGET_REG_BITS == 64 && s->addr_type == TCG_TYPE_I32) {
            /* Guest addresses are 32-bit: zero-extend before use. */
            tcg_out_ext32u(s, TCG_REG_TMP, args[1]);
            tcg_out_op_rrm(s, opc, args[0], TCG_REG_TMP, args[2]);
        } else {
            tcg_out_op_rrm(s, opc, args[0], args[1], args[2]);
        }
        break;

    case INDEX_op_mb:
        tcg_out_op_v(s, opc);
        break;

    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
    case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb.  */
    case INDEX_op_goto_tb:  /* Always emitted via tcg_out_goto_tb.  */
    case INDEX_op_ext_i32_i64:  /* Always emitted via tcg_reg_alloc_op.  */
    case INDEX_op_extu_i32_i64:
    case INDEX_op_extrl_i64_i32:
    default:
        g_assert_not_reached();
    }
}
1016
1017static void tcg_out_st(TCGContext *s, TCGType type, TCGReg val, TCGReg base,
1018                       intptr_t offset)
1019{
1020    switch (type) {
1021    case TCG_TYPE_I32:
1022        tcg_out_ldst(s, INDEX_op_st_i32, val, base, offset);
1023        break;
1024#if TCG_TARGET_REG_BITS == 64
1025    case TCG_TYPE_I64:
1026        tcg_out_ldst(s, INDEX_op_st_i64, val, base, offset);
1027        break;
1028#endif
1029    default:
1030        g_assert_not_reached();
1031    }
1032}
1033
/* Storing an immediate directly is not supported by this backend. */
static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                               TCGReg base, intptr_t ofs)
{
    return false;
}
1039
1040/* Test if a constant matches the constraint. */
1041static bool tcg_target_const_match(int64_t val, int ct,
1042                                   TCGType type, TCGCond cond, int vece)
1043{
1044    return ct & TCG_CT_CONST;
1045}
1046
/* Fill @count instruction slots with zeroed words. */
static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
    memset(p, 0, sizeof(*p) * count);
}
1051
/*
 * Initialize backend state: available/clobbered register sets,
 * reserved registers, and the static stack frame layout.
 */
static void tcg_target_init(TCGContext *s)
{
    /* The current code uses uint8_t for tcg operations. */
    tcg_debug_assert(tcg_op_defs_max <= UINT8_MAX);

    /* Registers available for 32 bit operations. */
    tcg_target_available_regs[TCG_TYPE_I32] = BIT(TCG_TARGET_NB_REGS) - 1;
    /* Registers available for 64 bit operations. */
    tcg_target_available_regs[TCG_TYPE_I64] = BIT(TCG_TARGET_NB_REGS) - 1;
    /*
     * The interpreter "registers" are in the local stack frame and
     * cannot be clobbered by the called helper functions.  However,
     * the interpreter assumes a 128-bit return value and assigns to
     * the return value registers.
     */
    tcg_target_call_clobber_regs =
        MAKE_64BIT_MASK(TCG_REG_R0, 128 / TCG_TARGET_REG_BITS);

    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);

    /* The call arguments come first, followed by the temp storage. */
    tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE,
                  TCG_STATIC_FRAME_SIZE);
}
1078
/* Generate global QEMU prologue and epilogue code. */
static inline void tcg_target_qemu_prologue(TCGContext *s)
{
    /* The interpreter needs no generated prologue. */
}

/* Hook called at the start of each translation block. */
static void tcg_out_tb_start(TCGContext *s)
{
    /* nothing to do */
}

/* The interpreter can byte-swap any memory operation itself. */
bool tcg_target_has_memory_bswap(MemOp memop)
{
    return true;
}
1093
/* Slow-path ldst stubs are never used by this backend. */
static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    g_assert_not_reached();
}

static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    g_assert_not_reached();
}
1103