xref: /openbmc/qemu/tcg/tci/tcg-target.c.inc (revision 961b80aecd1a503eedb885c309a1d5267d89c98c)
/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2009, 2011 Stefan Weil
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* Used for function call generation. */
#define TCG_TARGET_CALL_STACK_OFFSET    0
#define TCG_TARGET_STACK_ALIGN          8
#if TCG_TARGET_REG_BITS == 32
# define TCG_TARGET_CALL_ARG_I32        TCG_CALL_ARG_EVEN
# define TCG_TARGET_CALL_ARG_I64        TCG_CALL_ARG_EVEN
# define TCG_TARGET_CALL_ARG_I128       TCG_CALL_ARG_EVEN
#else
# define TCG_TARGET_CALL_ARG_I32        TCG_CALL_ARG_NORMAL
# define TCG_TARGET_CALL_ARG_I64        TCG_CALL_ARG_NORMAL
# define TCG_TARGET_CALL_ARG_I128       TCG_CALL_ARG_NORMAL
#endif
#define TCG_TARGET_CALL_RET_I128        TCG_CALL_RET_NORMAL
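
/*
 * An illustration of the EVEN convention on 32-bit hosts, assuming
 * TCG_CALL_ARG_EVEN pads to an even slot index as its name suggests
 * (this file does not spell that out): a helper taking (i32 a, i64 b)
 * places 'a' in stack slot 0, leaves slot 1 unused, and places 'b' in
 * slots 2 and 3, so the 64-bit value stays aligned to an even slot
 * pair.
 */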

static TCGConstraintSetIndex
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
{
    switch (op) {
    case INDEX_op_goto_ptr:
        return C_O0_I1(r);

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap16_i64:
    case INDEX_op_bswap32_i32:
    case INDEX_op_bswap32_i64:
    case INDEX_op_bswap64_i64:
    case INDEX_op_extract_i32:
    case INDEX_op_extract_i64:
    case INDEX_op_sextract_i32:
    case INDEX_op_sextract_i64:
    case INDEX_op_ctpop_i32:
    case INDEX_op_ctpop_i64:
        return C_O1_I1(r, r);

    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
        return C_O0_I2(r, r);

    case INDEX_op_rem_i32:
    case INDEX_op_rem_i64:
    case INDEX_op_remu_i32:
    case INDEX_op_remu_i64:
    case INDEX_op_shl_i32:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i32:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i32:
    case INDEX_op_sar_i64:
    case INDEX_op_rotl_i32:
    case INDEX_op_rotl_i64:
    case INDEX_op_rotr_i32:
    case INDEX_op_rotr_i64:
    case INDEX_op_setcond_i32:
    case INDEX_op_setcond_i64:
    case INDEX_op_deposit_i32:
    case INDEX_op_deposit_i64:
    case INDEX_op_clz_i32:
    case INDEX_op_clz_i64:
    case INDEX_op_ctz_i32:
    case INDEX_op_ctz_i64:
        return C_O1_I2(r, r, r);

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        return C_O0_I2(r, r);

    case INDEX_op_add2_i32:
    case INDEX_op_add2_i64:
    case INDEX_op_sub2_i32:
    case INDEX_op_sub2_i64:
        return C_O2_I4(r, r, r, r, r, r);

#if TCG_TARGET_REG_BITS == 32
    case INDEX_op_brcond2_i32:
        return C_O0_I4(r, r, r, r);
#endif

    case INDEX_op_mulu2_i32:
    case INDEX_op_mulu2_i64:
    case INDEX_op_muls2_i32:
    case INDEX_op_muls2_i64:
        return C_O2_I2(r, r, r, r);

    case INDEX_op_movcond_i32:
    case INDEX_op_movcond_i64:
    case INDEX_op_setcond2_i32:
        return C_O1_I4(r, r, r, r, r);

    case INDEX_op_qemu_ld_i32:
        return C_O1_I1(r, r);
    case INDEX_op_qemu_ld_i64:
        return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O2_I1(r, r, r);
    case INDEX_op_qemu_st_i32:
        return C_O0_I2(r, r);
    case INDEX_op_qemu_st_i64:
        return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r) : C_O0_I3(r, r, r);

    default:
        return C_NotImplemented;
    }
}
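
/*
 * A note on reading the constraint sets above (generic TCG naming,
 * recorded here for convenience): C_Ox_Iy(...) describes x output and
 * y input operands.  Every operand in this backend uses the plain 'r'
 * constraint, i.e. any register, since the interpreted target has no
 * special-purpose register classes.
 */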

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R12,
    TCG_REG_R13,
    TCG_REG_R14,
    TCG_REG_R15,
    /* Either 2 or 4 of these are call clobbered, so use them last. */
    TCG_REG_R3,
    TCG_REG_R2,
    TCG_REG_R1,
    TCG_REG_R0,
};

/* No call arguments via registers.  All will be stored on the "stack". */
static const int tcg_target_call_iarg_regs[] = { };

static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
{
    tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
    tcg_debug_assert(slot >= 0 && slot < 128 / TCG_TARGET_REG_BITS);
    return TCG_REG_R0 + slot;
}
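
/*
 * Worked example of the slot mapping above: the 128-bit return area
 * spans 128 / TCG_TARGET_REG_BITS registers, so a 64-bit host maps
 * slots 0..1 to R0..R1 and a 32-bit host maps slots 0..3 to R0..R3.
 */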

#ifdef CONFIG_DEBUG_TCG
static const char *const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "r00",
    "r01",
    "r02",
    "r03",
    "r04",
    "r05",
    "r06",
    "r07",
    "r08",
    "r09",
    "r10",
    "r11",
    "r12",
    "r13",
    "r14",
    "r15",
};
#endif

static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    intptr_t diff = value - (intptr_t)(code_ptr + 1);

    tcg_debug_assert(addend == 0);
    tcg_debug_assert(type == 20);

    if (diff == sextract32(diff, 0, type)) {
        tcg_patch32(code_ptr, deposit32(*code_ptr, 32 - type, type, diff));
        return true;
    }
    return false;
}
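
/*
 * Worked example: with type == 20, 'diff' is measured in bytes from
 * the end of the 32-bit instruction word (code_ptr + 1) and must fit
 * in a signed 20-bit field, which is then deposited into bits
 * [12, 31] of the instruction (32 - 20 = 12).  A branch whose target
 * lies 4 bytes past the end of the insn stores diff = 4 there.
 */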

static void stack_bounds_check(TCGReg base, intptr_t offset)
{
    if (base == TCG_REG_CALL_STACK) {
        tcg_debug_assert(offset >= 0);
        tcg_debug_assert(offset < (TCG_STATIC_CALL_ARGS_SIZE +
                                   TCG_STATIC_FRAME_SIZE));
    }
}

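/*
 * Layout of the 32-bit instruction words built by the encoders below,
 * summarised from their deposit32() calls: the opcode always occupies
 * bits [0, 7]; register and condition operands follow in successive
 * 4-bit fields starting at bit 8; 16-bit immediates sit in bits
 * [16, 31]; 20-bit immediates and relocated displacements sit in bits
 * [12, 31].
 */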
static void tcg_out_op_l(TCGContext *s, TCGOpcode op, TCGLabel *l0)
{
    tcg_insn_unit insn = 0;

    tcg_out_reloc(s, s->code_ptr, 20, l0, 0);
    insn = deposit32(insn, 0, 8, op);
    tcg_out32(s, insn);
}

static void tcg_out_op_p(TCGContext *s, TCGOpcode op, void *p0)
{
    tcg_insn_unit insn = 0;
    intptr_t diff;

    /* Special case for exit_tb: map null -> 0. */
    if (p0 == NULL) {
        diff = 0;
    } else {
        diff = p0 - (void *)(s->code_ptr + 1);
        tcg_debug_assert(diff != 0);
        if (diff != sextract32(diff, 0, 20)) {
            tcg_raise_tb_overflow(s);
        }
    }
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 12, 20, diff);
    tcg_out32(s, insn);
}

static void tcg_out_op_r(TCGContext *s, TCGOpcode op, TCGReg r0)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    tcg_out32(s, insn);
}

static void tcg_out_op_v(TCGContext *s, TCGOpcode op)
{
    tcg_out32(s, (uint8_t)op);
}

static void tcg_out_op_ri(TCGContext *s, TCGOpcode op, TCGReg r0, int32_t i1)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(i1 == sextract32(i1, 0, 20));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 20, i1);
    tcg_out32(s, insn);
}

static void tcg_out_op_rl(TCGContext *s, TCGOpcode op, TCGReg r0, TCGLabel *l1)
{
    tcg_insn_unit insn = 0;

    tcg_out_reloc(s, s->code_ptr, 20, l1, 0);
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    tcg_out32(s, insn);
}

static void tcg_out_op_rr(TCGContext *s, TCGOpcode op, TCGReg r0, TCGReg r1)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrm(TCGContext *s, TCGOpcode op,
                           TCGReg r0, TCGReg r1, TCGArg m2)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(m2 == extract32(m2, 0, 16));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 16, m2);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrr(TCGContext *s, TCGOpcode op,
                           TCGReg r0, TCGReg r1, TCGReg r2)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrs(TCGContext *s, TCGOpcode op,
                           TCGReg r0, TCGReg r1, intptr_t i2)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(i2 == sextract32(i2, 0, 16));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 16, i2);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrbb(TCGContext *s, TCGOpcode op, TCGReg r0,
                            TCGReg r1, uint8_t b2, uint8_t b3)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(b2 == extract32(b2, 0, 6));
    tcg_debug_assert(b3 == extract32(b3, 0, 6));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 6, b2);
    insn = deposit32(insn, 22, 6, b3);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrrc(TCGContext *s, TCGOpcode op,
                            TCGReg r0, TCGReg r1, TCGReg r2, TCGCond c3)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 4, c3);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrrbb(TCGContext *s, TCGOpcode op, TCGReg r0,
                             TCGReg r1, TCGReg r2, uint8_t b3, uint8_t b4)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(b3 == extract32(b3, 0, 6));
    tcg_debug_assert(b4 == extract32(b4, 0, 6));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 6, b3);
    insn = deposit32(insn, 26, 6, b4);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrrr(TCGContext *s, TCGOpcode op,
                            TCGReg r0, TCGReg r1, TCGReg r2, TCGReg r3)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 4, r3);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrrrrc(TCGContext *s, TCGOpcode op,
                              TCGReg r0, TCGReg r1, TCGReg r2,
                              TCGReg r3, TCGReg r4, TCGCond c5)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 4, r3);
    insn = deposit32(insn, 24, 4, r4);
    insn = deposit32(insn, 28, 4, c5);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrrrrr(TCGContext *s, TCGOpcode op,
                              TCGReg r0, TCGReg r1, TCGReg r2,
                              TCGReg r3, TCGReg r4, TCGReg r5)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 4, r3);
    insn = deposit32(insn, 24, 4, r4);
    insn = deposit32(insn, 28, 4, r5);
    tcg_out32(s, insn);
}

static void tcg_out_ldst(TCGContext *s, TCGOpcode op, TCGReg val,
                         TCGReg base, intptr_t offset)
{
    stack_bounds_check(base, offset);
    if (offset != sextract32(offset, 0, 16)) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, offset);
        tcg_out_op_rrr(s, INDEX_op_add, TCG_REG_TMP, TCG_REG_TMP, base);
        base = TCG_REG_TMP;
        offset = 0;
    }
    tcg_out_op_rrs(s, op, val, base, offset);
}
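
/*
 * Example of the large-offset path above, in pseudo-assembly: an
 * access at offset 0x12345 does not fit the signed 16-bit field, so
 * the emitted sequence becomes
 *     movi tmp, 0x12345 ; add tmp, tmp, base ; ld/st val, tmp, 0
 * with the reserved TCG_REG_TMP serving as scratch.
 */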

static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg val, TCGReg base,
                       intptr_t offset)
{
    switch (type) {
    case TCG_TYPE_I32:
        tcg_out_ldst(s, INDEX_op_ld_i32, val, base, offset);
        break;
#if TCG_TARGET_REG_BITS == 64
    case TCG_TYPE_I64:
        tcg_out_ldst(s, INDEX_op_ld_i64, val, base, offset);
        break;
#endif
    default:
        g_assert_not_reached();
    }
}

static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    tcg_out_op_rr(s, INDEX_op_mov, ret, arg);
    return true;
}

static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg)
{
    switch (type) {
    case TCG_TYPE_I32:
#if TCG_TARGET_REG_BITS == 64
        arg = (int32_t)arg;
        /* fall through */
    case TCG_TYPE_I64:
#endif
        break;
    default:
        g_assert_not_reached();
    }

    if (arg == sextract32(arg, 0, 20)) {
        tcg_out_op_ri(s, INDEX_op_tci_movi, ret, arg);
    } else {
        tcg_insn_unit insn = 0;

        new_pool_label(s, arg, 20, s->code_ptr, 0);
        insn = deposit32(insn, 0, 8, INDEX_op_tci_movl);
        insn = deposit32(insn, 8, 4, ret);
        tcg_out32(s, insn);
    }
}
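
/*
 * To illustrate the split above: 0x7ffff, the largest signed 20-bit
 * value, is emitted inline as tci_movi, while 0x80000 no longer fits
 * and becomes tci_movl, its value placed in the constant pool and
 * located through the 20-bit relocation recorded by new_pool_label().
 */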

static void tcg_out_extract(TCGContext *s, TCGType type, TCGReg rd,
                            TCGReg rs, unsigned pos, unsigned len)
{
    TCGOpcode opc = type == TCG_TYPE_I32 ?
                    INDEX_op_extract_i32 :
                    INDEX_op_extract_i64;
    tcg_out_op_rrbb(s, opc, rd, rs, pos, len);
}

static void tcg_out_sextract(TCGContext *s, TCGType type, TCGReg rd,
                             TCGReg rs, unsigned pos, unsigned len)
{
    TCGOpcode opc = type == TCG_TYPE_I32 ?
                    INDEX_op_sextract_i32 :
                    INDEX_op_sextract_i64;
    tcg_out_op_rrbb(s, opc, rd, rs, pos, len);
}

static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
{
    tcg_out_sextract(s, type, rd, rs, 0, 8);
}

static void tcg_out_ext8u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_extract(s, TCG_TYPE_REG, rd, rs, 0, 8);
}

static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
{
    tcg_out_sextract(s, type, rd, rs, 0, 16);
}

static void tcg_out_ext16u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_extract(s, TCG_TYPE_REG, rd, rs, 0, 16);
}

static void tcg_out_ext32s(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
    tcg_out_sextract(s, TCG_TYPE_I64, rd, rs, 0, 32);
}

static void tcg_out_ext32u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
    tcg_out_extract(s, TCG_TYPE_I64, rd, rs, 0, 32);
}

static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_ext32s(s, rd, rs);
}

static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_ext32u(s, rd, rs);
}

static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
    tcg_out_mov(s, TCG_TYPE_I32, rd, rs);
}

static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
{
    return false;
}

static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                             tcg_target_long imm)
{
    /* This function is only used for passing structs by reference. */
    g_assert_not_reached();
}

static void tcg_out_call(TCGContext *s, const tcg_insn_unit *func,
                         const TCGHelperInfo *info)
{
    ffi_cif *cif = info->cif;
    tcg_insn_unit insn = 0;
    uint8_t which;

    if (cif->rtype == &ffi_type_void) {
        which = 0;
    } else {
        tcg_debug_assert(cif->rtype->size == 4 ||
                         cif->rtype->size == 8 ||
                         cif->rtype->size == 16);
        which = ctz32(cif->rtype->size) - 1;
    }
    new_pool_l2(s, 20, s->code_ptr, 0, (uintptr_t)func, (uintptr_t)cif);
    insn = deposit32(insn, 0, 8, INDEX_op_call);
    insn = deposit32(insn, 8, 4, which);
    tcg_out32(s, insn);
}
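
/*
 * The 'which' field encodes the return kind: 0 for void, otherwise
 * ctz32(size) - 1, i.e. 1 for a 4-byte, 2 for an 8-byte and 3 for a
 * 16-byte return value.  The function pointer and its ffi_cif are
 * stored as a pair in the constant pool and found through the 20-bit
 * relocation recorded by new_pool_l2().
 */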

#if TCG_TARGET_REG_BITS == 64
# define CASE_32_64(x) \
        case glue(glue(INDEX_op_, x), _i64): \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x) \
        case glue(glue(INDEX_op_, x), _i64):
#else
# define CASE_32_64(x) \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x)
#endif
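
/*
 * For example, CASE_32_64(ld8u) expands to
 *     case INDEX_op_ld8u_i32:
 * on 32-bit hosts, with
 *     case INDEX_op_ld8u_i64:
 * added in front on 64-bit hosts, letting one switch arm serve both
 * operand widths.
 */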

static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg)
{
    tcg_out_op_p(s, INDEX_op_exit_tb, (void *)arg);
}

static void tcg_out_goto_tb(TCGContext *s, int which)
{
    /* Indirect jump method. */
    tcg_out_op_p(s, INDEX_op_goto_tb, (void *)get_jmp_target_addr(s, which));
    set_jmp_reset_offset(s, which);
}

void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
                              uintptr_t jmp_rx, uintptr_t jmp_rw)
{
    /* Always indirect, nothing to do. */
}
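
/*
 * Because goto_tb above embeds the address of the per-TB jump slot
 * rather than a direct displacement, the interpreter reloads the
 * destination from that slot on every execution.  Retargeting a
 * chained TB therefore only updates the slot in memory, and there is
 * no generated code to patch here.
 */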

static void tgen_add(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_add, a0, a1, a2);
}

static const TCGOutOpBinary outop_add = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_add,
};

static void tgen_and(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_and, a0, a1, a2);
}

static const TCGOutOpBinary outop_and = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_and,
};

static void tgen_andc(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_andc, a0, a1, a2);
}

static const TCGOutOpBinary outop_andc = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_andc,
};

static void tgen_divs(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    TCGOpcode opc = (type == TCG_TYPE_I32
                     ? INDEX_op_tci_divs32
                     : INDEX_op_divs);
    tcg_out_op_rrr(s, opc, a0, a1, a2);
}

static const TCGOutOpBinary outop_divs = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_divs,
};

static void tgen_divu(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    TCGOpcode opc = (type == TCG_TYPE_I32
                     ? INDEX_op_tci_divu32
                     : INDEX_op_divu);
    tcg_out_op_rrr(s, opc, a0, a1, a2);
}

static const TCGOutOpBinary outop_divu = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_divu,
};

static void tgen_eqv(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_eqv, a0, a1, a2);
}

static const TCGOutOpBinary outop_eqv = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_eqv,
};

static void tgen_mul(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_mul, a0, a1, a2);
}

static const TCGOutOpBinary outop_mul = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_mul,
};

static const TCGOutOpBinary outop_mulsh = {
    .base.static_constraint = C_NotImplemented,
};

static const TCGOutOpBinary outop_muluh = {
    .base.static_constraint = C_NotImplemented,
};

static void tgen_nand(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_nand, a0, a1, a2);
}

static const TCGOutOpBinary outop_nand = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_nand,
};

static void tgen_nor(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_nor, a0, a1, a2);
}

static const TCGOutOpBinary outop_nor = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_nor,
};

static void tgen_or(TCGContext *s, TCGType type,
                    TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_or, a0, a1, a2);
}

static const TCGOutOpBinary outop_or = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_or,
};

static void tgen_orc(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_orc, a0, a1, a2);
}

static const TCGOutOpBinary outop_orc = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_orc,
};

static void tgen_sub(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_sub, a0, a1, a2);
}

static const TCGOutOpSubtract outop_sub = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_sub,
};

static void tgen_xor(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_xor, a0, a1, a2);
}

static const TCGOutOpBinary outop_xor = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_xor,
};

static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
    tcg_out_op_rr(s, INDEX_op_neg, a0, a1);
}

static const TCGOutOpUnary outop_neg = {
    .base.static_constraint = C_O1_I1(r, r),
    .out_rr = tgen_neg,
};

static void tgen_not(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
    tcg_out_op_rr(s, INDEX_op_not, a0, a1);
}

static const TCGOutOpUnary outop_not = {
    .base.static_constraint = C_O1_I1(r, r),
    .out_rr = tgen_not,
};

static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS])
{
    int width;

    switch (opc) {
    case INDEX_op_goto_ptr:
        tcg_out_op_r(s, opc, args[0]);
        break;

    case INDEX_op_br:
        tcg_out_op_l(s, opc, arg_label(args[0]));
        break;

    CASE_32_64(setcond)
        tcg_out_op_rrrc(s, opc, args[0], args[1], args[2], args[3]);
        break;

    CASE_32_64(movcond)
    case INDEX_op_setcond2_i32:
        tcg_out_op_rrrrrc(s, opc, args[0], args[1], args[2],
                          args[3], args[4], args[5]);
        break;

    CASE_32_64(ld8u)
    CASE_32_64(ld8s)
    CASE_32_64(ld16u)
    CASE_32_64(ld16s)
    case INDEX_op_ld_i32:
    CASE_64(ld32u)
    CASE_64(ld32s)
    CASE_64(ld)
    CASE_32_64(st8)
    CASE_32_64(st16)
    case INDEX_op_st_i32:
    CASE_64(st32)
    CASE_64(st)
        tcg_out_ldst(s, opc, args[0], args[1], args[2]);
        break;

    CASE_32_64(shl)
    CASE_32_64(shr)
    CASE_32_64(sar)
    CASE_32_64(rotl)     /* Optional (TCG_TARGET_HAS_rot_*). */
    CASE_32_64(rotr)     /* Optional (TCG_TARGET_HAS_rot_*). */
    CASE_32_64(rem)      /* Optional (TCG_TARGET_HAS_div_*). */
    CASE_32_64(remu)     /* Optional (TCG_TARGET_HAS_div_*). */
    CASE_32_64(clz)      /* Optional (TCG_TARGET_HAS_clz_*). */
    CASE_32_64(ctz)      /* Optional (TCG_TARGET_HAS_ctz_*). */
        tcg_out_op_rrr(s, opc, args[0], args[1], args[2]);
        break;

    CASE_32_64(deposit)
        tcg_out_op_rrrbb(s, opc, args[0], args[1], args[2], args[3], args[4]);
        break;

    CASE_32_64(extract)  /* Optional (TCG_TARGET_HAS_extract_*). */
    CASE_32_64(sextract) /* Optional (TCG_TARGET_HAS_sextract_*). */
        tcg_out_op_rrbb(s, opc, args[0], args[1], args[2], args[3]);
        break;

    CASE_32_64(brcond)
        tcg_out_op_rrrc(s, (opc == INDEX_op_brcond_i32
                            ? INDEX_op_setcond_i32 : INDEX_op_setcond_i64),
                        TCG_REG_TMP, args[0], args[1], args[2]);
        tcg_out_op_rl(s, opc, TCG_REG_TMP, arg_label(args[3]));
        break;

    CASE_32_64(ctpop)    /* Optional (TCG_TARGET_HAS_ctpop_*). */
    case INDEX_op_bswap32_i32: /* Optional (TCG_TARGET_HAS_bswap32_i32). */
    case INDEX_op_bswap64_i64: /* Optional (TCG_TARGET_HAS_bswap64_i64). */
        tcg_out_op_rr(s, opc, args[0], args[1]);
        break;

    case INDEX_op_bswap16_i32: /* Optional (TCG_TARGET_HAS_bswap16_i32). */
    case INDEX_op_bswap16_i64: /* Optional (TCG_TARGET_HAS_bswap16_i64). */
        width = 16;
        goto do_bswap;
    case INDEX_op_bswap32_i64: /* Optional (TCG_TARGET_HAS_bswap32_i64). */
        width = 32;
    do_bswap:
        /* The base tci bswaps zero-extend and ignore the high bits. */
        tcg_out_op_rr(s, opc, args[0], args[1]);
        if (args[2] & TCG_BSWAP_OS) {
            tcg_out_sextract(s, TCG_TYPE_REG, args[0], args[0], 0, width);
        }
        break;

    CASE_32_64(add2)
    CASE_32_64(sub2)
        tcg_out_op_rrrrrr(s, opc, args[0], args[1], args[2],
                          args[3], args[4], args[5]);
        break;

#if TCG_TARGET_REG_BITS == 32
    case INDEX_op_brcond2_i32:
        tcg_out_op_rrrrrc(s, INDEX_op_setcond2_i32, TCG_REG_TMP,
                          args[0], args[1], args[2], args[3], args[4]);
        tcg_out_op_rl(s, INDEX_op_brcond_i32, TCG_REG_TMP, arg_label(args[5]));
        break;
#endif

    CASE_32_64(mulu2)
    CASE_32_64(muls2)
        tcg_out_op_rrrr(s, opc, args[0], args[1], args[2], args[3]);
        break;

    case INDEX_op_qemu_ld_i64:
    case INDEX_op_qemu_st_i64:
        if (TCG_TARGET_REG_BITS == 32) {
            tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP, args[3]);
            tcg_out_op_rrrr(s, opc, args[0], args[1], args[2], TCG_REG_TMP);
            break;
        }
        /* fall through */
    case INDEX_op_qemu_ld_i32:
    case INDEX_op_qemu_st_i32:
        if (TCG_TARGET_REG_BITS == 64 && s->addr_type == TCG_TYPE_I32) {
            tcg_out_ext32u(s, TCG_REG_TMP, args[1]);
            tcg_out_op_rrm(s, opc, args[0], TCG_REG_TMP, args[2]);
        } else {
            tcg_out_op_rrm(s, opc, args[0], args[1], args[2]);
        }
        break;

    case INDEX_op_mb:
        tcg_out_op_v(s, opc);
        break;

    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
    case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb.  */
    case INDEX_op_goto_tb:  /* Always emitted via tcg_out_goto_tb.  */
    case INDEX_op_ext_i32_i64:  /* Always emitted via tcg_reg_alloc_op.  */
    case INDEX_op_extu_i32_i64:
    case INDEX_op_extrl_i64_i32:
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_st(TCGContext *s, TCGType type, TCGReg val, TCGReg base,
                       intptr_t offset)
{
    switch (type) {
    case TCG_TYPE_I32:
        tcg_out_ldst(s, INDEX_op_st_i32, val, base, offset);
        break;
#if TCG_TARGET_REG_BITS == 64
    case TCG_TYPE_I64:
        tcg_out_ldst(s, INDEX_op_st_i64, val, base, offset);
        break;
#endif
    default:
        g_assert_not_reached();
    }
}

static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                               TCGReg base, intptr_t ofs)
{
    return false;
}

/* Test if a constant matches the constraint. */
static bool tcg_target_const_match(int64_t val, int ct,
                                   TCGType type, TCGCond cond, int vece)
{
    return ct & TCG_CT_CONST;
}

static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
    memset(p, 0, sizeof(*p) * count);
}

static void tcg_target_init(TCGContext *s)
{
    /* The current code uses uint8_t for tcg operations. */
    tcg_debug_assert(tcg_op_defs_max <= UINT8_MAX);

    /* Registers available for 32 bit operations. */
    tcg_target_available_regs[TCG_TYPE_I32] = BIT(TCG_TARGET_NB_REGS) - 1;
    /* Registers available for 64 bit operations. */
    tcg_target_available_regs[TCG_TYPE_I64] = BIT(TCG_TARGET_NB_REGS) - 1;
    /*
     * The interpreter "registers" are in the local stack frame and
     * cannot be clobbered by the called helper functions.  However,
     * the interpreter assumes a 128-bit return value and assigns to
     * the return value registers.
     */
    tcg_target_call_clobber_regs =
        MAKE_64BIT_MASK(TCG_REG_R0, 128 / TCG_TARGET_REG_BITS);

    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);

    /* The call arguments come first, followed by the temp storage. */
    tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE,
                  TCG_STATIC_FRAME_SIZE);
}
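
/*
 * Resulting layout of the interpreter's private stack, as implied by
 * tcg_set_frame() above and by stack_bounds_check(): offsets
 * [0, TCG_STATIC_CALL_ARGS_SIZE) hold helper call arguments, and the
 * following TCG_STATIC_FRAME_SIZE bytes hold spilled temporaries.
 */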

/* Generate global QEMU prologue and epilogue code. */
static inline void tcg_target_qemu_prologue(TCGContext *s)
{
}

static void tcg_out_tb_start(TCGContext *s)
{
    /* nothing to do */
}

bool tcg_target_has_memory_bswap(MemOp memop)
{
    return true;
}
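
/*
 * Presumably true for every MemOp because the interpreter performs
 * any byte swap itself while executing qemu_ld/qemu_st, so no extra
 * swap code needs to be generated here.
 */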

static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    g_assert_not_reached();
}

static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    g_assert_not_reached();
}
1019