xref: /openbmc/qemu/tcg/tci/tcg-target.c.inc (revision 8109598b683ad2b6b02cd9c79dc15b7fc0b685aa)
/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2009, 2011 Stefan Weil
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* Used for function call generation. */
#define TCG_TARGET_CALL_STACK_OFFSET    0
#define TCG_TARGET_STACK_ALIGN          8
#if TCG_TARGET_REG_BITS == 32
# define TCG_TARGET_CALL_ARG_I32        TCG_CALL_ARG_EVEN
# define TCG_TARGET_CALL_ARG_I64        TCG_CALL_ARG_EVEN
# define TCG_TARGET_CALL_ARG_I128       TCG_CALL_ARG_EVEN
#else
# define TCG_TARGET_CALL_ARG_I32        TCG_CALL_ARG_NORMAL
# define TCG_TARGET_CALL_ARG_I64        TCG_CALL_ARG_NORMAL
# define TCG_TARGET_CALL_ARG_I128       TCG_CALL_ARG_NORMAL
#endif
#define TCG_TARGET_CALL_RET_I128        TCG_CALL_RET_NORMAL
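
/*
 * Reading aid (my understanding of the generic tcg argument assigner,
 * not spelled out in this file): TCG_CALL_ARG_EVEN rounds the stack
 * slot index up to an even value before placing each argument, so
 * 64-bit values keep 8-byte alignment on 32-bit hosts, while
 * TCG_CALL_ARG_NORMAL packs arguments into consecutive slots.
 */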

static TCGConstraintSetIndex
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
{
    switch (op) {
    case INDEX_op_goto_ptr:
        return C_O0_I1(r);

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap16_i64:
    case INDEX_op_bswap32_i32:
    case INDEX_op_bswap32_i64:
    case INDEX_op_bswap64_i64:
    case INDEX_op_extract_i32:
    case INDEX_op_extract_i64:
    case INDEX_op_sextract_i32:
    case INDEX_op_sextract_i64:
    case INDEX_op_ctpop_i32:
    case INDEX_op_ctpop_i64:
        return C_O1_I1(r, r);

    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
        return C_O0_I2(r, r);

    case INDEX_op_rem_i32:
    case INDEX_op_rem_i64:
    case INDEX_op_remu_i32:
    case INDEX_op_remu_i64:
    case INDEX_op_shl_i32:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i32:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i32:
    case INDEX_op_sar_i64:
    case INDEX_op_rotl_i32:
    case INDEX_op_rotl_i64:
    case INDEX_op_rotr_i32:
    case INDEX_op_rotr_i64:
    case INDEX_op_setcond_i32:
    case INDEX_op_setcond_i64:
    case INDEX_op_deposit_i32:
    case INDEX_op_deposit_i64:
    case INDEX_op_clz_i32:
    case INDEX_op_clz_i64:
    case INDEX_op_ctz_i32:
    case INDEX_op_ctz_i64:
        return C_O1_I2(r, r, r);

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        return C_O0_I2(r, r);

    case INDEX_op_add2_i32:
    case INDEX_op_add2_i64:
    case INDEX_op_sub2_i32:
    case INDEX_op_sub2_i64:
        return C_O2_I4(r, r, r, r, r, r);

#if TCG_TARGET_REG_BITS == 32
    case INDEX_op_brcond2_i32:
        return C_O0_I4(r, r, r, r);
#endif

    case INDEX_op_mulu2_i32:
    case INDEX_op_mulu2_i64:
    case INDEX_op_muls2_i32:
    case INDEX_op_muls2_i64:
        return C_O2_I2(r, r, r, r);

    case INDEX_op_movcond_i32:
    case INDEX_op_movcond_i64:
    case INDEX_op_setcond2_i32:
        return C_O1_I4(r, r, r, r, r);

    case INDEX_op_qemu_ld_i32:
        return C_O1_I1(r, r);
    case INDEX_op_qemu_ld_i64:
        return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O2_I1(r, r, r);
    case INDEX_op_qemu_st_i32:
        return C_O0_I2(r, r);
    case INDEX_op_qemu_st_i64:
        return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r) : C_O0_I3(r, r, r);

    default:
        return C_NotImplemented;
    }
}
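
/*
 * Reading aid: C_Ox_Iy(...) names a constraint set with x output and
 * y input operands, one constraint letter per operand.  TCI can use
 * any register for any operand, so every set above is built from
 * plain "r".  E.g. add2/sub2 return C_O2_I4(r, r, r, r, r, r): two
 * outputs (the low and high halves of the result) and four register
 * inputs.
 */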

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R12,
    TCG_REG_R13,
    TCG_REG_R14,
    TCG_REG_R15,
    /* Either 2 or 4 of these are call clobbered, so use them last. */
    TCG_REG_R3,
    TCG_REG_R2,
    TCG_REG_R1,
    TCG_REG_R0,
};

/* No call arguments via registers.  All will be stored on the "stack". */
static const int tcg_target_call_iarg_regs[] = { };

static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
{
    tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
    tcg_debug_assert(slot >= 0 && slot < 128 / TCG_TARGET_REG_BITS);
    return TCG_REG_R0 + slot;
}

#ifdef CONFIG_DEBUG_TCG
static const char *const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "r00",
    "r01",
    "r02",
    "r03",
    "r04",
    "r05",
    "r06",
    "r07",
    "r08",
    "r09",
    "r10",
    "r11",
    "r12",
    "r13",
    "r14",
    "r15",
};
#endif

static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    intptr_t diff = value - (intptr_t)(code_ptr + 1);

    tcg_debug_assert(addend == 0);
    tcg_debug_assert(type == 20);

    if (diff == sextract32(diff, 0, type)) {
        tcg_patch32(code_ptr, deposit32(*code_ptr, 32 - type, type, diff));
        return true;
    }
    return false;
}
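
/*
 * Worked example (illustrative values): a branch insn at code_ptr
 * whose label resolves 64 bytes past the following insn has
 * diff = 64, which fits in the signed 20-bit field, so the word is
 * patched with deposit32(*code_ptr, 12, 20, 64); the displacement
 * lives in bits [12,31] because 32 - type == 12.  A label more than
 * about +/-512KiB of code away fails the sextract32 test and the
 * relocation is refused.
 */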

static void stack_bounds_check(TCGReg base, intptr_t offset)
{
    if (base == TCG_REG_CALL_STACK) {
        tcg_debug_assert(offset >= 0);
        tcg_debug_assert(offset < (TCG_STATIC_CALL_ARGS_SIZE +
                                   TCG_STATIC_FRAME_SIZE));
    }
}
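
/*
 * The tcg_out_op_* helpers below each emit one 32-bit TCI insn word.
 * The common layout, as encoded by the deposit32 calls: the opcode
 * occupies bits [0,7], and operands are packed upward from bit 8 --
 * registers in 4-bit fields, extract/deposit positions and lengths
 * in 6-bit fields, and immediates or relocated displacements in the
 * remaining high bits (16 or 20 wide, depending on the format).
 */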

static void tcg_out_op_l(TCGContext *s, TCGOpcode op, TCGLabel *l0)
{
    tcg_insn_unit insn = 0;

    tcg_out_reloc(s, s->code_ptr, 20, l0, 0);
    insn = deposit32(insn, 0, 8, op);
    tcg_out32(s, insn);
}

static void tcg_out_op_p(TCGContext *s, TCGOpcode op, void *p0)
{
    tcg_insn_unit insn = 0;
    intptr_t diff;

    /* Special case for exit_tb: map null -> 0. */
    if (p0 == NULL) {
        diff = 0;
    } else {
        diff = p0 - (void *)(s->code_ptr + 1);
        tcg_debug_assert(diff != 0);
        if (diff != sextract32(diff, 0, 20)) {
            tcg_raise_tb_overflow(s);
        }
    }
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 12, 20, diff);
    tcg_out32(s, insn);
}

static void tcg_out_op_r(TCGContext *s, TCGOpcode op, TCGReg r0)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    tcg_out32(s, insn);
}

static void tcg_out_op_v(TCGContext *s, TCGOpcode op)
{
    tcg_out32(s, (uint8_t)op);
}

static void tcg_out_op_ri(TCGContext *s, TCGOpcode op, TCGReg r0, int32_t i1)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(i1 == sextract32(i1, 0, 20));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 20, i1);
    tcg_out32(s, insn);
}

static void tcg_out_op_rl(TCGContext *s, TCGOpcode op, TCGReg r0, TCGLabel *l1)
{
    tcg_insn_unit insn = 0;

    tcg_out_reloc(s, s->code_ptr, 20, l1, 0);
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    tcg_out32(s, insn);
}

static void tcg_out_op_rr(TCGContext *s, TCGOpcode op, TCGReg r0, TCGReg r1)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrm(TCGContext *s, TCGOpcode op,
                           TCGReg r0, TCGReg r1, TCGArg m2)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(m2 == extract32(m2, 0, 16));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 16, m2);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrr(TCGContext *s, TCGOpcode op,
                           TCGReg r0, TCGReg r1, TCGReg r2)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrs(TCGContext *s, TCGOpcode op,
                           TCGReg r0, TCGReg r1, intptr_t i2)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(i2 == sextract32(i2, 0, 16));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 16, i2);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrbb(TCGContext *s, TCGOpcode op, TCGReg r0,
                            TCGReg r1, uint8_t b2, uint8_t b3)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(b2 == extract32(b2, 0, 6));
    tcg_debug_assert(b3 == extract32(b3, 0, 6));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 6, b2);
    insn = deposit32(insn, 22, 6, b3);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrrc(TCGContext *s, TCGOpcode op,
                            TCGReg r0, TCGReg r1, TCGReg r2, TCGCond c3)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 4, c3);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrrbb(TCGContext *s, TCGOpcode op, TCGReg r0,
                             TCGReg r1, TCGReg r2, uint8_t b3, uint8_t b4)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(b3 == extract32(b3, 0, 6));
    tcg_debug_assert(b4 == extract32(b4, 0, 6));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 6, b3);
    insn = deposit32(insn, 26, 6, b4);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrrr(TCGContext *s, TCGOpcode op,
                            TCGReg r0, TCGReg r1, TCGReg r2, TCGReg r3)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 4, r3);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrrrrc(TCGContext *s, TCGOpcode op,
                              TCGReg r0, TCGReg r1, TCGReg r2,
                              TCGReg r3, TCGReg r4, TCGCond c5)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 4, r3);
    insn = deposit32(insn, 24, 4, r4);
    insn = deposit32(insn, 28, 4, c5);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrrrrr(TCGContext *s, TCGOpcode op,
                              TCGReg r0, TCGReg r1, TCGReg r2,
                              TCGReg r3, TCGReg r4, TCGReg r5)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 4, r3);
    insn = deposit32(insn, 24, 4, r4);
    insn = deposit32(insn, 28, 4, r5);
    tcg_out32(s, insn);
}

static void tcg_out_ldst(TCGContext *s, TCGOpcode op, TCGReg val,
                         TCGReg base, intptr_t offset)
{
    stack_bounds_check(base, offset);
    if (offset != sextract32(offset, 0, 16)) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, offset);
        tcg_out_op_rrr(s, INDEX_op_add, TCG_REG_TMP, TCG_REG_TMP, base);
        base = TCG_REG_TMP;
        offset = 0;
    }
    tcg_out_op_rrs(s, op, val, base, offset);
}
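
/*
 * E.g. a load with offset 0x12345 does not fit the signed 16-bit
 * field of the rrs format, so the fallback above emits:
 *   tci_movi  tmp, 0x12345
 *   add       tmp, tmp, base
 *   ld        val, tmp, 0
 * while an in-range offset emits the single load/store directly.
 */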

static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg val, TCGReg base,
                       intptr_t offset)
{
    switch (type) {
    case TCG_TYPE_I32:
        tcg_out_ldst(s, INDEX_op_ld_i32, val, base, offset);
        break;
#if TCG_TARGET_REG_BITS == 64
    case TCG_TYPE_I64:
        tcg_out_ldst(s, INDEX_op_ld_i64, val, base, offset);
        break;
#endif
    default:
        g_assert_not_reached();
    }
}

static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    tcg_out_op_rr(s, INDEX_op_mov, ret, arg);
    return true;
}

static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg)
{
    switch (type) {
    case TCG_TYPE_I32:
#if TCG_TARGET_REG_BITS == 64
        arg = (int32_t)arg;
        /* fall through */
    case TCG_TYPE_I64:
#endif
        break;
    default:
        g_assert_not_reached();
    }

    if (arg == sextract32(arg, 0, 20)) {
        tcg_out_op_ri(s, INDEX_op_tci_movi, ret, arg);
    } else {
        tcg_insn_unit insn = 0;

        new_pool_label(s, arg, 20, s->code_ptr, 0);
        insn = deposit32(insn, 0, 8, INDEX_op_tci_movl);
        insn = deposit32(insn, 8, 4, ret);
        tcg_out32(s, insn);
    }
}
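
/*
 * E.g. movi with arg = 100 fits in 20 signed bits and becomes a
 * single tci_movi carrying the constant in bits [12,31]; arg =
 * 0x12345678 does not fit, so the constant goes to the pool and a
 * tci_movl is emitted whose displacement field is later patched
 * (via patch_reloc) to point at the pool entry.
 */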

static void tcg_out_extract(TCGContext *s, TCGType type, TCGReg rd,
                            TCGReg rs, unsigned pos, unsigned len)
{
    TCGOpcode opc = type == TCG_TYPE_I32 ?
                    INDEX_op_extract_i32 :
                    INDEX_op_extract_i64;
    tcg_out_op_rrbb(s, opc, rd, rs, pos, len);
}

static void tcg_out_sextract(TCGContext *s, TCGType type, TCGReg rd,
                             TCGReg rs, unsigned pos, unsigned len)
{
    TCGOpcode opc = type == TCG_TYPE_I32 ?
                    INDEX_op_sextract_i32 :
                    INDEX_op_sextract_i64;
    tcg_out_op_rrbb(s, opc, rd, rs, pos, len);
}

static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
{
    tcg_out_sextract(s, type, rd, rs, 0, 8);
}

static void tcg_out_ext8u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_extract(s, TCG_TYPE_REG, rd, rs, 0, 8);
}

static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
{
    tcg_out_sextract(s, type, rd, rs, 0, 16);
}

static void tcg_out_ext16u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_extract(s, TCG_TYPE_REG, rd, rs, 0, 16);
}

static void tcg_out_ext32s(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
    tcg_out_sextract(s, TCG_TYPE_I64, rd, rs, 0, 32);
}

static void tcg_out_ext32u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
    tcg_out_extract(s, TCG_TYPE_I64, rd, rs, 0, 32);
}

static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_ext32s(s, rd, rs);
}

static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_ext32u(s, rd, rs);
}

static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
    tcg_out_mov(s, TCG_TYPE_I32, rd, rs);
}

static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
{
    return false;
}

static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                             tcg_target_long imm)
{
    /* This function is only used for passing structs by reference. */
    g_assert_not_reached();
}

static void tcg_out_call(TCGContext *s, const tcg_insn_unit *func,
                         const TCGHelperInfo *info)
{
    ffi_cif *cif = info->cif;
    tcg_insn_unit insn = 0;
    uint8_t which;

    if (cif->rtype == &ffi_type_void) {
        which = 0;
    } else {
        tcg_debug_assert(cif->rtype->size == 4 ||
                         cif->rtype->size == 8 ||
                         cif->rtype->size == 16);
        which = ctz32(cif->rtype->size) - 1;
    }
    new_pool_l2(s, 20, s->code_ptr, 0, (uintptr_t)func, (uintptr_t)cif);
    insn = deposit32(insn, 0, 8, INDEX_op_call);
    insn = deposit32(insn, 8, 4, which);
    tcg_out32(s, insn);
}
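
/*
 * The 4-bit "which" field tells the interpreter how to retrieve the
 * return value: 0 for void, otherwise ctz32(size) - 1, i.e. 1, 2 or
 * 3 for 4-, 8- or 16-byte results.  The callee address and its
 * ffi_cif descriptor travel through a two-word pool entry rather
 * than the insn word itself.
 */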

#if TCG_TARGET_REG_BITS == 64
# define CASE_32_64(x) \
        case glue(glue(INDEX_op_, x), _i64): \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x) \
        case glue(glue(INDEX_op_, x), _i64):
#else
# define CASE_32_64(x) \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x)
#endif
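
/*
 * E.g. on a 64-bit host, CASE_32_64(shl) expands to
 * "case INDEX_op_shl_i64: case INDEX_op_shl_i32:", while on a
 * 32-bit host only the _i32 label is produced and CASE_64 expands
 * to nothing.
 */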

static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg)
{
    tcg_out_op_p(s, INDEX_op_exit_tb, (void *)arg);
}

static void tcg_out_goto_tb(TCGContext *s, int which)
{
    /* Indirect jump method. */
    tcg_out_op_p(s, INDEX_op_goto_tb, (void *)get_jmp_target_addr(s, which));
    set_jmp_reset_offset(s, which);
}

void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
                              uintptr_t jmp_rx, uintptr_t jmp_rw)
{
    /* Always indirect, nothing to do. */
}
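
/*
 * Because goto_tb encodes the address of the jump-target slot rather
 * than the destination itself, retargeting a chained TB only needs
 * the slot updated by the generic code; the insn stream is never
 * rewritten, which is why tb_target_set_jmp_target above is empty.
 */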

static void tgen_add(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_add, a0, a1, a2);
}

static const TCGOutOpBinary outop_add = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_add,
};

static void tgen_and(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_and, a0, a1, a2);
}

static const TCGOutOpBinary outop_and = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_and,
};

static void tgen_andc(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_andc, a0, a1, a2);
}

static const TCGOutOpBinary outop_andc = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_andc,
};

static void tgen_divs(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    TCGOpcode opc = (type == TCG_TYPE_I32
                     ? INDEX_op_tci_divs32
                     : INDEX_op_divs);
    tcg_out_op_rrr(s, opc, a0, a1, a2);
}

static const TCGOutOpBinary outop_divs = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_divs,
};

static const TCGOutOpDivRem outop_divs2 = {
    .base.static_constraint = C_NotImplemented,
};

static void tgen_divu(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    TCGOpcode opc = (type == TCG_TYPE_I32
                     ? INDEX_op_tci_divu32
                     : INDEX_op_divu);
    tcg_out_op_rrr(s, opc, a0, a1, a2);
}

static const TCGOutOpBinary outop_divu = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_divu,
};

static const TCGOutOpDivRem outop_divu2 = {
    .base.static_constraint = C_NotImplemented,
};

static void tgen_eqv(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_eqv, a0, a1, a2);
}

static const TCGOutOpBinary outop_eqv = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_eqv,
};

static void tgen_mul(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_mul, a0, a1, a2);
}

static const TCGOutOpBinary outop_mul = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_mul,
};

static const TCGOutOpBinary outop_mulsh = {
    .base.static_constraint = C_NotImplemented,
};

static const TCGOutOpBinary outop_muluh = {
    .base.static_constraint = C_NotImplemented,
};

static void tgen_nand(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_nand, a0, a1, a2);
}

static const TCGOutOpBinary outop_nand = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_nand,
};

static void tgen_nor(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_nor, a0, a1, a2);
}

static const TCGOutOpBinary outop_nor = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_nor,
};

static void tgen_or(TCGContext *s, TCGType type,
                    TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_or, a0, a1, a2);
}

static const TCGOutOpBinary outop_or = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_or,
};

static void tgen_orc(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_orc, a0, a1, a2);
}

static const TCGOutOpBinary outop_orc = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_orc,
};

static void tgen_sub(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_sub, a0, a1, a2);
}

static const TCGOutOpSubtract outop_sub = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_sub,
};

static void tgen_xor(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_xor, a0, a1, a2);
}

static const TCGOutOpBinary outop_xor = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_xor,
};

static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
    tcg_out_op_rr(s, INDEX_op_neg, a0, a1);
}

static const TCGOutOpUnary outop_neg = {
    .base.static_constraint = C_O1_I1(r, r),
    .out_rr = tgen_neg,
};

static void tgen_not(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
    tcg_out_op_rr(s, INDEX_op_not, a0, a1);
}

static const TCGOutOpUnary outop_not = {
    .base.static_constraint = C_O1_I1(r, r),
    .out_rr = tgen_not,
};

static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS])
{
    int width;

    switch (opc) {
    case INDEX_op_goto_ptr:
        tcg_out_op_r(s, opc, args[0]);
        break;

    case INDEX_op_br:
        tcg_out_op_l(s, opc, arg_label(args[0]));
        break;

    CASE_32_64(setcond)
        tcg_out_op_rrrc(s, opc, args[0], args[1], args[2], args[3]);
        break;

    CASE_32_64(movcond)
    case INDEX_op_setcond2_i32:
        tcg_out_op_rrrrrc(s, opc, args[0], args[1], args[2],
                          args[3], args[4], args[5]);
        break;

    CASE_32_64(ld8u)
    CASE_32_64(ld8s)
    CASE_32_64(ld16u)
    CASE_32_64(ld16s)
    case INDEX_op_ld_i32:
    CASE_64(ld32u)
    CASE_64(ld32s)
    CASE_64(ld)
    CASE_32_64(st8)
    CASE_32_64(st16)
    case INDEX_op_st_i32:
    CASE_64(st32)
    CASE_64(st)
        tcg_out_ldst(s, opc, args[0], args[1], args[2]);
        break;

    CASE_32_64(shl)
    CASE_32_64(shr)
    CASE_32_64(sar)
    CASE_32_64(rotl)     /* Optional (TCG_TARGET_HAS_rot_*). */
    CASE_32_64(rotr)     /* Optional (TCG_TARGET_HAS_rot_*). */
    CASE_32_64(rem)      /* Optional (TCG_TARGET_HAS_div_*). */
    CASE_32_64(remu)     /* Optional (TCG_TARGET_HAS_div_*). */
    CASE_32_64(clz)      /* Optional (TCG_TARGET_HAS_clz_*). */
    CASE_32_64(ctz)      /* Optional (TCG_TARGET_HAS_ctz_*). */
        tcg_out_op_rrr(s, opc, args[0], args[1], args[2]);
        break;

    CASE_32_64(deposit)
        tcg_out_op_rrrbb(s, opc, args[0], args[1], args[2], args[3], args[4]);
        break;

    CASE_32_64(extract)  /* Optional (TCG_TARGET_HAS_extract_*). */
    CASE_32_64(sextract) /* Optional (TCG_TARGET_HAS_sextract_*). */
        tcg_out_op_rrbb(s, opc, args[0], args[1], args[2], args[3]);
        break;

    CASE_32_64(brcond)
        tcg_out_op_rrrc(s, (opc == INDEX_op_brcond_i32
                            ? INDEX_op_setcond_i32 : INDEX_op_setcond_i64),
                        TCG_REG_TMP, args[0], args[1], args[2]);
        tcg_out_op_rl(s, opc, TCG_REG_TMP, arg_label(args[3]));
        break;

    CASE_32_64(ctpop)    /* Optional (TCG_TARGET_HAS_ctpop_*). */
    case INDEX_op_bswap32_i32: /* Optional (TCG_TARGET_HAS_bswap32_i32). */
    case INDEX_op_bswap64_i64: /* Optional (TCG_TARGET_HAS_bswap64_i64). */
        tcg_out_op_rr(s, opc, args[0], args[1]);
        break;

    case INDEX_op_bswap16_i32: /* Optional (TCG_TARGET_HAS_bswap16_i32). */
    case INDEX_op_bswap16_i64: /* Optional (TCG_TARGET_HAS_bswap16_i64). */
        width = 16;
        goto do_bswap;
    case INDEX_op_bswap32_i64: /* Optional (TCG_TARGET_HAS_bswap32_i64). */
        width = 32;
    do_bswap:
        /* The base TCI bswap ops zero-extend and ignore the high bits. */
        tcg_out_op_rr(s, opc, args[0], args[1]);
        if (args[2] & TCG_BSWAP_OS) {
            tcg_out_sextract(s, TCG_TYPE_REG, args[0], args[0], 0, width);
        }
        break;

    CASE_32_64(add2)
    CASE_32_64(sub2)
        tcg_out_op_rrrrrr(s, opc, args[0], args[1], args[2],
                          args[3], args[4], args[5]);
        break;

#if TCG_TARGET_REG_BITS == 32
    case INDEX_op_brcond2_i32:
        tcg_out_op_rrrrrc(s, INDEX_op_setcond2_i32, TCG_REG_TMP,
                          args[0], args[1], args[2], args[3], args[4]);
        tcg_out_op_rl(s, INDEX_op_brcond_i32, TCG_REG_TMP, arg_label(args[5]));
        break;
#endif

    CASE_32_64(mulu2)
    CASE_32_64(muls2)
        tcg_out_op_rrrr(s, opc, args[0], args[1], args[2], args[3]);
        break;

    case INDEX_op_qemu_ld_i64:
    case INDEX_op_qemu_st_i64:
        if (TCG_TARGET_REG_BITS == 32) {
            tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP, args[3]);
            tcg_out_op_rrrr(s, opc, args[0], args[1], args[2], TCG_REG_TMP);
            break;
        }
        /* fall through */
    case INDEX_op_qemu_ld_i32:
    case INDEX_op_qemu_st_i32:
        if (TCG_TARGET_REG_BITS == 64 && s->addr_type == TCG_TYPE_I32) {
            tcg_out_ext32u(s, TCG_REG_TMP, args[1]);
            tcg_out_op_rrm(s, opc, args[0], TCG_REG_TMP, args[2]);
        } else {
            tcg_out_op_rrm(s, opc, args[0], args[1], args[2]);
        }
        break;

    case INDEX_op_mb:
        tcg_out_op_v(s, opc);
        break;

    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
    case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb.  */
    case INDEX_op_goto_tb:  /* Always emitted via tcg_out_goto_tb.  */
    case INDEX_op_ext_i32_i64:  /* Always emitted via tcg_reg_alloc_op.  */
    case INDEX_op_extu_i32_i64:
    case INDEX_op_extrl_i64_i32:
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_st(TCGContext *s, TCGType type, TCGReg val, TCGReg base,
                       intptr_t offset)
{
    switch (type) {
    case TCG_TYPE_I32:
        tcg_out_ldst(s, INDEX_op_st_i32, val, base, offset);
        break;
#if TCG_TARGET_REG_BITS == 64
    case TCG_TYPE_I64:
        tcg_out_ldst(s, INDEX_op_st_i64, val, base, offset);
        break;
#endif
    default:
        g_assert_not_reached();
    }
}

static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                               TCGReg base, intptr_t ofs)
{
    return false;
}

/* Test if a constant matches the constraint. */
static bool tcg_target_const_match(int64_t val, int ct,
                                   TCGType type, TCGCond cond, int vece)
{
    return ct & TCG_CT_CONST;
}
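
/*
 * Any value matches once TCG_CT_CONST is set: TCI defines no special
 * constant constraints, and since every constraint set in this file
 * uses plain "r", constants are always materialized into a register
 * with tcg_out_movi first.
 */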

static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
    memset(p, 0, sizeof(*p) * count);
}

static void tcg_target_init(TCGContext *s)
{
    /* The current code uses uint8_t for tcg operations. */
    tcg_debug_assert(tcg_op_defs_max <= UINT8_MAX);

    /* Registers available for 32 bit operations. */
    tcg_target_available_regs[TCG_TYPE_I32] = BIT(TCG_TARGET_NB_REGS) - 1;
    /* Registers available for 64 bit operations. */
    tcg_target_available_regs[TCG_TYPE_I64] = BIT(TCG_TARGET_NB_REGS) - 1;
    /*
     * The interpreter "registers" are in the local stack frame and
     * cannot be clobbered by the called helper functions.  However,
     * the interpreter assumes a 128-bit return value and assigns to
     * the return value registers.
     */
    tcg_target_call_clobber_regs =
        MAKE_64BIT_MASK(TCG_REG_R0, 128 / TCG_TARGET_REG_BITS);

    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);

    /* The call arguments come first, followed by the temp storage. */
    tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE,
                  TCG_STATIC_FRAME_SIZE);
}
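
/*
 * Resulting frame layout off TCG_REG_CALL_STACK (a sketch; the sizes
 * are defined alongside this backend, not in this file):
 *   [0, TCG_STATIC_CALL_ARGS_SIZE)       outgoing helper arguments
 *   [TCG_STATIC_CALL_ARGS_SIZE,
 *    TCG_STATIC_CALL_ARGS_SIZE
 *      + TCG_STATIC_FRAME_SIZE)          spilled temporaries
 * which matches the range accepted by stack_bounds_check above.
 */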

/* Generate global QEMU prologue and epilogue code. */
static inline void tcg_target_qemu_prologue(TCGContext *s)
{
}

static void tcg_out_tb_start(TCGContext *s)
{
    /* nothing to do */
}

bool tcg_target_has_memory_bswap(MemOp memop)
{
    return true;
}

static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    g_assert_not_reached();
}

static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    g_assert_not_reached();
}