/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2009, 2011 Stefan Weil
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* Used for function call generation. */
#define TCG_TARGET_CALL_STACK_OFFSET    0
#define TCG_TARGET_STACK_ALIGN          8
#if TCG_TARGET_REG_BITS == 32
# define TCG_TARGET_CALL_ARG_I32        TCG_CALL_ARG_EVEN
# define TCG_TARGET_CALL_ARG_I64        TCG_CALL_ARG_EVEN
# define TCG_TARGET_CALL_ARG_I128       TCG_CALL_ARG_EVEN
#else
# define TCG_TARGET_CALL_ARG_I32        TCG_CALL_ARG_NORMAL
# define TCG_TARGET_CALL_ARG_I64        TCG_CALL_ARG_NORMAL
# define TCG_TARGET_CALL_ARG_I128       TCG_CALL_ARG_NORMAL
#endif
#define TCG_TARGET_CALL_RET_I128        TCG_CALL_RET_NORMAL

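/*
 * Return the constraint set for an opcode.  The interpreter has no
 * immediate operand forms or addressing-mode restrictions, so every
 * operand is a plain register ("r").
 */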
static TCGConstraintSetIndex
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
{
    switch (op) {
    case INDEX_op_goto_ptr:
        return C_O0_I1(r);

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap16_i64:
    case INDEX_op_bswap32_i32:
    case INDEX_op_bswap32_i64:
    case INDEX_op_bswap64_i64:
    case INDEX_op_extract_i32:
    case INDEX_op_extract_i64:
    case INDEX_op_sextract_i32:
    case INDEX_op_sextract_i64:
    case INDEX_op_ctpop_i32:
    case INDEX_op_ctpop_i64:
        return C_O1_I1(r, r);

    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
        return C_O0_I2(r, r);

    case INDEX_op_rotl_i32:
    case INDEX_op_rotl_i64:
    case INDEX_op_rotr_i32:
    case INDEX_op_rotr_i64:
    case INDEX_op_setcond_i32:
    case INDEX_op_setcond_i64:
    case INDEX_op_deposit_i32:
    case INDEX_op_deposit_i64:
    case INDEX_op_clz_i32:
    case INDEX_op_clz_i64:
    case INDEX_op_ctz_i32:
    case INDEX_op_ctz_i64:
        return C_O1_I2(r, r, r);

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        return C_O0_I2(r, r);

    case INDEX_op_add2_i32:
    case INDEX_op_add2_i64:
    case INDEX_op_sub2_i32:
    case INDEX_op_sub2_i64:
        return C_O2_I4(r, r, r, r, r, r);

#if TCG_TARGET_REG_BITS == 32
    case INDEX_op_brcond2_i32:
        return C_O0_I4(r, r, r, r);
#endif

    case INDEX_op_mulu2_i32:
    case INDEX_op_mulu2_i64:
    case INDEX_op_muls2_i32:
    case INDEX_op_muls2_i64:
        return C_O2_I2(r, r, r, r);

    case INDEX_op_movcond_i32:
    case INDEX_op_movcond_i64:
    case INDEX_op_setcond2_i32:
        return C_O1_I4(r, r, r, r, r);

    case INDEX_op_qemu_ld_i32:
        return C_O1_I1(r, r);
    case INDEX_op_qemu_ld_i64:
        return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O2_I1(r, r, r);
    case INDEX_op_qemu_st_i32:
        return C_O0_I2(r, r);
    case INDEX_op_qemu_st_i64:
        return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r) : C_O0_I3(r, r, r);

    default:
        return C_NotImplemented;
    }
}

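/*
 * Prefer the scratch registers R4..R15; R0..R3 can carry a helper's
 * return value (see tcg_target_call_oarg_reg below), so they are
 * allocated last.
 */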
static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R12,
    TCG_REG_R13,
    TCG_REG_R14,
    TCG_REG_R15,
    /* Either 2 or 4 of these are call clobbered, so use them last. */
    TCG_REG_R3,
    TCG_REG_R2,
    TCG_REG_R1,
    TCG_REG_R0,
};

/* No call arguments via registers.  All will be stored on the "stack". */
static const int tcg_target_call_iarg_regs[] = { };

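/*
 * Helper return values are assigned to R0 upward: up to 128 bits,
 * i.e. two registers on a 64-bit host or four on a 32-bit host.
 */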
static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
{
    tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
    tcg_debug_assert(slot >= 0 && slot < 128 / TCG_TARGET_REG_BITS);
    return TCG_REG_R0 + slot;
}

#ifdef CONFIG_DEBUG_TCG
static const char *const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "r00",
    "r01",
    "r02",
    "r03",
    "r04",
    "r05",
    "r06",
    "r07",
    "r08",
    "r09",
    "r10",
    "r11",
    "r12",
    "r13",
    "r14",
    "r15",
};
#endif

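/*
 * The only relocation type is 20: a signed 20-bit byte displacement,
 * measured from the end of the 32-bit instruction word and stored in
 * its high 20 bits.  Return false if the displacement does not fit.
 */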
static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    intptr_t diff = value - (intptr_t)(code_ptr + 1);

    tcg_debug_assert(addend == 0);
    tcg_debug_assert(type == 20);

    if (diff == sextract32(diff, 0, type)) {
        tcg_patch32(code_ptr, deposit32(*code_ptr, 32 - type, type, diff));
        return true;
    }
    return false;
}

static void stack_bounds_check(TCGReg base, intptr_t offset)
{
    if (base == TCG_REG_CALL_STACK) {
        tcg_debug_assert(offset >= 0);
        tcg_debug_assert(offset < (TCG_STATIC_CALL_ARGS_SIZE +
                                   TCG_STATIC_FRAME_SIZE));
    }
}

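/*
 * Emitters for the interpreter's instruction formats.  Every TCI
 * instruction is one 32-bit word, assembled with deposit32(): the
 * opcode in bits [0,7], 4-bit register numbers packed from bit 8
 * upward, and any immediate (16-bit offset or MemOpIdx, 20-bit
 * constant or displacement, 6-bit bit-field position/length,
 * 4-bit condition) in the remaining bits.
 */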
static void tcg_out_op_l(TCGContext *s, TCGOpcode op, TCGLabel *l0)
{
    tcg_insn_unit insn = 0;

    tcg_out_reloc(s, s->code_ptr, 20, l0, 0);
    insn = deposit32(insn, 0, 8, op);
    tcg_out32(s, insn);
}

static void tcg_out_op_p(TCGContext *s, TCGOpcode op, void *p0)
{
    tcg_insn_unit insn = 0;
    intptr_t diff;

    /* Special case for exit_tb: map null -> 0. */
    if (p0 == NULL) {
        diff = 0;
    } else {
        diff = p0 - (void *)(s->code_ptr + 1);
        tcg_debug_assert(diff != 0);
        if (diff != sextract32(diff, 0, 20)) {
            tcg_raise_tb_overflow(s);
        }
    }
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 12, 20, diff);
    tcg_out32(s, insn);
}

static void tcg_out_op_r(TCGContext *s, TCGOpcode op, TCGReg r0)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    tcg_out32(s, insn);
}

static void tcg_out_op_v(TCGContext *s, TCGOpcode op)
{
    tcg_out32(s, (uint8_t)op);
}

static void tcg_out_op_ri(TCGContext *s, TCGOpcode op, TCGReg r0, int32_t i1)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(i1 == sextract32(i1, 0, 20));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 20, i1);
    tcg_out32(s, insn);
}

static void tcg_out_op_rl(TCGContext *s, TCGOpcode op, TCGReg r0, TCGLabel *l1)
{
    tcg_insn_unit insn = 0;

    tcg_out_reloc(s, s->code_ptr, 20, l1, 0);
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    tcg_out32(s, insn);
}

static void tcg_out_op_rr(TCGContext *s, TCGOpcode op, TCGReg r0, TCGReg r1)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrm(TCGContext *s, TCGOpcode op,
                           TCGReg r0, TCGReg r1, TCGArg m2)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(m2 == extract32(m2, 0, 16));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 16, m2);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrr(TCGContext *s, TCGOpcode op,
                           TCGReg r0, TCGReg r1, TCGReg r2)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrs(TCGContext *s, TCGOpcode op,
                           TCGReg r0, TCGReg r1, intptr_t i2)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(i2 == sextract32(i2, 0, 16));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 16, i2);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrbb(TCGContext *s, TCGOpcode op, TCGReg r0,
                            TCGReg r1, uint8_t b2, uint8_t b3)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(b2 == extract32(b2, 0, 6));
    tcg_debug_assert(b3 == extract32(b3, 0, 6));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 6, b2);
    insn = deposit32(insn, 22, 6, b3);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrrc(TCGContext *s, TCGOpcode op,
                            TCGReg r0, TCGReg r1, TCGReg r2, TCGCond c3)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 4, c3);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrrbb(TCGContext *s, TCGOpcode op, TCGReg r0,
                             TCGReg r1, TCGReg r2, uint8_t b3, uint8_t b4)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(b3 == extract32(b3, 0, 6));
    tcg_debug_assert(b4 == extract32(b4, 0, 6));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 6, b3);
    insn = deposit32(insn, 26, 6, b4);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrrr(TCGContext *s, TCGOpcode op,
                            TCGReg r0, TCGReg r1, TCGReg r2, TCGReg r3)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 4, r3);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrrrrc(TCGContext *s, TCGOpcode op,
                              TCGReg r0, TCGReg r1, TCGReg r2,
                              TCGReg r3, TCGReg r4, TCGCond c5)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 4, r3);
    insn = deposit32(insn, 24, 4, r4);
    insn = deposit32(insn, 28, 4, c5);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrrrrr(TCGContext *s, TCGOpcode op,
                              TCGReg r0, TCGReg r1, TCGReg r2,
                              TCGReg r3, TCGReg r4, TCGReg r5)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 4, r3);
    insn = deposit32(insn, 24, 4, r4);
    insn = deposit32(insn, 28, 4, r5);
    tcg_out32(s, insn);
}

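/*
 * The load/store format carries a signed 16-bit displacement; larger
 * offsets are first materialized into TCG_REG_TMP and added to the base.
 */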
static void tcg_out_ldst(TCGContext *s, TCGOpcode op, TCGReg val,
                         TCGReg base, intptr_t offset)
{
    stack_bounds_check(base, offset);
    if (offset != sextract32(offset, 0, 16)) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, offset);
        tcg_out_op_rrr(s, INDEX_op_add, TCG_REG_TMP, TCG_REG_TMP, base);
        base = TCG_REG_TMP;
        offset = 0;
    }
    tcg_out_op_rrs(s, op, val, base, offset);
}

static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg val, TCGReg base,
                       intptr_t offset)
{
    switch (type) {
    case TCG_TYPE_I32:
        tcg_out_ldst(s, INDEX_op_ld_i32, val, base, offset);
        break;
#if TCG_TARGET_REG_BITS == 64
    case TCG_TYPE_I64:
        tcg_out_ldst(s, INDEX_op_ld_i64, val, base, offset);
        break;
#endif
    default:
        g_assert_not_reached();
    }
}

static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    tcg_out_op_rr(s, INDEX_op_mov, ret, arg);
    return true;
}

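/*
 * Constants that fit the signed 20-bit immediate are emitted inline
 * with tci_movi; anything wider goes into the constant pool and is
 * loaded pc-relative with tci_movl.
 */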
static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg)
{
    switch (type) {
    case TCG_TYPE_I32:
#if TCG_TARGET_REG_BITS == 64
        arg = (int32_t)arg;
        /* fall through */
    case TCG_TYPE_I64:
#endif
        break;
    default:
        g_assert_not_reached();
    }

    if (arg == sextract32(arg, 0, 20)) {
        tcg_out_op_ri(s, INDEX_op_tci_movi, ret, arg);
    } else {
        tcg_insn_unit insn = 0;

        new_pool_label(s, arg, 20, s->code_ptr, 0);
        insn = deposit32(insn, 0, 8, INDEX_op_tci_movl);
        insn = deposit32(insn, 8, 4, ret);
        tcg_out32(s, insn);
    }
}

static void tcg_out_extract(TCGContext *s, TCGType type, TCGReg rd,
                            TCGReg rs, unsigned pos, unsigned len)
{
    TCGOpcode opc = type == TCG_TYPE_I32 ?
                    INDEX_op_extract_i32 :
                    INDEX_op_extract_i64;
    tcg_out_op_rrbb(s, opc, rd, rs, pos, len);
}

static void tcg_out_sextract(TCGContext *s, TCGType type, TCGReg rd,
                             TCGReg rs, unsigned pos, unsigned len)
{
    TCGOpcode opc = type == TCG_TYPE_I32 ?
                    INDEX_op_sextract_i32 :
                    INDEX_op_sextract_i64;
    tcg_out_op_rrbb(s, opc, rd, rs, pos, len);
}

static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
{
    tcg_out_sextract(s, type, rd, rs, 0, 8);
}

static void tcg_out_ext8u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_extract(s, TCG_TYPE_REG, rd, rs, 0, 8);
}

static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
{
    tcg_out_sextract(s, type, rd, rs, 0, 16);
}

static void tcg_out_ext16u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_extract(s, TCG_TYPE_REG, rd, rs, 0, 16);
}

static void tcg_out_ext32s(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
    tcg_out_sextract(s, TCG_TYPE_I64, rd, rs, 0, 32);
}

static void tcg_out_ext32u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
    tcg_out_extract(s, TCG_TYPE_I64, rd, rs, 0, 32);
}

static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_ext32s(s, rd, rs);
}

static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_ext32u(s, rd, rs);
}

static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
    tcg_out_mov(s, TCG_TYPE_I32, rd, rs);
}

static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
{
    return false;
}

static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                             tcg_target_long imm)
{
    /* This function is only used for passing structs by reference. */
    g_assert_not_reached();
}

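/*
 * Calls go through libffi: the function pointer and its ffi_cif
 * descriptor are stored as a pair in the constant pool, while the
 * 4-bit register field encodes the return-value size class for the
 * interpreter (0 = void, 1 = 32-bit, 2 = 64-bit, 3 = 128-bit).
 */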
static void tcg_out_call(TCGContext *s, const tcg_insn_unit *func,
                         const TCGHelperInfo *info)
{
    ffi_cif *cif = info->cif;
    tcg_insn_unit insn = 0;
    uint8_t which;

    if (cif->rtype == &ffi_type_void) {
        which = 0;
    } else {
        tcg_debug_assert(cif->rtype->size == 4 ||
                         cif->rtype->size == 8 ||
                         cif->rtype->size == 16);
        which = ctz32(cif->rtype->size) - 1;
    }
    new_pool_l2(s, 20, s->code_ptr, 0, (uintptr_t)func, (uintptr_t)cif);
    insn = deposit32(insn, 0, 8, INDEX_op_call);
    insn = deposit32(insn, 8, 4, which);
    tcg_out32(s, insn);
}

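/*
 * CASE_32_64(x) expands to the _i32 case label, plus the matching
 * _i64 label on a 64-bit host; CASE_64(x) expands to the _i64 label
 * on a 64-bit host and to nothing at all on a 32-bit host.
 */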
#if TCG_TARGET_REG_BITS == 64
# define CASE_32_64(x) \
        case glue(glue(INDEX_op_, x), _i64): \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x) \
        case glue(glue(INDEX_op_, x), _i64):
#else
# define CASE_32_64(x) \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x)
#endif

static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg)
{
    tcg_out_op_p(s, INDEX_op_exit_tb, (void *)arg);
}

static void tcg_out_goto_tb(TCGContext *s, int which)
{
    /* indirect jump method. */
    tcg_out_op_p(s, INDEX_op_goto_tb, (void *)get_jmp_target_addr(s, which));
    set_jmp_reset_offset(s, which);
}

void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
                              uintptr_t jmp_rx, uintptr_t jmp_rw)
{
    /* Always indirect, nothing to do */
}

static void tgen_add(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_add, a0, a1, a2);
}

static const TCGOutOpBinary outop_add = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_add,
};

static void tgen_and(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_and, a0, a1, a2);
}

static const TCGOutOpBinary outop_and = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_and,
};

static void tgen_andc(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_andc, a0, a1, a2);
}

static const TCGOutOpBinary outop_andc = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_andc,
};

static void tgen_divs(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    TCGOpcode opc = (type == TCG_TYPE_I32
                     ? INDEX_op_tci_divs32
                     : INDEX_op_divs);
    tcg_out_op_rrr(s, opc, a0, a1, a2);
}

static const TCGOutOpBinary outop_divs = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_divs,
};

static const TCGOutOpDivRem outop_divs2 = {
    .base.static_constraint = C_NotImplemented,
};

static void tgen_divu(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    TCGOpcode opc = (type == TCG_TYPE_I32
                     ? INDEX_op_tci_divu32
                     : INDEX_op_divu);
    tcg_out_op_rrr(s, opc, a0, a1, a2);
}

static const TCGOutOpBinary outop_divu = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_divu,
};

static const TCGOutOpDivRem outop_divu2 = {
    .base.static_constraint = C_NotImplemented,
};

static void tgen_eqv(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_eqv, a0, a1, a2);
}

static const TCGOutOpBinary outop_eqv = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_eqv,
};

static void tgen_mul(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_mul, a0, a1, a2);
}

static const TCGOutOpBinary outop_mul = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_mul,
};

static const TCGOutOpBinary outop_mulsh = {
    .base.static_constraint = C_NotImplemented,
};

static const TCGOutOpBinary outop_muluh = {
    .base.static_constraint = C_NotImplemented,
};

static void tgen_nand(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_nand, a0, a1, a2);
}

static const TCGOutOpBinary outop_nand = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_nand,
};

static void tgen_nor(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_nor, a0, a1, a2);
}

static const TCGOutOpBinary outop_nor = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_nor,
};

static void tgen_or(TCGContext *s, TCGType type,
                    TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_or, a0, a1, a2);
}

static const TCGOutOpBinary outop_or = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_or,
};

static void tgen_orc(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_orc, a0, a1, a2);
}

static const TCGOutOpBinary outop_orc = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_orc,
};

static void tgen_rems(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    TCGOpcode opc = (type == TCG_TYPE_I32
                     ? INDEX_op_tci_rems32
                     : INDEX_op_rems);
    tcg_out_op_rrr(s, opc, a0, a1, a2);
}

static const TCGOutOpBinary outop_rems = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_rems,
};

static void tgen_remu(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    TCGOpcode opc = (type == TCG_TYPE_I32
                     ? INDEX_op_tci_remu32
                     : INDEX_op_remu);
    tcg_out_op_rrr(s, opc, a0, a1, a2);
}

static const TCGOutOpBinary outop_remu = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_remu,
};

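/*
 * The interpreter's shifts use the full register width, so a 32-bit
 * operation on a 64-bit host must sign-extend the input first.
 */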
static void tgen_sar(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    if (type < TCG_TYPE_REG) {
        tcg_out_ext32s(s, TCG_REG_TMP, a1);
        a1 = TCG_REG_TMP;
    }
    tcg_out_op_rrr(s, INDEX_op_sar, a0, a1, a2);
}

static const TCGOutOpBinary outop_sar = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_sar,
};

static void tgen_shl(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_shl, a0, a1, a2);
}

static const TCGOutOpBinary outop_shl = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_shl,
};

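/* As for tgen_sar, but a logical shift needs zero-extension instead. */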
static void tgen_shr(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    if (type < TCG_TYPE_REG) {
        tcg_out_ext32u(s, TCG_REG_TMP, a1);
        a1 = TCG_REG_TMP;
    }
    tcg_out_op_rrr(s, INDEX_op_shr, a0, a1, a2);
}

static const TCGOutOpBinary outop_shr = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_shr,
};

static void tgen_sub(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_sub, a0, a1, a2);
}

static const TCGOutOpSubtract outop_sub = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_sub,
};

static void tgen_xor(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_xor, a0, a1, a2);
}

static const TCGOutOpBinary outop_xor = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_xor,
};

static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
    tcg_out_op_rr(s, INDEX_op_neg, a0, a1);
}

static const TCGOutOpUnary outop_neg = {
    .base.static_constraint = C_O1_I1(r, r),
    .out_rr = tgen_neg,
};

static void tgen_not(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
    tcg_out_op_rr(s, INDEX_op_not, a0, a1);
}

static const TCGOutOpUnary outop_not = {
    .base.static_constraint = C_O1_I1(r, r),
    .out_rr = tgen_not,
};

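/*
 * Central opcode emitter: dispatch on the opcode's argument signature
 * and forward to the matching format emitter above.
 */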
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS])
{
    int width;

    switch (opc) {
    case INDEX_op_goto_ptr:
        tcg_out_op_r(s, opc, args[0]);
        break;

    case INDEX_op_br:
        tcg_out_op_l(s, opc, arg_label(args[0]));
        break;

    CASE_32_64(setcond)
        tcg_out_op_rrrc(s, opc, args[0], args[1], args[2], args[3]);
        break;

    CASE_32_64(movcond)
    case INDEX_op_setcond2_i32:
        tcg_out_op_rrrrrc(s, opc, args[0], args[1], args[2],
                          args[3], args[4], args[5]);
        break;

    CASE_32_64(ld8u)
    CASE_32_64(ld8s)
    CASE_32_64(ld16u)
    CASE_32_64(ld16s)
    case INDEX_op_ld_i32:
    CASE_64(ld32u)
    CASE_64(ld32s)
    CASE_64(ld)
    CASE_32_64(st8)
    CASE_32_64(st16)
    case INDEX_op_st_i32:
    CASE_64(st32)
    CASE_64(st)
        tcg_out_ldst(s, opc, args[0], args[1], args[2]);
        break;

    CASE_32_64(rotl)     /* Optional (TCG_TARGET_HAS_rot_*). */
    CASE_32_64(rotr)     /* Optional (TCG_TARGET_HAS_rot_*). */
    CASE_32_64(clz)      /* Optional (TCG_TARGET_HAS_clz_*). */
    CASE_32_64(ctz)      /* Optional (TCG_TARGET_HAS_ctz_*). */
        tcg_out_op_rrr(s, opc, args[0], args[1], args[2]);
        break;

    CASE_32_64(deposit)
        tcg_out_op_rrrbb(s, opc, args[0], args[1], args[2], args[3], args[4]);
        break;

    CASE_32_64(extract)  /* Optional (TCG_TARGET_HAS_extract_*). */
    CASE_32_64(sextract) /* Optional (TCG_TARGET_HAS_sextract_*). */
        tcg_out_op_rrbb(s, opc, args[0], args[1], args[2], args[3]);
        break;

    CASE_32_64(brcond)
        tcg_out_op_rrrc(s, (opc == INDEX_op_brcond_i32
                            ? INDEX_op_setcond_i32 : INDEX_op_setcond_i64),
                        TCG_REG_TMP, args[0], args[1], args[2]);
        tcg_out_op_rl(s, opc, TCG_REG_TMP, arg_label(args[3]));
        break;

    CASE_32_64(ctpop)    /* Optional (TCG_TARGET_HAS_ctpop_*). */
    case INDEX_op_bswap32_i32: /* Optional (TCG_TARGET_HAS_bswap32_i32). */
    case INDEX_op_bswap64_i64: /* Optional (TCG_TARGET_HAS_bswap64_i64). */
        tcg_out_op_rr(s, opc, args[0], args[1]);
        break;

    case INDEX_op_bswap16_i32: /* Optional (TCG_TARGET_HAS_bswap16_i32). */
    case INDEX_op_bswap16_i64: /* Optional (TCG_TARGET_HAS_bswap16_i64). */
        width = 16;
        goto do_bswap;
    case INDEX_op_bswap32_i64: /* Optional (TCG_TARGET_HAS_bswap32_i64). */
        width = 32;
    do_bswap:
        /* The base tci bswaps zero-extend and ignore the high bits. */
        tcg_out_op_rr(s, opc, args[0], args[1]);
        if (args[2] & TCG_BSWAP_OS) {
            tcg_out_sextract(s, TCG_TYPE_REG, args[0], args[0], 0, width);
        }
        break;

    CASE_32_64(add2)
    CASE_32_64(sub2)
        tcg_out_op_rrrrrr(s, opc, args[0], args[1], args[2],
                          args[3], args[4], args[5]);
        break;

#if TCG_TARGET_REG_BITS == 32
    case INDEX_op_brcond2_i32:
        tcg_out_op_rrrrrc(s, INDEX_op_setcond2_i32, TCG_REG_TMP,
                          args[0], args[1], args[2], args[3], args[4]);
        tcg_out_op_rl(s, INDEX_op_brcond_i32, TCG_REG_TMP, arg_label(args[5]));
        break;
#endif

    CASE_32_64(mulu2)
    CASE_32_64(muls2)
        tcg_out_op_rrrr(s, opc, args[0], args[1], args[2], args[3]);
        break;

    case INDEX_op_qemu_ld_i64:
    case INDEX_op_qemu_st_i64:
        if (TCG_TARGET_REG_BITS == 32) {
            tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP, args[3]);
            tcg_out_op_rrrr(s, opc, args[0], args[1], args[2], TCG_REG_TMP);
            break;
        }
        /* fall through */
    case INDEX_op_qemu_ld_i32:
    case INDEX_op_qemu_st_i32:
        if (TCG_TARGET_REG_BITS == 64 && s->addr_type == TCG_TYPE_I32) {
            tcg_out_ext32u(s, TCG_REG_TMP, args[1]);
            tcg_out_op_rrm(s, opc, args[0], TCG_REG_TMP, args[2]);
        } else {
            tcg_out_op_rrm(s, opc, args[0], args[1], args[2]);
        }
        break;

    case INDEX_op_mb:
        tcg_out_op_v(s, opc);
        break;

    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
    case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb.  */
    case INDEX_op_goto_tb:  /* Always emitted via tcg_out_goto_tb.  */
    case INDEX_op_ext_i32_i64:  /* Always emitted via tcg_reg_alloc_op.  */
    case INDEX_op_extu_i32_i64:
    case INDEX_op_extrl_i64_i32:
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_st(TCGContext *s, TCGType type, TCGReg val, TCGReg base,
                       intptr_t offset)
{
    switch (type) {
    case TCG_TYPE_I32:
        tcg_out_ldst(s, INDEX_op_st_i32, val, base, offset);
        break;
#if TCG_TARGET_REG_BITS == 64
    case TCG_TYPE_I64:
        tcg_out_ldst(s, INDEX_op_st_i64, val, base, offset);
        break;
#endif
    default:
        g_assert_not_reached();
    }
}

static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                               TCGReg base, intptr_t ofs)
{
    return false;
}

/* Test if a constant matches the constraint. */
static bool tcg_target_const_match(int64_t val, int ct,
                                   TCGType type, TCGCond cond, int vece)
{
    return ct & TCG_CT_CONST;
}

static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
    memset(p, 0, sizeof(*p) * count);
}

static void tcg_target_init(TCGContext *s)
{
    /* The current code uses uint8_t for tcg operations. */
    tcg_debug_assert(tcg_op_defs_max <= UINT8_MAX);

    /* Registers available for 32 bit operations. */
    tcg_target_available_regs[TCG_TYPE_I32] = BIT(TCG_TARGET_NB_REGS) - 1;
    /* Registers available for 64 bit operations. */
    tcg_target_available_regs[TCG_TYPE_I64] = BIT(TCG_TARGET_NB_REGS) - 1;
    /*
     * The interpreter "registers" are in the local stack frame and
     * cannot be clobbered by the called helper functions.  However,
     * the interpreter assumes a 128-bit return value and assigns to
     * the return value registers.
     */
    tcg_target_call_clobber_regs =
        MAKE_64BIT_MASK(TCG_REG_R0, 128 / TCG_TARGET_REG_BITS);

    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);

    /* The call arguments come first, followed by the temp storage. */
    tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE,
                  TCG_STATIC_FRAME_SIZE);
}

/* Generate global QEMU prologue and epilogue code. */
static inline void tcg_target_qemu_prologue(TCGContext *s)
{
}

static void tcg_out_tb_start(TCGContext *s)
{
    /* nothing to do */
}

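/*
 * Any byte swapping required by the MemOp is done inside the
 * interpreter, so all orderings can be claimed as supported.
 */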
bool tcg_target_has_memory_bswap(MemOp memop)
{
    return true;
}

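/*
 * Guest memory accesses are handled entirely within the interpreter;
 * no out-of-line slow paths are ever emitted, hence the asserts.
 */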
static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    g_assert_not_reached();
}

static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    g_assert_not_reached();
}
