xref: /openbmc/qemu/tcg/tci/tcg-target.c.inc (revision bfe964809bf6ce951b2e674929d7b730c754e298)
1/*
2 * Tiny Code Generator for QEMU
3 *
4 * Copyright (c) 2009, 2011 Stefan Weil
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24
/* Used for function call generation. */
#define TCG_TARGET_CALL_STACK_OFFSET    0
#define TCG_TARGET_STACK_ALIGN          8
#if TCG_TARGET_REG_BITS == 32
/* On 32-bit hosts all call arguments are aligned to even stack slots. */
# define TCG_TARGET_CALL_ARG_I32        TCG_CALL_ARG_EVEN
# define TCG_TARGET_CALL_ARG_I64        TCG_CALL_ARG_EVEN
# define TCG_TARGET_CALL_ARG_I128       TCG_CALL_ARG_EVEN
#else
/* On 64-bit hosts arguments occupy consecutive slots. */
# define TCG_TARGET_CALL_ARG_I32        TCG_CALL_ARG_NORMAL
# define TCG_TARGET_CALL_ARG_I64        TCG_CALL_ARG_NORMAL
# define TCG_TARGET_CALL_ARG_I128       TCG_CALL_ARG_NORMAL
#endif
/* 128-bit values are returned in registers (see tcg_target_call_oarg_reg). */
#define TCG_TARGET_CALL_RET_I128        TCG_CALL_RET_NORMAL
38
/*
 * Return the register constraint set for each opcode.
 * TCI is an interpreter with no immediate operand forms, so every
 * constraint is simply "r" (any register).
 */
static TCGConstraintSetIndex
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
{
    switch (op) {
    case INDEX_op_goto_ptr:
        return C_O0_I1(r);

    /* Loads and single-operand ops: one output, one input. */
    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap16_i64:
    case INDEX_op_bswap32_i32:
    case INDEX_op_bswap32_i64:
    case INDEX_op_bswap64_i64:
    case INDEX_op_extract_i32:
    case INDEX_op_extract_i64:
    case INDEX_op_sextract_i32:
    case INDEX_op_sextract_i64:
        return C_O1_I1(r, r);

    /* Stores: no output, value + base address inputs. */
    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
        return C_O0_I2(r, r);

    case INDEX_op_setcond_i32:
    case INDEX_op_setcond_i64:
    case INDEX_op_deposit_i32:
    case INDEX_op_deposit_i64:
        return C_O1_I2(r, r, r);

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        return C_O0_I2(r, r);

    /* Double-word arithmetic: two outputs, four inputs. */
    case INDEX_op_add2_i32:
    case INDEX_op_add2_i64:
    case INDEX_op_sub2_i32:
    case INDEX_op_sub2_i64:
        return C_O2_I4(r, r, r, r, r, r);

#if TCG_TARGET_REG_BITS == 32
    case INDEX_op_brcond2_i32:
        return C_O0_I4(r, r, r, r);
#endif

    case INDEX_op_mulu2_i32:
    case INDEX_op_mulu2_i64:
        return C_O2_I2(r, r, r, r);

    case INDEX_op_movcond_i32:
    case INDEX_op_movcond_i64:
    case INDEX_op_setcond2_i32:
        return C_O1_I4(r, r, r, r, r);

    /* Guest memory access; 64-bit data on 32-bit hosts uses a pair. */
    case INDEX_op_qemu_ld_i32:
        return C_O1_I1(r, r);
    case INDEX_op_qemu_ld_i64:
        return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O2_I1(r, r, r);
    case INDEX_op_qemu_st_i32:
        return C_O0_I2(r, r);
    case INDEX_op_qemu_st_i64:
        return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r) : C_O0_I3(r, r, r);

    default:
        return C_NotImplemented;
    }
}
123
/* Register allocation preference order: high registers first. */
static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R12,
    TCG_REG_R13,
    TCG_REG_R14,
    TCG_REG_R15,
    /* Either 2 or 4 of these are call clobbered, so use them last. */
    TCG_REG_R3,
    TCG_REG_R2,
    TCG_REG_R1,
    TCG_REG_R0,
};
143
/* No call arguments via registers.  All will be stored on the "stack". */
static const int tcg_target_call_iarg_regs[] = { };
146
147static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
148{
149    tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
150    tcg_debug_assert(slot >= 0 && slot < 128 / TCG_TARGET_REG_BITS);
151    return TCG_REG_R0 + slot;
152}
153
#ifdef CONFIG_DEBUG_TCG
/* Human-readable register names for debug dumps only. */
static const char *const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "r00",
    "r01",
    "r02",
    "r03",
    "r04",
    "r05",
    "r06",
    "r07",
    "r08",
    "r09",
    "r10",
    "r11",
    "r12",
    "r13",
    "r14",
    "r15",
};
#endif
174
/*
 * Patch a pc-relative relocation into an emitted insn word.
 * The only relocation kind used by TCI is a signed 20-bit displacement
 * stored in the top bits [31:12] of the 32-bit insn word.
 */
static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    /* Displacement is measured from the end of the insn being patched. */
    intptr_t diff = value - (intptr_t)(code_ptr + 1);

    tcg_debug_assert(addend == 0);
    tcg_debug_assert(type == 20);

    /* Only patch if the displacement fits the signed 20-bit field. */
    if (diff == sextract32(diff, 0, type)) {
        tcg_patch32(code_ptr, deposit32(*code_ptr, 32 - type, type, diff));
        return true;
    }
    return false;
}
189
190static void stack_bounds_check(TCGReg base, intptr_t offset)
191{
192    if (base == TCG_REG_CALL_STACK) {
193        tcg_debug_assert(offset >= 0);
194        tcg_debug_assert(offset < (TCG_STATIC_CALL_ARGS_SIZE +
195                                   TCG_STATIC_FRAME_SIZE));
196    }
197}
198
199static void tcg_out_op_l(TCGContext *s, TCGOpcode op, TCGLabel *l0)
200{
201    tcg_insn_unit insn = 0;
202
203    tcg_out_reloc(s, s->code_ptr, 20, l0, 0);
204    insn = deposit32(insn, 0, 8, op);
205    tcg_out32(s, insn);
206}
207
/*
 * Emit an opcode with one pointer operand, encoded as a signed 20-bit
 * pc-relative displacement in bits [31:12].
 */
static void tcg_out_op_p(TCGContext *s, TCGOpcode op, void *p0)
{
    tcg_insn_unit insn = 0;
    intptr_t diff;

    /* Special case for exit_tb: map null -> 0. */
    if (p0 == NULL) {
        diff = 0;
    } else {
        /* Displacement from the end of this insn word. */
        diff = p0 - (void *)(s->code_ptr + 1);
        tcg_debug_assert(diff != 0);
        if (diff != sextract32(diff, 0, 20)) {
            /* Target out of range: restart with a smaller TB. */
            tcg_raise_tb_overflow(s);
        }
    }
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 12, 20, diff);
    tcg_out32(s, insn);
}
227
228static void tcg_out_op_r(TCGContext *s, TCGOpcode op, TCGReg r0)
229{
230    tcg_insn_unit insn = 0;
231
232    insn = deposit32(insn, 0, 8, op);
233    insn = deposit32(insn, 8, 4, r0);
234    tcg_out32(s, insn);
235}
236
237static void tcg_out_op_v(TCGContext *s, TCGOpcode op)
238{
239    tcg_out32(s, (uint8_t)op);
240}
241
242static void tcg_out_op_ri(TCGContext *s, TCGOpcode op, TCGReg r0, int32_t i1)
243{
244    tcg_insn_unit insn = 0;
245
246    tcg_debug_assert(i1 == sextract32(i1, 0, 20));
247    insn = deposit32(insn, 0, 8, op);
248    insn = deposit32(insn, 8, 4, r0);
249    insn = deposit32(insn, 12, 20, i1);
250    tcg_out32(s, insn);
251}
252
253static void tcg_out_op_rl(TCGContext *s, TCGOpcode op, TCGReg r0, TCGLabel *l1)
254{
255    tcg_insn_unit insn = 0;
256
257    tcg_out_reloc(s, s->code_ptr, 20, l1, 0);
258    insn = deposit32(insn, 0, 8, op);
259    insn = deposit32(insn, 8, 4, r0);
260    tcg_out32(s, insn);
261}
262
263static void tcg_out_op_rr(TCGContext *s, TCGOpcode op, TCGReg r0, TCGReg r1)
264{
265    tcg_insn_unit insn = 0;
266
267    insn = deposit32(insn, 0, 8, op);
268    insn = deposit32(insn, 8, 4, r0);
269    insn = deposit32(insn, 12, 4, r1);
270    tcg_out32(s, insn);
271}
272
273static void tcg_out_op_rrm(TCGContext *s, TCGOpcode op,
274                           TCGReg r0, TCGReg r1, TCGArg m2)
275{
276    tcg_insn_unit insn = 0;
277
278    tcg_debug_assert(m2 == extract32(m2, 0, 16));
279    insn = deposit32(insn, 0, 8, op);
280    insn = deposit32(insn, 8, 4, r0);
281    insn = deposit32(insn, 12, 4, r1);
282    insn = deposit32(insn, 16, 16, m2);
283    tcg_out32(s, insn);
284}
285
286static void tcg_out_op_rrr(TCGContext *s, TCGOpcode op,
287                           TCGReg r0, TCGReg r1, TCGReg r2)
288{
289    tcg_insn_unit insn = 0;
290
291    insn = deposit32(insn, 0, 8, op);
292    insn = deposit32(insn, 8, 4, r0);
293    insn = deposit32(insn, 12, 4, r1);
294    insn = deposit32(insn, 16, 4, r2);
295    tcg_out32(s, insn);
296}
297
298static void tcg_out_op_rrs(TCGContext *s, TCGOpcode op,
299                           TCGReg r0, TCGReg r1, intptr_t i2)
300{
301    tcg_insn_unit insn = 0;
302
303    tcg_debug_assert(i2 == sextract32(i2, 0, 16));
304    insn = deposit32(insn, 0, 8, op);
305    insn = deposit32(insn, 8, 4, r0);
306    insn = deposit32(insn, 12, 4, r1);
307    insn = deposit32(insn, 16, 16, i2);
308    tcg_out32(s, insn);
309}
310
311static void tcg_out_op_rrbb(TCGContext *s, TCGOpcode op, TCGReg r0,
312                            TCGReg r1, uint8_t b2, uint8_t b3)
313{
314    tcg_insn_unit insn = 0;
315
316    tcg_debug_assert(b2 == extract32(b2, 0, 6));
317    tcg_debug_assert(b3 == extract32(b3, 0, 6));
318    insn = deposit32(insn, 0, 8, op);
319    insn = deposit32(insn, 8, 4, r0);
320    insn = deposit32(insn, 12, 4, r1);
321    insn = deposit32(insn, 16, 6, b2);
322    insn = deposit32(insn, 22, 6, b3);
323    tcg_out32(s, insn);
324}
325
326static void tcg_out_op_rrrc(TCGContext *s, TCGOpcode op,
327                            TCGReg r0, TCGReg r1, TCGReg r2, TCGCond c3)
328{
329    tcg_insn_unit insn = 0;
330
331    insn = deposit32(insn, 0, 8, op);
332    insn = deposit32(insn, 8, 4, r0);
333    insn = deposit32(insn, 12, 4, r1);
334    insn = deposit32(insn, 16, 4, r2);
335    insn = deposit32(insn, 20, 4, c3);
336    tcg_out32(s, insn);
337}
338
339static void tcg_out_op_rrrbb(TCGContext *s, TCGOpcode op, TCGReg r0,
340                             TCGReg r1, TCGReg r2, uint8_t b3, uint8_t b4)
341{
342    tcg_insn_unit insn = 0;
343
344    tcg_debug_assert(b3 == extract32(b3, 0, 6));
345    tcg_debug_assert(b4 == extract32(b4, 0, 6));
346    insn = deposit32(insn, 0, 8, op);
347    insn = deposit32(insn, 8, 4, r0);
348    insn = deposit32(insn, 12, 4, r1);
349    insn = deposit32(insn, 16, 4, r2);
350    insn = deposit32(insn, 20, 6, b3);
351    insn = deposit32(insn, 26, 6, b4);
352    tcg_out32(s, insn);
353}
354
355static void tcg_out_op_rrrr(TCGContext *s, TCGOpcode op,
356                            TCGReg r0, TCGReg r1, TCGReg r2, TCGReg r3)
357{
358    tcg_insn_unit insn = 0;
359
360    insn = deposit32(insn, 0, 8, op);
361    insn = deposit32(insn, 8, 4, r0);
362    insn = deposit32(insn, 12, 4, r1);
363    insn = deposit32(insn, 16, 4, r2);
364    insn = deposit32(insn, 20, 4, r3);
365    tcg_out32(s, insn);
366}
367
368static void tcg_out_op_rrrrrc(TCGContext *s, TCGOpcode op,
369                              TCGReg r0, TCGReg r1, TCGReg r2,
370                              TCGReg r3, TCGReg r4, TCGCond c5)
371{
372    tcg_insn_unit insn = 0;
373
374    insn = deposit32(insn, 0, 8, op);
375    insn = deposit32(insn, 8, 4, r0);
376    insn = deposit32(insn, 12, 4, r1);
377    insn = deposit32(insn, 16, 4, r2);
378    insn = deposit32(insn, 20, 4, r3);
379    insn = deposit32(insn, 24, 4, r4);
380    insn = deposit32(insn, 28, 4, c5);
381    tcg_out32(s, insn);
382}
383
384static void tcg_out_op_rrrrrr(TCGContext *s, TCGOpcode op,
385                              TCGReg r0, TCGReg r1, TCGReg r2,
386                              TCGReg r3, TCGReg r4, TCGReg r5)
387{
388    tcg_insn_unit insn = 0;
389
390    insn = deposit32(insn, 0, 8, op);
391    insn = deposit32(insn, 8, 4, r0);
392    insn = deposit32(insn, 12, 4, r1);
393    insn = deposit32(insn, 16, 4, r2);
394    insn = deposit32(insn, 20, 4, r3);
395    insn = deposit32(insn, 24, 4, r4);
396    insn = deposit32(insn, 28, 4, r5);
397    tcg_out32(s, insn);
398}
399
/*
 * Emit a load or store of 'val' at base+offset.  If the offset does
 * not fit the 16-bit displacement field, materialize base+offset
 * into TCG_REG_TMP and use a zero displacement instead.
 */
static void tcg_out_ldst(TCGContext *s, TCGOpcode op, TCGReg val,
                         TCGReg base, intptr_t offset)
{
    stack_bounds_check(base, offset);
    if (offset != sextract32(offset, 0, 16)) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, offset);
        tcg_out_op_rrr(s, INDEX_op_add, TCG_REG_TMP, TCG_REG_TMP, base);
        base = TCG_REG_TMP;
        offset = 0;
    }
    tcg_out_op_rrs(s, op, val, base, offset);
}
412
/* Load a full I32 or I64 value from base+offset into 'val'. */
static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg val, TCGReg base,
                       intptr_t offset)
{
    switch (type) {
    case TCG_TYPE_I32:
        tcg_out_ldst(s, INDEX_op_ld_i32, val, base, offset);
        break;
#if TCG_TARGET_REG_BITS == 64
    case TCG_TYPE_I64:
        tcg_out_ldst(s, INDEX_op_ld_i64, val, base, offset);
        break;
#endif
    default:
        g_assert_not_reached();
    }
}
429
/* Register-to-register move; TCI can always perform it. */
static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    tcg_out_op_rr(s, INDEX_op_mov, ret, arg);
    return true;
}
435
/*
 * Load a constant into a register: small constants use the 20-bit
 * immediate form (tci_movi); larger values go through the constant
 * pool (tci_movl).
 */
static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg)
{
    switch (type) {
    case TCG_TYPE_I32:
#if TCG_TARGET_REG_BITS == 64
        arg = (int32_t)arg;  /* normalize I32 values on 64-bit hosts */
        /* fall through */
    case TCG_TYPE_I64:
#endif
        break;
    default:
        g_assert_not_reached();
    }

    if (arg == sextract32(arg, 0, 20)) {
        /* Fits in the signed 20-bit immediate field. */
        tcg_out_op_ri(s, INDEX_op_tci_movi, ret, arg);
    } else {
        /* Emit tci_movl; the pool entry is patched via the 20-bit reloc. */
        tcg_insn_unit insn = 0;

        new_pool_label(s, arg, 20, s->code_ptr, 0);
        insn = deposit32(insn, 0, 8, INDEX_op_tci_movl);
        insn = deposit32(insn, 8, 4, ret);
        tcg_out32(s, insn);
    }
}
462
463static void tcg_out_extract(TCGContext *s, TCGType type, TCGReg rd,
464                            TCGReg rs, unsigned pos, unsigned len)
465{
466    TCGOpcode opc = type == TCG_TYPE_I32 ?
467                    INDEX_op_extract_i32 :
468                    INDEX_op_extract_i64;
469    tcg_out_op_rrbb(s, opc, rd, rs, pos, len);
470}
471
472static void tcg_out_sextract(TCGContext *s, TCGType type, TCGReg rd,
473                             TCGReg rs, unsigned pos, unsigned len)
474{
475    TCGOpcode opc = type == TCG_TYPE_I32 ?
476                    INDEX_op_sextract_i32 :
477                    INDEX_op_sextract_i64;
478    tcg_out_op_rrbb(s, opc, rd, rs, pos, len);
479}
480
/* Sign-extend the low 8 bits of rs into rd. */
static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
{
    tcg_out_sextract(s, type, rd, rs, 0, 8);
}
485
/* Zero-extend the low 8 bits of rs into rd. */
static void tcg_out_ext8u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_extract(s, TCG_TYPE_REG, rd, rs, 0, 8);
}
490
/* Sign-extend the low 16 bits of rs into rd. */
static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
{
    tcg_out_sextract(s, type, rd, rs, 0, 16);
}
495
/* Zero-extend the low 16 bits of rs into rd. */
static void tcg_out_ext16u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_extract(s, TCG_TYPE_REG, rd, rs, 0, 16);
}
500
/* Sign-extend the low 32 bits of rs into rd (64-bit hosts only). */
static void tcg_out_ext32s(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
    tcg_out_sextract(s, TCG_TYPE_I64, rd, rs, 0, 32);
}
506
/* Zero-extend the low 32 bits of rs into rd (64-bit hosts only). */
static void tcg_out_ext32u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
    tcg_out_extract(s, TCG_TYPE_I64, rd, rs, 0, 32);
}
512
/* i32 -> i64 sign extension. */
static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_ext32s(s, rd, rs);
}
517
/* i32 -> i64 zero extension. */
static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_ext32u(s, rd, rs);
}
522
/* Extract the low 32 bits of an i64: a plain move suffices here. */
static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
    tcg_out_mov(s, TCG_TYPE_I32, rd, rs);
}
528
/* Register exchange is not supported by this backend. */
static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
{
    return false;
}
533
static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                             tcg_target_long imm)
{
    /* This function is only used for passing structs by reference. */
    g_assert_not_reached();
}
540
/*
 * Emit a helper call.  The function pointer and its libffi call
 * interface go into the constant pool as a pair; the insn itself
 * encodes only the return-value size class in the 4-bit 'which'
 * field (0 = void, 1 = 4 bytes, 2 = 8 bytes, 3 = 16 bytes).
 */
static void tcg_out_call(TCGContext *s, const tcg_insn_unit *func,
                         const TCGHelperInfo *info)
{
    ffi_cif *cif = info->cif;
    tcg_insn_unit insn = 0;
    uint8_t which;

    if (cif->rtype == &ffi_type_void) {
        which = 0;
    } else {
        tcg_debug_assert(cif->rtype->size == 4 ||
                         cif->rtype->size == 8 ||
                         cif->rtype->size == 16);
        /* ctz32 of 4/8/16 minus 1 gives 1/2/3. */
        which = ctz32(cif->rtype->size) - 1;
    }
    new_pool_l2(s, 20, s->code_ptr, 0, (uintptr_t)func, (uintptr_t)cif);
    insn = deposit32(insn, 0, 8, INDEX_op_call);
    insn = deposit32(insn, 8, 4, which);
    tcg_out32(s, insn);
}
561
/*
 * Helper macros for tcg_out_op: CASE_32_64(x) expands to the _i32
 * case label (plus the _i64 label on 64-bit hosts); CASE_64(x)
 * expands to the _i64 label only on 64-bit hosts, nothing otherwise.
 */
#if TCG_TARGET_REG_BITS == 64
# define CASE_32_64(x) \
        case glue(glue(INDEX_op_, x), _i64): \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x) \
        case glue(glue(INDEX_op_, x), _i64):
#else
# define CASE_32_64(x) \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x)
#endif
573
/* Return from the TB; 'arg' is encoded as the pointer operand. */
static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg)
{
    tcg_out_op_p(s, INDEX_op_exit_tb, (void *)arg);
}
578
/* Chain to another TB using the indirect jump method only. */
static void tcg_out_goto_tb(TCGContext *s, int which)
{
    /* indirect jump method. */
    tcg_out_op_p(s, INDEX_op_goto_tb, (void *)get_jmp_target_addr(s, which));
    set_jmp_reset_offset(s, which);
}
585
void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
                              uintptr_t jmp_rx, uintptr_t jmp_rw)
{
    /* Always indirect, nothing to do */
}
591
592static void tgen_add(TCGContext *s, TCGType type,
593                     TCGReg a0, TCGReg a1, TCGReg a2)
594{
595    tcg_out_op_rrr(s, INDEX_op_add, a0, a1, a2);
596}
597
598static const TCGOutOpBinary outop_add = {
599    .base.static_constraint = C_O1_I2(r, r, r),
600    .out_rrr = tgen_add,
601};
602
603static void tgen_and(TCGContext *s, TCGType type,
604                     TCGReg a0, TCGReg a1, TCGReg a2)
605{
606    tcg_out_op_rrr(s, INDEX_op_and, a0, a1, a2);
607}
608
609static const TCGOutOpBinary outop_and = {
610    .base.static_constraint = C_O1_I2(r, r, r),
611    .out_rrr = tgen_and,
612};
613
614static void tgen_andc(TCGContext *s, TCGType type,
615                      TCGReg a0, TCGReg a1, TCGReg a2)
616{
617    tcg_out_op_rrr(s, INDEX_op_andc, a0, a1, a2);
618}
619
620static const TCGOutOpBinary outop_andc = {
621    .base.static_constraint = C_O1_I2(r, r, r),
622    .out_rrr = tgen_andc,
623};
624
625static void tgen_clz(TCGContext *s, TCGType type,
626                      TCGReg a0, TCGReg a1, TCGReg a2)
627{
628    TCGOpcode opc = (type == TCG_TYPE_I32
629                     ? INDEX_op_tci_clz32
630                     : INDEX_op_clz);
631    tcg_out_op_rrr(s, opc, a0, a1, a2);
632}
633
634static const TCGOutOpBinary outop_clz = {
635    .base.static_constraint = C_O1_I2(r, r, r),
636    .out_rrr = tgen_clz,
637};
638
639static void tgen_ctz(TCGContext *s, TCGType type,
640                      TCGReg a0, TCGReg a1, TCGReg a2)
641{
642    TCGOpcode opc = (type == TCG_TYPE_I32
643                     ? INDEX_op_tci_ctz32
644                     : INDEX_op_ctz);
645    tcg_out_op_rrr(s, opc, a0, a1, a2);
646}
647
648static const TCGOutOpBinary outop_ctz = {
649    .base.static_constraint = C_O1_I2(r, r, r),
650    .out_rrr = tgen_ctz,
651};
652
653static void tgen_divs(TCGContext *s, TCGType type,
654                      TCGReg a0, TCGReg a1, TCGReg a2)
655{
656    TCGOpcode opc = (type == TCG_TYPE_I32
657                     ? INDEX_op_tci_divs32
658                     : INDEX_op_divs);
659    tcg_out_op_rrr(s, opc, a0, a1, a2);
660}
661
662static const TCGOutOpBinary outop_divs = {
663    .base.static_constraint = C_O1_I2(r, r, r),
664    .out_rrr = tgen_divs,
665};
666
667static const TCGOutOpDivRem outop_divs2 = {
668    .base.static_constraint = C_NotImplemented,
669};
670
671static void tgen_divu(TCGContext *s, TCGType type,
672                      TCGReg a0, TCGReg a1, TCGReg a2)
673{
674    TCGOpcode opc = (type == TCG_TYPE_I32
675                     ? INDEX_op_tci_divu32
676                     : INDEX_op_divu);
677    tcg_out_op_rrr(s, opc, a0, a1, a2);
678}
679
680static const TCGOutOpBinary outop_divu = {
681    .base.static_constraint = C_O1_I2(r, r, r),
682    .out_rrr = tgen_divu,
683};
684
685static const TCGOutOpDivRem outop_divu2 = {
686    .base.static_constraint = C_NotImplemented,
687};
688
689static void tgen_eqv(TCGContext *s, TCGType type,
690                     TCGReg a0, TCGReg a1, TCGReg a2)
691{
692    tcg_out_op_rrr(s, INDEX_op_eqv, a0, a1, a2);
693}
694
695static const TCGOutOpBinary outop_eqv = {
696    .base.static_constraint = C_O1_I2(r, r, r),
697    .out_rrr = tgen_eqv,
698};
699
700static void tgen_mul(TCGContext *s, TCGType type,
701                     TCGReg a0, TCGReg a1, TCGReg a2)
702{
703    tcg_out_op_rrr(s, INDEX_op_mul, a0, a1, a2);
704}
705
706static const TCGOutOpBinary outop_mul = {
707    .base.static_constraint = C_O1_I2(r, r, r),
708    .out_rrr = tgen_mul,
709};
710
711static TCGConstraintSetIndex cset_mul2(TCGType type, unsigned flags)
712{
713    return type == TCG_TYPE_REG ? C_O2_I2(r, r, r, r) : C_NotImplemented;
714}
715
716static void tgen_muls2(TCGContext *s, TCGType type,
717                       TCGReg a0, TCGReg a1, TCGReg a2, TCGReg a3)
718{
719    tcg_out_op_rrrr(s, INDEX_op_muls2, a0, a1, a2, a3);
720}
721
722static const TCGOutOpMul2 outop_muls2 = {
723    .base.static_constraint = C_Dynamic,
724    .base.dynamic_constraint = cset_mul2,
725    .out_rrrr = tgen_muls2,
726};
727
/* High-part-only multiplies are not provided; mul2 is used instead. */
static const TCGOutOpBinary outop_mulsh = {
    .base.static_constraint = C_NotImplemented,
};

static const TCGOutOpBinary outop_muluh = {
    .base.static_constraint = C_NotImplemented,
};
735
736static void tgen_nand(TCGContext *s, TCGType type,
737                     TCGReg a0, TCGReg a1, TCGReg a2)
738{
739    tcg_out_op_rrr(s, INDEX_op_nand, a0, a1, a2);
740}
741
742static const TCGOutOpBinary outop_nand = {
743    .base.static_constraint = C_O1_I2(r, r, r),
744    .out_rrr = tgen_nand,
745};
746
747static void tgen_nor(TCGContext *s, TCGType type,
748                     TCGReg a0, TCGReg a1, TCGReg a2)
749{
750    tcg_out_op_rrr(s, INDEX_op_nor, a0, a1, a2);
751}
752
753static const TCGOutOpBinary outop_nor = {
754    .base.static_constraint = C_O1_I2(r, r, r),
755    .out_rrr = tgen_nor,
756};
757
758static void tgen_or(TCGContext *s, TCGType type,
759                     TCGReg a0, TCGReg a1, TCGReg a2)
760{
761    tcg_out_op_rrr(s, INDEX_op_or, a0, a1, a2);
762}
763
764static const TCGOutOpBinary outop_or = {
765    .base.static_constraint = C_O1_I2(r, r, r),
766    .out_rrr = tgen_or,
767};
768
769static void tgen_orc(TCGContext *s, TCGType type,
770                     TCGReg a0, TCGReg a1, TCGReg a2)
771{
772    tcg_out_op_rrr(s, INDEX_op_orc, a0, a1, a2);
773}
774
775static const TCGOutOpBinary outop_orc = {
776    .base.static_constraint = C_O1_I2(r, r, r),
777    .out_rrr = tgen_orc,
778};
779
780static void tgen_rems(TCGContext *s, TCGType type,
781                      TCGReg a0, TCGReg a1, TCGReg a2)
782{
783    TCGOpcode opc = (type == TCG_TYPE_I32
784                     ? INDEX_op_tci_rems32
785                     : INDEX_op_rems);
786    tcg_out_op_rrr(s, opc, a0, a1, a2);
787}
788
789static const TCGOutOpBinary outop_rems = {
790    .base.static_constraint = C_O1_I2(r, r, r),
791    .out_rrr = tgen_rems,
792};
793
794static void tgen_remu(TCGContext *s, TCGType type,
795                      TCGReg a0, TCGReg a1, TCGReg a2)
796{
797    TCGOpcode opc = (type == TCG_TYPE_I32
798                     ? INDEX_op_tci_remu32
799                     : INDEX_op_remu);
800    tcg_out_op_rrr(s, opc, a0, a1, a2);
801}
802
803static const TCGOutOpBinary outop_remu = {
804    .base.static_constraint = C_O1_I2(r, r, r),
805    .out_rrr = tgen_remu,
806};
807
808static void tgen_rotl(TCGContext *s, TCGType type,
809                     TCGReg a0, TCGReg a1, TCGReg a2)
810{
811    TCGOpcode opc = (type == TCG_TYPE_I32
812                     ? INDEX_op_tci_rotl32
813                     : INDEX_op_rotl);
814    tcg_out_op_rrr(s, opc, a0, a1, a2);
815}
816
817static const TCGOutOpBinary outop_rotl = {
818    .base.static_constraint = C_O1_I2(r, r, r),
819    .out_rrr = tgen_rotl,
820};
821
822static void tgen_rotr(TCGContext *s, TCGType type,
823                     TCGReg a0, TCGReg a1, TCGReg a2)
824{
825    TCGOpcode opc = (type == TCG_TYPE_I32
826                     ? INDEX_op_tci_rotr32
827                     : INDEX_op_rotr);
828    tcg_out_op_rrr(s, opc, a0, a1, a2);
829}
830
831static const TCGOutOpBinary outop_rotr = {
832    .base.static_constraint = C_O1_I2(r, r, r),
833    .out_rrr = tgen_rotr,
834};
835
836static void tgen_sar(TCGContext *s, TCGType type,
837                     TCGReg a0, TCGReg a1, TCGReg a2)
838{
839    if (type < TCG_TYPE_REG) {
840        tcg_out_ext32s(s, TCG_REG_TMP, a1);
841        a1 = TCG_REG_TMP;
842    }
843    tcg_out_op_rrr(s, INDEX_op_sar, a0, a1, a2);
844}
845
846static const TCGOutOpBinary outop_sar = {
847    .base.static_constraint = C_O1_I2(r, r, r),
848    .out_rrr = tgen_sar,
849};
850
851static void tgen_shl(TCGContext *s, TCGType type,
852                     TCGReg a0, TCGReg a1, TCGReg a2)
853{
854    tcg_out_op_rrr(s, INDEX_op_shl, a0, a1, a2);
855}
856
857static const TCGOutOpBinary outop_shl = {
858    .base.static_constraint = C_O1_I2(r, r, r),
859    .out_rrr = tgen_shl,
860};
861
862static void tgen_shr(TCGContext *s, TCGType type,
863                     TCGReg a0, TCGReg a1, TCGReg a2)
864{
865    if (type < TCG_TYPE_REG) {
866        tcg_out_ext32u(s, TCG_REG_TMP, a1);
867        a1 = TCG_REG_TMP;
868    }
869    tcg_out_op_rrr(s, INDEX_op_shr, a0, a1, a2);
870}
871
872static const TCGOutOpBinary outop_shr = {
873    .base.static_constraint = C_O1_I2(r, r, r),
874    .out_rrr = tgen_shr,
875};
876
877static void tgen_sub(TCGContext *s, TCGType type,
878                     TCGReg a0, TCGReg a1, TCGReg a2)
879{
880    tcg_out_op_rrr(s, INDEX_op_sub, a0, a1, a2);
881}
882
883static const TCGOutOpSubtract outop_sub = {
884    .base.static_constraint = C_O1_I2(r, r, r),
885    .out_rrr = tgen_sub,
886};
887
888static void tgen_xor(TCGContext *s, TCGType type,
889                     TCGReg a0, TCGReg a1, TCGReg a2)
890{
891    tcg_out_op_rrr(s, INDEX_op_xor, a0, a1, a2);
892}
893
894static const TCGOutOpBinary outop_xor = {
895    .base.static_constraint = C_O1_I2(r, r, r),
896    .out_rrr = tgen_xor,
897};
898
899static void tgen_ctpop(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
900{
901    tcg_out_op_rr(s, INDEX_op_ctpop, a0, a1);
902}
903
904static TCGConstraintSetIndex cset_ctpop(TCGType type, unsigned flags)
905{
906    return type == TCG_TYPE_REG ? C_O1_I1(r, r) : C_NotImplemented;
907}
908
909static const TCGOutOpUnary outop_ctpop = {
910    .base.static_constraint = C_Dynamic,
911    .base.dynamic_constraint = cset_ctpop,
912    .out_rr = tgen_ctpop,
913};
914
915static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
916{
917    tcg_out_op_rr(s, INDEX_op_neg, a0, a1);
918}
919
920static const TCGOutOpUnary outop_neg = {
921    .base.static_constraint = C_O1_I1(r, r),
922    .out_rr = tgen_neg,
923};
924
925static void tgen_not(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
926{
927    tcg_out_op_rr(s, INDEX_op_not, a0, a1);
928}
929
930static const TCGOutOpUnary outop_not = {
931    .base.static_constraint = C_O1_I1(r, r),
932    .out_rr = tgen_not,
933};
934

/*
 * Main opcode dispatcher: encode one TCG op into the TCI bytecode
 * stream using the field-packing helpers above.
 */
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS])
{
    int width;

    switch (opc) {
    case INDEX_op_goto_ptr:
        tcg_out_op_r(s, opc, args[0]);
        break;

    case INDEX_op_br:
        tcg_out_op_l(s, opc, arg_label(args[0]));
        break;

    CASE_32_64(setcond)
        tcg_out_op_rrrc(s, opc, args[0], args[1], args[2], args[3]);
        break;

    CASE_32_64(movcond)
    case INDEX_op_setcond2_i32:
        tcg_out_op_rrrrrc(s, opc, args[0], args[1], args[2],
                          args[3], args[4], args[5]);
        break;

    /* All host loads/stores share the reg+reg+offset form. */
    CASE_32_64(ld8u)
    CASE_32_64(ld8s)
    CASE_32_64(ld16u)
    CASE_32_64(ld16s)
    case INDEX_op_ld_i32:
    CASE_64(ld32u)
    CASE_64(ld32s)
    CASE_64(ld)
    CASE_32_64(st8)
    CASE_32_64(st16)
    case INDEX_op_st_i32:
    CASE_64(st32)
    CASE_64(st)
        tcg_out_ldst(s, opc, args[0], args[1], args[2]);
        break;

    CASE_32_64(deposit)
        tcg_out_op_rrrbb(s, opc, args[0], args[1], args[2], args[3], args[4]);
        break;

    CASE_32_64(extract)  /* Optional (TCG_TARGET_HAS_extract_*). */
    CASE_32_64(sextract) /* Optional (TCG_TARGET_HAS_sextract_*). */
        tcg_out_op_rrbb(s, opc, args[0], args[1], args[2], args[3]);
        break;

    /* brcond is synthesized as setcond into TMP + branch on TMP. */
    CASE_32_64(brcond)
        tcg_out_op_rrrc(s, (opc == INDEX_op_brcond_i32
                            ? INDEX_op_setcond_i32 : INDEX_op_setcond_i64),
                        TCG_REG_TMP, args[0], args[1], args[2]);
        tcg_out_op_rl(s, opc, TCG_REG_TMP, arg_label(args[3]));
        break;

    case INDEX_op_bswap32_i32: /* Optional (TCG_TARGET_HAS_bswap32_i32). */
    case INDEX_op_bswap64_i64: /* Optional (TCG_TARGET_HAS_bswap64_i64). */
        tcg_out_op_rr(s, opc, args[0], args[1]);
        break;

    case INDEX_op_bswap16_i32: /* Optional (TCG_TARGET_HAS_bswap16_i32). */
    case INDEX_op_bswap16_i64: /* Optional (TCG_TARGET_HAS_bswap16_i64). */
        width = 16;
        goto do_bswap;
    case INDEX_op_bswap32_i64: /* Optional (TCG_TARGET_HAS_bswap32_i64). */
        width = 32;
    do_bswap:
        /* The base tci bswaps zero-extend, and ignore high bits. */
        tcg_out_op_rr(s, opc, args[0], args[1]);
        if (args[2] & TCG_BSWAP_OS) {
            /* Output needs sign extension: add an explicit sextract. */
            tcg_out_sextract(s, TCG_TYPE_REG, args[0], args[0], 0, width);
        }
        break;

    CASE_32_64(add2)
    CASE_32_64(sub2)
        tcg_out_op_rrrrrr(s, opc, args[0], args[1], args[2],
                          args[3], args[4], args[5]);
        break;

#if TCG_TARGET_REG_BITS == 32
    /* brcond2 is synthesized as setcond2 into TMP + branch on TMP. */
    case INDEX_op_brcond2_i32:
        tcg_out_op_rrrrrc(s, INDEX_op_setcond2_i32, TCG_REG_TMP,
                          args[0], args[1], args[2], args[3], args[4]);
        tcg_out_op_rl(s, INDEX_op_brcond_i32, TCG_REG_TMP, arg_label(args[5]));
        break;
#endif

    CASE_32_64(mulu2)
        tcg_out_op_rrrr(s, opc, args[0], args[1], args[2], args[3]);
        break;

    case INDEX_op_qemu_ld_i64:
    case INDEX_op_qemu_st_i64:
        if (TCG_TARGET_REG_BITS == 32) {
            /* The memop index doesn't fit: pass it via TCG_REG_TMP. */
            tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP, args[3]);
            tcg_out_op_rrrr(s, opc, args[0], args[1], args[2], TCG_REG_TMP);
            break;
        }
        /* fall through */
    case INDEX_op_qemu_ld_i32:
    case INDEX_op_qemu_st_i32:
        if (TCG_TARGET_REG_BITS == 64 && s->addr_type == TCG_TYPE_I32) {
            /* Zero-extend a 32-bit guest address on a 64-bit host. */
            tcg_out_ext32u(s, TCG_REG_TMP, args[1]);
            tcg_out_op_rrm(s, opc, args[0], TCG_REG_TMP, args[2]);
        } else {
            tcg_out_op_rrm(s, opc, args[0], args[1], args[2]);
        }
        break;

    case INDEX_op_mb:
        tcg_out_op_v(s, opc);
        break;

    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
    case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb.  */
    case INDEX_op_goto_tb:  /* Always emitted via tcg_out_goto_tb.  */
    case INDEX_op_ext_i32_i64:  /* Always emitted via tcg_reg_alloc_op.  */
    case INDEX_op_extu_i32_i64:
    case INDEX_op_extrl_i64_i32:
    default:
        g_assert_not_reached();
    }
}
1062
/* Store a full I32 or I64 value from 'val' to base+offset. */
static void tcg_out_st(TCGContext *s, TCGType type, TCGReg val, TCGReg base,
                       intptr_t offset)
{
    switch (type) {
    case TCG_TYPE_I32:
        tcg_out_ldst(s, INDEX_op_st_i32, val, base, offset);
        break;
#if TCG_TARGET_REG_BITS == 64
    case TCG_TYPE_I64:
        tcg_out_ldst(s, INDEX_op_st_i64, val, base, offset);
        break;
#endif
    default:
        g_assert_not_reached();
    }
}
1079
/* Store-immediate is not supported; the caller falls back to movi+st. */
static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                               TCGReg base, intptr_t ofs)
{
    return false;
}
1085
/* Test if a constant matches the constraint. */
static bool tcg_target_const_match(int64_t val, int ct,
                                   TCGType type, TCGCond cond, int vece)
{
    /* TCI has no immediate operand forms; only the generic
       "any constant" constraint can match. */
    return ct & TCG_CT_CONST;
}
1092
1093static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
1094{
1095    memset(p, 0, sizeof(*p) * count);
1096}
1097
/* One-time initialization of the TCI backend state. */
static void tcg_target_init(TCGContext *s)
{
    /* The current code uses uint8_t for tcg operations. */
    tcg_debug_assert(tcg_op_defs_max <= UINT8_MAX);

    /* Registers available for 32 bit operations. */
    tcg_target_available_regs[TCG_TYPE_I32] = BIT(TCG_TARGET_NB_REGS) - 1;
    /* Registers available for 64 bit operations. */
    tcg_target_available_regs[TCG_TYPE_I64] = BIT(TCG_TARGET_NB_REGS) - 1;
    /*
     * The interpreter "registers" are in the local stack frame and
     * cannot be clobbered by the called helper functions.  However,
     * the interpreter assumes a 128-bit return value and assigns to
     * the return value registers.
     */
    tcg_target_call_clobber_regs =
        MAKE_64BIT_MASK(TCG_REG_R0, 128 / TCG_TARGET_REG_BITS);

    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);

    /* The call arguments come first, followed by the temp storage. */
    tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE,
                  TCG_STATIC_FRAME_SIZE);
}
1124
/* Generate global QEMU prologue and epilogue code. */
static inline void tcg_target_qemu_prologue(TCGContext *s)
{
    /* The interpreter's entry point lives in tci.c; nothing to emit. */
}
1129
/* Hook for per-TB preamble code. */
static void tcg_out_tb_start(TCGContext *s)
{
    /* nothing to do */
}
1134
/* The interpreter handles all byte-swapped memory accesses itself. */
bool tcg_target_has_memory_bswap(MemOp memop)
{
    return true;
}
1139
/* TCI emits no out-of-line load paths, so this must never be called. */
static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    g_assert_not_reached();
}
1144
/* TCI emits no out-of-line store paths, so this must never be called. */
static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    g_assert_not_reached();
}
1149