/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2009, 2011 Stefan Weil
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* Used for function call generation. */
#define TCG_TARGET_CALL_STACK_OFFSET    0
#define TCG_TARGET_STACK_ALIGN          8
#if TCG_TARGET_REG_BITS == 32
# define TCG_TARGET_CALL_ARG_I32        TCG_CALL_ARG_EVEN
# define TCG_TARGET_CALL_ARG_I64        TCG_CALL_ARG_EVEN
# define TCG_TARGET_CALL_ARG_I128       TCG_CALL_ARG_EVEN
#else
# define TCG_TARGET_CALL_ARG_I32        TCG_CALL_ARG_NORMAL
# define TCG_TARGET_CALL_ARG_I64        TCG_CALL_ARG_NORMAL
# define TCG_TARGET_CALL_ARG_I128       TCG_CALL_ARG_NORMAL
#endif
#define TCG_TARGET_CALL_RET_I128        TCG_CALL_RET_NORMAL
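
/*
 * A reading aid for the constraint sets returned below; this is a
 * summary of the generic TCG notation, not something defined here.
 * C_Om_In(...) describes an opcode with m outputs and n inputs, and
 * each 'r' allows the corresponding operand in any general register.
 * For example, C_O1_I2(r, r, r) is a typical three-register ALU op.
 */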
static TCGConstraintSetIndex
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
{
    switch (op) {
    case INDEX_op_goto_ptr:
        return C_O0_I1(r);

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap16_i64:
    case INDEX_op_bswap32_i32:
    case INDEX_op_bswap32_i64:
    case INDEX_op_bswap64_i64:
    case INDEX_op_extract_i32:
    case INDEX_op_extract_i64:
    case INDEX_op_sextract_i32:
    case INDEX_op_sextract_i64:
    case INDEX_op_ctpop_i32:
    case INDEX_op_ctpop_i64:
        return C_O1_I1(r, r);

    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
        return C_O0_I2(r, r);

    case INDEX_op_remu_i32:
    case INDEX_op_remu_i64:
    case INDEX_op_shl_i32:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i32:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i32:
    case INDEX_op_sar_i64:
    case INDEX_op_rotl_i32:
    case INDEX_op_rotl_i64:
    case INDEX_op_rotr_i32:
    case INDEX_op_rotr_i64:
    case INDEX_op_setcond_i32:
    case INDEX_op_setcond_i64:
    case INDEX_op_deposit_i32:
    case INDEX_op_deposit_i64:
    case INDEX_op_clz_i32:
    case INDEX_op_clz_i64:
    case INDEX_op_ctz_i32:
    case INDEX_op_ctz_i64:
        return C_O1_I2(r, r, r);

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        return C_O0_I2(r, r);

    case INDEX_op_add2_i32:
    case INDEX_op_add2_i64:
    case INDEX_op_sub2_i32:
    case INDEX_op_sub2_i64:
        return C_O2_I4(r, r, r, r, r, r);

#if TCG_TARGET_REG_BITS == 32
    case INDEX_op_brcond2_i32:
        return C_O0_I4(r, r, r, r);
#endif

    case INDEX_op_mulu2_i32:
    case INDEX_op_mulu2_i64:
    case INDEX_op_muls2_i32:
    case INDEX_op_muls2_i64:
        return C_O2_I2(r, r, r, r);

    case INDEX_op_movcond_i32:
    case INDEX_op_movcond_i64:
    case INDEX_op_setcond2_i32:
        return C_O1_I4(r, r, r, r, r);

    case INDEX_op_qemu_ld_i32:
        return C_O1_I1(r, r);
    case INDEX_op_qemu_ld_i64:
        return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O2_I1(r, r, r);
    case INDEX_op_qemu_st_i32:
        return C_O0_I2(r, r);
    case INDEX_op_qemu_st_i64:
        return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r) : C_O0_I3(r, r, r);

    default:
        return C_NotImplemented;
    }
}

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R12,
    TCG_REG_R13,
    TCG_REG_R14,
    TCG_REG_R15,
    /* Either 2 or 4 of these are call clobbered, so use them last. */
    TCG_REG_R3,
    TCG_REG_R2,
    TCG_REG_R1,
    TCG_REG_R0,
};

/* No call arguments via registers.  All will be stored on the "stack". */
static const int tcg_target_call_iarg_regs[] = { };

static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
{
    tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
    tcg_debug_assert(slot >= 0 && slot < 128 / TCG_TARGET_REG_BITS);
    return TCG_REG_R0 + slot;
}

#ifdef CONFIG_DEBUG_TCG
static const char *const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "r00",
    "r01",
    "r02",
    "r03",
    "r04",
    "r05",
    "r06",
    "r07",
    "r08",
    "r09",
    "r10",
    "r11",
    "r12",
    "r13",
    "r14",
    "r15",
};
#endif

static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    intptr_t diff = value - (intptr_t)(code_ptr + 1);

    tcg_debug_assert(addend == 0);
    tcg_debug_assert(type == 20);

    if (diff == sextract32(diff, 0, type)) {
        tcg_patch32(code_ptr, deposit32(*code_ptr, 32 - type, type, diff));
        return true;
    }
    return false;
}
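
/*
 * Worked example (illustrative): for a branch word at address A with
 * target T, the displacement is diff = T - (A + 4), i.e. relative to
 * the instruction word *after* the branch.  With type == 20 it is
 * stored in the top 20 bits of the word (bit 12 upward), so the
 * target must lie within the signed 20-bit range of about +/-512 KiB;
 * otherwise the relocation fails and patch_reloc returns false.
 */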

static void stack_bounds_check(TCGReg base, intptr_t offset)
{
    if (base == TCG_REG_CALL_STACK) {
        tcg_debug_assert(offset >= 0);
        tcg_debug_assert(offset < (TCG_STATIC_CALL_ARGS_SIZE +
                                   TCG_STATIC_FRAME_SIZE));
    }
}
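
/*
 * Overview of the encodings used by the tcg_out_op_* helpers below
 * (a summary derived from the deposit32 calls, not a formal spec).
 * Every TCI instruction is one 32-bit word with the opcode in bits
 * [7:0].  Register operands are 4-bit fields packed upward from bit
 * 8; 20-bit signed immediates and displacements sit at bit 12;
 * 16-bit fields (memop indexes, load/store offsets) at bit 16;
 * 6-bit position/length pairs for deposit/extract at bits 16/22 or
 * 20/26; and 4-bit condition codes at bit 20 or bit 28.
 */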
static void tcg_out_op_l(TCGContext *s, TCGOpcode op, TCGLabel *l0)
{
    tcg_insn_unit insn = 0;

    tcg_out_reloc(s, s->code_ptr, 20, l0, 0);
    insn = deposit32(insn, 0, 8, op);
    tcg_out32(s, insn);
}

static void tcg_out_op_p(TCGContext *s, TCGOpcode op, void *p0)
{
    tcg_insn_unit insn = 0;
    intptr_t diff;

    /* Special case for exit_tb: map null -> 0. */
    if (p0 == NULL) {
        diff = 0;
    } else {
        diff = p0 - (void *)(s->code_ptr + 1);
        tcg_debug_assert(diff != 0);
        if (diff != sextract32(diff, 0, 20)) {
            tcg_raise_tb_overflow(s);
        }
    }
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 12, 20, diff);
    tcg_out32(s, insn);
}

static void tcg_out_op_r(TCGContext *s, TCGOpcode op, TCGReg r0)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    tcg_out32(s, insn);
}

static void tcg_out_op_v(TCGContext *s, TCGOpcode op)
{
    tcg_out32(s, (uint8_t)op);
}

static void tcg_out_op_ri(TCGContext *s, TCGOpcode op, TCGReg r0, int32_t i1)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(i1 == sextract32(i1, 0, 20));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 20, i1);
    tcg_out32(s, insn);
}

static void tcg_out_op_rl(TCGContext *s, TCGOpcode op, TCGReg r0, TCGLabel *l1)
{
    tcg_insn_unit insn = 0;

    tcg_out_reloc(s, s->code_ptr, 20, l1, 0);
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    tcg_out32(s, insn);
}

static void tcg_out_op_rr(TCGContext *s, TCGOpcode op, TCGReg r0, TCGReg r1)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrm(TCGContext *s, TCGOpcode op,
                           TCGReg r0, TCGReg r1, TCGArg m2)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(m2 == extract32(m2, 0, 16));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 16, m2);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrr(TCGContext *s, TCGOpcode op,
                           TCGReg r0, TCGReg r1, TCGReg r2)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrs(TCGContext *s, TCGOpcode op,
                           TCGReg r0, TCGReg r1, intptr_t i2)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(i2 == sextract32(i2, 0, 16));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 16, i2);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrbb(TCGContext *s, TCGOpcode op, TCGReg r0,
                            TCGReg r1, uint8_t b2, uint8_t b3)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(b2 == extract32(b2, 0, 6));
    tcg_debug_assert(b3 == extract32(b3, 0, 6));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 6, b2);
    insn = deposit32(insn, 22, 6, b3);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrrc(TCGContext *s, TCGOpcode op,
                            TCGReg r0, TCGReg r1, TCGReg r2, TCGCond c3)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 4, c3);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrrbb(TCGContext *s, TCGOpcode op, TCGReg r0,
                             TCGReg r1, TCGReg r2, uint8_t b3, uint8_t b4)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(b3 == extract32(b3, 0, 6));
    tcg_debug_assert(b4 == extract32(b4, 0, 6));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 6, b3);
    insn = deposit32(insn, 26, 6, b4);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrrr(TCGContext *s, TCGOpcode op,
                            TCGReg r0, TCGReg r1, TCGReg r2, TCGReg r3)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 4, r3);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrrrrc(TCGContext *s, TCGOpcode op,
                              TCGReg r0, TCGReg r1, TCGReg r2,
                              TCGReg r3, TCGReg r4, TCGCond c5)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 4, r3);
    insn = deposit32(insn, 24, 4, r4);
    insn = deposit32(insn, 28, 4, c5);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrrrrr(TCGContext *s, TCGOpcode op,
                              TCGReg r0, TCGReg r1, TCGReg r2,
                              TCGReg r3, TCGReg r4, TCGReg r5)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 4, r3);
    insn = deposit32(insn, 24, 4, r4);
    insn = deposit32(insn, 28, 4, r5);
    tcg_out32(s, insn);
}

static void tcg_out_ldst(TCGContext *s, TCGOpcode op, TCGReg val,
                         TCGReg base, intptr_t offset)
{
    stack_bounds_check(base, offset);
    if (offset != sextract32(offset, 0, 16)) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, offset);
        tcg_out_op_rrr(s, INDEX_op_add, TCG_REG_TMP, TCG_REG_TMP, base);
        base = TCG_REG_TMP;
        offset = 0;
    }
    tcg_out_op_rrs(s, op, val, base, offset);
}
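
/*
 * For example (illustrative): a load with a 1 MiB offset does not fit
 * the signed 16-bit field of the rrs format, so the sequence becomes
 * "movi TMP, offset; add TMP, TMP, base; ld val, TMP, 0".  Offsets
 * within +/-32 KiB are emitted as a single instruction.
 */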

static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg val, TCGReg base,
                       intptr_t offset)
{
    switch (type) {
    case TCG_TYPE_I32:
        tcg_out_ldst(s, INDEX_op_ld_i32, val, base, offset);
        break;
#if TCG_TARGET_REG_BITS == 64
    case TCG_TYPE_I64:
        tcg_out_ldst(s, INDEX_op_ld_i64, val, base, offset);
        break;
#endif
    default:
        g_assert_not_reached();
    }
}

static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    tcg_out_op_rr(s, INDEX_op_mov, ret, arg);
    return true;
}

static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg)
{
    switch (type) {
    case TCG_TYPE_I32:
#if TCG_TARGET_REG_BITS == 64
        arg = (int32_t)arg;
        /* fall through */
    case TCG_TYPE_I64:
#endif
        break;
    default:
        g_assert_not_reached();
    }

    if (arg == sextract32(arg, 0, 20)) {
        tcg_out_op_ri(s, INDEX_op_tci_movi, ret, arg);
    } else {
        tcg_insn_unit insn = 0;

        new_pool_label(s, arg, 20, s->code_ptr, 0);
        insn = deposit32(insn, 0, 8, INDEX_op_tci_movl);
        insn = deposit32(insn, 8, 4, ret);
        tcg_out32(s, insn);
    }
}
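
/*
 * In other words (illustrative): constants in the signed 20-bit range,
 * e.g. 0x7ffff, become a single tci_movi with an inline immediate;
 * anything larger, e.g. 0x80000, is spilled to the constant pool and
 * loaded with tci_movl through a 20-bit displacement that patch_reloc
 * fills in once the pool address is known.
 */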

static void tcg_out_extract(TCGContext *s, TCGType type, TCGReg rd,
                            TCGReg rs, unsigned pos, unsigned len)
{
    TCGOpcode opc = type == TCG_TYPE_I32 ?
                    INDEX_op_extract_i32 :
                    INDEX_op_extract_i64;
    tcg_out_op_rrbb(s, opc, rd, rs, pos, len);
}

static void tcg_out_sextract(TCGContext *s, TCGType type, TCGReg rd,
                             TCGReg rs, unsigned pos, unsigned len)
{
    TCGOpcode opc = type == TCG_TYPE_I32 ?
                    INDEX_op_sextract_i32 :
                    INDEX_op_sextract_i64;
    tcg_out_op_rrbb(s, opc, rd, rs, pos, len);
}

static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
{
    tcg_out_sextract(s, type, rd, rs, 0, 8);
}

static void tcg_out_ext8u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_extract(s, TCG_TYPE_REG, rd, rs, 0, 8);
}

static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
{
    tcg_out_sextract(s, type, rd, rs, 0, 16);
}

static void tcg_out_ext16u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_extract(s, TCG_TYPE_REG, rd, rs, 0, 16);
}

static void tcg_out_ext32s(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
    tcg_out_sextract(s, TCG_TYPE_I64, rd, rs, 0, 32);
}

static void tcg_out_ext32u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
    tcg_out_extract(s, TCG_TYPE_I64, rd, rs, 0, 32);
}

static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_ext32s(s, rd, rs);
}

static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_ext32u(s, rd, rs);
}

static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
    tcg_out_mov(s, TCG_TYPE_I32, rd, rs);
}

static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
{
    return false;
}

static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                             tcg_target_long imm)
{
    /* This function is only used for passing structs by reference. */
    g_assert_not_reached();
}

static void tcg_out_call(TCGContext *s, const tcg_insn_unit *func,
                         const TCGHelperInfo *info)
{
    ffi_cif *cif = info->cif;
    tcg_insn_unit insn = 0;
    uint8_t which;

    if (cif->rtype == &ffi_type_void) {
        which = 0;
    } else {
        tcg_debug_assert(cif->rtype->size == 4 ||
                         cif->rtype->size == 8 ||
                         cif->rtype->size == 16);
        which = ctz32(cif->rtype->size) - 1;
    }
    new_pool_l2(s, 20, s->code_ptr, 0, (uintptr_t)func, (uintptr_t)cif);
    insn = deposit32(insn, 0, 8, INDEX_op_call);
    insn = deposit32(insn, 8, 4, which);
    tcg_out32(s, insn);
}
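
/*
 * To spell out the encoding above (illustrative): 'which' classifies
 * the helper's return value as 0 = void, 1 = 32-bit, 2 = 64-bit,
 * 3 = 128-bit, since ctz32(4) - 1 == 1, ctz32(8) - 1 == 2 and
 * ctz32(16) - 1 == 3.  The function pointer and its ffi_cif are
 * stored as a pair in the constant pool and located through the
 * usual 20-bit displacement relocation.
 */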

#if TCG_TARGET_REG_BITS == 64
# define CASE_32_64(x) \
        case glue(glue(INDEX_op_, x), _i64): \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x) \
        case glue(glue(INDEX_op_, x), _i64):
#else
# define CASE_32_64(x) \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x)
#endif

static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg)
{
    tcg_out_op_p(s, INDEX_op_exit_tb, (void *)arg);
}

static void tcg_out_goto_tb(TCGContext *s, int which)
{
    /* Indirect jump method. */
    tcg_out_op_p(s, INDEX_op_goto_tb, (void *)get_jmp_target_addr(s, which));
    set_jmp_reset_offset(s, which);
}

void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
                              uintptr_t jmp_rx, uintptr_t jmp_rw)
{
    /* Always indirect, nothing to do. */
}
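
/*
 * A note on the indirect scheme (illustrative): goto_tb embeds the
 * address of the per-TB jump target slot rather than a destination.
 * The interpreter reloads the destination from that slot on every
 * execution, so linking or unlinking a chained TB is a plain store
 * to the slot and no generated code needs to be patched here.
 */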
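
/*
 * The blocks below follow the per-opcode TCGOutOp pattern: each
 * operation is described by a static struct naming its register
 * constraints and emission callback.  For this backend nearly every
 * binary op is C_O1_I2(r, r, r) with a trivial out_rrr emitter.
 */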
static void tgen_add(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_add, a0, a1, a2);
}

static const TCGOutOpBinary outop_add = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_add,
};

static void tgen_and(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_and, a0, a1, a2);
}

static const TCGOutOpBinary outop_and = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_and,
};

static void tgen_andc(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_andc, a0, a1, a2);
}

static const TCGOutOpBinary outop_andc = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_andc,
};

static void tgen_divs(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    TCGOpcode opc = (type == TCG_TYPE_I32
                     ? INDEX_op_tci_divs32
                     : INDEX_op_divs);
    tcg_out_op_rrr(s, opc, a0, a1, a2);
}

static const TCGOutOpBinary outop_divs = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_divs,
};

static const TCGOutOpDivRem outop_divs2 = {
    .base.static_constraint = C_NotImplemented,
};

static void tgen_divu(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    TCGOpcode opc = (type == TCG_TYPE_I32
                     ? INDEX_op_tci_divu32
                     : INDEX_op_divu);
    tcg_out_op_rrr(s, opc, a0, a1, a2);
}

static const TCGOutOpBinary outop_divu = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_divu,
};

static const TCGOutOpDivRem outop_divu2 = {
    .base.static_constraint = C_NotImplemented,
};

static void tgen_eqv(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_eqv, a0, a1, a2);
}

static const TCGOutOpBinary outop_eqv = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_eqv,
};

static void tgen_mul(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_mul, a0, a1, a2);
}

static const TCGOutOpBinary outop_mul = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_mul,
};

static const TCGOutOpBinary outop_mulsh = {
    .base.static_constraint = C_NotImplemented,
};

static const TCGOutOpBinary outop_muluh = {
    .base.static_constraint = C_NotImplemented,
};

static void tgen_nand(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_nand, a0, a1, a2);
}

static const TCGOutOpBinary outop_nand = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_nand,
};

static void tgen_nor(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_nor, a0, a1, a2);
}

static const TCGOutOpBinary outop_nor = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_nor,
};

static void tgen_or(TCGContext *s, TCGType type,
                    TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_or, a0, a1, a2);
}

static const TCGOutOpBinary outop_or = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_or,
};

static void tgen_orc(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_orc, a0, a1, a2);
}

static const TCGOutOpBinary outop_orc = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_orc,
};

static void tgen_rems(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    TCGOpcode opc = (type == TCG_TYPE_I32
                     ? INDEX_op_tci_rems32
                     : INDEX_op_rems);
    tcg_out_op_rrr(s, opc, a0, a1, a2);
}

static const TCGOutOpBinary outop_rems = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_rems,
};

static void tgen_sub(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_sub, a0, a1, a2);
}

static const TCGOutOpSubtract outop_sub = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_sub,
};

static void tgen_xor(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_xor, a0, a1, a2);
}

static const TCGOutOpBinary outop_xor = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_xor,
};

static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
    tcg_out_op_rr(s, INDEX_op_neg, a0, a1);
}

static const TCGOutOpUnary outop_neg = {
    .base.static_constraint = C_O1_I1(r, r),
    .out_rr = tgen_neg,
};

static void tgen_not(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
    tcg_out_op_rr(s, INDEX_op_not, a0, a1);
}

static const TCGOutOpUnary outop_not = {
    .base.static_constraint = C_O1_I1(r, r),
    .out_rr = tgen_not,
};

static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS])
{
    int width;

    switch (opc) {
    case INDEX_op_goto_ptr:
        tcg_out_op_r(s, opc, args[0]);
        break;

    case INDEX_op_br:
        tcg_out_op_l(s, opc, arg_label(args[0]));
        break;

    CASE_32_64(setcond)
        tcg_out_op_rrrc(s, opc, args[0], args[1], args[2], args[3]);
        break;

    CASE_32_64(movcond)
    case INDEX_op_setcond2_i32:
        tcg_out_op_rrrrrc(s, opc, args[0], args[1], args[2],
                          args[3], args[4], args[5]);
        break;

    CASE_32_64(ld8u)
    CASE_32_64(ld8s)
    CASE_32_64(ld16u)
    CASE_32_64(ld16s)
    case INDEX_op_ld_i32:
    CASE_64(ld32u)
    CASE_64(ld32s)
    CASE_64(ld)
    CASE_32_64(st8)
    CASE_32_64(st16)
    case INDEX_op_st_i32:
    CASE_64(st32)
    CASE_64(st)
        tcg_out_ldst(s, opc, args[0], args[1], args[2]);
        break;

    CASE_32_64(shl)
    CASE_32_64(shr)
    CASE_32_64(sar)
    CASE_32_64(rotl)     /* Optional (TCG_TARGET_HAS_rot_*). */
    CASE_32_64(rotr)     /* Optional (TCG_TARGET_HAS_rot_*). */
    CASE_32_64(remu)     /* Optional (TCG_TARGET_HAS_div_*). */
    CASE_32_64(clz)      /* Optional (TCG_TARGET_HAS_clz_*). */
    CASE_32_64(ctz)      /* Optional (TCG_TARGET_HAS_ctz_*). */
        tcg_out_op_rrr(s, opc, args[0], args[1], args[2]);
        break;

    CASE_32_64(deposit)
        tcg_out_op_rrrbb(s, opc, args[0], args[1], args[2], args[3], args[4]);
        break;

    CASE_32_64(extract)  /* Optional (TCG_TARGET_HAS_extract_*). */
    CASE_32_64(sextract) /* Optional (TCG_TARGET_HAS_sextract_*). */
        tcg_out_op_rrbb(s, opc, args[0], args[1], args[2], args[3]);
        break;

    CASE_32_64(brcond)
        tcg_out_op_rrrc(s, (opc == INDEX_op_brcond_i32
                            ? INDEX_op_setcond_i32 : INDEX_op_setcond_i64),
                        TCG_REG_TMP, args[0], args[1], args[2]);
        tcg_out_op_rl(s, opc, TCG_REG_TMP, arg_label(args[3]));
        break;

    CASE_32_64(ctpop)    /* Optional (TCG_TARGET_HAS_ctpop_*). */
    case INDEX_op_bswap32_i32: /* Optional (TCG_TARGET_HAS_bswap32_i32). */
    case INDEX_op_bswap64_i64: /* Optional (TCG_TARGET_HAS_bswap64_i64). */
        tcg_out_op_rr(s, opc, args[0], args[1]);
        break;

    case INDEX_op_bswap16_i32: /* Optional (TCG_TARGET_HAS_bswap16_i32). */
    case INDEX_op_bswap16_i64: /* Optional (TCG_TARGET_HAS_bswap16_i64). */
        width = 16;
        goto do_bswap;
    case INDEX_op_bswap32_i64: /* Optional (TCG_TARGET_HAS_bswap32_i64). */
        width = 32;
    do_bswap:
        /* The base tci bswap ops zero-extend and ignore the high bits. */
        tcg_out_op_rr(s, opc, args[0], args[1]);
        if (args[2] & TCG_BSWAP_OS) {
            tcg_out_sextract(s, TCG_TYPE_REG, args[0], args[0], 0, width);
        }
        break;
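
    /*
     * For example (illustrative): bswap16 of 0x00aa yields 0xaa00;
     * with TCG_BSWAP_OS requested, the sextract above then
     * sign-extends from bit 15, producing 0xffffaa00 (or the 64-bit
     * equivalent) as the guest semantics require.
     */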

    CASE_32_64(add2)
    CASE_32_64(sub2)
        tcg_out_op_rrrrrr(s, opc, args[0], args[1], args[2],
                          args[3], args[4], args[5]);
        break;

#if TCG_TARGET_REG_BITS == 32
    case INDEX_op_brcond2_i32:
        tcg_out_op_rrrrrc(s, INDEX_op_setcond2_i32, TCG_REG_TMP,
                          args[0], args[1], args[2], args[3], args[4]);
        tcg_out_op_rl(s, INDEX_op_brcond_i32, TCG_REG_TMP, arg_label(args[5]));
        break;
#endif

    CASE_32_64(mulu2)
    CASE_32_64(muls2)
        tcg_out_op_rrrr(s, opc, args[0], args[1], args[2], args[3]);
        break;

    case INDEX_op_qemu_ld_i64:
    case INDEX_op_qemu_st_i64:
        if (TCG_TARGET_REG_BITS == 32) {
            tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP, args[3]);
            tcg_out_op_rrrr(s, opc, args[0], args[1], args[2], TCG_REG_TMP);
            break;
        }
        /* fall through */
    case INDEX_op_qemu_ld_i32:
    case INDEX_op_qemu_st_i32:
        if (TCG_TARGET_REG_BITS == 64 && s->addr_type == TCG_TYPE_I32) {
            tcg_out_ext32u(s, TCG_REG_TMP, args[1]);
            tcg_out_op_rrm(s, opc, args[0], TCG_REG_TMP, args[2]);
        } else {
            tcg_out_op_rrm(s, opc, args[0], args[1], args[2]);
        }
        break;

    case INDEX_op_mb:
        tcg_out_op_v(s, opc);
        break;

    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
    case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb.  */
    case INDEX_op_goto_tb:  /* Always emitted via tcg_out_goto_tb.  */
    case INDEX_op_ext_i32_i64:  /* Always emitted via tcg_reg_alloc_op.  */
    case INDEX_op_extu_i32_i64:
    case INDEX_op_extrl_i64_i32:
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_st(TCGContext *s, TCGType type, TCGReg val, TCGReg base,
                       intptr_t offset)
{
    switch (type) {
    case TCG_TYPE_I32:
        tcg_out_ldst(s, INDEX_op_st_i32, val, base, offset);
        break;
#if TCG_TARGET_REG_BITS == 64
    case TCG_TYPE_I64:
        tcg_out_ldst(s, INDEX_op_st_i64, val, base, offset);
        break;
#endif
    default:
        g_assert_not_reached();
    }
}

static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                               TCGReg base, intptr_t ofs)
{
    return false;
}

/* Test if a constant matches the constraint. */
static bool tcg_target_const_match(int64_t val, int ct,
                                   TCGType type, TCGCond cond, int vece)
{
    return ct & TCG_CT_CONST;
}

static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
    memset(p, 0, sizeof(*p) * count);
}

static void tcg_target_init(TCGContext *s)
{
    /* The current code uses uint8_t for tcg operations. */
    tcg_debug_assert(tcg_op_defs_max <= UINT8_MAX);

    /* Registers available for 32 bit operations. */
    tcg_target_available_regs[TCG_TYPE_I32] = BIT(TCG_TARGET_NB_REGS) - 1;
    /* Registers available for 64 bit operations. */
    tcg_target_available_regs[TCG_TYPE_I64] = BIT(TCG_TARGET_NB_REGS) - 1;
    /*
     * The interpreter "registers" are in the local stack frame and
     * cannot be clobbered by the called helper functions.  However,
     * the interpreter assumes a 128-bit return value and assigns to
     * the return value registers.
     */
    tcg_target_call_clobber_regs =
        MAKE_64BIT_MASK(TCG_REG_R0, 128 / TCG_TARGET_REG_BITS);

    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);

    /* The call arguments come first, followed by the temp storage. */
    tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE,
                  TCG_STATIC_FRAME_SIZE);
}
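
/*
 * Frame layout sketch (illustrative): offsets [0, TCG_STATIC_CALL_ARGS_SIZE)
 * from TCG_REG_CALL_STACK hold outgoing helper-call arguments, and the
 * TCG_STATIC_FRAME_SIZE bytes after that hold spilled temporaries; this is
 * the same split that stack_bounds_check() asserts for every stack access.
 */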

/* Generate global QEMU prologue and epilogue code. */
static inline void tcg_target_qemu_prologue(TCGContext *s)
{
}

static void tcg_out_tb_start(TCGContext *s)
{
    /* nothing to do */
}

bool tcg_target_has_memory_bswap(MemOp memop)
{
    return true;
}

static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    g_assert_not_reached();
}

static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    g_assert_not_reached();
}