/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2009, 2011 Stefan Weil
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* Used for function call generation. */
#define TCG_TARGET_CALL_STACK_OFFSET    0
#define TCG_TARGET_STACK_ALIGN          8
#if TCG_TARGET_REG_BITS == 32
# define TCG_TARGET_CALL_ARG_I32        TCG_CALL_ARG_EVEN
# define TCG_TARGET_CALL_ARG_I64        TCG_CALL_ARG_EVEN
# define TCG_TARGET_CALL_ARG_I128       TCG_CALL_ARG_EVEN
#else
# define TCG_TARGET_CALL_ARG_I32        TCG_CALL_ARG_NORMAL
# define TCG_TARGET_CALL_ARG_I64        TCG_CALL_ARG_NORMAL
# define TCG_TARGET_CALL_ARG_I128       TCG_CALL_ARG_NORMAL
#endif
#define TCG_TARGET_CALL_RET_I128        TCG_CALL_RET_NORMAL

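/*
 * Return the constraint set for @op.  TCI has no constant operands
 * and no special register classes, so every set below is built from
 * plain "r" register constraints.
 */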
static TCGConstraintSetIndex
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
{
    switch (op) {
    case INDEX_op_goto_ptr:
        return C_O0_I1(r);

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap16_i64:
    case INDEX_op_bswap32_i32:
    case INDEX_op_bswap32_i64:
    case INDEX_op_bswap64_i64:
    case INDEX_op_extract_i32:
    case INDEX_op_extract_i64:
    case INDEX_op_sextract_i32:
    case INDEX_op_sextract_i64:
    case INDEX_op_ctpop_i32:
    case INDEX_op_ctpop_i64:
        return C_O1_I1(r, r);

    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
        return C_O0_I2(r, r);

    case INDEX_op_div_i32:
    case INDEX_op_div_i64:
    case INDEX_op_divu_i32:
    case INDEX_op_divu_i64:
    case INDEX_op_rem_i32:
    case INDEX_op_rem_i64:
    case INDEX_op_remu_i32:
    case INDEX_op_remu_i64:
    case INDEX_op_shl_i32:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i32:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i32:
    case INDEX_op_sar_i64:
    case INDEX_op_rotl_i32:
    case INDEX_op_rotl_i64:
    case INDEX_op_rotr_i32:
    case INDEX_op_rotr_i64:
    case INDEX_op_setcond_i32:
    case INDEX_op_setcond_i64:
    case INDEX_op_deposit_i32:
    case INDEX_op_deposit_i64:
    case INDEX_op_clz_i32:
    case INDEX_op_clz_i64:
    case INDEX_op_ctz_i32:
    case INDEX_op_ctz_i64:
        return C_O1_I2(r, r, r);

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        return C_O0_I2(r, r);

    case INDEX_op_add2_i32:
    case INDEX_op_add2_i64:
    case INDEX_op_sub2_i32:
    case INDEX_op_sub2_i64:
        return C_O2_I4(r, r, r, r, r, r);

#if TCG_TARGET_REG_BITS == 32
    case INDEX_op_brcond2_i32:
        return C_O0_I4(r, r, r, r);
#endif

    case INDEX_op_mulu2_i32:
    case INDEX_op_mulu2_i64:
    case INDEX_op_muls2_i32:
    case INDEX_op_muls2_i64:
        return C_O2_I2(r, r, r, r);

    case INDEX_op_movcond_i32:
    case INDEX_op_movcond_i64:
    case INDEX_op_setcond2_i32:
        return C_O1_I4(r, r, r, r, r);

    case INDEX_op_qemu_ld_i32:
        return C_O1_I1(r, r);
    case INDEX_op_qemu_ld_i64:
        return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O2_I1(r, r, r);
    case INDEX_op_qemu_st_i32:
        return C_O0_I2(r, r);
    case INDEX_op_qemu_st_i64:
        return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r) : C_O0_I3(r, r, r);

    default:
        return C_NotImplemented;
    }
}

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R12,
    TCG_REG_R13,
    TCG_REG_R14,
    TCG_REG_R15,
    /* Either 2 or 4 of these are call clobbered, so use them last. */
    TCG_REG_R3,
    TCG_REG_R2,
    TCG_REG_R1,
    TCG_REG_R0,
};

/* No call arguments via registers.  All will be stored on the "stack". */
static const int tcg_target_call_iarg_regs[] = { };

static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
{
    tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
    tcg_debug_assert(slot >= 0 && slot < 128 / TCG_TARGET_REG_BITS);
    return TCG_REG_R0 + slot;
}

#ifdef CONFIG_DEBUG_TCG
static const char *const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "r00",
    "r01",
    "r02",
    "r03",
    "r04",
    "r05",
    "r06",
    "r07",
    "r08",
    "r09",
    "r10",
    "r11",
    "r12",
    "r13",
    "r14",
    "r15",
};
#endif

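/*
 * The only relocation type used is 20, naming the width of the
 * pc-relative displacement field.  The displacement is measured from
 * the end of the 32-bit insn word and is patched into its high 20
 * bits, matching the layout used by tcg_out_op_l, tcg_out_op_rl and
 * the constant pool entries below.
 */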
static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    intptr_t diff = value - (intptr_t)(code_ptr + 1);

    tcg_debug_assert(addend == 0);
    tcg_debug_assert(type == 20);

    if (diff == sextract32(diff, 0, type)) {
        tcg_patch32(code_ptr, deposit32(*code_ptr, 32 - type, type, diff));
        return true;
    }
    return false;
}

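/*
 * Accesses through TCG_REG_CALL_STACK must stay within the static
 * call-argument and frame area reserved via tcg_set_frame() in
 * tcg_target_init() below.
 */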
static void stack_bounds_check(TCGReg base, intptr_t offset)
{
    if (base == TCG_REG_CALL_STACK) {
        tcg_debug_assert(offset >= 0);
        tcg_debug_assert(offset < (TCG_STATIC_CALL_ARGS_SIZE +
                                   TCG_STATIC_FRAME_SIZE));
    }
}

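/*
 * Every TCI instruction is a single 32-bit word assembled with
 * deposit32(): the low 8 bits hold the opcode, register operands
 * follow as 4-bit fields from bit 8, and any trailing immediate,
 * condition or displacement fills the remaining high bits.  The
 * tcg_out_op_* helpers below are named after their operand layout:
 * r = register, i = immediate, s = signed offset, l = label,
 * p = pointer, c = condition, b = bitfield position/length,
 * m = memop index, v = no operands.  As a sketch, "add r2, r3, r4"
 * would be assembled as:
 *
 *     insn = deposit32(insn, 0, 8, INDEX_op_add);
 *     insn = deposit32(insn, 8, 4, TCG_REG_R2);
 *     insn = deposit32(insn, 12, 4, TCG_REG_R3);
 *     insn = deposit32(insn, 16, 4, TCG_REG_R4);
 */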
static void tcg_out_op_l(TCGContext *s, TCGOpcode op, TCGLabel *l0)
{
    tcg_insn_unit insn = 0;

    tcg_out_reloc(s, s->code_ptr, 20, l0, 0);
    insn = deposit32(insn, 0, 8, op);
    tcg_out32(s, insn);
}

static void tcg_out_op_p(TCGContext *s, TCGOpcode op, void *p0)
{
    tcg_insn_unit insn = 0;
    intptr_t diff;

    /* Special case for exit_tb: map null -> 0. */
    if (p0 == NULL) {
        diff = 0;
    } else {
        diff = p0 - (void *)(s->code_ptr + 1);
        tcg_debug_assert(diff != 0);
        if (diff != sextract32(diff, 0, 20)) {
            tcg_raise_tb_overflow(s);
        }
    }
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 12, 20, diff);
    tcg_out32(s, insn);
}

static void tcg_out_op_r(TCGContext *s, TCGOpcode op, TCGReg r0)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    tcg_out32(s, insn);
}

static void tcg_out_op_v(TCGContext *s, TCGOpcode op)
{
    tcg_out32(s, (uint8_t)op);
}

static void tcg_out_op_ri(TCGContext *s, TCGOpcode op, TCGReg r0, int32_t i1)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(i1 == sextract32(i1, 0, 20));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 20, i1);
    tcg_out32(s, insn);
}

static void tcg_out_op_rl(TCGContext *s, TCGOpcode op, TCGReg r0, TCGLabel *l1)
{
    tcg_insn_unit insn = 0;

    tcg_out_reloc(s, s->code_ptr, 20, l1, 0);
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    tcg_out32(s, insn);
}

static void tcg_out_op_rr(TCGContext *s, TCGOpcode op, TCGReg r0, TCGReg r1)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    tcg_out32(s, insn);
}

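/*
 * The 16-bit m2 field carries the MemOpIdx of a qemu_ld/st opcode;
 * see the INDEX_op_qemu_* cases in tcg_out_op() below.
 */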
static void tcg_out_op_rrm(TCGContext *s, TCGOpcode op,
                           TCGReg r0, TCGReg r1, TCGArg m2)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(m2 == extract32(m2, 0, 16));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 16, m2);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrr(TCGContext *s, TCGOpcode op,
                           TCGReg r0, TCGReg r1, TCGReg r2)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrs(TCGContext *s, TCGOpcode op,
                           TCGReg r0, TCGReg r1, intptr_t i2)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(i2 == sextract32(i2, 0, 16));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 16, i2);
    tcg_out32(s, insn);
}

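/*
 * b2 and b3 are the 6-bit position and length of a bitfield, as
 * used by the extract and sextract opcodes.
 */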
static void tcg_out_op_rrbb(TCGContext *s, TCGOpcode op, TCGReg r0,
                            TCGReg r1, uint8_t b2, uint8_t b3)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(b2 == extract32(b2, 0, 6));
    tcg_debug_assert(b3 == extract32(b3, 0, 6));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 6, b2);
    insn = deposit32(insn, 22, 6, b3);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrrc(TCGContext *s, TCGOpcode op,
                            TCGReg r0, TCGReg r1, TCGReg r2, TCGCond c3)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 4, c3);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrrbb(TCGContext *s, TCGOpcode op, TCGReg r0,
                             TCGReg r1, TCGReg r2, uint8_t b3, uint8_t b4)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(b3 == extract32(b3, 0, 6));
    tcg_debug_assert(b4 == extract32(b4, 0, 6));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 6, b3);
    insn = deposit32(insn, 26, 6, b4);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrrr(TCGContext *s, TCGOpcode op,
                            TCGReg r0, TCGReg r1, TCGReg r2, TCGReg r3)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 4, r3);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrrrrc(TCGContext *s, TCGOpcode op,
                              TCGReg r0, TCGReg r1, TCGReg r2,
                              TCGReg r3, TCGReg r4, TCGCond c5)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 4, r3);
    insn = deposit32(insn, 24, 4, r4);
    insn = deposit32(insn, 28, 4, c5);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrrrrr(TCGContext *s, TCGOpcode op,
                              TCGReg r0, TCGReg r1, TCGReg r2,
                              TCGReg r3, TCGReg r4, TCGReg r5)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 4, r3);
    insn = deposit32(insn, 24, 4, r4);
    insn = deposit32(insn, 28, 4, r5);
    tcg_out32(s, insn);
}

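/*
 * Load/store offsets must fit the signed 16-bit field of the insn
 * word; anything wider is first added into TCG_REG_TMP, which then
 * serves as the base with a zero offset.
 */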
static void tcg_out_ldst(TCGContext *s, TCGOpcode op, TCGReg val,
                         TCGReg base, intptr_t offset)
{
    stack_bounds_check(base, offset);
    if (offset != sextract32(offset, 0, 16)) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, offset);
        tcg_out_op_rrr(s, INDEX_op_add, TCG_REG_TMP, TCG_REG_TMP, base);
        base = TCG_REG_TMP;
        offset = 0;
    }
    tcg_out_op_rrs(s, op, val, base, offset);
}

static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg val, TCGReg base,
                       intptr_t offset)
{
    switch (type) {
    case TCG_TYPE_I32:
        tcg_out_ldst(s, INDEX_op_ld_i32, val, base, offset);
        break;
#if TCG_TARGET_REG_BITS == 64
    case TCG_TYPE_I64:
        tcg_out_ldst(s, INDEX_op_ld_i64, val, base, offset);
        break;
#endif
    default:
        g_assert_not_reached();
    }
}

static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    tcg_out_op_rr(s, INDEX_op_mov, ret, arg);
    return true;
}

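/*
 * Constants that fit the signed 20-bit immediate field are emitted
 * inline with tci_movi; anything larger goes into the constant pool
 * and is loaded with tci_movl via the 20-bit relocation handled by
 * patch_reloc() above.
 */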
static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg)
{
    switch (type) {
    case TCG_TYPE_I32:
#if TCG_TARGET_REG_BITS == 64
        arg = (int32_t)arg;
        /* fall through */
    case TCG_TYPE_I64:
#endif
        break;
    default:
        g_assert_not_reached();
    }

    if (arg == sextract32(arg, 0, 20)) {
        tcg_out_op_ri(s, INDEX_op_tci_movi, ret, arg);
    } else {
        tcg_insn_unit insn = 0;

        new_pool_label(s, arg, 20, s->code_ptr, 0);
        insn = deposit32(insn, 0, 8, INDEX_op_tci_movl);
        insn = deposit32(insn, 8, 4, ret);
        tcg_out32(s, insn);
    }
}

static void tcg_out_extract(TCGContext *s, TCGType type, TCGReg rd,
                            TCGReg rs, unsigned pos, unsigned len)
{
    TCGOpcode opc = type == TCG_TYPE_I32 ?
                    INDEX_op_extract_i32 :
                    INDEX_op_extract_i64;
    tcg_out_op_rrbb(s, opc, rd, rs, pos, len);
}

static void tcg_out_sextract(TCGContext *s, TCGType type, TCGReg rd,
                             TCGReg rs, unsigned pos, unsigned len)
{
    TCGOpcode opc = type == TCG_TYPE_I32 ?
                    INDEX_op_sextract_i32 :
                    INDEX_op_sextract_i64;
    tcg_out_op_rrbb(s, opc, rd, rs, pos, len);
}

static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
{
    tcg_out_sextract(s, type, rd, rs, 0, 8);
}

static void tcg_out_ext8u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_extract(s, TCG_TYPE_REG, rd, rs, 0, 8);
}

static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
{
    tcg_out_sextract(s, type, rd, rs, 0, 16);
}

static void tcg_out_ext16u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_extract(s, TCG_TYPE_REG, rd, rs, 0, 16);
}

static void tcg_out_ext32s(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
    tcg_out_sextract(s, TCG_TYPE_I64, rd, rs, 0, 32);
}

static void tcg_out_ext32u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
    tcg_out_extract(s, TCG_TYPE_I64, rd, rs, 0, 32);
}

static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_ext32s(s, rd, rs);
}

static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_ext32u(s, rd, rs);
}

static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
    tcg_out_mov(s, TCG_TYPE_I32, rd, rs);
}

static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
{
    return false;
}

static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                             tcg_target_long imm)
{
    /* This function is only used for passing structs by reference. */
    g_assert_not_reached();
}

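/*
 * Calls go through libffi.  The 4-bit field after the opcode tells
 * the interpreter how to store the return value: 0 = void,
 * 1 = 32-bit, 2 = 64-bit, 3 = 128-bit.  The function pointer and
 * its ffi_cif are placed as a pair in the constant pool.
 */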
static void tcg_out_call(TCGContext *s, const tcg_insn_unit *func,
                         const TCGHelperInfo *info)
{
    ffi_cif *cif = info->cif;
    tcg_insn_unit insn = 0;
    uint8_t which;

    if (cif->rtype == &ffi_type_void) {
        which = 0;
    } else {
        tcg_debug_assert(cif->rtype->size == 4 ||
                         cif->rtype->size == 8 ||
                         cif->rtype->size == 16);
        which = ctz32(cif->rtype->size) - 1;
    }
    new_pool_l2(s, 20, s->code_ptr, 0, (uintptr_t)func, (uintptr_t)cif);
    insn = deposit32(insn, 0, 8, INDEX_op_call);
    insn = deposit32(insn, 8, 4, which);
    tcg_out32(s, insn);
}

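/*
 * Expand a case label for the _i32 opcode, plus the matching _i64
 * opcode when the host is 64-bit.
 */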
#if TCG_TARGET_REG_BITS == 64
# define CASE_32_64(x) \
        case glue(glue(INDEX_op_, x), _i64): \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x) \
        case glue(glue(INDEX_op_, x), _i64):
#else
# define CASE_32_64(x) \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x)
#endif

static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg)
{
    tcg_out_op_p(s, INDEX_op_exit_tb, (void *)arg);
}

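/*
 * goto_tb always uses the indirect method: the insn points at the
 * jump-target slot for this TB, which the interpreter loads and
 * jumps through at run time, so tb_target_set_jmp_target below has
 * nothing to patch.
 */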
static void tcg_out_goto_tb(TCGContext *s, int which)
{
    /* indirect jump method. */
    tcg_out_op_p(s, INDEX_op_goto_tb, (void *)get_jmp_target_addr(s, which));
    set_jmp_reset_offset(s, which);
}

void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
                              uintptr_t jmp_rx, uintptr_t jmp_rw)
{
    /* Always indirect, nothing to do */
}

static void tgen_add(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_add, a0, a1, a2);
}

static const TCGOutOpBinary outop_add = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_add,
};

static void tgen_and(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_and, a0, a1, a2);
}

static const TCGOutOpBinary outop_and = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_and,
};

static void tgen_andc(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_andc, a0, a1, a2);
}

static const TCGOutOpBinary outop_andc = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_andc,
};

static void tgen_eqv(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_eqv, a0, a1, a2);
}

static const TCGOutOpBinary outop_eqv = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_eqv,
};

static void tgen_mul(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_mul, a0, a1, a2);
}

static const TCGOutOpBinary outop_mul = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_mul,
};

static const TCGOutOpBinary outop_mulsh = {
    .base.static_constraint = C_NotImplemented,
};

static const TCGOutOpBinary outop_muluh = {
    .base.static_constraint = C_NotImplemented,
};

static void tgen_nand(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_nand, a0, a1, a2);
}

static const TCGOutOpBinary outop_nand = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_nand,
};

static void tgen_nor(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_nor, a0, a1, a2);
}

static const TCGOutOpBinary outop_nor = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_nor,
};

static void tgen_or(TCGContext *s, TCGType type,
                    TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_or, a0, a1, a2);
}

static const TCGOutOpBinary outop_or = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_or,
};

static void tgen_orc(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_orc, a0, a1, a2);
}

static const TCGOutOpBinary outop_orc = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_orc,
};

static void tgen_sub(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_sub, a0, a1, a2);
}

static const TCGOutOpSubtract outop_sub = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_sub,
};

static void tgen_xor(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_xor, a0, a1, a2);
}

static const TCGOutOpBinary outop_xor = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_xor,
};

static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
    tcg_out_op_rr(s, INDEX_op_neg, a0, a1);
}

static const TCGOutOpUnary outop_neg = {
    .base.static_constraint = C_O1_I1(r, r),
    .out_rr = tgen_neg,
};

static void tgen_not(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
    tcg_out_op_rr(s, INDEX_op_not, a0, a1);
}

static const TCGOutOpUnary outop_not = {
    .base.static_constraint = C_O1_I1(r, r),
    .out_rr = tgen_not,
};

static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS])
{
    int width;

    switch (opc) {
    case INDEX_op_goto_ptr:
        tcg_out_op_r(s, opc, args[0]);
        break;

    case INDEX_op_br:
        tcg_out_op_l(s, opc, arg_label(args[0]));
        break;

    CASE_32_64(setcond)
        tcg_out_op_rrrc(s, opc, args[0], args[1], args[2], args[3]);
        break;

    CASE_32_64(movcond)
    case INDEX_op_setcond2_i32:
        tcg_out_op_rrrrrc(s, opc, args[0], args[1], args[2],
                          args[3], args[4], args[5]);
        break;

    CASE_32_64(ld8u)
    CASE_32_64(ld8s)
    CASE_32_64(ld16u)
    CASE_32_64(ld16s)
    case INDEX_op_ld_i32:
    CASE_64(ld32u)
    CASE_64(ld32s)
    CASE_64(ld)
    CASE_32_64(st8)
    CASE_32_64(st16)
    case INDEX_op_st_i32:
    CASE_64(st32)
    CASE_64(st)
        tcg_out_ldst(s, opc, args[0], args[1], args[2]);
        break;

    CASE_32_64(shl)
    CASE_32_64(shr)
    CASE_32_64(sar)
    CASE_32_64(rotl)     /* Optional (TCG_TARGET_HAS_rot_*). */
    CASE_32_64(rotr)     /* Optional (TCG_TARGET_HAS_rot_*). */
    CASE_32_64(div)      /* Optional (TCG_TARGET_HAS_div_*). */
    CASE_32_64(divu)     /* Optional (TCG_TARGET_HAS_div_*). */
    CASE_32_64(rem)      /* Optional (TCG_TARGET_HAS_div_*). */
    CASE_32_64(remu)     /* Optional (TCG_TARGET_HAS_div_*). */
    CASE_32_64(clz)      /* Optional (TCG_TARGET_HAS_clz_*). */
    CASE_32_64(ctz)      /* Optional (TCG_TARGET_HAS_ctz_*). */
        tcg_out_op_rrr(s, opc, args[0], args[1], args[2]);
        break;

    CASE_32_64(deposit)
        tcg_out_op_rrrbb(s, opc, args[0], args[1], args[2], args[3], args[4]);
        break;

    CASE_32_64(extract)  /* Optional (TCG_TARGET_HAS_extract_*). */
    CASE_32_64(sextract) /* Optional (TCG_TARGET_HAS_sextract_*). */
        tcg_out_op_rrbb(s, opc, args[0], args[1], args[2], args[3]);
        break;

    CASE_32_64(brcond)
        tcg_out_op_rrrc(s, (opc == INDEX_op_brcond_i32
                            ? INDEX_op_setcond_i32 : INDEX_op_setcond_i64),
                        TCG_REG_TMP, args[0], args[1], args[2]);
        tcg_out_op_rl(s, opc, TCG_REG_TMP, arg_label(args[3]));
        break;

    CASE_32_64(ctpop)    /* Optional (TCG_TARGET_HAS_ctpop_*). */
    case INDEX_op_bswap32_i32: /* Optional (TCG_TARGET_HAS_bswap32_i32). */
    case INDEX_op_bswap64_i64: /* Optional (TCG_TARGET_HAS_bswap64_i64). */
        tcg_out_op_rr(s, opc, args[0], args[1]);
        break;

    case INDEX_op_bswap16_i32: /* Optional (TCG_TARGET_HAS_bswap16_i32). */
    case INDEX_op_bswap16_i64: /* Optional (TCG_TARGET_HAS_bswap16_i64). */
        width = 16;
        goto do_bswap;
    case INDEX_op_bswap32_i64: /* Optional (TCG_TARGET_HAS_bswap32_i64). */
        width = 32;
    do_bswap:
        /* The base tci bswaps zero-extend and ignore the high bits. */
        tcg_out_op_rr(s, opc, args[0], args[1]);
        if (args[2] & TCG_BSWAP_OS) {
            tcg_out_sextract(s, TCG_TYPE_REG, args[0], args[0], 0, width);
        }
        break;

    CASE_32_64(add2)
    CASE_32_64(sub2)
        tcg_out_op_rrrrrr(s, opc, args[0], args[1], args[2],
                          args[3], args[4], args[5]);
        break;

#if TCG_TARGET_REG_BITS == 32
    case INDEX_op_brcond2_i32:
        tcg_out_op_rrrrrc(s, INDEX_op_setcond2_i32, TCG_REG_TMP,
                          args[0], args[1], args[2], args[3], args[4]);
        tcg_out_op_rl(s, INDEX_op_brcond_i32, TCG_REG_TMP, arg_label(args[5]));
        break;
#endif

    CASE_32_64(mulu2)
    CASE_32_64(muls2)
        tcg_out_op_rrrr(s, opc, args[0], args[1], args[2], args[3]);
        break;

    case INDEX_op_qemu_ld_i64:
    case INDEX_op_qemu_st_i64:
        if (TCG_TARGET_REG_BITS == 32) {
            tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP, args[3]);
            tcg_out_op_rrrr(s, opc, args[0], args[1], args[2], TCG_REG_TMP);
            break;
        }
        /* fall through */
    case INDEX_op_qemu_ld_i32:
    case INDEX_op_qemu_st_i32:
        if (TCG_TARGET_REG_BITS == 64 && s->addr_type == TCG_TYPE_I32) {
            tcg_out_ext32u(s, TCG_REG_TMP, args[1]);
            tcg_out_op_rrm(s, opc, args[0], TCG_REG_TMP, args[2]);
        } else {
            tcg_out_op_rrm(s, opc, args[0], args[1], args[2]);
        }
        break;

    case INDEX_op_mb:
        tcg_out_op_v(s, opc);
        break;

    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
    case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb.  */
    case INDEX_op_goto_tb:  /* Always emitted via tcg_out_goto_tb.  */
    case INDEX_op_ext_i32_i64:  /* Always emitted via tcg_reg_alloc_op.  */
    case INDEX_op_extu_i32_i64:
    case INDEX_op_extrl_i64_i32:
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_st(TCGContext *s, TCGType type, TCGReg val, TCGReg base,
                       intptr_t offset)
{
    switch (type) {
    case TCG_TYPE_I32:
        tcg_out_ldst(s, INDEX_op_st_i32, val, base, offset);
        break;
#if TCG_TARGET_REG_BITS == 64
    case TCG_TYPE_I64:
        tcg_out_ldst(s, INDEX_op_st_i64, val, base, offset);
        break;
#endif
    default:
        g_assert_not_reached();
    }
}

static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                               TCGReg base, intptr_t ofs)
{
    return false;
}

/* Test if a constant matches the constraint. */
static bool tcg_target_const_match(int64_t val, int ct,
                                   TCGType type, TCGCond cond, int vece)
{
    return ct & TCG_CT_CONST;
}

static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
    memset(p, 0, sizeof(*p) * count);
}

static void tcg_target_init(TCGContext *s)
{
    /* The current code uses uint8_t for tcg operations. */
    tcg_debug_assert(tcg_op_defs_max <= UINT8_MAX);

    /* Registers available for 32 bit operations. */
    tcg_target_available_regs[TCG_TYPE_I32] = BIT(TCG_TARGET_NB_REGS) - 1;
    /* Registers available for 64 bit operations. */
    tcg_target_available_regs[TCG_TYPE_I64] = BIT(TCG_TARGET_NB_REGS) - 1;
    /*
     * The interpreter "registers" are in the local stack frame and
     * cannot be clobbered by the called helper functions.  However,
     * the interpreter assumes a 128-bit return value and assigns to
     * the return value registers.
     */
    tcg_target_call_clobber_regs =
        MAKE_64BIT_MASK(TCG_REG_R0, 128 / TCG_TARGET_REG_BITS);

    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);

    /* The call arguments come first, followed by the temp storage. */
    tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE,
                  TCG_STATIC_FRAME_SIZE);
}

/* Generate global QEMU prologue and epilogue code. */
static inline void tcg_target_qemu_prologue(TCGContext *s)
{
}

static void tcg_out_tb_start(TCGContext *s)
{
    /* nothing to do */
}

bool tcg_target_has_memory_bswap(MemOp memop)
{
    return true;
}

static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    g_assert_not_reached();
}

static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    g_assert_not_reached();
}
997