/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2009, 2011 Stefan Weil
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* Used for function call generation. */
#define TCG_TARGET_CALL_STACK_OFFSET    0
#define TCG_TARGET_STACK_ALIGN          8
#if TCG_TARGET_REG_BITS == 32
# define TCG_TARGET_CALL_ARG_I32        TCG_CALL_ARG_EVEN
# define TCG_TARGET_CALL_ARG_I64        TCG_CALL_ARG_EVEN
# define TCG_TARGET_CALL_ARG_I128       TCG_CALL_ARG_EVEN
#else
# define TCG_TARGET_CALL_ARG_I32        TCG_CALL_ARG_NORMAL
# define TCG_TARGET_CALL_ARG_I64        TCG_CALL_ARG_NORMAL
# define TCG_TARGET_CALL_ARG_I128       TCG_CALL_ARG_NORMAL
#endif
#define TCG_TARGET_CALL_RET_I128        TCG_CALL_RET_NORMAL

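/*
 * Map each opcode to its operand constraint set.  TCI is an interpreter
 * with sixteen interchangeable general registers, so every operand uses
 * the plain "r" constraint; only the shape (number of outputs and inputs)
 * differs between opcodes.
 */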
static TCGConstraintSetIndex
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
{
    switch (op) {
    case INDEX_op_goto_ptr:
        return C_O0_I1(r);

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap16_i64:
    case INDEX_op_bswap32_i32:
    case INDEX_op_bswap32_i64:
    case INDEX_op_bswap64_i64:
    case INDEX_op_extract_i32:
    case INDEX_op_extract_i64:
    case INDEX_op_sextract_i32:
    case INDEX_op_sextract_i64:
    case INDEX_op_ctpop_i32:
    case INDEX_op_ctpop_i64:
        return C_O1_I1(r, r);

    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
        return C_O0_I2(r, r);

    case INDEX_op_divu_i32:
    case INDEX_op_divu_i64:
    case INDEX_op_rem_i32:
    case INDEX_op_rem_i64:
    case INDEX_op_remu_i32:
    case INDEX_op_remu_i64:
    case INDEX_op_shl_i32:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i32:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i32:
    case INDEX_op_sar_i64:
    case INDEX_op_rotl_i32:
    case INDEX_op_rotl_i64:
    case INDEX_op_rotr_i32:
    case INDEX_op_rotr_i64:
    case INDEX_op_setcond_i32:
    case INDEX_op_setcond_i64:
    case INDEX_op_deposit_i32:
    case INDEX_op_deposit_i64:
    case INDEX_op_clz_i32:
    case INDEX_op_clz_i64:
    case INDEX_op_ctz_i32:
    case INDEX_op_ctz_i64:
        return C_O1_I2(r, r, r);

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        return C_O0_I2(r, r);

    case INDEX_op_add2_i32:
    case INDEX_op_add2_i64:
    case INDEX_op_sub2_i32:
    case INDEX_op_sub2_i64:
        return C_O2_I4(r, r, r, r, r, r);

#if TCG_TARGET_REG_BITS == 32
    case INDEX_op_brcond2_i32:
        return C_O0_I4(r, r, r, r);
#endif

    case INDEX_op_mulu2_i32:
    case INDEX_op_mulu2_i64:
    case INDEX_op_muls2_i32:
    case INDEX_op_muls2_i64:
        return C_O2_I2(r, r, r, r);

    case INDEX_op_movcond_i32:
    case INDEX_op_movcond_i64:
    case INDEX_op_setcond2_i32:
        return C_O1_I4(r, r, r, r, r);

    case INDEX_op_qemu_ld_i32:
        return C_O1_I1(r, r);
    case INDEX_op_qemu_ld_i64:
        return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O2_I1(r, r, r);
    case INDEX_op_qemu_st_i32:
        return C_O0_I2(r, r);
    case INDEX_op_qemu_st_i64:
        return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r) : C_O0_I3(r, r, r);

    default:
        return C_NotImplemented;
    }
}

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R12,
    TCG_REG_R13,
    TCG_REG_R14,
    TCG_REG_R15,
    /* Either 2 or 4 of these are call clobbered, so use them last. */
    TCG_REG_R3,
    TCG_REG_R2,
    TCG_REG_R1,
    TCG_REG_R0,
};

/* No call arguments via registers.  All will be stored on the "stack". */
static const int tcg_target_call_iarg_regs[] = { };

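/*
 * Helper return values do come back in registers: up to 128 bits,
 * spread across R0..R3 on a 32-bit host or R0..R1 on a 64-bit host,
 * one slot per register.
 */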
static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
{
    tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
    tcg_debug_assert(slot >= 0 && slot < 128 / TCG_TARGET_REG_BITS);
    return TCG_REG_R0 + slot;
}

#ifdef CONFIG_DEBUG_TCG
static const char *const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "r00",
    "r01",
    "r02",
    "r03",
    "r04",
    "r05",
    "r06",
    "r07",
    "r08",
    "r09",
    "r10",
    "r11",
    "r12",
    "r13",
    "r14",
    "r15",
};
#endif

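/*
 * Patch a pc-relative displacement into a previously emitted instruction.
 * The only relocation type used is 20, a signed 20-bit displacement
 * stored in bits [12:31] of the instruction word, measured from the end
 * of that word (code_ptr + 1).  Fail if the displacement does not fit.
 */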
static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    intptr_t diff = value - (intptr_t)(code_ptr + 1);

    tcg_debug_assert(addend == 0);
    tcg_debug_assert(type == 20);

    if (diff == sextract32(diff, 0, type)) {
        tcg_patch32(code_ptr, deposit32(*code_ptr, 32 - type, type, diff));
        return true;
    }
    return false;
}

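/*
 * In debug builds, verify that accesses relative to TCG_REG_CALL_STACK
 * stay within the static call-argument and frame area reserved by
 * tcg_target_init() below.
 */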
static void stack_bounds_check(TCGReg base, intptr_t offset)
{
    if (base == TCG_REG_CALL_STACK) {
        tcg_debug_assert(offset >= 0);
        tcg_debug_assert(offset < (TCG_STATIC_CALL_ARGS_SIZE +
                                   TCG_STATIC_FRAME_SIZE));
    }
}

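/*
 * Instruction emitters.  Each TCI instruction is a single 32-bit word:
 * the opcode occupies bits [0:7] and register operands occupy successive
 * 4-bit fields starting at bit 8.  Immediate variants pack a signed
 * 20-bit value at bit 12 (the _ri, _p and label forms; the label forms
 * are filled in later through reloc type 20), a 16-bit value at bit 16
 * (the _rrs and _rrm forms), or 6-bit position/length fields for the
 * deposit/extract family (the _rrbb and _rrrbb forms).  The suffix of
 * each function names its operands in encoding order.
 */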
static void tcg_out_op_l(TCGContext *s, TCGOpcode op, TCGLabel *l0)
{
    tcg_insn_unit insn = 0;

    tcg_out_reloc(s, s->code_ptr, 20, l0, 0);
    insn = deposit32(insn, 0, 8, op);
    tcg_out32(s, insn);
}

static void tcg_out_op_p(TCGContext *s, TCGOpcode op, void *p0)
{
    tcg_insn_unit insn = 0;
    intptr_t diff;

    /* Special case for exit_tb: map null -> 0. */
    if (p0 == NULL) {
        diff = 0;
    } else {
        diff = p0 - (void *)(s->code_ptr + 1);
        tcg_debug_assert(diff != 0);
        if (diff != sextract32(diff, 0, 20)) {
            tcg_raise_tb_overflow(s);
        }
    }
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 12, 20, diff);
    tcg_out32(s, insn);
}

static void tcg_out_op_r(TCGContext *s, TCGOpcode op, TCGReg r0)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    tcg_out32(s, insn);
}

static void tcg_out_op_v(TCGContext *s, TCGOpcode op)
{
    tcg_out32(s, (uint8_t)op);
}

static void tcg_out_op_ri(TCGContext *s, TCGOpcode op, TCGReg r0, int32_t i1)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(i1 == sextract32(i1, 0, 20));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 20, i1);
    tcg_out32(s, insn);
}

static void tcg_out_op_rl(TCGContext *s, TCGOpcode op, TCGReg r0, TCGLabel *l1)
{
    tcg_insn_unit insn = 0;

    tcg_out_reloc(s, s->code_ptr, 20, l1, 0);
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    tcg_out32(s, insn);
}

static void tcg_out_op_rr(TCGContext *s, TCGOpcode op, TCGReg r0, TCGReg r1)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrm(TCGContext *s, TCGOpcode op,
                           TCGReg r0, TCGReg r1, TCGArg m2)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(m2 == extract32(m2, 0, 16));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 16, m2);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrr(TCGContext *s, TCGOpcode op,
                           TCGReg r0, TCGReg r1, TCGReg r2)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrs(TCGContext *s, TCGOpcode op,
                           TCGReg r0, TCGReg r1, intptr_t i2)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(i2 == sextract32(i2, 0, 16));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 16, i2);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrbb(TCGContext *s, TCGOpcode op, TCGReg r0,
                            TCGReg r1, uint8_t b2, uint8_t b3)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(b2 == extract32(b2, 0, 6));
    tcg_debug_assert(b3 == extract32(b3, 0, 6));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 6, b2);
    insn = deposit32(insn, 22, 6, b3);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrrc(TCGContext *s, TCGOpcode op,
                            TCGReg r0, TCGReg r1, TCGReg r2, TCGCond c3)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 4, c3);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrrbb(TCGContext *s, TCGOpcode op, TCGReg r0,
                             TCGReg r1, TCGReg r2, uint8_t b3, uint8_t b4)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(b3 == extract32(b3, 0, 6));
    tcg_debug_assert(b4 == extract32(b4, 0, 6));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 6, b3);
    insn = deposit32(insn, 26, 6, b4);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrrr(TCGContext *s, TCGOpcode op,
                            TCGReg r0, TCGReg r1, TCGReg r2, TCGReg r3)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 4, r3);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrrrrc(TCGContext *s, TCGOpcode op,
                              TCGReg r0, TCGReg r1, TCGReg r2,
                              TCGReg r3, TCGReg r4, TCGCond c5)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 4, r3);
    insn = deposit32(insn, 24, 4, r4);
    insn = deposit32(insn, 28, 4, c5);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrrrrr(TCGContext *s, TCGOpcode op,
                              TCGReg r0, TCGReg r1, TCGReg r2,
                              TCGReg r3, TCGReg r4, TCGReg r5)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 4, r3);
    insn = deposit32(insn, 24, 4, r4);
    insn = deposit32(insn, 28, 4, r5);
    tcg_out32(s, insn);
}

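/*
 * Emit a load or store.  An offset that does not fit the signed 16-bit
 * field is first materialized into TCG_REG_TMP and added to the base,
 * after which the access uses a zero offset.
 */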
static void tcg_out_ldst(TCGContext *s, TCGOpcode op, TCGReg val,
                         TCGReg base, intptr_t offset)
{
    stack_bounds_check(base, offset);
    if (offset != sextract32(offset, 0, 16)) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, offset);
        tcg_out_op_rrr(s, INDEX_op_add, TCG_REG_TMP, TCG_REG_TMP, base);
        base = TCG_REG_TMP;
        offset = 0;
    }
    tcg_out_op_rrs(s, op, val, base, offset);
}

static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg val, TCGReg base,
                       intptr_t offset)
{
    switch (type) {
    case TCG_TYPE_I32:
        tcg_out_ldst(s, INDEX_op_ld_i32, val, base, offset);
        break;
#if TCG_TARGET_REG_BITS == 64
    case TCG_TYPE_I64:
        tcg_out_ldst(s, INDEX_op_ld_i64, val, base, offset);
        break;
#endif
    default:
        g_assert_not_reached();
    }
}

static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    tcg_out_op_rr(s, INDEX_op_mov, ret, arg);
    return true;
}

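/*
 * Load a constant into a register.  Values that fit the signed 20-bit
 * immediate use tci_movi directly; anything larger becomes a tci_movl
 * whose operand is resolved through the constant pool via the same
 * type-20 relocation used for branches.
 */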
static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg)
{
    switch (type) {
    case TCG_TYPE_I32:
#if TCG_TARGET_REG_BITS == 64
        arg = (int32_t)arg;
        /* fall through */
    case TCG_TYPE_I64:
#endif
        break;
    default:
        g_assert_not_reached();
    }

    if (arg == sextract32(arg, 0, 20)) {
        tcg_out_op_ri(s, INDEX_op_tci_movi, ret, arg);
    } else {
        tcg_insn_unit insn = 0;

        new_pool_label(s, arg, 20, s->code_ptr, 0);
        insn = deposit32(insn, 0, 8, INDEX_op_tci_movl);
        insn = deposit32(insn, 8, 4, ret);
        tcg_out32(s, insn);
    }
}

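/*
 * All of the sign- and zero-extension helpers below are built on the
 * (s)extract opcodes, extracting the low 8, 16 or 32 bits of the source.
 */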
static void tcg_out_extract(TCGContext *s, TCGType type, TCGReg rd,
                            TCGReg rs, unsigned pos, unsigned len)
{
    TCGOpcode opc = type == TCG_TYPE_I32 ?
                    INDEX_op_extract_i32 :
                    INDEX_op_extract_i64;
    tcg_out_op_rrbb(s, opc, rd, rs, pos, len);
}

static void tcg_out_sextract(TCGContext *s, TCGType type, TCGReg rd,
                             TCGReg rs, unsigned pos, unsigned len)
{
    TCGOpcode opc = type == TCG_TYPE_I32 ?
                    INDEX_op_sextract_i32 :
                    INDEX_op_sextract_i64;
    tcg_out_op_rrbb(s, opc, rd, rs, pos, len);
}

static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
{
    tcg_out_sextract(s, type, rd, rs, 0, 8);
}

static void tcg_out_ext8u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_extract(s, TCG_TYPE_REG, rd, rs, 0, 8);
}

static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
{
    tcg_out_sextract(s, type, rd, rs, 0, 16);
}

static void tcg_out_ext16u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_extract(s, TCG_TYPE_REG, rd, rs, 0, 16);
}

static void tcg_out_ext32s(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
    tcg_out_sextract(s, TCG_TYPE_I64, rd, rs, 0, 32);
}

static void tcg_out_ext32u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
    tcg_out_extract(s, TCG_TYPE_I64, rd, rs, 0, 32);
}

static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_ext32s(s, rd, rs);
}

static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_ext32u(s, rd, rs);
}

static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
    tcg_out_mov(s, TCG_TYPE_I32, rd, rs);
}

static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
{
    return false;
}

static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                             tcg_target_long imm)
{
    /* This function is only used for passing structs by reference. */
    g_assert_not_reached();
}

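/*
 * Emit a helper call.  The function pointer and its libffi cif are
 * stored as a pair in the constant pool; the 4-bit field after the
 * opcode encodes the size of the return value (0 for void, otherwise
 * log2(size) - 1, i.e. 1/2/3 for 4/8/16 bytes).
 */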
static void tcg_out_call(TCGContext *s, const tcg_insn_unit *func,
                         const TCGHelperInfo *info)
{
    ffi_cif *cif = info->cif;
    tcg_insn_unit insn = 0;
    uint8_t which;

    if (cif->rtype == &ffi_type_void) {
        which = 0;
    } else {
        tcg_debug_assert(cif->rtype->size == 4 ||
                         cif->rtype->size == 8 ||
                         cif->rtype->size == 16);
        which = ctz32(cif->rtype->size) - 1;
    }
    new_pool_l2(s, 20, s->code_ptr, 0, (uintptr_t)func, (uintptr_t)cif);
    insn = deposit32(insn, 0, 8, INDEX_op_call);
    insn = deposit32(insn, 8, 4, which);
    tcg_out32(s, insn);
}

#if TCG_TARGET_REG_BITS == 64
# define CASE_32_64(x) \
        case glue(glue(INDEX_op_, x), _i64): \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x) \
        case glue(glue(INDEX_op_, x), _i64):
#else
# define CASE_32_64(x) \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x)
#endif

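/*
 * TB exit and chaining.  TCI supports only the indirect-jump method:
 * goto_tb reloads its target from the jump table on every execution,
 * so tb_target_set_jmp_target below has nothing to patch.
 */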
static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg)
{
    tcg_out_op_p(s, INDEX_op_exit_tb, (void *)arg);
}

static void tcg_out_goto_tb(TCGContext *s, int which)
{
    /* indirect jump method. */
    tcg_out_op_p(s, INDEX_op_goto_tb, (void *)get_jmp_target_addr(s, which));
    set_jmp_reset_offset(s, which);
}

void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
                              uintptr_t jmp_rx, uintptr_t jmp_rw)
{
    /* Always indirect, nothing to do */
}

static void tgen_add(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_add, a0, a1, a2);
}

static const TCGOutOpBinary outop_add = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_add,
};

static void tgen_and(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_and, a0, a1, a2);
}

static const TCGOutOpBinary outop_and = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_and,
};

static void tgen_andc(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_andc, a0, a1, a2);
}

static const TCGOutOpBinary outop_andc = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_andc,
};

static void tgen_divs(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    TCGOpcode opc = (type == TCG_TYPE_I32
                     ? INDEX_op_tci_divs32
                     : INDEX_op_divs);
    tcg_out_op_rrr(s, opc, a0, a1, a2);
}

static const TCGOutOpBinary outop_divs = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_divs,
};

static void tgen_eqv(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_eqv, a0, a1, a2);
}

static const TCGOutOpBinary outop_eqv = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_eqv,
};

static void tgen_mul(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_mul, a0, a1, a2);
}

static const TCGOutOpBinary outop_mul = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_mul,
};

static const TCGOutOpBinary outop_mulsh = {
    .base.static_constraint = C_NotImplemented,
};

static const TCGOutOpBinary outop_muluh = {
    .base.static_constraint = C_NotImplemented,
};

static void tgen_nand(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_nand, a0, a1, a2);
}

static const TCGOutOpBinary outop_nand = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_nand,
};

static void tgen_nor(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_nor, a0, a1, a2);
}

static const TCGOutOpBinary outop_nor = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_nor,
};

static void tgen_or(TCGContext *s, TCGType type,
                    TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_or, a0, a1, a2);
}

static const TCGOutOpBinary outop_or = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_or,
};

static void tgen_orc(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_orc, a0, a1, a2);
}

static const TCGOutOpBinary outop_orc = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_orc,
};

static void tgen_sub(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_sub, a0, a1, a2);
}

static const TCGOutOpSubtract outop_sub = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_sub,
};

static void tgen_xor(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_xor, a0, a1, a2);
}

static const TCGOutOpBinary outop_xor = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_xor,
};

static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
    tcg_out_op_rr(s, INDEX_op_neg, a0, a1);
}

static const TCGOutOpUnary outop_neg = {
    .base.static_constraint = C_O1_I1(r, r),
    .out_rr = tgen_neg,
};

static void tgen_not(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
    tcg_out_op_rr(s, INDEX_op_not, a0, a1);
}

static const TCGOutOpUnary outop_not = {
    .base.static_constraint = C_O1_I1(r, r),
    .out_rr = tgen_not,
};

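/*
 * Central emitter for the remaining ops: dispatch on the opcode and
 * pack the TCGArg array into the matching instruction format above.
 */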
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS])
{
    int width;

    switch (opc) {
    case INDEX_op_goto_ptr:
        tcg_out_op_r(s, opc, args[0]);
        break;

    case INDEX_op_br:
        tcg_out_op_l(s, opc, arg_label(args[0]));
        break;

    CASE_32_64(setcond)
        tcg_out_op_rrrc(s, opc, args[0], args[1], args[2], args[3]);
        break;

    CASE_32_64(movcond)
    case INDEX_op_setcond2_i32:
        tcg_out_op_rrrrrc(s, opc, args[0], args[1], args[2],
                          args[3], args[4], args[5]);
        break;

    CASE_32_64(ld8u)
    CASE_32_64(ld8s)
    CASE_32_64(ld16u)
    CASE_32_64(ld16s)
    case INDEX_op_ld_i32:
    CASE_64(ld32u)
    CASE_64(ld32s)
    CASE_64(ld)
    CASE_32_64(st8)
    CASE_32_64(st16)
    case INDEX_op_st_i32:
    CASE_64(st32)
    CASE_64(st)
        tcg_out_ldst(s, opc, args[0], args[1], args[2]);
        break;

    CASE_32_64(shl)
    CASE_32_64(shr)
    CASE_32_64(sar)
    CASE_32_64(rotl)     /* Optional (TCG_TARGET_HAS_rot_*). */
    CASE_32_64(rotr)     /* Optional (TCG_TARGET_HAS_rot_*). */
    CASE_32_64(divu)     /* Optional (TCG_TARGET_HAS_div_*). */
    CASE_32_64(rem)      /* Optional (TCG_TARGET_HAS_div_*). */
    CASE_32_64(remu)     /* Optional (TCG_TARGET_HAS_div_*). */
    CASE_32_64(clz)      /* Optional (TCG_TARGET_HAS_clz_*). */
    CASE_32_64(ctz)      /* Optional (TCG_TARGET_HAS_ctz_*). */
        tcg_out_op_rrr(s, opc, args[0], args[1], args[2]);
        break;

    CASE_32_64(deposit)
        tcg_out_op_rrrbb(s, opc, args[0], args[1], args[2], args[3], args[4]);
        break;

    CASE_32_64(extract)  /* Optional (TCG_TARGET_HAS_extract_*). */
    CASE_32_64(sextract) /* Optional (TCG_TARGET_HAS_sextract_*). */
        tcg_out_op_rrbb(s, opc, args[0], args[1], args[2], args[3]);
        break;

    CASE_32_64(brcond)
        tcg_out_op_rrrc(s, (opc == INDEX_op_brcond_i32
                            ? INDEX_op_setcond_i32 : INDEX_op_setcond_i64),
                        TCG_REG_TMP, args[0], args[1], args[2]);
        tcg_out_op_rl(s, opc, TCG_REG_TMP, arg_label(args[3]));
        break;

    CASE_32_64(ctpop)    /* Optional (TCG_TARGET_HAS_ctpop_*). */
    case INDEX_op_bswap32_i32: /* Optional (TCG_TARGET_HAS_bswap32_i32). */
    case INDEX_op_bswap64_i64: /* Optional (TCG_TARGET_HAS_bswap64_i64). */
        tcg_out_op_rr(s, opc, args[0], args[1]);
        break;

    case INDEX_op_bswap16_i32: /* Optional (TCG_TARGET_HAS_bswap16_i32). */
    case INDEX_op_bswap16_i64: /* Optional (TCG_TARGET_HAS_bswap16_i64). */
        width = 16;
        goto do_bswap;
    case INDEX_op_bswap32_i64: /* Optional (TCG_TARGET_HAS_bswap32_i64). */
        width = 32;
    do_bswap:
        /* The base tci bswaps zero-extend, and ignore high bits. */
        tcg_out_op_rr(s, opc, args[0], args[1]);
        if (args[2] & TCG_BSWAP_OS) {
            tcg_out_sextract(s, TCG_TYPE_REG, args[0], args[0], 0, width);
        }
        break;

    CASE_32_64(add2)
    CASE_32_64(sub2)
        tcg_out_op_rrrrrr(s, opc, args[0], args[1], args[2],
                          args[3], args[4], args[5]);
        break;

#if TCG_TARGET_REG_BITS == 32
    case INDEX_op_brcond2_i32:
        tcg_out_op_rrrrrc(s, INDEX_op_setcond2_i32, TCG_REG_TMP,
                          args[0], args[1], args[2], args[3], args[4]);
        tcg_out_op_rl(s, INDEX_op_brcond_i32, TCG_REG_TMP, arg_label(args[5]));
        break;
#endif

    CASE_32_64(mulu2)
    CASE_32_64(muls2)
        tcg_out_op_rrrr(s, opc, args[0], args[1], args[2], args[3]);
        break;

    case INDEX_op_qemu_ld_i64:
    case INDEX_op_qemu_st_i64:
        if (TCG_TARGET_REG_BITS == 32) {
            tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP, args[3]);
            tcg_out_op_rrrr(s, opc, args[0], args[1], args[2], TCG_REG_TMP);
            break;
        }
        /* fall through */
    case INDEX_op_qemu_ld_i32:
    case INDEX_op_qemu_st_i32:
        if (TCG_TARGET_REG_BITS == 64 && s->addr_type == TCG_TYPE_I32) {
            tcg_out_ext32u(s, TCG_REG_TMP, args[1]);
            tcg_out_op_rrm(s, opc, args[0], TCG_REG_TMP, args[2]);
        } else {
            tcg_out_op_rrm(s, opc, args[0], args[1], args[2]);
        }
        break;

    case INDEX_op_mb:
        tcg_out_op_v(s, opc);
        break;

    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
    case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb.  */
    case INDEX_op_goto_tb:  /* Always emitted via tcg_out_goto_tb.  */
    case INDEX_op_ext_i32_i64:  /* Always emitted via tcg_reg_alloc_op.  */
    case INDEX_op_extu_i32_i64:
    case INDEX_op_extrl_i64_i32:
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_st(TCGContext *s, TCGType type, TCGReg val, TCGReg base,
                       intptr_t offset)
{
    switch (type) {
    case TCG_TYPE_I32:
        tcg_out_ldst(s, INDEX_op_st_i32, val, base, offset);
        break;
#if TCG_TARGET_REG_BITS == 64
    case TCG_TYPE_I64:
        tcg_out_ldst(s, INDEX_op_st_i64, val, base, offset);
        break;
#endif
    default:
        g_assert_not_reached();
    }
}

static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                               TCGReg base, intptr_t ofs)
{
    return false;
}

/* Test if a constant matches the constraint. */
static bool tcg_target_const_match(int64_t val, int ct,
                                   TCGType type, TCGCond cond, int vece)
{
    return ct & TCG_CT_CONST;
}

static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
    memset(p, 0, sizeof(*p) * count);
}

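/*
 * One-time backend setup: all sixteen registers are usable for both
 * 32-bit and 64-bit values, and only TCG_REG_TMP and the call stack
 * pointer are reserved from the allocator.
 */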
static void tcg_target_init(TCGContext *s)
{
    /* The current code uses uint8_t for tcg operations. */
    tcg_debug_assert(tcg_op_defs_max <= UINT8_MAX);

    /* Registers available for 32 bit operations. */
    tcg_target_available_regs[TCG_TYPE_I32] = BIT(TCG_TARGET_NB_REGS) - 1;
    /* Registers available for 64 bit operations. */
    tcg_target_available_regs[TCG_TYPE_I64] = BIT(TCG_TARGET_NB_REGS) - 1;
    /*
     * The interpreter "registers" are in the local stack frame and
     * cannot be clobbered by the called helper functions.  However,
     * the interpreter assumes a 128-bit return value and assigns to
     * the return value registers.
     */
    tcg_target_call_clobber_regs =
        MAKE_64BIT_MASK(TCG_REG_R0, 128 / TCG_TARGET_REG_BITS);

    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);

    /* The call arguments come first, followed by the temp storage. */
    tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE,
                  TCG_STATIC_FRAME_SIZE);
}

/* Generate global QEMU prologue and epilogue code. */
static inline void tcg_target_qemu_prologue(TCGContext *s)
{
}

static void tcg_out_tb_start(TCGContext *s)
{
    /* nothing to do */
}

bool tcg_target_has_memory_bswap(MemOp memop)
{
    return true;
}

static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    g_assert_not_reached();
}

static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    g_assert_not_reached();
}
