/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"

/* Define to dump the ELF file used to communicate with GDB.  */
#undef DEBUG_JIT

#include "qemu/error-report.h"
#include "qemu/cutils.h"
#include "qemu/host-utils.h"
#include "qemu/qemu-print.h"
#include "qemu/cacheflush.h"
#include "qemu/cacheinfo.h"
#include "qemu/timer.h"
#include "exec/translation-block.h"
#include "exec/tlb-common.h"
#include "tcg/startup.h"
#include "tcg/tcg-op-common.h"

#if UINTPTR_MAX == UINT32_MAX
# define ELF_CLASS  ELFCLASS32
#else
# define ELF_CLASS  ELFCLASS64
#endif
#if HOST_BIG_ENDIAN
# define ELF_DATA   ELFDATA2MSB
#else
# define ELF_DATA   ELFDATA2LSB
#endif

#include "elf.h"
#include "exec/log.h"
#include "tcg/tcg-ldst.h"
#include "tcg/tcg-temp-internal.h"
#include "tcg-internal.h"
#include "tcg/perf.h"
#ifdef CONFIG_USER_ONLY
#include "exec/user/guest-base.h"
#endif

/* Forward declarations for functions declared in tcg-target.c.inc and
   used here. */
static void tcg_target_init(TCGContext *s);
static void tcg_target_qemu_prologue(TCGContext *s);
static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend);

/* The CIE and FDE header definitions will be common to all hosts.  */
typedef struct {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t id;
    uint8_t version;
    char augmentation[1];
    uint8_t code_align;
    uint8_t data_align;
    uint8_t return_column;
} DebugFrameCIE;

typedef struct QEMU_PACKED {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t cie_offset;
    uintptr_t func_start;
    uintptr_t func_len;
} DebugFrameFDEHeader;

typedef struct QEMU_PACKED {
    DebugFrameCIE cie;
    DebugFrameFDEHeader fde;
} DebugFrameHeader;

typedef struct TCGLabelQemuLdst {
    bool is_ld;             /* qemu_ld: true, qemu_st: false */
    MemOpIdx oi;
    TCGType type;           /* result type of a load */
    TCGReg addrlo_reg;      /* reg index for low word of guest virtual addr */
    TCGReg addrhi_reg;      /* reg index for high word of guest virtual addr */
    TCGReg datalo_reg;      /* reg index for low word to be loaded or stored */
    TCGReg datahi_reg;      /* reg index for high word to be loaded or stored */
    const tcg_insn_unit *raddr;   /* return addr: host code following the qemu_ld/st */
    tcg_insn_unit *label_ptr[2]; /* label pointers to be updated */
    QSIMPLEQ_ENTRY(TCGLabelQemuLdst) next;
} TCGLabelQemuLdst;

static void tcg_register_jit_int(const void *buf, size_t size,
                                 const void *debug_frame,
                                 size_t debug_frame_size)
    __attribute__((unused));

/* Forward declarations for functions declared and used in tcg-target.c.inc. */
static void tcg_out_tb_start(TCGContext *s);
static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1,
                       intptr_t arg2);
static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg);
static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
static void tcg_out_ext8u(TCGContext *s, TCGReg ret, TCGReg arg);
static void tcg_out_ext16u(TCGContext *s, TCGReg ret, TCGReg arg);
static void tcg_out_ext32s(TCGContext *s, TCGReg ret, TCGReg arg);
static void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg);
static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg);
static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg);
static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg ret, TCGReg arg);
static void tcg_out_addi_ptr(TCGContext *s, TCGReg, TCGReg, tcg_target_long);
static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2);
static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg);
static void tcg_out_goto_tb(TCGContext *s, int which);
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS]);
#if TCG_TARGET_MAYBE_vec
static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
                            TCGReg dst, TCGReg src);
static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg dst, TCGReg base, intptr_t offset);
static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg dst, int64_t arg);
static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
                           unsigned vecl, unsigned vece,
                           const TCGArg args[TCG_MAX_OP_ARGS],
                           const int const_args[TCG_MAX_OP_ARGS]);
#else
static inline bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
                                   TCGReg dst, TCGReg src)
{
    g_assert_not_reached();
}
static inline bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
                                    TCGReg dst, TCGReg base, intptr_t offset)
{
    g_assert_not_reached();
}
static inline void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
                                    TCGReg dst, int64_t arg)
{
    g_assert_not_reached();
}
static inline void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
                                  unsigned vecl, unsigned vece,
                                  const TCGArg args[TCG_MAX_OP_ARGS],
                                  const int const_args[TCG_MAX_OP_ARGS])
{
    g_assert_not_reached();
}
#endif
static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1,
                       intptr_t arg2);
static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                        TCGReg base, intptr_t ofs);
static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target,
                         const TCGHelperInfo *info);
static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot);
static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece);
#ifdef TCG_TARGET_NEED_LDST_LABELS
static int tcg_out_ldst_finalize(TCGContext *s);
#endif

#ifndef CONFIG_USER_ONLY
#define guest_base  ({ qemu_build_not_reached(); (uintptr_t)0; })
#endif

typedef struct TCGLdstHelperParam {
    TCGReg (*ra_gen)(TCGContext *s, const TCGLabelQemuLdst *l, int arg_reg);
    unsigned ntmp;
    int tmp[3];
} TCGLdstHelperParam;

static void tcg_out_ld_helper_args(TCGContext *s, const TCGLabelQemuLdst *l,
                                   const TCGLdstHelperParam *p)
    __attribute__((unused));
static void tcg_out_ld_helper_ret(TCGContext *s, const TCGLabelQemuLdst *l,
                                  bool load_sign, const TCGLdstHelperParam *p)
    __attribute__((unused));
static void tcg_out_st_helper_args(TCGContext *s, const TCGLabelQemuLdst *l,
                                   const TCGLdstHelperParam *p)
    __attribute__((unused));

static void * const qemu_ld_helpers[MO_SSIZE + 1] __attribute__((unused)) = {
    [MO_UB] = helper_ldub_mmu,
    [MO_SB] = helper_ldsb_mmu,
    [MO_UW] = helper_lduw_mmu,
    [MO_SW] = helper_ldsw_mmu,
    [MO_UL] = helper_ldul_mmu,
    [MO_UQ] = helper_ldq_mmu,
#if TCG_TARGET_REG_BITS == 64
    [MO_SL] = helper_ldsl_mmu,
    [MO_128] = helper_ld16_mmu,
#endif
};

static void * const qemu_st_helpers[MO_SIZE + 1] __attribute__((unused)) = {
    [MO_8]  = helper_stb_mmu,
    [MO_16] = helper_stw_mmu,
    [MO_32] = helper_stl_mmu,
    [MO_64] = helper_stq_mmu,
#if TCG_TARGET_REG_BITS == 64
    [MO_128] = helper_st16_mmu,
#endif
};
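
/*
 * Usage sketch (assumed, mirroring typical tcg-target.c.inc code): the
 * qemu_ld/st slow paths index these tables with the size/sign bits of
 * the MemOp, e.g.
 *
 *     tcg_out_call(s, qemu_ld_helpers[opc & MO_SSIZE], ...);
 *     tcg_out_call(s, qemu_st_helpers[opc & MO_SIZE], ...);
 */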

typedef struct {
    MemOp atom;   /* lg2 bits of atomicity required */
    MemOp align;  /* lg2 bits of alignment to use */
} TCGAtomAlign;

static TCGAtomAlign atom_and_align_for_opc(TCGContext *s, MemOp opc,
                                           MemOp host_atom, bool allow_two_ops)
    __attribute__((unused));

#ifdef CONFIG_USER_ONLY
bool tcg_use_softmmu;
#endif

TCGContext tcg_init_ctx;
__thread TCGContext *tcg_ctx;

TCGContext **tcg_ctxs;
unsigned int tcg_cur_ctxs;
unsigned int tcg_max_ctxs;
TCGv_env tcg_env;
const void *tcg_code_gen_epilogue;
uintptr_t tcg_splitwx_diff;

#ifndef CONFIG_TCG_INTERPRETER
tcg_prologue_fn *tcg_qemu_tb_exec;
#endif

static TCGRegSet tcg_target_available_regs[TCG_TYPE_COUNT];
static TCGRegSet tcg_target_call_clobber_regs;

#if TCG_TARGET_INSN_UNIT_SIZE == 1
static __attribute__((unused)) inline void tcg_out8(TCGContext *s, uint8_t v)
{
    *s->code_ptr++ = v;
}

static __attribute__((unused)) inline void tcg_patch8(tcg_insn_unit *p,
                                                      uint8_t v)
{
    *p = v;
}
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 2
static __attribute__((unused)) inline void tcg_out16(TCGContext *s, uint16_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (2 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch16(tcg_insn_unit *p,
                                                       uint16_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 4
static __attribute__((unused)) inline void tcg_out32(TCGContext *s, uint32_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (4 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch32(tcg_insn_unit *p,
                                                       uint32_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 8
static __attribute__((unused)) inline void tcg_out64(TCGContext *s, uint64_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (8 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch64(tcg_insn_unit *p,
                                                       uint64_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif
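
/*
 * Illustrative sketch (host parameters assumed): these emitters keep
 * s->code_ptr measured in tcg_insn_unit, not bytes.  With a byte-stream
 * host (TCG_TARGET_INSN_UNIT_SIZE == 1), the sequence
 *
 *     tcg_out32(s, insn_word);    advances code_ptr by 4 units
 *     tcg_out16(s, imm16);        advances code_ptr by 2 more
 *
 * while a fixed-width host with unit size 4 stores insn_word as a
 * single unit and advances code_ptr by 1.
 */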

/* label relocation processing */

static void tcg_out_reloc(TCGContext *s, tcg_insn_unit *code_ptr, int type,
                          TCGLabel *l, intptr_t addend)
{
    TCGRelocation *r = tcg_malloc(sizeof(TCGRelocation));

    r->type = type;
    r->ptr = code_ptr;
    r->addend = addend;
    QSIMPLEQ_INSERT_TAIL(&l->relocs, r, next);
}

static void tcg_out_label(TCGContext *s, TCGLabel *l)
{
    tcg_debug_assert(!l->has_value);
    l->has_value = 1;
    l->u.value_ptr = tcg_splitwx_to_rx(s->code_ptr);
}

TCGLabel *gen_new_label(void)
{
    TCGContext *s = tcg_ctx;
    TCGLabel *l = tcg_malloc(sizeof(TCGLabel));

    memset(l, 0, sizeof(TCGLabel));
    l->id = s->nb_labels++;
    QSIMPLEQ_INIT(&l->branches);
    QSIMPLEQ_INIT(&l->relocs);

    QSIMPLEQ_INSERT_TAIL(&s->labels, l, next);

    return l;
}

static bool tcg_resolve_relocs(TCGContext *s)
{
    TCGLabel *l;

    QSIMPLEQ_FOREACH(l, &s->labels, next) {
        TCGRelocation *r;
        uintptr_t value = l->u.value;

        QSIMPLEQ_FOREACH(r, &l->relocs, next) {
            if (!patch_reloc(r->ptr, r->type, value, r->addend)) {
                return false;
            }
        }
    }
    return true;
}
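
/*
 * Sketch of the label life-cycle implemented above (assumed backend usage;
 * R_SOME_TYPE stands in for a host-specific relocation constant):
 *
 *     TCGLabel *l = gen_new_label();
 *     tcg_out_reloc(s, s->code_ptr, R_SOME_TYPE, l, 0);   record branch site
 *     ...emit the forward branch and more code...
 *     tcg_out_label(s, l);         bind l to the current output position
 *
 * tcg_resolve_relocs() then walks every label and patches each recorded
 * site via patch_reloc(), failing the TB if a displacement is out of range.
 */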

static void set_jmp_reset_offset(TCGContext *s, int which)
{
    /*
     * We will check for overflow at the end of the opcode loop in
     * tcg_gen_code, where we bound tcg_current_code_size to UINT16_MAX.
     */
    s->gen_tb->jmp_reset_offset[which] = tcg_current_code_size(s);
}

static void G_GNUC_UNUSED set_jmp_insn_offset(TCGContext *s, int which)
{
    /*
     * We will check for overflow at the end of the opcode loop in
     * tcg_gen_code, where we bound tcg_current_code_size to UINT16_MAX.
     */
    s->gen_tb->jmp_insn_offset[which] = tcg_current_code_size(s);
}

static uintptr_t G_GNUC_UNUSED get_jmp_target_addr(TCGContext *s, int which)
{
    /*
     * Return the read-execute version of the pointer, for the benefit
     * of any pc-relative addressing mode.
     */
    return (uintptr_t)tcg_splitwx_to_rx(&s->gen_tb->jmp_target_addr[which]);
}

static int __attribute__((unused))
tlb_mask_table_ofs(TCGContext *s, int which)
{
    return (offsetof(CPUNegativeOffsetState, tlb.f[which]) -
            sizeof(CPUNegativeOffsetState));
}

/* Signal overflow, starting over with fewer guest insns. */
static G_NORETURN
void tcg_raise_tb_overflow(TCGContext *s)
{
    siglongjmp(s->jmp_trans, -2);
}

/*
 * Used by tcg_out_movext{1,2} to hold the arguments for tcg_out_movext.
 * By the time we arrive at tcg_out_movext1, @dst is always a TCGReg.
 *
 * However, tcg_out_helper_load_slots reuses this field to hold an
 * argument slot number (which may designate an argument register or an
 * argument stack slot), converting to TCGReg once all arguments that
 * are destined for the stack are processed.
 */
typedef struct TCGMovExtend {
    unsigned dst;
    TCGReg src;
    TCGType dst_type;
    TCGType src_type;
    MemOp src_ext;
} TCGMovExtend;

/**
 * tcg_out_movext -- move and extend
 * @s: tcg context
 * @dst_type: integral type for destination
 * @dst: destination register
 * @src_type: integral type for source
 * @src_ext: extension to apply to source
 * @src: source register
 *
 * Move or extend @src into @dst, depending on @src_ext and the types.
 */
static void tcg_out_movext(TCGContext *s, TCGType dst_type, TCGReg dst,
                           TCGType src_type, MemOp src_ext, TCGReg src)
{
    switch (src_ext) {
    case MO_UB:
        tcg_out_ext8u(s, dst, src);
        break;
    case MO_SB:
        tcg_out_ext8s(s, dst_type, dst, src);
        break;
    case MO_UW:
        tcg_out_ext16u(s, dst, src);
        break;
    case MO_SW:
        tcg_out_ext16s(s, dst_type, dst, src);
        break;
    case MO_UL:
    case MO_SL:
        if (dst_type == TCG_TYPE_I32) {
            if (src_type == TCG_TYPE_I32) {
                tcg_out_mov(s, TCG_TYPE_I32, dst, src);
            } else {
                tcg_out_extrl_i64_i32(s, dst, src);
            }
        } else if (src_type == TCG_TYPE_I32) {
            if (src_ext & MO_SIGN) {
                tcg_out_exts_i32_i64(s, dst, src);
            } else {
                tcg_out_extu_i32_i64(s, dst, src);
            }
        } else {
            if (src_ext & MO_SIGN) {
                tcg_out_ext32s(s, dst, src);
            } else {
                tcg_out_ext32u(s, dst, src);
            }
        }
        break;
    case MO_UQ:
        tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
        if (dst_type == TCG_TYPE_I32) {
            tcg_out_extrl_i64_i32(s, dst, src);
        } else {
            tcg_out_mov(s, TCG_TYPE_I64, dst, src);
        }
        break;
    default:
        g_assert_not_reached();
    }
}
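
/*
 * Example (a sketch, registers assumed): sign-extending a 32-bit value in
 * @src into a 64-bit @dst is
 *
 *     tcg_out_movext(s, TCG_TYPE_I64, dst, TCG_TYPE_I32, MO_SL, src);
 *
 * which lands in the MO_UL/MO_SL case above and emits via
 * tcg_out_exts_i32_i64().
 */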

/* Minor variations on a theme, using a structure. */
static void tcg_out_movext1_new_src(TCGContext *s, const TCGMovExtend *i,
                                    TCGReg src)
{
    tcg_out_movext(s, i->dst_type, i->dst, i->src_type, i->src_ext, src);
}

static void tcg_out_movext1(TCGContext *s, const TCGMovExtend *i)
{
    tcg_out_movext1_new_src(s, i, i->src);
}

/**
 * tcg_out_movext2 -- move and extend two pairs
 * @s: tcg context
 * @i1: first move description
 * @i2: second move description
 * @scratch: temporary register, or -1 for none
 *
 * As tcg_out_movext, for both @i1 and @i2, caring for overlap
 * between the sources and destinations.
 */

static void tcg_out_movext2(TCGContext *s, const TCGMovExtend *i1,
                            const TCGMovExtend *i2, int scratch)
{
    TCGReg src1 = i1->src;
    TCGReg src2 = i2->src;

    if (i1->dst != src2) {
        tcg_out_movext1(s, i1);
        tcg_out_movext1(s, i2);
        return;
    }
    if (i2->dst == src1) {
        TCGType src1_type = i1->src_type;
        TCGType src2_type = i2->src_type;

        if (tcg_out_xchg(s, MAX(src1_type, src2_type), src1, src2)) {
            /* The data is now in the correct registers, now extend. */
            src1 = i2->src;
            src2 = i1->src;
        } else {
            tcg_debug_assert(scratch >= 0);
            tcg_out_mov(s, src1_type, scratch, src1);
            src1 = scratch;
        }
    }
    tcg_out_movext1_new_src(s, i2, src2);
    tcg_out_movext1_new_src(s, i1, src1);
}
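
/*
 * Worked example (assumed registers A and B): if @i1 moves A -> B while
 * @i2 moves B -> A, then i1->dst == src2 and i2->dst == src1, so the code
 * above either exchanges A and B and then extends each value in place,
 * or, on a host without xchg, routes src1 through @scratch first.
 */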

/**
 * tcg_out_movext3 -- move and extend three pairs
 * @s: tcg context
 * @i1: first move description
 * @i2: second move description
 * @i3: third move description
 * @scratch: temporary register, or -1 for none
 *
 * As tcg_out_movext, for all of @i1, @i2 and @i3, caring for overlap
 * between the sources and destinations.
 */

static void tcg_out_movext3(TCGContext *s, const TCGMovExtend *i1,
                            const TCGMovExtend *i2, const TCGMovExtend *i3,
                            int scratch)
{
    TCGReg src1 = i1->src;
    TCGReg src2 = i2->src;
    TCGReg src3 = i3->src;

    if (i1->dst != src2 && i1->dst != src3) {
        tcg_out_movext1(s, i1);
        tcg_out_movext2(s, i2, i3, scratch);
        return;
    }
    if (i2->dst != src1 && i2->dst != src3) {
        tcg_out_movext1(s, i2);
        tcg_out_movext2(s, i1, i3, scratch);
        return;
    }
    if (i3->dst != src1 && i3->dst != src2) {
        tcg_out_movext1(s, i3);
        tcg_out_movext2(s, i1, i2, scratch);
        return;
    }

    /*
     * There is a cycle.  Since there are only 3 nodes, the cycle is
     * either "clockwise" or "anti-clockwise", and can be solved with
     * a single scratch or two xchg.
     */
    if (i1->dst == src2 && i2->dst == src3 && i3->dst == src1) {
        /* "Clockwise" */
        if (tcg_out_xchg(s, MAX(i1->src_type, i2->src_type), src1, src2)) {
            tcg_out_xchg(s, MAX(i2->src_type, i3->src_type), src2, src3);
            /* The data is now in the correct registers, now extend. */
            tcg_out_movext1_new_src(s, i1, i1->dst);
            tcg_out_movext1_new_src(s, i2, i2->dst);
            tcg_out_movext1_new_src(s, i3, i3->dst);
        } else {
            tcg_debug_assert(scratch >= 0);
            tcg_out_mov(s, i1->src_type, scratch, src1);
            tcg_out_movext1(s, i3);
            tcg_out_movext1(s, i2);
            tcg_out_movext1_new_src(s, i1, scratch);
        }
    } else if (i1->dst == src3 && i2->dst == src1 && i3->dst == src2) {
        /* "Anti-clockwise" */
        if (tcg_out_xchg(s, MAX(i2->src_type, i3->src_type), src2, src3)) {
            tcg_out_xchg(s, MAX(i1->src_type, i2->src_type), src1, src2);
            /* The data is now in the correct registers, now extend. */
            tcg_out_movext1_new_src(s, i1, i1->dst);
            tcg_out_movext1_new_src(s, i2, i2->dst);
            tcg_out_movext1_new_src(s, i3, i3->dst);
        } else {
            tcg_debug_assert(scratch >= 0);
            tcg_out_mov(s, i1->src_type, scratch, src1);
            tcg_out_movext1(s, i2);
            tcg_out_movext1(s, i3);
            tcg_out_movext1_new_src(s, i1, scratch);
        }
    } else {
        g_assert_not_reached();
    }
}

#define C_PFX1(P, A)                    P##A
#define C_PFX2(P, A, B)                 P##A##_##B
#define C_PFX3(P, A, B, C)              P##A##_##B##_##C
#define C_PFX4(P, A, B, C, D)           P##A##_##B##_##C##_##D
#define C_PFX5(P, A, B, C, D, E)        P##A##_##B##_##C##_##D##_##E
#define C_PFX6(P, A, B, C, D, E, F)     P##A##_##B##_##C##_##D##_##E##_##F

/* Define an enumeration for the various combinations. */

#define C_O0_I1(I1)                     C_PFX1(c_o0_i1_, I1),
#define C_O0_I2(I1, I2)                 C_PFX2(c_o0_i2_, I1, I2),
#define C_O0_I3(I1, I2, I3)             C_PFX3(c_o0_i3_, I1, I2, I3),
#define C_O0_I4(I1, I2, I3, I4)         C_PFX4(c_o0_i4_, I1, I2, I3, I4),

#define C_O1_I1(O1, I1)                 C_PFX2(c_o1_i1_, O1, I1),
#define C_O1_I2(O1, I1, I2)             C_PFX3(c_o1_i2_, O1, I1, I2),
#define C_O1_I3(O1, I1, I2, I3)         C_PFX4(c_o1_i3_, O1, I1, I2, I3),
#define C_O1_I4(O1, I1, I2, I3, I4)     C_PFX5(c_o1_i4_, O1, I1, I2, I3, I4),

#define C_N1_I2(O1, I1, I2)             C_PFX3(c_n1_i2_, O1, I1, I2),
#define C_N1O1_I1(O1, O2, I1)           C_PFX3(c_n1o1_i1_, O1, O2, I1),
#define C_N2_I1(O1, O2, I1)             C_PFX3(c_n2_i1_, O1, O2, I1),

#define C_O2_I1(O1, O2, I1)             C_PFX3(c_o2_i1_, O1, O2, I1),
#define C_O2_I2(O1, O2, I1, I2)         C_PFX4(c_o2_i2_, O1, O2, I1, I2),
#define C_O2_I3(O1, O2, I1, I2, I3)     C_PFX5(c_o2_i3_, O1, O2, I1, I2, I3),
#define C_O2_I4(O1, O2, I1, I2, I3, I4) C_PFX6(c_o2_i4_, O1, O2, I1, I2, I3, I4),
#define C_N1_O1_I4(O1, O2, I1, I2, I3, I4) C_PFX6(c_n1_o1_i4_, O1, O2, I1, I2, I3, I4),

typedef enum {
#include "tcg-target-con-set.h"
} TCGConstraintSetIndex;

static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode);

#undef C_O0_I1
#undef C_O0_I2
#undef C_O0_I3
#undef C_O0_I4
#undef C_O1_I1
#undef C_O1_I2
#undef C_O1_I3
#undef C_O1_I4
#undef C_N1_I2
#undef C_N1O1_I1
#undef C_N2_I1
#undef C_O2_I1
#undef C_O2_I2
#undef C_O2_I3
#undef C_O2_I4
#undef C_N1_O1_I4

/* Put all of the constraint sets into an array, indexed by the enum. */

#define C_O0_I1(I1)                     { .args_ct_str = { #I1 } },
#define C_O0_I2(I1, I2)                 { .args_ct_str = { #I1, #I2 } },
#define C_O0_I3(I1, I2, I3)             { .args_ct_str = { #I1, #I2, #I3 } },
#define C_O0_I4(I1, I2, I3, I4)         { .args_ct_str = { #I1, #I2, #I3, #I4 } },

#define C_O1_I1(O1, I1)                 { .args_ct_str = { #O1, #I1 } },
#define C_O1_I2(O1, I1, I2)             { .args_ct_str = { #O1, #I1, #I2 } },
#define C_O1_I3(O1, I1, I2, I3)         { .args_ct_str = { #O1, #I1, #I2, #I3 } },
#define C_O1_I4(O1, I1, I2, I3, I4)     { .args_ct_str = { #O1, #I1, #I2, #I3, #I4 } },

#define C_N1_I2(O1, I1, I2)             { .args_ct_str = { "&" #O1, #I1, #I2 } },
#define C_N1O1_I1(O1, O2, I1)           { .args_ct_str = { "&" #O1, #O2, #I1 } },
#define C_N2_I1(O1, O2, I1)             { .args_ct_str = { "&" #O1, "&" #O2, #I1 } },

#define C_O2_I1(O1, O2, I1)             { .args_ct_str = { #O1, #O2, #I1 } },
#define C_O2_I2(O1, O2, I1, I2)         { .args_ct_str = { #O1, #O2, #I1, #I2 } },
#define C_O2_I3(O1, O2, I1, I2, I3)     { .args_ct_str = { #O1, #O2, #I1, #I2, #I3 } },
#define C_O2_I4(O1, O2, I1, I2, I3, I4) { .args_ct_str = { #O1, #O2, #I1, #I2, #I3, #I4 } },
#define C_N1_O1_I4(O1, O2, I1, I2, I3, I4) { .args_ct_str = { "&" #O1, #O2, #I1, #I2, #I3, #I4 } },

static const TCGTargetOpDef constraint_sets[] = {
#include "tcg-target-con-set.h"
};


#undef C_O0_I1
#undef C_O0_I2
#undef C_O0_I3
#undef C_O0_I4
#undef C_O1_I1
#undef C_O1_I2
#undef C_O1_I3
#undef C_O1_I4
#undef C_N1_I2
#undef C_N1O1_I1
#undef C_N2_I1
#undef C_O2_I1
#undef C_O2_I2
#undef C_O2_I3
#undef C_O2_I4
#undef C_N1_O1_I4

/* Expand the enumerator to be returned from tcg_target_op_def(). */

#define C_O0_I1(I1)                     C_PFX1(c_o0_i1_, I1)
#define C_O0_I2(I1, I2)                 C_PFX2(c_o0_i2_, I1, I2)
#define C_O0_I3(I1, I2, I3)             C_PFX3(c_o0_i3_, I1, I2, I3)
#define C_O0_I4(I1, I2, I3, I4)         C_PFX4(c_o0_i4_, I1, I2, I3, I4)

#define C_O1_I1(O1, I1)                 C_PFX2(c_o1_i1_, O1, I1)
#define C_O1_I2(O1, I1, I2)             C_PFX3(c_o1_i2_, O1, I1, I2)
#define C_O1_I3(O1, I1, I2, I3)         C_PFX4(c_o1_i3_, O1, I1, I2, I3)
#define C_O1_I4(O1, I1, I2, I3, I4)     C_PFX5(c_o1_i4_, O1, I1, I2, I3, I4)

#define C_N1_I2(O1, I1, I2)             C_PFX3(c_n1_i2_, O1, I1, I2)
#define C_N1O1_I1(O1, O2, I1)           C_PFX3(c_n1o1_i1_, O1, O2, I1)
#define C_N2_I1(O1, O2, I1)             C_PFX3(c_n2_i1_, O1, O2, I1)

#define C_O2_I1(O1, O2, I1)             C_PFX3(c_o2_i1_, O1, O2, I1)
#define C_O2_I2(O1, O2, I1, I2)         C_PFX4(c_o2_i2_, O1, O2, I1, I2)
#define C_O2_I3(O1, O2, I1, I2, I3)     C_PFX5(c_o2_i3_, O1, O2, I1, I2, I3)
#define C_O2_I4(O1, O2, I1, I2, I3, I4) C_PFX6(c_o2_i4_, O1, O2, I1, I2, I3, I4)
#define C_N1_O1_I4(O1, O2, I1, I2, I3, I4) C_PFX6(c_n1_o1_i4_, O1, O2, I1, I2, I3, I4)

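/*
 * Illustrative expansion (a sketch): a constraint set declared in
 * tcg-target-con-set.h as
 *
 *     C_O1_I2(r, r, ri)
 *
 * becomes the enumerator c_o1_i2_r_r_ri on the first include above, then
 * the array entry { .args_ct_str = { "r", "r", "ri" } } in
 * constraint_sets[], and finally, with the definitions just above, the
 * same enumerator again as a value returned by tcg_target_op_def().
 */
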
#include "tcg-target.c.inc"

#ifndef CONFIG_TCG_INTERPRETER
/* Validate CPUTLBDescFast placement. */
QEMU_BUILD_BUG_ON((int)(offsetof(CPUNegativeOffsetState, tlb.f[0]) -
                        sizeof(CPUNegativeOffsetState))
                  < MIN_TLB_MASK_TABLE_OFS);
#endif

static void alloc_tcg_plugin_context(TCGContext *s)
{
#ifdef CONFIG_PLUGIN
    s->plugin_tb = g_new0(struct qemu_plugin_tb, 1);
    s->plugin_tb->insns =
        g_ptr_array_new_with_free_func(qemu_plugin_insn_cleanup_fn);
#endif
}

/*
 * All TCG threads except the parent (i.e. the one that called tcg_context_init
 * and registered the target's TCG globals) must register with this function
 * before initiating translation.
 *
 * In user-mode we just point tcg_ctx to tcg_init_ctx. See the documentation
 * of tcg_region_init() for the reasoning behind this.
 *
 * In system-mode each caller registers its context in tcg_ctxs[]. Note that in
 * system-mode tcg_ctxs[] does not track tcg_init_ctx, since the initial context
 * is not used anymore for translation once this function is called.
 *
 * Not tracking tcg_init_ctx in tcg_ctxs[] in system-mode keeps code that
 * iterates over the array (e.g. tcg_code_size()) the same for both system/user
 * modes.
 */
#ifdef CONFIG_USER_ONLY
void tcg_register_thread(void)
{
    tcg_ctx = &tcg_init_ctx;
}
#else
void tcg_register_thread(void)
{
    TCGContext *s = g_malloc(sizeof(*s));
    unsigned int i, n;

    *s = tcg_init_ctx;

    /* Relink mem_base.  */
    for (i = 0, n = tcg_init_ctx.nb_globals; i < n; ++i) {
        if (tcg_init_ctx.temps[i].mem_base) {
            ptrdiff_t b = tcg_init_ctx.temps[i].mem_base - tcg_init_ctx.temps;
            tcg_debug_assert(b >= 0 && b < n);
            s->temps[i].mem_base = &s->temps[b];
        }
    }

    /* Claim an entry in tcg_ctxs */
    n = qatomic_fetch_inc(&tcg_cur_ctxs);
    g_assert(n < tcg_max_ctxs);
    qatomic_set(&tcg_ctxs[n], s);

    if (n > 0) {
        alloc_tcg_plugin_context(s);
        tcg_region_initial_alloc(s);
    }

    tcg_ctx = s;
}
#endif /* !CONFIG_USER_ONLY */

/* pool based memory allocation */
void *tcg_malloc_internal(TCGContext *s, int size)
{
    TCGPool *p;
    int pool_size;

    if (size > TCG_POOL_CHUNK_SIZE) {
        /* big malloc: insert a new pool (XXX: could optimize) */
        p = g_malloc(sizeof(TCGPool) + size);
        p->size = size;
        p->next = s->pool_first_large;
        s->pool_first_large = p;
        return p->data;
    } else {
        p = s->pool_current;
        if (!p) {
            p = s->pool_first;
            if (!p) {
                goto new_pool;
            }
        } else {
            if (!p->next) {
            new_pool:
                pool_size = TCG_POOL_CHUNK_SIZE;
                p = g_malloc(sizeof(TCGPool) + pool_size);
                p->size = pool_size;
                p->next = NULL;
                if (s->pool_current) {
                    s->pool_current->next = p;
                } else {
                    s->pool_first = p;
                }
            } else {
                p = p->next;
            }
        }
    }
    s->pool_current = p;
    s->pool_cur = p->data + size;
    s->pool_end = p->data + p->size;
    return p->data;
}

void tcg_pool_reset(TCGContext *s)
{
    TCGPool *p, *t;
    for (p = s->pool_first_large; p; p = t) {
        t = p->next;
        g_free(p);
    }
    s->pool_first_large = NULL;
    s->pool_cur = s->pool_end = NULL;
    s->pool_current = NULL;
}
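
/*
 * Usage sketch: short-lived translation data, such as the TCGRelocation
 * records created in tcg_out_reloc() above, is carved from this pool via
 * tcg_malloc() and never freed individually:
 *
 *     TCGRelocation *r = tcg_malloc(sizeof(TCGRelocation));
 *
 * tcg_pool_reset() then releases only the oversized chunks; the regular
 * TCG_POOL_CHUNK_SIZE pools are kept and rewound for the next translation.
 */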

/*
 * Create TCGHelperInfo structures for "tcg/tcg-ldst.h" functions,
 * akin to what "exec/helper-tcg.h" does with DEF_HELPER_FLAGS_N.
 * We only use these for layout in tcg_out_ld_helper_ret and
 * tcg_out_st_helper_args, and share them between several of
 * the helpers, with the end result that it's easier to build manually.
 */

#if TCG_TARGET_REG_BITS == 32
# define dh_typecode_ttl  dh_typecode_i32
#else
# define dh_typecode_ttl  dh_typecode_i64
#endif

static TCGHelperInfo info_helper_ld32_mmu = {
    .flags = TCG_CALL_NO_WG,
    .typemask = dh_typemask(ttl, 0)  /* return tcg_target_ulong */
              | dh_typemask(env, 1)
              | dh_typemask(i64, 2)  /* uint64_t addr */
              | dh_typemask(i32, 3)  /* unsigned oi */
              | dh_typemask(ptr, 4)  /* uintptr_t ra */
};

static TCGHelperInfo info_helper_ld64_mmu = {
    .flags = TCG_CALL_NO_WG,
    .typemask = dh_typemask(i64, 0)  /* return uint64_t */
              | dh_typemask(env, 1)
              | dh_typemask(i64, 2)  /* uint64_t addr */
              | dh_typemask(i32, 3)  /* unsigned oi */
              | dh_typemask(ptr, 4)  /* uintptr_t ra */
};

static TCGHelperInfo info_helper_ld128_mmu = {
    .flags = TCG_CALL_NO_WG,
    .typemask = dh_typemask(i128, 0) /* return Int128 */
              | dh_typemask(env, 1)
              | dh_typemask(i64, 2)  /* uint64_t addr */
              | dh_typemask(i32, 3)  /* unsigned oi */
              | dh_typemask(ptr, 4)  /* uintptr_t ra */
};

static TCGHelperInfo info_helper_st32_mmu = {
    .flags = TCG_CALL_NO_WG,
    .typemask = dh_typemask(void, 0)
              | dh_typemask(env, 1)
              | dh_typemask(i64, 2)  /* uint64_t addr */
              | dh_typemask(i32, 3)  /* uint32_t data */
              | dh_typemask(i32, 4)  /* unsigned oi */
              | dh_typemask(ptr, 5)  /* uintptr_t ra */
};

static TCGHelperInfo info_helper_st64_mmu = {
    .flags = TCG_CALL_NO_WG,
    .typemask = dh_typemask(void, 0)
              | dh_typemask(env, 1)
              | dh_typemask(i64, 2)  /* uint64_t addr */
              | dh_typemask(i64, 3)  /* uint64_t data */
              | dh_typemask(i32, 4)  /* unsigned oi */
              | dh_typemask(ptr, 5)  /* uintptr_t ra */
};

static TCGHelperInfo info_helper_st128_mmu = {
    .flags = TCG_CALL_NO_WG,
    .typemask = dh_typemask(void, 0)
              | dh_typemask(env, 1)
              | dh_typemask(i64, 2)  /* uint64_t addr */
              | dh_typemask(i128, 3) /* Int128 data */
              | dh_typemask(i32, 4)  /* unsigned oi */
              | dh_typemask(ptr, 5)  /* uintptr_t ra */
};

#ifdef CONFIG_TCG_INTERPRETER
static ffi_type *typecode_to_ffi(int argmask)
{
    /*
     * libffi does not support __int128_t, so we have forced Int128
     * to use the structure definition instead of the builtin type.
     */
    static ffi_type *ffi_type_i128_elements[3] = {
        &ffi_type_uint64,
        &ffi_type_uint64,
        NULL
    };
    static ffi_type ffi_type_i128 = {
        .size = 16,
        .alignment = __alignof__(Int128),
        .type = FFI_TYPE_STRUCT,
        .elements = ffi_type_i128_elements,
    };

    switch (argmask) {
    case dh_typecode_void:
        return &ffi_type_void;
    case dh_typecode_i32:
        return &ffi_type_uint32;
    case dh_typecode_s32:
        return &ffi_type_sint32;
    case dh_typecode_i64:
        return &ffi_type_uint64;
    case dh_typecode_s64:
        return &ffi_type_sint64;
    case dh_typecode_ptr:
        return &ffi_type_pointer;
    case dh_typecode_i128:
        return &ffi_type_i128;
    }
    g_assert_not_reached();
}

static ffi_cif *init_ffi_layout(TCGHelperInfo *info)
{
    unsigned typemask = info->typemask;
    struct {
        ffi_cif cif;
        ffi_type *args[];
    } *ca;
    ffi_status status;
    int nargs;

    /* Ignoring the return type, find the last non-zero field. */
    nargs = 32 - clz32(typemask >> 3);
    nargs = DIV_ROUND_UP(nargs, 3);
    assert(nargs <= MAX_CALL_IARGS);

    ca = g_malloc0(sizeof(*ca) + nargs * sizeof(ffi_type *));
    ca->cif.rtype = typecode_to_ffi(typemask & 7);
    ca->cif.nargs = nargs;

    if (nargs != 0) {
        ca->cif.arg_types = ca->args;
        for (int j = 0; j < nargs; ++j) {
            int typecode = extract32(typemask, (j + 1) * 3, 3);
            ca->args[j] = typecode_to_ffi(typecode);
        }
    }

    status = ffi_prep_cif(&ca->cif, FFI_DEFAULT_ABI, nargs,
                          ca->cif.rtype, ca->cif.arg_types);
    assert(status == FFI_OK);

    return &ca->cif;
}
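
/*
 * Example decode (a sketch of the typemask encoding assumed above): the
 * return type lives in bits [2:0] and argument j (0-based) in bits
 * [3*(j+1)+2 : 3*(j+1)].  For info_helper_ld32_mmu the highest non-zero
 * field is argument 3 (uintptr_t ra), so
 *
 *     nargs = DIV_ROUND_UP(32 - clz32(typemask >> 3), 3) == 4
 *
 * recovering the (env, addr, oi, ra) signature for libffi.
 */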

#define HELPER_INFO_INIT(I)      (&(I)->cif)
#define HELPER_INFO_INIT_VAL(I)  init_ffi_layout(I)
#else
#define HELPER_INFO_INIT(I)      (&(I)->init)
#define HELPER_INFO_INIT_VAL(I)  1
#endif /* CONFIG_TCG_INTERPRETER */

static inline bool arg_slot_reg_p(unsigned arg_slot)
{
    /*
     * Split the sizeof away from the comparison to avoid Werror from
     * "unsigned < 0 is always false", when iarg_regs is empty.
     */
    unsigned nreg = ARRAY_SIZE(tcg_target_call_iarg_regs);
    return arg_slot < nreg;
}

static inline int arg_slot_stk_ofs(unsigned arg_slot)
{
    unsigned max = TCG_STATIC_CALL_ARGS_SIZE / sizeof(tcg_target_long);
    unsigned stk_slot = arg_slot - ARRAY_SIZE(tcg_target_call_iarg_regs);

    tcg_debug_assert(stk_slot < max);
    return TCG_TARGET_CALL_STACK_OFFSET + stk_slot * sizeof(tcg_target_long);
}
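
/*
 * Worked example (host parameters assumed): with 6 integer argument
 * registers and an 8-byte tcg_target_long, slots 0..5 are registers, so
 * arg_slot_reg_p(4) is true, while slot 7 is the second stack slot and
 * arg_slot_stk_ofs(7) yields TCG_TARGET_CALL_STACK_OFFSET + 8.
 */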

typedef struct TCGCumulativeArgs {
    int arg_idx;                /* tcg_gen_callN args[] */
    int info_in_idx;            /* TCGHelperInfo in[] */
    int arg_slot;               /* regs+stack slot */
    int ref_slot;               /* stack slots for references */
} TCGCumulativeArgs;

static void layout_arg_even(TCGCumulativeArgs *cum)
{
    cum->arg_slot += cum->arg_slot & 1;
}
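
/*
 * E.g. with cum->arg_slot == 3, layout_arg_even() bumps it to 4, so that
 * a 64-bit argument on a TCG_CALL_ARG_EVEN host (e.g. a 32-bit ABI that
 * passes i64 in even/odd register pairs) starts on an even slot.
 */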

static void layout_arg_1(TCGCumulativeArgs *cum, TCGHelperInfo *info,
                         TCGCallArgumentKind kind)
{
    TCGCallArgumentLoc *loc = &info->in[cum->info_in_idx];

    *loc = (TCGCallArgumentLoc){
        .kind = kind,
        .arg_idx = cum->arg_idx,
        .arg_slot = cum->arg_slot,
    };
    cum->info_in_idx++;
    cum->arg_slot++;
}

static void layout_arg_normal_n(TCGCumulativeArgs *cum,
                                TCGHelperInfo *info, int n)
{
    TCGCallArgumentLoc *loc = &info->in[cum->info_in_idx];

    for (int i = 0; i < n; ++i) {
        /* Layout all using the same arg_idx, adjusting the subindex. */
        loc[i] = (TCGCallArgumentLoc){
            .kind = TCG_CALL_ARG_NORMAL,
            .arg_idx = cum->arg_idx,
            .tmp_subindex = i,
            .arg_slot = cum->arg_slot + i,
        };
    }
    cum->info_in_idx += n;
    cum->arg_slot += n;
}

static void layout_arg_by_ref(TCGCumulativeArgs *cum, TCGHelperInfo *info)
{
    TCGCallArgumentLoc *loc = &info->in[cum->info_in_idx];
    int n = 128 / TCG_TARGET_REG_BITS;

    /* The first subindex carries the pointer. */
    layout_arg_1(cum, info, TCG_CALL_ARG_BY_REF);

    /*
     * The callee is allowed to clobber memory associated with a
     * structure passed by reference.  Therefore we must make copies.
     * Allocate space from "ref_slot", which will be adjusted to
     * follow the parameters on the stack.
     */
    loc[0].ref_slot = cum->ref_slot;

    /*
     * Subsequent words also go into the reference slot, but
     * do not accumulate into the regular arguments.
     */
    for (int i = 1; i < n; ++i) {
        loc[i] = (TCGCallArgumentLoc){
            .kind = TCG_CALL_ARG_BY_REF_N,
            .arg_idx = cum->arg_idx,
            .tmp_subindex = i,
            .ref_slot = cum->ref_slot + i,
        };
    }
    cum->info_in_idx += n - 1;  /* i=0 accounted for in layout_arg_1 */
    cum->ref_slot += n;
}

static void init_call_layout(TCGHelperInfo *info)
{
    int max_reg_slots = ARRAY_SIZE(tcg_target_call_iarg_regs);
    int max_stk_slots = TCG_STATIC_CALL_ARGS_SIZE / sizeof(tcg_target_long);
    unsigned typemask = info->typemask;
    unsigned typecode;
    TCGCumulativeArgs cum = { };

    /*
     * Parse and place any function return value.
     */
    typecode = typemask & 7;
    switch (typecode) {
    case dh_typecode_void:
        info->nr_out = 0;
        break;
    case dh_typecode_i32:
    case dh_typecode_s32:
    case dh_typecode_ptr:
        info->nr_out = 1;
        info->out_kind = TCG_CALL_RET_NORMAL;
        break;
    case dh_typecode_i64:
    case dh_typecode_s64:
        info->nr_out = 64 / TCG_TARGET_REG_BITS;
        info->out_kind = TCG_CALL_RET_NORMAL;
        /* Query the last register now to trigger any assert early. */
        tcg_target_call_oarg_reg(info->out_kind, info->nr_out - 1);
        break;
    case dh_typecode_i128:
        info->nr_out = 128 / TCG_TARGET_REG_BITS;
        info->out_kind = TCG_TARGET_CALL_RET_I128;
        switch (TCG_TARGET_CALL_RET_I128) {
        case TCG_CALL_RET_NORMAL:
            /* Query the last register now to trigger any assert early. */
            tcg_target_call_oarg_reg(info->out_kind, info->nr_out - 1);
            break;
        case TCG_CALL_RET_BY_VEC:
            /* Query the single register now to trigger any assert early. */
            tcg_target_call_oarg_reg(TCG_CALL_RET_BY_VEC, 0);
            break;
        case TCG_CALL_RET_BY_REF:
            /*
             * Allocate the first argument to the output.
             * We don't need to store this anywhere, just make it
             * unavailable for use in the input loop below.
             */
            cum.arg_slot = 1;
            break;
        default:
            qemu_build_not_reached();
        }
        break;
    default:
        g_assert_not_reached();
    }

    /*
     * Parse and place function arguments.
     */
    for (typemask >>= 3; typemask; typemask >>= 3, cum.arg_idx++) {
        TCGCallArgumentKind kind;
        TCGType type;

        typecode = typemask & 7;
        switch (typecode) {
        case dh_typecode_i32:
        case dh_typecode_s32:
            type = TCG_TYPE_I32;
            break;
        case dh_typecode_i64:
        case dh_typecode_s64:
            type = TCG_TYPE_I64;
            break;
        case dh_typecode_ptr:
            type = TCG_TYPE_PTR;
            break;
        case dh_typecode_i128:
            type = TCG_TYPE_I128;
            break;
        default:
            g_assert_not_reached();
        }

        switch (type) {
        case TCG_TYPE_I32:
            switch (TCG_TARGET_CALL_ARG_I32) {
            case TCG_CALL_ARG_EVEN:
                layout_arg_even(&cum);
                /* fall through */
            case TCG_CALL_ARG_NORMAL:
                layout_arg_1(&cum, info, TCG_CALL_ARG_NORMAL);
                break;
            case TCG_CALL_ARG_EXTEND:
                kind = TCG_CALL_ARG_EXTEND_U + (typecode & 1);
                layout_arg_1(&cum, info, kind);
                break;
            default:
                qemu_build_not_reached();
            }
            break;

        case TCG_TYPE_I64:
            switch (TCG_TARGET_CALL_ARG_I64) {
            case TCG_CALL_ARG_EVEN:
                layout_arg_even(&cum);
                /* fall through */
            case TCG_CALL_ARG_NORMAL:
                if (TCG_TARGET_REG_BITS == 32) {
                    layout_arg_normal_n(&cum, info, 2);
                } else {
                    layout_arg_1(&cum, info, TCG_CALL_ARG_NORMAL);
                }
                break;
            default:
                qemu_build_not_reached();
            }
            break;

        case TCG_TYPE_I128:
            switch (TCG_TARGET_CALL_ARG_I128) {
            case TCG_CALL_ARG_EVEN:
                layout_arg_even(&cum);
                /* fall through */
            case TCG_CALL_ARG_NORMAL:
                layout_arg_normal_n(&cum, info, 128 / TCG_TARGET_REG_BITS);
                break;
            case TCG_CALL_ARG_BY_REF:
                layout_arg_by_ref(&cum, info);
                break;
            default:
                qemu_build_not_reached();
            }
            break;

        default:
            g_assert_not_reached();
        }
    }
    info->nr_in = cum.info_in_idx;

    /* Validate that we didn't overrun the input array. */
    assert(cum.info_in_idx <= ARRAY_SIZE(info->in));
    /* Validate the backend has enough argument space. */
    assert(cum.arg_slot <= max_reg_slots + max_stk_slots);

    /*
     * Relocate the "ref_slot" area to the end of the parameters.
     * Minimizing this stack offset helps code size for x86,
     * which has a signed 8-bit offset encoding.
     */
    if (cum.ref_slot != 0) {
        int ref_base = 0;

        if (cum.arg_slot > max_reg_slots) {
            int align = __alignof(Int128) / sizeof(tcg_target_long);

            ref_base = cum.arg_slot - max_reg_slots;
            if (align > 1) {
                ref_base = ROUND_UP(ref_base, align);
            }
        }
        assert(ref_base + cum.ref_slot <= max_stk_slots);
        ref_base += max_reg_slots;

        if (ref_base != 0) {
            for (int i = cum.info_in_idx - 1; i >= 0; --i) {
                TCGCallArgumentLoc *loc = &info->in[i];
                switch (loc->kind) {
                case TCG_CALL_ARG_BY_REF:
                case TCG_CALL_ARG_BY_REF_N:
                    loc->ref_slot += ref_base;
                    break;
                default:
                    break;
                }
            }
        }
    }
}
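
/*
 * Example (assumed 64-bit host with TCG_CALL_ARG_I128 == TCG_CALL_ARG_BY_REF):
 * an Int128 argument consumes one regular arg_slot for the pointer plus
 * two ref_slot words for the defensive copy; the relocation loop above
 * then rebases those ref_slots past the last stack parameter actually
 * used, keeping them within TCG_STATIC_CALL_ARGS_SIZE.
 */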

static int indirect_reg_alloc_order[ARRAY_SIZE(tcg_target_reg_alloc_order)];
static void process_op_defs(TCGContext *s);
static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type,
                                            TCGReg reg, const char *name);

static void tcg_context_init(unsigned max_cpus)
{
    TCGContext *s = &tcg_init_ctx;
    int op, total_args, n, i;
    TCGOpDef *def;
    TCGArgConstraint *args_ct;
    TCGTemp *ts;

    memset(s, 0, sizeof(*s));
    s->nb_globals = 0;

    /* Count total number of arguments and allocate the corresponding
       space */
    total_args = 0;
    for (op = 0; op < NB_OPS; op++) {
        def = &tcg_op_defs[op];
        n = def->nb_iargs + def->nb_oargs;
        total_args += n;
    }

    args_ct = g_new0(TCGArgConstraint, total_args);

    for (op = 0; op < NB_OPS; op++) {
        def = &tcg_op_defs[op];
        def->args_ct = args_ct;
        n = def->nb_iargs + def->nb_oargs;
        args_ct += n;
    }

    init_call_layout(&info_helper_ld32_mmu);
    init_call_layout(&info_helper_ld64_mmu);
    init_call_layout(&info_helper_ld128_mmu);
    init_call_layout(&info_helper_st32_mmu);
    init_call_layout(&info_helper_st64_mmu);
    init_call_layout(&info_helper_st128_mmu);

    tcg_target_init(s);
    process_op_defs(s);

    /* Reverse the order of the saved registers, assuming they're all at
       the start of tcg_target_reg_alloc_order.  */
    for (n = 0; n < ARRAY_SIZE(tcg_target_reg_alloc_order); ++n) {
        int r = tcg_target_reg_alloc_order[n];
        if (tcg_regset_test_reg(tcg_target_call_clobber_regs, r)) {
            break;
        }
    }
    for (i = 0; i < n; ++i) {
        indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[n - 1 - i];
    }
    for (; i < ARRAY_SIZE(tcg_target_reg_alloc_order); ++i) {
        indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[i];
    }

    alloc_tcg_plugin_context(s);

    tcg_ctx = s;
    /*
     * In user-mode we simply share the init context among threads, since we
     * use a single region. See the documentation of tcg_region_init() for the
     * reasoning behind this.
     * In system-mode we will have at most max_cpus TCG threads.
     */
#ifdef CONFIG_USER_ONLY
    tcg_ctxs = &tcg_ctx;
    tcg_cur_ctxs = 1;
    tcg_max_ctxs = 1;
#else
    tcg_max_ctxs = max_cpus;
    tcg_ctxs = g_new0(TCGContext *, max_cpus);
#endif

    tcg_debug_assert(!tcg_regset_test_reg(s->reserved_regs, TCG_AREG0));
    ts = tcg_global_reg_new_internal(s, TCG_TYPE_PTR, TCG_AREG0, "env");
    tcg_env = temp_tcgv_ptr(ts);
}

void tcg_init(size_t tb_size, int splitwx, unsigned max_cpus)
{
    tcg_context_init(max_cpus);
    tcg_region_init(tb_size, splitwx, max_cpus);
}

/*
 * Allocate TBs right before their corresponding translated code, making
 * sure that TBs and code are on different cache lines.
 */
TranslationBlock *tcg_tb_alloc(TCGContext *s)
{
    uintptr_t align = qemu_icache_linesize;
    TranslationBlock *tb;
    void *next;

 retry:
    tb = (void *)ROUND_UP((uintptr_t)s->code_gen_ptr, align);
    next = (void *)ROUND_UP((uintptr_t)(tb + 1), align);

    if (unlikely(next > s->code_gen_highwater)) {
        if (tcg_region_alloc(s)) {
            return NULL;
        }
        goto retry;
    }
    qatomic_set(&s->code_gen_ptr, next);
    s->data_gen_ptr = NULL;
    return tb;
}
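
/*
 * E.g. (values assumed) with a 64-byte icache line and code_gen_ptr at
 * base + 0x90, the TB header is placed at base + 0xC0 and code_gen_ptr
 * advances to the next 64-byte boundary past the header, so the TB
 * descriptor and its translated code never share a cache line.
 */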
1416 
1417 void tcg_prologue_init(void)
1418 {
1419     TCGContext *s = tcg_ctx;
1420     size_t prologue_size;
1421 
1422     s->code_ptr = s->code_gen_ptr;
1423     s->code_buf = s->code_gen_ptr;
1424     s->data_gen_ptr = NULL;
1425 
1426 #ifndef CONFIG_TCG_INTERPRETER
1427     tcg_qemu_tb_exec = (tcg_prologue_fn *)tcg_splitwx_to_rx(s->code_ptr);
1428 #endif
1429 
1430 #ifdef TCG_TARGET_NEED_POOL_LABELS
1431     s->pool_labels = NULL;
1432 #endif
1433 
1434     qemu_thread_jit_write();
1435     /* Generate the prologue.  */
1436     tcg_target_qemu_prologue(s);
1437 
1438 #ifdef TCG_TARGET_NEED_POOL_LABELS
1439     /* Allow the prologue to put e.g. guest_base into a pool entry.  */
1440     {
1441         int result = tcg_out_pool_finalize(s);
1442         tcg_debug_assert(result == 0);
1443     }
1444 #endif
1445 
1446     prologue_size = tcg_current_code_size(s);
1447     perf_report_prologue(s->code_gen_ptr, prologue_size);
1448 
1449 #ifndef CONFIG_TCG_INTERPRETER
1450     flush_idcache_range((uintptr_t)tcg_splitwx_to_rx(s->code_buf),
1451                         (uintptr_t)s->code_buf, prologue_size);
1452 #endif
1453 
1454     if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
1455         FILE *logfile = qemu_log_trylock();
1456         if (logfile) {
1457             fprintf(logfile, "PROLOGUE: [size=%zu]\n", prologue_size);
1458             if (s->data_gen_ptr) {
1459                 size_t code_size = s->data_gen_ptr - s->code_gen_ptr;
1460                 size_t data_size = prologue_size - code_size;
1461                 size_t i;
1462 
1463                 disas(logfile, s->code_gen_ptr, code_size);
1464 
1465                 for (i = 0; i < data_size; i += sizeof(tcg_target_ulong)) {
1466                     if (sizeof(tcg_target_ulong) == 8) {
1467                         fprintf(logfile,
1468                                 "0x%08" PRIxPTR ":  .quad  0x%016" PRIx64 "\n",
1469                                 (uintptr_t)s->data_gen_ptr + i,
1470                                 *(uint64_t *)(s->data_gen_ptr + i));
1471                     } else {
1472                         fprintf(logfile,
1473                                 "0x%08" PRIxPTR ":  .long  0x%08x\n",
1474                                 (uintptr_t)s->data_gen_ptr + i,
1475                                 *(uint32_t *)(s->data_gen_ptr + i));
1476                     }
1477                 }
1478             } else {
1479                 disas(logfile, s->code_gen_ptr, prologue_size);
1480             }
1481             fprintf(logfile, "\n");
1482             qemu_log_unlock(logfile);
1483         }
1484     }
1485 
1486 #ifndef CONFIG_TCG_INTERPRETER
1487     /*
1488      * Assert that goto_ptr is implemented completely, setting an epilogue.
1489      * For tci, we use NULL as the signal to return from the interpreter,
1490      * so skip this check.
1491      */
1492     tcg_debug_assert(tcg_code_gen_epilogue != NULL);
1493 #endif
1494 
1495     tcg_region_prologue_set(s);
1496 }
1497 
1498 void tcg_func_start(TCGContext *s)
1499 {
1500     tcg_pool_reset(s);
1501     s->nb_temps = s->nb_globals;
1502 
1503     /* No temps have been previously allocated for size or locality.  */
1504     memset(s->free_temps, 0, sizeof(s->free_temps));
1505 
1506     /* No constant temps have been previously allocated. */
1507     for (int i = 0; i < TCG_TYPE_COUNT; ++i) {
1508         if (s->const_table[i]) {
1509             g_hash_table_remove_all(s->const_table[i]);
1510         }
1511     }
1512 
1513     s->nb_ops = 0;
1514     s->nb_labels = 0;
1515     s->current_frame_offset = s->frame_start;
1516 
1517 #ifdef CONFIG_DEBUG_TCG
1518     s->goto_tb_issue_mask = 0;
1519 #endif
1520 
1521     QTAILQ_INIT(&s->ops);
1522     QTAILQ_INIT(&s->free_ops);
1523     QSIMPLEQ_INIT(&s->labels);
1524 
1525     tcg_debug_assert(s->addr_type == TCG_TYPE_I32 ||
1526                      s->addr_type == TCG_TYPE_I64);
1527 
1528     tcg_debug_assert(s->insn_start_words > 0);
1529 }
1530 
1531 static TCGTemp *tcg_temp_alloc(TCGContext *s)
1532 {
1533     int n = s->nb_temps++;
1534 
1535     if (n >= TCG_MAX_TEMPS) {
1536         tcg_raise_tb_overflow(s);
1537     }
1538     return memset(&s->temps[n], 0, sizeof(TCGTemp));
1539 }
1540 
1541 static TCGTemp *tcg_global_alloc(TCGContext *s)
1542 {
1543     TCGTemp *ts;
1544 
1545     tcg_debug_assert(s->nb_globals == s->nb_temps);
1546     tcg_debug_assert(s->nb_globals < TCG_MAX_TEMPS);
1547     s->nb_globals++;
1548     ts = tcg_temp_alloc(s);
1549     ts->kind = TEMP_GLOBAL;
1550 
1551     return ts;
1552 }
1553 
1554 static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type,
1555                                             TCGReg reg, const char *name)
1556 {
1557     TCGTemp *ts;
1558 
1559     tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);
1560 
1561     ts = tcg_global_alloc(s);
1562     ts->base_type = type;
1563     ts->type = type;
1564     ts->kind = TEMP_FIXED;
1565     ts->reg = reg;
1566     ts->name = name;
1567     tcg_regset_set_reg(s->reserved_regs, reg);
1568 
1569     return ts;
1570 }
1571 
1572 void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size)
1573 {
1574     s->frame_start = start;
1575     s->frame_end = start + size;
1576     s->frame_temp
1577         = tcg_global_reg_new_internal(s, TCG_TYPE_PTR, reg, "_frame");
1578 }
1579 
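/*
 * Create a global temp that lives in memory at BASE + OFFSET.  If BASE is
 * itself a global rather than a fixed register, the new temp is marked
 * indirect and BASE becomes an indirection base.  On 32-bit hosts a
 * 64-bit global is split into two consecutive I32 temps named "<name>_0"
 * and "<name>_1", at offsets OFFSET and OFFSET + 4.
 */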
1580 static TCGTemp *tcg_global_mem_new_internal(TCGv_ptr base, intptr_t offset,
1581                                             const char *name, TCGType type)
1582 {
1583     TCGContext *s = tcg_ctx;
1584     TCGTemp *base_ts = tcgv_ptr_temp(base);
1585     TCGTemp *ts = tcg_global_alloc(s);
1586     int indirect_reg = 0;
1587 
1588     switch (base_ts->kind) {
1589     case TEMP_FIXED:
1590         break;
1591     case TEMP_GLOBAL:
1592         /* We do not support double-indirect registers.  */
1593         tcg_debug_assert(!base_ts->indirect_reg);
1594         base_ts->indirect_base = 1;
1595         s->nb_indirects += (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64
1596                             ? 2 : 1);
1597         indirect_reg = 1;
1598         break;
1599     default:
1600         g_assert_not_reached();
1601     }
1602 
1603     if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
1604         TCGTemp *ts2 = tcg_global_alloc(s);
1605         char buf[64];
1606 
1607         ts->base_type = TCG_TYPE_I64;
1608         ts->type = TCG_TYPE_I32;
1609         ts->indirect_reg = indirect_reg;
1610         ts->mem_allocated = 1;
1611         ts->mem_base = base_ts;
1612         ts->mem_offset = offset;
1613         pstrcpy(buf, sizeof(buf), name);
1614         pstrcat(buf, sizeof(buf), "_0");
1615         ts->name = strdup(buf);
1616 
1617         tcg_debug_assert(ts2 == ts + 1);
1618         ts2->base_type = TCG_TYPE_I64;
1619         ts2->type = TCG_TYPE_I32;
1620         ts2->indirect_reg = indirect_reg;
1621         ts2->mem_allocated = 1;
1622         ts2->mem_base = base_ts;
1623         ts2->mem_offset = offset + 4;
1624         ts2->temp_subindex = 1;
1625         pstrcpy(buf, sizeof(buf), name);
1626         pstrcat(buf, sizeof(buf), "_1");
1627         ts2->name = strdup(buf);
1628     } else {
1629         ts->base_type = type;
1630         ts->type = type;
1631         ts->indirect_reg = indirect_reg;
1632         ts->mem_allocated = 1;
1633         ts->mem_base = base_ts;
1634         ts->mem_offset = offset;
1635         ts->name = name;
1636     }
1637     return ts;
1638 }
1639 
1640 TCGv_i32 tcg_global_mem_new_i32(TCGv_ptr reg, intptr_t off, const char *name)
1641 {
1642     TCGTemp *ts = tcg_global_mem_new_internal(reg, off, name, TCG_TYPE_I32);
1643     return temp_tcgv_i32(ts);
1644 }
1645 
1646 TCGv_i64 tcg_global_mem_new_i64(TCGv_ptr reg, intptr_t off, const char *name)
1647 {
1648     TCGTemp *ts = tcg_global_mem_new_internal(reg, off, name, TCG_TYPE_I64);
1649     return temp_tcgv_i64(ts);
1650 }
1651 
1652 TCGv_ptr tcg_global_mem_new_ptr(TCGv_ptr reg, intptr_t off, const char *name)
1653 {
1654     TCGTemp *ts = tcg_global_mem_new_internal(reg, off, name, TCG_TYPE_PTR);
1655     return temp_tcgv_ptr(ts);
1656 }
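
/*
 * Illustrative use from a target front end -- a sketch only, in which
 * CPUXYZState and its pc field are hypothetical names:
 *
 *     TCGv_i32 cpu_pc = tcg_global_mem_new_i32(
 *         tcg_env, offsetof(CPUXYZState, pc), "pc");
 *
 * The result may then be used like any other TCGv_i32 in generated ops.
 */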
1657 
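/*
 * Allocate a translation-time temporary.  TEMP_EBB temps are recycled
 * through the per-type free_temps bitmaps; TEMP_TB temps are always
 * allocated fresh.  Types wider than a host register (I64 on 32-bit
 * hosts, I128) occupy several consecutive TCGTemp slots, one per host
 * word, distinguished by temp_subindex.
 */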
1658 static TCGTemp *tcg_temp_new_internal(TCGType type, TCGTempKind kind)
1659 {
1660     TCGContext *s = tcg_ctx;
1661     TCGTemp *ts;
1662     int n;
1663 
1664     if (kind == TEMP_EBB) {
1665         int idx = find_first_bit(s->free_temps[type].l, TCG_MAX_TEMPS);
1666 
1667         if (idx < TCG_MAX_TEMPS) {
1668             /* There is already an available temp with the right type.  */
1669             clear_bit(idx, s->free_temps[type].l);
1670 
1671             ts = &s->temps[idx];
1672             ts->temp_allocated = 1;
1673             tcg_debug_assert(ts->base_type == type);
1674             tcg_debug_assert(ts->kind == kind);
1675             return ts;
1676         }
1677     } else {
1678         tcg_debug_assert(kind == TEMP_TB);
1679     }
1680 
1681     switch (type) {
1682     case TCG_TYPE_I32:
1683     case TCG_TYPE_V64:
1684     case TCG_TYPE_V128:
1685     case TCG_TYPE_V256:
1686         n = 1;
1687         break;
1688     case TCG_TYPE_I64:
1689         n = 64 / TCG_TARGET_REG_BITS;
1690         break;
1691     case TCG_TYPE_I128:
1692         n = 128 / TCG_TARGET_REG_BITS;
1693         break;
1694     default:
1695         g_assert_not_reached();
1696     }
1697 
1698     ts = tcg_temp_alloc(s);
1699     ts->base_type = type;
1700     ts->temp_allocated = 1;
1701     ts->kind = kind;
1702 
1703     if (n == 1) {
1704         ts->type = type;
1705     } else {
1706         ts->type = TCG_TYPE_REG;
1707 
1708         for (int i = 1; i < n; ++i) {
1709             TCGTemp *ts2 = tcg_temp_alloc(s);
1710 
1711             tcg_debug_assert(ts2 == ts + i);
1712             ts2->base_type = type;
1713             ts2->type = TCG_TYPE_REG;
1714             ts2->temp_allocated = 1;
1715             ts2->temp_subindex = i;
1716             ts2->kind = kind;
1717         }
1718     }
1719     return ts;
1720 }
1721 
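/*
 * Public constructors.  The _ebb_ variants (and all vector temps) are
 * live only within a single extended basic block and may be recycled by
 * tcg_temp_free_*; the plain variants live to the end of the translation
 * block.
 */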
1722 TCGv_i32 tcg_temp_new_i32(void)
1723 {
1724     return temp_tcgv_i32(tcg_temp_new_internal(TCG_TYPE_I32, TEMP_TB));
1725 }
1726 
1727 TCGv_i32 tcg_temp_ebb_new_i32(void)
1728 {
1729     return temp_tcgv_i32(tcg_temp_new_internal(TCG_TYPE_I32, TEMP_EBB));
1730 }
1731 
1732 TCGv_i64 tcg_temp_new_i64(void)
1733 {
1734     return temp_tcgv_i64(tcg_temp_new_internal(TCG_TYPE_I64, TEMP_TB));
1735 }
1736 
1737 TCGv_i64 tcg_temp_ebb_new_i64(void)
1738 {
1739     return temp_tcgv_i64(tcg_temp_new_internal(TCG_TYPE_I64, TEMP_EBB));
1740 }
1741 
1742 TCGv_ptr tcg_temp_new_ptr(void)
1743 {
1744     return temp_tcgv_ptr(tcg_temp_new_internal(TCG_TYPE_PTR, TEMP_TB));
1745 }
1746 
1747 TCGv_ptr tcg_temp_ebb_new_ptr(void)
1748 {
1749     return temp_tcgv_ptr(tcg_temp_new_internal(TCG_TYPE_PTR, TEMP_EBB));
1750 }
1751 
1752 TCGv_i128 tcg_temp_new_i128(void)
1753 {
1754     return temp_tcgv_i128(tcg_temp_new_internal(TCG_TYPE_I128, TEMP_TB));
1755 }
1756 
1757 TCGv_i128 tcg_temp_ebb_new_i128(void)
1758 {
1759     return temp_tcgv_i128(tcg_temp_new_internal(TCG_TYPE_I128, TEMP_EBB));
1760 }
1761 
1762 TCGv_vec tcg_temp_new_vec(TCGType type)
1763 {
1764     TCGTemp *t;
1765 
1766 #ifdef CONFIG_DEBUG_TCG
1767     switch (type) {
1768     case TCG_TYPE_V64:
1769         assert(TCG_TARGET_HAS_v64);
1770         break;
1771     case TCG_TYPE_V128:
1772         assert(TCG_TARGET_HAS_v128);
1773         break;
1774     case TCG_TYPE_V256:
1775         assert(TCG_TARGET_HAS_v256);
1776         break;
1777     default:
1778         g_assert_not_reached();
1779     }
1780 #endif
1781 
1782     t = tcg_temp_new_internal(type, TEMP_EBB);
1783     return temp_tcgv_vec(t);
1784 }
1785 
1786 /* Create a new temp of the same type as an existing temp.  */
1787 TCGv_vec tcg_temp_new_vec_matching(TCGv_vec match)
1788 {
1789     TCGTemp *t = tcgv_vec_temp(match);
1790 
1791     tcg_debug_assert(t->temp_allocated != 0);
1792 
1793     t = tcg_temp_new_internal(t->base_type, TEMP_EBB);
1794     return temp_tcgv_vec(t);
1795 }
1796 
1797 void tcg_temp_free_internal(TCGTemp *ts)
1798 {
1799     TCGContext *s = tcg_ctx;
1800 
1801     switch (ts->kind) {
1802     case TEMP_CONST:
1803     case TEMP_TB:
1804         /* Silently ignore free. */
1805         break;
1806     case TEMP_EBB:
1807         tcg_debug_assert(ts->temp_allocated != 0);
1808         ts->temp_allocated = 0;
1809         set_bit(temp_idx(ts), s->free_temps[ts->base_type].l);
1810         break;
1811     default:
1812         /* It never made sense to free TEMP_FIXED or TEMP_GLOBAL. */
1813         g_assert_not_reached();
1814     }
1815 }
1816 
1817 void tcg_temp_free_i32(TCGv_i32 arg)
1818 {
1819     tcg_temp_free_internal(tcgv_i32_temp(arg));
1820 }
1821 
1822 void tcg_temp_free_i64(TCGv_i64 arg)
1823 {
1824     tcg_temp_free_internal(tcgv_i64_temp(arg));
1825 }
1826 
1827 void tcg_temp_free_i128(TCGv_i128 arg)
1828 {
1829     tcg_temp_free_internal(tcgv_i128_temp(arg));
1830 }
1831 
1832 void tcg_temp_free_ptr(TCGv_ptr arg)
1833 {
1834     tcg_temp_free_internal(tcgv_ptr_temp(arg));
1835 }
1836 
1837 void tcg_temp_free_vec(TCGv_vec arg)
1838 {
1839     tcg_temp_free_internal(tcgv_vec_temp(arg));
1840 }
1841 
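/*
 * Return the interned TEMP_CONST temp for (TYPE, VAL), creating it on
 * first use.  Constants are deduplicated via a per-type hash table keyed
 * by the temp's own val field, so the returned temp must never be
 * modified; freeing it is a no-op.  On 32-bit hosts a 64-bit constant
 * occupies a pair of I32 temps, with the full value kept in the low part
 * for the benefit of the hash.
 */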
1842 TCGTemp *tcg_constant_internal(TCGType type, int64_t val)
1843 {
1844     TCGContext *s = tcg_ctx;
1845     GHashTable *h = s->const_table[type];
1846     TCGTemp *ts;
1847 
1848     if (h == NULL) {
1849         h = g_hash_table_new(g_int64_hash, g_int64_equal);
1850         s->const_table[type] = h;
1851     }
1852 
1853     ts = g_hash_table_lookup(h, &val);
1854     if (ts == NULL) {
1855         int64_t *val_ptr;
1856 
1857         ts = tcg_temp_alloc(s);
1858 
1859         if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
1860             TCGTemp *ts2 = tcg_temp_alloc(s);
1861 
1862             tcg_debug_assert(ts2 == ts + 1);
1863 
1864             ts->base_type = TCG_TYPE_I64;
1865             ts->type = TCG_TYPE_I32;
1866             ts->kind = TEMP_CONST;
1867             ts->temp_allocated = 1;
1868 
1869             ts2->base_type = TCG_TYPE_I64;
1870             ts2->type = TCG_TYPE_I32;
1871             ts2->kind = TEMP_CONST;
1872             ts2->temp_allocated = 1;
1873             ts2->temp_subindex = 1;
1874 
1875             /*
1876              * Retain the full value of the 64-bit constant in the low
1877              * part, so that the hash table works.  Actual uses will
1878              * truncate the value to the low part.
1879              */
1880             ts[HOST_BIG_ENDIAN].val = val;
1881             ts[!HOST_BIG_ENDIAN].val = val >> 32;
1882             val_ptr = &ts[HOST_BIG_ENDIAN].val;
1883         } else {
1884             ts->base_type = type;
1885             ts->type = type;
1886             ts->kind = TEMP_CONST;
1887             ts->temp_allocated = 1;
1888             ts->val = val;
1889             val_ptr = &ts->val;
1890         }
1891         g_hash_table_insert(h, val_ptr, ts);
1892     }
1893 
1894     return ts;
1895 }
1896 
1897 TCGv_i32 tcg_constant_i32(int32_t val)
1898 {
1899     return temp_tcgv_i32(tcg_constant_internal(TCG_TYPE_I32, val));
1900 }
1901 
1902 TCGv_i64 tcg_constant_i64(int64_t val)
1903 {
1904     return temp_tcgv_i64(tcg_constant_internal(TCG_TYPE_I64, val));
1905 }
1906 
1907 TCGv_ptr tcg_constant_ptr_int(intptr_t val)
1908 {
1909     return temp_tcgv_ptr(tcg_constant_internal(TCG_TYPE_PTR, val));
1910 }
1911 
1912 TCGv_vec tcg_constant_vec(TCGType type, unsigned vece, int64_t val)
1913 {
1914     val = dup_const(vece, val);
1915     return temp_tcgv_vec(tcg_constant_internal(type, val));
1916 }
1917 
1918 TCGv_vec tcg_constant_vec_matching(TCGv_vec match, unsigned vece, int64_t val)
1919 {
1920     TCGTemp *t = tcgv_vec_temp(match);
1921 
1922     tcg_debug_assert(t->temp_allocated != 0);
1923     return tcg_constant_vec(t->base_type, vece, val);
1924 }
1925 
1926 #ifdef CONFIG_DEBUG_TCG
1927 size_t temp_idx(TCGTemp *ts)
1928 {
1929     ptrdiff_t n = ts - tcg_ctx->temps;
1930     assert(n >= 0 && n < tcg_ctx->nb_temps);
1931     return n;
1932 }
1933 
1934 TCGTemp *tcgv_i32_temp(TCGv_i32 v)
1935 {
1936     uintptr_t o = (uintptr_t)v - offsetof(TCGContext, temps);
1937 
1938     assert(o < sizeof(TCGTemp) * tcg_ctx->nb_temps);
1939     assert(o % sizeof(TCGTemp) == 0);
1940 
1941     return (void *)tcg_ctx + (uintptr_t)v;
1942 }
1943 #endif /* CONFIG_DEBUG_TCG */
1944 
1945 /* Return true if OP may appear in the opcode stream.
1946    Test the runtime variable that controls each opcode.  */
1947 bool tcg_op_supported(TCGOpcode op)
1948 {
1949     const bool have_vec
1950         = TCG_TARGET_HAS_v64 | TCG_TARGET_HAS_v128 | TCG_TARGET_HAS_v256;
1951 
1952     switch (op) {
1953     case INDEX_op_discard:
1954     case INDEX_op_set_label:
1955     case INDEX_op_call:
1956     case INDEX_op_br:
1957     case INDEX_op_mb:
1958     case INDEX_op_insn_start:
1959     case INDEX_op_exit_tb:
1960     case INDEX_op_goto_tb:
1961     case INDEX_op_goto_ptr:
1962     case INDEX_op_qemu_ld_a32_i32:
1963     case INDEX_op_qemu_ld_a64_i32:
1964     case INDEX_op_qemu_st_a32_i32:
1965     case INDEX_op_qemu_st_a64_i32:
1966     case INDEX_op_qemu_ld_a32_i64:
1967     case INDEX_op_qemu_ld_a64_i64:
1968     case INDEX_op_qemu_st_a32_i64:
1969     case INDEX_op_qemu_st_a64_i64:
1970         return true;
1971 
1972     case INDEX_op_qemu_st8_a32_i32:
1973     case INDEX_op_qemu_st8_a64_i32:
1974         return TCG_TARGET_HAS_qemu_st8_i32;
1975 
1976     case INDEX_op_qemu_ld_a32_i128:
1977     case INDEX_op_qemu_ld_a64_i128:
1978     case INDEX_op_qemu_st_a32_i128:
1979     case INDEX_op_qemu_st_a64_i128:
1980         return TCG_TARGET_HAS_qemu_ldst_i128;
1981 
1982     case INDEX_op_mov_i32:
1983     case INDEX_op_setcond_i32:
1984     case INDEX_op_brcond_i32:
1985     case INDEX_op_movcond_i32:
1986     case INDEX_op_ld8u_i32:
1987     case INDEX_op_ld8s_i32:
1988     case INDEX_op_ld16u_i32:
1989     case INDEX_op_ld16s_i32:
1990     case INDEX_op_ld_i32:
1991     case INDEX_op_st8_i32:
1992     case INDEX_op_st16_i32:
1993     case INDEX_op_st_i32:
1994     case INDEX_op_add_i32:
1995     case INDEX_op_sub_i32:
1996     case INDEX_op_neg_i32:
1997     case INDEX_op_mul_i32:
1998     case INDEX_op_and_i32:
1999     case INDEX_op_or_i32:
2000     case INDEX_op_xor_i32:
2001     case INDEX_op_shl_i32:
2002     case INDEX_op_shr_i32:
2003     case INDEX_op_sar_i32:
2004         return true;
2005 
2006     case INDEX_op_negsetcond_i32:
2007         return TCG_TARGET_HAS_negsetcond_i32;
2008     case INDEX_op_div_i32:
2009     case INDEX_op_divu_i32:
2010         return TCG_TARGET_HAS_div_i32;
2011     case INDEX_op_rem_i32:
2012     case INDEX_op_remu_i32:
2013         return TCG_TARGET_HAS_rem_i32;
2014     case INDEX_op_div2_i32:
2015     case INDEX_op_divu2_i32:
2016         return TCG_TARGET_HAS_div2_i32;
2017     case INDEX_op_rotl_i32:
2018     case INDEX_op_rotr_i32:
2019         return TCG_TARGET_HAS_rot_i32;
2020     case INDEX_op_deposit_i32:
2021         return TCG_TARGET_HAS_deposit_i32;
2022     case INDEX_op_extract_i32:
2023         return TCG_TARGET_HAS_extract_i32;
2024     case INDEX_op_sextract_i32:
2025         return TCG_TARGET_HAS_sextract_i32;
2026     case INDEX_op_extract2_i32:
2027         return TCG_TARGET_HAS_extract2_i32;
2028     case INDEX_op_add2_i32:
2029         return TCG_TARGET_HAS_add2_i32;
2030     case INDEX_op_sub2_i32:
2031         return TCG_TARGET_HAS_sub2_i32;
2032     case INDEX_op_mulu2_i32:
2033         return TCG_TARGET_HAS_mulu2_i32;
2034     case INDEX_op_muls2_i32:
2035         return TCG_TARGET_HAS_muls2_i32;
2036     case INDEX_op_muluh_i32:
2037         return TCG_TARGET_HAS_muluh_i32;
2038     case INDEX_op_mulsh_i32:
2039         return TCG_TARGET_HAS_mulsh_i32;
2040     case INDEX_op_ext8s_i32:
2041         return TCG_TARGET_HAS_ext8s_i32;
2042     case INDEX_op_ext16s_i32:
2043         return TCG_TARGET_HAS_ext16s_i32;
2044     case INDEX_op_ext8u_i32:
2045         return TCG_TARGET_HAS_ext8u_i32;
2046     case INDEX_op_ext16u_i32:
2047         return TCG_TARGET_HAS_ext16u_i32;
2048     case INDEX_op_bswap16_i32:
2049         return TCG_TARGET_HAS_bswap16_i32;
2050     case INDEX_op_bswap32_i32:
2051         return TCG_TARGET_HAS_bswap32_i32;
2052     case INDEX_op_not_i32:
2053         return TCG_TARGET_HAS_not_i32;
2054     case INDEX_op_andc_i32:
2055         return TCG_TARGET_HAS_andc_i32;
2056     case INDEX_op_orc_i32:
2057         return TCG_TARGET_HAS_orc_i32;
2058     case INDEX_op_eqv_i32:
2059         return TCG_TARGET_HAS_eqv_i32;
2060     case INDEX_op_nand_i32:
2061         return TCG_TARGET_HAS_nand_i32;
2062     case INDEX_op_nor_i32:
2063         return TCG_TARGET_HAS_nor_i32;
2064     case INDEX_op_clz_i32:
2065         return TCG_TARGET_HAS_clz_i32;
2066     case INDEX_op_ctz_i32:
2067         return TCG_TARGET_HAS_ctz_i32;
2068     case INDEX_op_ctpop_i32:
2069         return TCG_TARGET_HAS_ctpop_i32;
2070 
2071     case INDEX_op_brcond2_i32:
2072     case INDEX_op_setcond2_i32:
2073         return TCG_TARGET_REG_BITS == 32;
2074 
2075     case INDEX_op_mov_i64:
2076     case INDEX_op_setcond_i64:
2077     case INDEX_op_brcond_i64:
2078     case INDEX_op_movcond_i64:
2079     case INDEX_op_ld8u_i64:
2080     case INDEX_op_ld8s_i64:
2081     case INDEX_op_ld16u_i64:
2082     case INDEX_op_ld16s_i64:
2083     case INDEX_op_ld32u_i64:
2084     case INDEX_op_ld32s_i64:
2085     case INDEX_op_ld_i64:
2086     case INDEX_op_st8_i64:
2087     case INDEX_op_st16_i64:
2088     case INDEX_op_st32_i64:
2089     case INDEX_op_st_i64:
2090     case INDEX_op_add_i64:
2091     case INDEX_op_sub_i64:
2092     case INDEX_op_neg_i64:
2093     case INDEX_op_mul_i64:
2094     case INDEX_op_and_i64:
2095     case INDEX_op_or_i64:
2096     case INDEX_op_xor_i64:
2097     case INDEX_op_shl_i64:
2098     case INDEX_op_shr_i64:
2099     case INDEX_op_sar_i64:
2100     case INDEX_op_ext_i32_i64:
2101     case INDEX_op_extu_i32_i64:
2102         return TCG_TARGET_REG_BITS == 64;
2103 
2104     case INDEX_op_negsetcond_i64:
2105         return TCG_TARGET_HAS_negsetcond_i64;
2106     case INDEX_op_div_i64:
2107     case INDEX_op_divu_i64:
2108         return TCG_TARGET_HAS_div_i64;
2109     case INDEX_op_rem_i64:
2110     case INDEX_op_remu_i64:
2111         return TCG_TARGET_HAS_rem_i64;
2112     case INDEX_op_div2_i64:
2113     case INDEX_op_divu2_i64:
2114         return TCG_TARGET_HAS_div2_i64;
2115     case INDEX_op_rotl_i64:
2116     case INDEX_op_rotr_i64:
2117         return TCG_TARGET_HAS_rot_i64;
2118     case INDEX_op_deposit_i64:
2119         return TCG_TARGET_HAS_deposit_i64;
2120     case INDEX_op_extract_i64:
2121         return TCG_TARGET_HAS_extract_i64;
2122     case INDEX_op_sextract_i64:
2123         return TCG_TARGET_HAS_sextract_i64;
2124     case INDEX_op_extract2_i64:
2125         return TCG_TARGET_HAS_extract2_i64;
2126     case INDEX_op_extrl_i64_i32:
2127     case INDEX_op_extrh_i64_i32:
2128         return TCG_TARGET_HAS_extr_i64_i32;
2129     case INDEX_op_ext8s_i64:
2130         return TCG_TARGET_HAS_ext8s_i64;
2131     case INDEX_op_ext16s_i64:
2132         return TCG_TARGET_HAS_ext16s_i64;
2133     case INDEX_op_ext32s_i64:
2134         return TCG_TARGET_HAS_ext32s_i64;
2135     case INDEX_op_ext8u_i64:
2136         return TCG_TARGET_HAS_ext8u_i64;
2137     case INDEX_op_ext16u_i64:
2138         return TCG_TARGET_HAS_ext16u_i64;
2139     case INDEX_op_ext32u_i64:
2140         return TCG_TARGET_HAS_ext32u_i64;
2141     case INDEX_op_bswap16_i64:
2142         return TCG_TARGET_HAS_bswap16_i64;
2143     case INDEX_op_bswap32_i64:
2144         return TCG_TARGET_HAS_bswap32_i64;
2145     case INDEX_op_bswap64_i64:
2146         return TCG_TARGET_HAS_bswap64_i64;
2147     case INDEX_op_not_i64:
2148         return TCG_TARGET_HAS_not_i64;
2149     case INDEX_op_andc_i64:
2150         return TCG_TARGET_HAS_andc_i64;
2151     case INDEX_op_orc_i64:
2152         return TCG_TARGET_HAS_orc_i64;
2153     case INDEX_op_eqv_i64:
2154         return TCG_TARGET_HAS_eqv_i64;
2155     case INDEX_op_nand_i64:
2156         return TCG_TARGET_HAS_nand_i64;
2157     case INDEX_op_nor_i64:
2158         return TCG_TARGET_HAS_nor_i64;
2159     case INDEX_op_clz_i64:
2160         return TCG_TARGET_HAS_clz_i64;
2161     case INDEX_op_ctz_i64:
2162         return TCG_TARGET_HAS_ctz_i64;
2163     case INDEX_op_ctpop_i64:
2164         return TCG_TARGET_HAS_ctpop_i64;
2165     case INDEX_op_add2_i64:
2166         return TCG_TARGET_HAS_add2_i64;
2167     case INDEX_op_sub2_i64:
2168         return TCG_TARGET_HAS_sub2_i64;
2169     case INDEX_op_mulu2_i64:
2170         return TCG_TARGET_HAS_mulu2_i64;
2171     case INDEX_op_muls2_i64:
2172         return TCG_TARGET_HAS_muls2_i64;
2173     case INDEX_op_muluh_i64:
2174         return TCG_TARGET_HAS_muluh_i64;
2175     case INDEX_op_mulsh_i64:
2176         return TCG_TARGET_HAS_mulsh_i64;
2177 
2178     case INDEX_op_mov_vec:
2179     case INDEX_op_dup_vec:
2180     case INDEX_op_dupm_vec:
2181     case INDEX_op_ld_vec:
2182     case INDEX_op_st_vec:
2183     case INDEX_op_add_vec:
2184     case INDEX_op_sub_vec:
2185     case INDEX_op_and_vec:
2186     case INDEX_op_or_vec:
2187     case INDEX_op_xor_vec:
2188     case INDEX_op_cmp_vec:
2189         return have_vec;
2190     case INDEX_op_dup2_vec:
2191         return have_vec && TCG_TARGET_REG_BITS == 32;
2192     case INDEX_op_not_vec:
2193         return have_vec && TCG_TARGET_HAS_not_vec;
2194     case INDEX_op_neg_vec:
2195         return have_vec && TCG_TARGET_HAS_neg_vec;
2196     case INDEX_op_abs_vec:
2197         return have_vec && TCG_TARGET_HAS_abs_vec;
2198     case INDEX_op_andc_vec:
2199         return have_vec && TCG_TARGET_HAS_andc_vec;
2200     case INDEX_op_orc_vec:
2201         return have_vec && TCG_TARGET_HAS_orc_vec;
2202     case INDEX_op_nand_vec:
2203         return have_vec && TCG_TARGET_HAS_nand_vec;
2204     case INDEX_op_nor_vec:
2205         return have_vec && TCG_TARGET_HAS_nor_vec;
2206     case INDEX_op_eqv_vec:
2207         return have_vec && TCG_TARGET_HAS_eqv_vec;
2208     case INDEX_op_mul_vec:
2209         return have_vec && TCG_TARGET_HAS_mul_vec;
2210     case INDEX_op_shli_vec:
2211     case INDEX_op_shri_vec:
2212     case INDEX_op_sari_vec:
2213         return have_vec && TCG_TARGET_HAS_shi_vec;
2214     case INDEX_op_shls_vec:
2215     case INDEX_op_shrs_vec:
2216     case INDEX_op_sars_vec:
2217         return have_vec && TCG_TARGET_HAS_shs_vec;
2218     case INDEX_op_shlv_vec:
2219     case INDEX_op_shrv_vec:
2220     case INDEX_op_sarv_vec:
2221         return have_vec && TCG_TARGET_HAS_shv_vec;
2222     case INDEX_op_rotli_vec:
2223         return have_vec && TCG_TARGET_HAS_roti_vec;
2224     case INDEX_op_rotls_vec:
2225         return have_vec && TCG_TARGET_HAS_rots_vec;
2226     case INDEX_op_rotlv_vec:
2227     case INDEX_op_rotrv_vec:
2228         return have_vec && TCG_TARGET_HAS_rotv_vec;
2229     case INDEX_op_ssadd_vec:
2230     case INDEX_op_usadd_vec:
2231     case INDEX_op_sssub_vec:
2232     case INDEX_op_ussub_vec:
2233         return have_vec && TCG_TARGET_HAS_sat_vec;
2234     case INDEX_op_smin_vec:
2235     case INDEX_op_umin_vec:
2236     case INDEX_op_smax_vec:
2237     case INDEX_op_umax_vec:
2238         return have_vec && TCG_TARGET_HAS_minmax_vec;
2239     case INDEX_op_bitsel_vec:
2240         return have_vec && TCG_TARGET_HAS_bitsel_vec;
2241     case INDEX_op_cmpsel_vec:
2242         return have_vec && TCG_TARGET_HAS_cmpsel_vec;
2243 
2244     default:
2245         tcg_debug_assert(op > INDEX_op_last_generic && op < NB_OPS);
2246         return true;
2247     }
2248 }
2249 
2250 static TCGOp *tcg_op_alloc(TCGOpcode opc, unsigned nargs);
2251 
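/*
 * Emit a call to the helper described by INFO.  The INDEX_op_call op
 * carries the output temps first, then the inputs, then two constant
 * args: the function pointer and the TCGHelperInfo pointer.  On hosts
 * whose ABI requires 32-bit arguments to be extended to 64 bits, i32
 * inputs are first widened into scratch i64 temps, which are freed once
 * the op has been queued.
 */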
2252 static void tcg_gen_callN(TCGHelperInfo *info, TCGTemp *ret, TCGTemp **args)
2253 {
2254     TCGv_i64 extend_free[MAX_CALL_IARGS];
2255     int n_extend = 0;
2256     TCGOp *op;
2257     int i, n, pi = 0, total_args;
2258 
2259     if (unlikely(g_once_init_enter(HELPER_INFO_INIT(info)))) {
2260         init_call_layout(info);
2261         g_once_init_leave(HELPER_INFO_INIT(info), HELPER_INFO_INIT_VAL(info));
2262     }
2263 
2264     total_args = info->nr_out + info->nr_in + 2;
2265     op = tcg_op_alloc(INDEX_op_call, total_args);
2266 
2267 #ifdef CONFIG_PLUGIN
2268     /* Flag helpers that may affect guest state */
2269     if (tcg_ctx->plugin_insn &&
2270         !(info->flags & TCG_CALL_PLUGIN) &&
2271         !(info->flags & TCG_CALL_NO_SIDE_EFFECTS)) {
2272         tcg_ctx->plugin_insn->calls_helpers = true;
2273     }
2274 #endif
2275 
2276     TCGOP_CALLO(op) = n = info->nr_out;
2277     switch (n) {
2278     case 0:
2279         tcg_debug_assert(ret == NULL);
2280         break;
2281     case 1:
2282         tcg_debug_assert(ret != NULL);
2283         op->args[pi++] = temp_arg(ret);
2284         break;
2285     case 2:
2286     case 4:
2287         tcg_debug_assert(ret != NULL);
2288         tcg_debug_assert(ret->base_type == ret->type + ctz32(n));
2289         tcg_debug_assert(ret->temp_subindex == 0);
2290         for (i = 0; i < n; ++i) {
2291             op->args[pi++] = temp_arg(ret + i);
2292         }
2293         break;
2294     default:
2295         g_assert_not_reached();
2296     }
2297 
2298     TCGOP_CALLI(op) = n = info->nr_in;
2299     for (i = 0; i < n; i++) {
2300         const TCGCallArgumentLoc *loc = &info->in[i];
2301         TCGTemp *ts = args[loc->arg_idx] + loc->tmp_subindex;
2302 
2303         switch (loc->kind) {
2304         case TCG_CALL_ARG_NORMAL:
2305         case TCG_CALL_ARG_BY_REF:
2306         case TCG_CALL_ARG_BY_REF_N:
2307             op->args[pi++] = temp_arg(ts);
2308             break;
2309 
2310         case TCG_CALL_ARG_EXTEND_U:
2311         case TCG_CALL_ARG_EXTEND_S:
2312             {
2313                 TCGv_i64 temp = tcg_temp_ebb_new_i64();
2314                 TCGv_i32 orig = temp_tcgv_i32(ts);
2315 
2316                 if (loc->kind == TCG_CALL_ARG_EXTEND_S) {
2317                     tcg_gen_ext_i32_i64(temp, orig);
2318                 } else {
2319                     tcg_gen_extu_i32_i64(temp, orig);
2320                 }
2321                 op->args[pi++] = tcgv_i64_arg(temp);
2322                 extend_free[n_extend++] = temp;
2323             }
2324             break;
2325 
2326         default:
2327             g_assert_not_reached();
2328         }
2329     }
2330     op->args[pi++] = (uintptr_t)info->func;
2331     op->args[pi++] = (uintptr_t)info;
2332     tcg_debug_assert(pi == total_args);
2333 
2334     QTAILQ_INSERT_TAIL(&tcg_ctx->ops, op, link);
2335 
2336     tcg_debug_assert(n_extend < ARRAY_SIZE(extend_free));
2337     for (i = 0; i < n_extend; ++i) {
2338         tcg_temp_free_i64(extend_free[i]);
2339     }
2340 }
2341 
2342 void tcg_gen_call0(TCGHelperInfo *info, TCGTemp *ret)
2343 {
2344     tcg_gen_callN(info, ret, NULL);
2345 }
2346 
2347 void tcg_gen_call1(TCGHelperInfo *info, TCGTemp *ret, TCGTemp *t1)
2348 {
2349     tcg_gen_callN(info, ret, &t1);
2350 }
2351 
2352 void tcg_gen_call2(TCGHelperInfo *info, TCGTemp *ret, TCGTemp *t1, TCGTemp *t2)
2353 {
2354     TCGTemp *args[2] = { t1, t2 };
2355     tcg_gen_callN(info, ret, args);
2356 }
2357 
2358 void tcg_gen_call3(TCGHelperInfo *info, TCGTemp *ret, TCGTemp *t1,
2359                    TCGTemp *t2, TCGTemp *t3)
2360 {
2361     TCGTemp *args[3] = { t1, t2, t3 };
2362     tcg_gen_callN(info, ret, args);
2363 }
2364 
2365 void tcg_gen_call4(TCGHelperInfo *info, TCGTemp *ret, TCGTemp *t1,
2366                    TCGTemp *t2, TCGTemp *t3, TCGTemp *t4)
2367 {
2368     TCGTemp *args[4] = { t1, t2, t3, t4 };
2369     tcg_gen_callN(info, ret, args);
2370 }
2371 
2372 void tcg_gen_call5(TCGHelperInfo *info, TCGTemp *ret, TCGTemp *t1,
2373                    TCGTemp *t2, TCGTemp *t3, TCGTemp *t4, TCGTemp *t5)
2374 {
2375     TCGTemp *args[5] = { t1, t2, t3, t4, t5 };
2376     tcg_gen_callN(info, ret, args);
2377 }
2378 
2379 void tcg_gen_call6(TCGHelperInfo *info, TCGTemp *ret, TCGTemp *t1, TCGTemp *t2,
2380                    TCGTemp *t3, TCGTemp *t4, TCGTemp *t5, TCGTemp *t6)
2381 {
2382     TCGTemp *args[6] = { t1, t2, t3, t4, t5, t6 };
2383     tcg_gen_callN(info, ret, args);
2384 }
2385 
2386 void tcg_gen_call7(TCGHelperInfo *info, TCGTemp *ret, TCGTemp *t1,
2387                    TCGTemp *t2, TCGTemp *t3, TCGTemp *t4,
2388                    TCGTemp *t5, TCGTemp *t6, TCGTemp *t7)
2389 {
2390     TCGTemp *args[7] = { t1, t2, t3, t4, t5, t6, t7 };
2391     tcg_gen_callN(info, ret, args);
2392 }
2393 
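/*
 * Reset per-temp state before register allocation: constants begin as
 * TEMP_VAL_CONST, fixed temps in their register, globals and TB temps in
 * memory (TB temps with no frame slot assigned yet), and EBB temps dead.
 */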
2394 static void tcg_reg_alloc_start(TCGContext *s)
2395 {
2396     int i, n;
2397 
2398     for (i = 0, n = s->nb_temps; i < n; i++) {
2399         TCGTemp *ts = &s->temps[i];
2400         TCGTempVal val = TEMP_VAL_MEM;
2401 
2402         switch (ts->kind) {
2403         case TEMP_CONST:
2404             val = TEMP_VAL_CONST;
2405             break;
2406         case TEMP_FIXED:
2407             val = TEMP_VAL_REG;
2408             break;
2409         case TEMP_GLOBAL:
2410             break;
2411         case TEMP_EBB:
2412             val = TEMP_VAL_DEAD;
2413             /* fall through */
2414         case TEMP_TB:
2415             ts->mem_allocated = 0;
2416             break;
2417         default:
2418             g_assert_not_reached();
2419         }
2420         ts->val_type = val;
2421     }
2422 
2423     memset(s->reg_to_temp, 0, sizeof(s->reg_to_temp));
2424 }
2425 
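/*
 * Format a temp for the op dump: globals print their name, TB temps as
 * "locN", EBB temps as "tmpN", and constants as "$0x..." with a "vNN"
 * width prefix for vector constants.
 */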
2426 static char *tcg_get_arg_str_ptr(TCGContext *s, char *buf, int buf_size,
2427                                  TCGTemp *ts)
2428 {
2429     int idx = temp_idx(ts);
2430 
2431     switch (ts->kind) {
2432     case TEMP_FIXED:
2433     case TEMP_GLOBAL:
2434         pstrcpy(buf, buf_size, ts->name);
2435         break;
2436     case TEMP_TB:
2437         snprintf(buf, buf_size, "loc%d", idx - s->nb_globals);
2438         break;
2439     case TEMP_EBB:
2440         snprintf(buf, buf_size, "tmp%d", idx - s->nb_globals);
2441         break;
2442     case TEMP_CONST:
2443         switch (ts->type) {
2444         case TCG_TYPE_I32:
2445             snprintf(buf, buf_size, "$0x%x", (int32_t)ts->val);
2446             break;
2447 #if TCG_TARGET_REG_BITS > 32
2448         case TCG_TYPE_I64:
2449             snprintf(buf, buf_size, "$0x%" PRIx64, ts->val);
2450             break;
2451 #endif
2452         case TCG_TYPE_V64:
2453         case TCG_TYPE_V128:
2454         case TCG_TYPE_V256:
2455             snprintf(buf, buf_size, "v%d$0x%" PRIx64,
2456                      64 << (ts->type - TCG_TYPE_V64), ts->val);
2457             break;
2458         default:
2459             g_assert_not_reached();
2460         }
2461         break;
2462     }
2463     return buf;
2464 }
2465 
2466 static char *tcg_get_arg_str(TCGContext *s, char *buf,
2467                              int buf_size, TCGArg arg)
2468 {
2469     return tcg_get_arg_str_ptr(s, buf, buf_size, arg_temp(arg));
2470 }
2471 
2472 static const char * const cond_name[] =
2473 {
2474     [TCG_COND_NEVER] = "never",
2475     [TCG_COND_ALWAYS] = "always",
2476     [TCG_COND_EQ] = "eq",
2477     [TCG_COND_NE] = "ne",
2478     [TCG_COND_LT] = "lt",
2479     [TCG_COND_GE] = "ge",
2480     [TCG_COND_LE] = "le",
2481     [TCG_COND_GT] = "gt",
2482     [TCG_COND_LTU] = "ltu",
2483     [TCG_COND_GEU] = "geu",
2484     [TCG_COND_LEU] = "leu",
2485     [TCG_COND_GTU] = "gtu",
2486     [TCG_COND_TSTEQ] = "tsteq",
2487     [TCG_COND_TSTNE] = "tstne",
2488 };
2489 
2490 static const char * const ldst_name[(MO_BSWAP | MO_SSIZE) + 1] =
2491 {
2492     [MO_UB]   = "ub",
2493     [MO_SB]   = "sb",
2494     [MO_LEUW] = "leuw",
2495     [MO_LESW] = "lesw",
2496     [MO_LEUL] = "leul",
2497     [MO_LESL] = "lesl",
2498     [MO_LEUQ] = "leq",
2499     [MO_BEUW] = "beuw",
2500     [MO_BESW] = "besw",
2501     [MO_BEUL] = "beul",
2502     [MO_BESL] = "besl",
2503     [MO_BEUQ] = "beq",
2504     [MO_128 + MO_BE] = "beo",
2505     [MO_128 + MO_LE] = "leo",
2506 };
2507 
2508 static const char * const alignment_name[(MO_AMASK >> MO_ASHIFT) + 1] = {
2509     [MO_UNALN >> MO_ASHIFT]    = "un+",
2510     [MO_ALIGN >> MO_ASHIFT]    = "al+",
2511     [MO_ALIGN_2 >> MO_ASHIFT]  = "al2+",
2512     [MO_ALIGN_4 >> MO_ASHIFT]  = "al4+",
2513     [MO_ALIGN_8 >> MO_ASHIFT]  = "al8+",
2514     [MO_ALIGN_16 >> MO_ASHIFT] = "al16+",
2515     [MO_ALIGN_32 >> MO_ASHIFT] = "al32+",
2516     [MO_ALIGN_64 >> MO_ASHIFT] = "al64+",
2517 };
2518 
2519 static const char * const atom_name[(MO_ATOM_MASK >> MO_ATOM_SHIFT) + 1] = {
2520     [MO_ATOM_IFALIGN >> MO_ATOM_SHIFT] = "",
2521     [MO_ATOM_IFALIGN_PAIR >> MO_ATOM_SHIFT] = "pair+",
2522     [MO_ATOM_WITHIN16 >> MO_ATOM_SHIFT] = "w16+",
2523     [MO_ATOM_WITHIN16_PAIR >> MO_ATOM_SHIFT] = "w16p+",
2524     [MO_ATOM_SUBALIGN >> MO_ATOM_SHIFT] = "sub+",
2525     [MO_ATOM_NONE >> MO_ATOM_SHIFT] = "noat+",
2526 };
2527 
2528 static const char bswap_flag_name[][6] = {
2529     [TCG_BSWAP_IZ] = "iz",
2530     [TCG_BSWAP_OZ] = "oz",
2531     [TCG_BSWAP_OS] = "os",
2532     [TCG_BSWAP_IZ | TCG_BSWAP_OZ] = "iz,oz",
2533     [TCG_BSWAP_IZ | TCG_BSWAP_OS] = "iz,os",
2534 };
2535 
2536 static inline bool tcg_regset_single(TCGRegSet d)
2537 {
2538     return (d & (d - 1)) == 0;
2539 }
2540 
2541 static inline TCGReg tcg_regset_first(TCGRegSet d)
2542 {
2543     if (TCG_TARGET_NB_REGS <= 32) {
2544         return ctz32(d);
2545     } else {
2546         return ctz64(d);
2547     }
2548 }
2549 
2550 /* Return only the number of characters output -- no error return. */
2551 #define ne_fprintf(...) \
2552     ({ int ret_ = fprintf(__VA_ARGS__); ret_ >= 0 ? ret_ : 0; })
2553 
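/*
 * Dump the pending op stream, one op per line; the shape of a typical
 * line (illustrative layout, not verbatim output) is:
 *
 *     add_i32 tmp2,tmp0,tmp1             dead: 1 2  pref=all
 *
 * Calls and insn_start markers are formatted specially, and memory ops
 * decode their MemOpIdx into atomicity, alignment, size and mmu index.
 */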
2554 static void tcg_dump_ops(TCGContext *s, FILE *f, bool have_prefs)
2555 {
2556     char buf[128];
2557     TCGOp *op;
2558 
2559     QTAILQ_FOREACH(op, &s->ops, link) {
2560         int i, k, nb_oargs, nb_iargs, nb_cargs;
2561         const TCGOpDef *def;
2562         TCGOpcode c;
2563         int col = 0;
2564 
2565         c = op->opc;
2566         def = &tcg_op_defs[c];
2567 
2568         if (c == INDEX_op_insn_start) {
2569             nb_oargs = 0;
2570             col += ne_fprintf(f, "\n ----");
2571 
2572             for (i = 0, k = s->insn_start_words; i < k; ++i) {
2573                 col += ne_fprintf(f, " %016" PRIx64,
2574                                   tcg_get_insn_start_param(op, i));
2575             }
2576         } else if (c == INDEX_op_call) {
2577             const TCGHelperInfo *info = tcg_call_info(op);
2578             void *func = tcg_call_func(op);
2579 
2580             /* variable number of arguments */
2581             nb_oargs = TCGOP_CALLO(op);
2582             nb_iargs = TCGOP_CALLI(op);
2583             nb_cargs = def->nb_cargs;
2584 
2585             col += ne_fprintf(f, " %s ", def->name);
2586 
2587             /*
2588              * Print the function name from TCGHelperInfo, if available.
2589              * Note that plugins have a template function for the info,
2590              * but the actual function pointer comes from the plugin.
2591              */
2592             if (func == info->func) {
2593                 col += ne_fprintf(f, "%s", info->name);
2594             } else {
2595                 col += ne_fprintf(f, "plugin(%p)", func);
2596             }
2597 
2598             col += ne_fprintf(f, ",$0x%x,$%d", info->flags, nb_oargs);
2599             for (i = 0; i < nb_oargs; i++) {
2600                 col += ne_fprintf(f, ",%s", tcg_get_arg_str(s, buf, sizeof(buf),
2601                                                             op->args[i]));
2602             }
2603             for (i = 0; i < nb_iargs; i++) {
2604                 TCGArg arg = op->args[nb_oargs + i];
2605                 const char *t = tcg_get_arg_str(s, buf, sizeof(buf), arg);
2606                 col += ne_fprintf(f, ",%s", t);
2607             }
2608         } else {
2609             col += ne_fprintf(f, " %s ", def->name);
2610 
2611             nb_oargs = def->nb_oargs;
2612             nb_iargs = def->nb_iargs;
2613             nb_cargs = def->nb_cargs;
2614 
2615             if (def->flags & TCG_OPF_VECTOR) {
2616                 col += ne_fprintf(f, "v%d,e%d,", 64 << TCGOP_VECL(op),
2617                                   8 << TCGOP_VECE(op));
2618             }
2619 
2620             k = 0;
2621             for (i = 0; i < nb_oargs; i++) {
2622                 const char *sep = k ? "," : "";
2623                 col += ne_fprintf(f, "%s%s", sep,
2624                                   tcg_get_arg_str(s, buf, sizeof(buf),
2625                                                   op->args[k++]));
2626             }
2627             for (i = 0; i < nb_iargs; i++) {
2628                 const char *sep = k ? "," : "";
2629                 col += ne_fprintf(f, "%s%s", sep,
2630                                   tcg_get_arg_str(s, buf, sizeof(buf),
2631                                                   op->args[k++]));
2632             }
2633             switch (c) {
2634             case INDEX_op_brcond_i32:
2635             case INDEX_op_setcond_i32:
2636             case INDEX_op_negsetcond_i32:
2637             case INDEX_op_movcond_i32:
2638             case INDEX_op_brcond2_i32:
2639             case INDEX_op_setcond2_i32:
2640             case INDEX_op_brcond_i64:
2641             case INDEX_op_setcond_i64:
2642             case INDEX_op_negsetcond_i64:
2643             case INDEX_op_movcond_i64:
2644             case INDEX_op_cmp_vec:
2645             case INDEX_op_cmpsel_vec:
2646                 if (op->args[k] < ARRAY_SIZE(cond_name)
2647                     && cond_name[op->args[k]]) {
2648                     col += ne_fprintf(f, ",%s", cond_name[op->args[k++]]);
2649                 } else {
2650                     col += ne_fprintf(f, ",$0x%" TCG_PRIlx, op->args[k++]);
2651                 }
2652                 i = 1;
2653                 break;
2654             case INDEX_op_qemu_ld_a32_i32:
2655             case INDEX_op_qemu_ld_a64_i32:
2656             case INDEX_op_qemu_st_a32_i32:
2657             case INDEX_op_qemu_st_a64_i32:
2658             case INDEX_op_qemu_st8_a32_i32:
2659             case INDEX_op_qemu_st8_a64_i32:
2660             case INDEX_op_qemu_ld_a32_i64:
2661             case INDEX_op_qemu_ld_a64_i64:
2662             case INDEX_op_qemu_st_a32_i64:
2663             case INDEX_op_qemu_st_a64_i64:
2664             case INDEX_op_qemu_ld_a32_i128:
2665             case INDEX_op_qemu_ld_a64_i128:
2666             case INDEX_op_qemu_st_a32_i128:
2667             case INDEX_op_qemu_st_a64_i128:
2668                 {
2669                     const char *s_al, *s_op, *s_at;
2670                     MemOpIdx oi = op->args[k++];
2671                     MemOp mop = get_memop(oi);
2672                     unsigned ix = get_mmuidx(oi);
2673 
2674                     s_al = alignment_name[(mop & MO_AMASK) >> MO_ASHIFT];
2675                     s_op = ldst_name[mop & (MO_BSWAP | MO_SSIZE)];
2676                     s_at = atom_name[(mop & MO_ATOM_MASK) >> MO_ATOM_SHIFT];
2677                     mop &= ~(MO_AMASK | MO_BSWAP | MO_SSIZE | MO_ATOM_MASK);
2678 
2679                     /* If all fields are accounted for, print symbolically. */
2680                     if (!mop && s_al && s_op && s_at) {
2681                         col += ne_fprintf(f, ",%s%s%s,%u",
2682                                           s_at, s_al, s_op, ix);
2683                     } else {
2684                         mop = get_memop(oi);
2685                         col += ne_fprintf(f, ",$0x%x,%u", mop, ix);
2686                     }
2687                     i = 1;
2688                 }
2689                 break;
2690             case INDEX_op_bswap16_i32:
2691             case INDEX_op_bswap16_i64:
2692             case INDEX_op_bswap32_i32:
2693             case INDEX_op_bswap32_i64:
2694             case INDEX_op_bswap64_i64:
2695                 {
2696                     TCGArg flags = op->args[k];
2697                     const char *name = NULL;
2698 
2699                     if (flags < ARRAY_SIZE(bswap_flag_name)) {
2700                         name = bswap_flag_name[flags];
2701                     }
2702                     if (name) {
2703                         col += ne_fprintf(f, ",%s", name);
2704                     } else {
2705                         col += ne_fprintf(f, ",$0x%" TCG_PRIlx, flags);
2706                     }
2707                     i = k = 1;
2708                 }
2709                 break;
2710             default:
2711                 i = 0;
2712                 break;
2713             }
2714             switch (c) {
2715             case INDEX_op_set_label:
2716             case INDEX_op_br:
2717             case INDEX_op_brcond_i32:
2718             case INDEX_op_brcond_i64:
2719             case INDEX_op_brcond2_i32:
2720                 col += ne_fprintf(f, "%s$L%d", k ? "," : "",
2721                                   arg_label(op->args[k])->id);
2722                 i++, k++;
2723                 break;
2724             case INDEX_op_mb:
2725                 {
2726                     TCGBar membar = op->args[k];
2727                     const char *b_op, *m_op;
2728 
2729                     switch (membar & TCG_BAR_SC) {
2730                     case 0:
2731                         b_op = "none";
2732                         break;
2733                     case TCG_BAR_LDAQ:
2734                         b_op = "acq";
2735                         break;
2736                     case TCG_BAR_STRL:
2737                         b_op = "rel";
2738                         break;
2739                     case TCG_BAR_SC:
2740                         b_op = "seq";
2741                         break;
2742                     default:
2743                         g_assert_not_reached();
2744                     }
2745 
2746                     switch (membar & TCG_MO_ALL) {
2747                     case 0:
2748                         m_op = "none";
2749                         break;
2750                     case TCG_MO_LD_LD:
2751                         m_op = "rr";
2752                         break;
2753                     case TCG_MO_LD_ST:
2754                         m_op = "rw";
2755                         break;
2756                     case TCG_MO_ST_LD:
2757                         m_op = "wr";
2758                         break;
2759                     case TCG_MO_ST_ST:
2760                         m_op = "ww";
2761                         break;
2762                     case TCG_MO_LD_LD | TCG_MO_LD_ST:
2763                         m_op = "rr+rw";
2764                         break;
2765                     case TCG_MO_LD_LD | TCG_MO_ST_LD:
2766                         m_op = "rr+wr";
2767                         break;
2768                     case TCG_MO_LD_LD | TCG_MO_ST_ST:
2769                         m_op = "rr+ww";
2770                         break;
2771                     case TCG_MO_LD_ST | TCG_MO_ST_LD:
2772                         m_op = "rw+wr";
2773                         break;
2774                     case TCG_MO_LD_ST | TCG_MO_ST_ST:
2775                         m_op = "rw+ww";
2776                         break;
2777                     case TCG_MO_ST_LD | TCG_MO_ST_ST:
2778                         m_op = "wr+ww";
2779                         break;
2780                     case TCG_MO_LD_LD | TCG_MO_LD_ST | TCG_MO_ST_LD:
2781                         m_op = "rr+rw+wr";
2782                         break;
2783                     case TCG_MO_LD_LD | TCG_MO_LD_ST | TCG_MO_ST_ST:
2784                         m_op = "rr+rw+ww";
2785                         break;
2786                     case TCG_MO_LD_LD | TCG_MO_ST_LD | TCG_MO_ST_ST:
2787                         m_op = "rr+wr+ww";
2788                         break;
2789                     case TCG_MO_LD_ST | TCG_MO_ST_LD | TCG_MO_ST_ST:
2790                         m_op = "rw+wr+ww";
2791                         break;
2792                     case TCG_MO_ALL:
2793                         m_op = "all";
2794                         break;
2795                     default:
2796                         g_assert_not_reached();
2797                     }
2798 
2799                     col += ne_fprintf(f, "%s%s:%s", (k ? "," : ""), b_op, m_op);
2800                     i++, k++;
2801                 }
2802                 break;
2803             default:
2804                 break;
2805             }
2806             for (; i < nb_cargs; i++, k++) {
2807                 col += ne_fprintf(f, "%s$0x%" TCG_PRIlx, k ? "," : "",
2808                                   op->args[k]);
2809             }
2810         }
2811 
2812         if (have_prefs || op->life) {
2813             for (; col < 40; ++col) {
2814                 putc(' ', f);
2815             }
2816         }
2817 
2818         if (op->life) {
2819             unsigned life = op->life;
2820 
2821             if (life & (SYNC_ARG * 3)) {
2822                 ne_fprintf(f, "  sync:");
2823                 for (i = 0; i < 2; ++i) {
2824                     if (life & (SYNC_ARG << i)) {
2825                         ne_fprintf(f, " %d", i);
2826                     }
2827                 }
2828             }
2829             life /= DEAD_ARG;
2830             if (life) {
2831                 ne_fprintf(f, "  dead:");
2832                 for (i = 0; life; ++i, life >>= 1) {
2833                     if (life & 1) {
2834                         ne_fprintf(f, " %d", i);
2835                     }
2836                 }
2837             }
2838         }
2839 
2840         if (have_prefs) {
2841             for (i = 0; i < nb_oargs; ++i) {
2842                 TCGRegSet set = output_pref(op, i);
2843 
2844                 if (i == 0) {
2845                     ne_fprintf(f, "  pref=");
2846                 } else {
2847                     ne_fprintf(f, ",");
2848                 }
2849                 if (set == 0) {
2850                     ne_fprintf(f, "none");
2851                 } else if (set == MAKE_64BIT_MASK(0, TCG_TARGET_NB_REGS)) {
2852                     ne_fprintf(f, "all");
2853 #ifdef CONFIG_DEBUG_TCG
2854                 } else if (tcg_regset_single(set)) {
2855                     TCGReg reg = tcg_regset_first(set);
2856                     ne_fprintf(f, "%s", tcg_target_reg_names[reg]);
2857 #endif
2858                 } else if (TCG_TARGET_NB_REGS <= 32) {
2859                     ne_fprintf(f, "0x%x", (uint32_t)set);
2860                 } else {
2861                     ne_fprintf(f, "0x%" PRIx64, (uint64_t)set);
2862                 }
2863             }
2864         }
2865 
2866         putc('\n', f);
2867     }
2868 }
2869 
2870 /* we give more priority to constraints with fewer registers */
2871 static int get_constraint_priority(const TCGOpDef *def, int k)
2872 {
2873     const TCGArgConstraint *arg_ct = &def->args_ct[k];
2874     int n = ctpop64(arg_ct->regs);
2875 
2876     /*
2877      * Sort constraints of a single register first, which includes output
2878      * aliases (which must exactly match the input already allocated).
2879      */
2880     if (n == 1 || arg_ct->oalias) {
2881         return INT_MAX;
2882     }
2883 
2884     /*
2885      * Sort register pairs next, first then second immediately after.
2886      * Arbitrarily sort multiple pairs by the index of the first reg;
2887      * there shouldn't be many pairs.
2888      */
2889     switch (arg_ct->pair) {
2890     case 1:
2891     case 3:
2892         return (k + 1) * 2;
2893     case 2:
2894         return (arg_ct->pair_index + 1) * 2 - 1;
2895     }
2896 
2897     /* Finally, sort by decreasing register count. */
2898     assert(n > 1);
2899     return -n;
2900 }
2901 
2902 /* sort from highest priority to lowest */
2903 static void sort_constraints(TCGOpDef *def, int start, int n)
2904 {
2905     int i, j;
2906     TCGArgConstraint *a = def->args_ct;
2907 
2908     for (i = 0; i < n; i++) {
2909         a[start + i].sort_index = start + i;
2910     }
2911     if (n <= 1) {
2912         return;
2913     }
2914     for (i = 0; i < n - 1; i++) {
2915         for (j = i + 1; j < n; j++) {
2916             int p1 = get_constraint_priority(def, a[start + i].sort_index);
2917             int p2 = get_constraint_priority(def, a[start + j].sort_index);
2918             if (p1 < p2) {
2919                 int tmp = a[start + i].sort_index;
2920                 a[start + i].sort_index = a[start + j].sort_index;
2921                 a[start + j].sort_index = tmp;
2922             }
2923         }
2924     }
2925 }
2926 
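/*
 * Expand each opcode's constraint strings (see tcg-target-con-str.h)
 * into TCGArgConstraint bits: a digit aliases an input with the numbered
 * output, '&' marks an early-clobber output, 'p'/'m' pair an argument
 * with the register after/before the previous argument, and the
 * remaining letters accumulate register-set and constant constraints.
 * Constraints are then sorted so the most restrictive allocate first.
 */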
2927 static void process_op_defs(TCGContext *s)
2928 {
2929     TCGOpcode op;
2930 
2931     for (op = 0; op < NB_OPS; op++) {
2932         TCGOpDef *def = &tcg_op_defs[op];
2933         const TCGTargetOpDef *tdefs;
2934         bool saw_alias_pair = false;
2935         int i, o, i2, o2, nb_args;
2936 
2937         if (def->flags & TCG_OPF_NOT_PRESENT) {
2938             continue;
2939         }
2940 
2941         nb_args = def->nb_iargs + def->nb_oargs;
2942         if (nb_args == 0) {
2943             continue;
2944         }
2945 
2946         /*
2947          * Macro magic should make it impossible, but double-check that
2948          * the array index is in range.  Since the signedness of an enum
2949          * is implementation defined, force the result to unsigned.
2950          */
2951         unsigned con_set = tcg_target_op_def(op);
2952         tcg_debug_assert(con_set < ARRAY_SIZE(constraint_sets));
2953         tdefs = &constraint_sets[con_set];
2954 
2955         for (i = 0; i < nb_args; i++) {
2956             const char *ct_str = tdefs->args_ct_str[i];
2957             bool input_p = i >= def->nb_oargs;
2958 
2959             /* Incomplete TCGTargetOpDef entry. */
2960             tcg_debug_assert(ct_str != NULL);
2961 
2962             switch (*ct_str) {
2963             case '0' ... '9':
2964                 o = *ct_str - '0';
2965                 tcg_debug_assert(input_p);
2966                 tcg_debug_assert(o < def->nb_oargs);
2967                 tcg_debug_assert(def->args_ct[o].regs != 0);
2968                 tcg_debug_assert(!def->args_ct[o].oalias);
2969                 def->args_ct[i] = def->args_ct[o];
2970                 /* The output sets oalias.  */
2971                 def->args_ct[o].oalias = 1;
2972                 def->args_ct[o].alias_index = i;
2973                 /* The input sets ialias. */
2974                 def->args_ct[i].ialias = 1;
2975                 def->args_ct[i].alias_index = o;
2976                 if (def->args_ct[i].pair) {
2977                     saw_alias_pair = true;
2978                 }
2979                 tcg_debug_assert(ct_str[1] == '\0');
2980                 continue;
2981 
2982             case '&':
2983                 tcg_debug_assert(!input_p);
2984                 def->args_ct[i].newreg = true;
2985                 ct_str++;
2986                 break;
2987 
2988             case 'p': /* plus */
2989                 /* Allocate to the register after the previous. */
2990                 tcg_debug_assert(i > (input_p ? def->nb_oargs : 0));
2991                 o = i - 1;
2992                 tcg_debug_assert(!def->args_ct[o].pair);
2993                 tcg_debug_assert(!def->args_ct[o].ct);
2994                 def->args_ct[i] = (TCGArgConstraint){
2995                     .pair = 2,
2996                     .pair_index = o,
2997                     .regs = def->args_ct[o].regs << 1,
2998                     .newreg = def->args_ct[o].newreg,
2999                 };
3000                 def->args_ct[o].pair = 1;
3001                 def->args_ct[o].pair_index = i;
3002                 tcg_debug_assert(ct_str[1] == '\0');
3003                 continue;
3004 
3005             case 'm': /* minus */
3006                 /* Allocate to the register before the previous. */
3007                 tcg_debug_assert(i > (input_p ? def->nb_oargs : 0));
3008                 o = i - 1;
3009                 tcg_debug_assert(!def->args_ct[o].pair);
3010                 tcg_debug_assert(!def->args_ct[o].ct);
3011                 def->args_ct[i] = (TCGArgConstraint){
3012                     .pair = 1,
3013                     .pair_index = o,
3014                     .regs = def->args_ct[o].regs >> 1,
3015                     .newreg = def->args_ct[o].newreg,
3016                 };
3017                 def->args_ct[o].pair = 2;
3018                 def->args_ct[o].pair_index = i;
3019                 tcg_debug_assert(ct_str[1] == '\0');
3020                 continue;
3021             }
3022 
3023             do {
3024                 switch (*ct_str) {
3025                 case 'i':
3026                     def->args_ct[i].ct |= TCG_CT_CONST;
3027                     break;
3028 
3029                 /* Include all of the target-specific constraints. */
3030 
3031 #undef CONST
3032 #define CONST(CASE, MASK) \
3033     case CASE: def->args_ct[i].ct |= MASK; break;
3034 #define REGS(CASE, MASK) \
3035     case CASE: def->args_ct[i].regs |= MASK; break;
3036 
3037 #include "tcg-target-con-str.h"
3038 
3039 #undef REGS
3040 #undef CONST
3041                 default:
3042                 case '0' ... '9':
3043                 case '&':
3044                 case 'p':
3045                 case 'm':
3046                     /* Typo in TCGTargetOpDef constraint. */
3047                     g_assert_not_reached();
3048                 }
3049             } while (*++ct_str != '\0');
3050         }
3051 
3052         /* TCGTargetOpDef entry with too much information? */
3053         tcg_debug_assert(i == TCG_MAX_OP_ARGS || tdefs->args_ct_str[i] == NULL);
3054 
3055         /*
3056          * Fix up output pairs that are aliased with inputs.
3057          * When we created the alias, we copied pair from the output.
3058          * There are three cases:
3059          *    (1a) Pairs of inputs alias pairs of outputs.
3060          *    (1b) One input aliases the first of a pair of outputs.
3061          *    (2)  One input aliases the second of a pair of outputs.
3062          *
3063          * Case 1a is handled by making sure that the pair_index'es are
3064          * properly updated so that they appear the same as a pair of inputs.
3065          *
3066          * Case 1b is handled by setting the pair_index of the input to
3067          * itself, simply so it doesn't point to an unrelated argument.
3068          * Since we don't encounter the "second" during the input allocation
3069          * phase, nothing happens with the second half of the input pair.
3070          *
3071          * Case 2 is handled by setting the second input to pair=3, the
3072          * first output to pair=3, and the pair_index'es to match.
3073          */
3074         if (saw_alias_pair) {
3075             for (i = def->nb_oargs; i < nb_args; i++) {
3076                 /*
3077                  * Since [0-9pm] must be alone in the constraint string,
3078                  * the only way they can both be set is if the pair comes
3079                  * from the output alias.
3080                  */
3081                 if (!def->args_ct[i].ialias) {
3082                     continue;
3083                 }
3084                 switch (def->args_ct[i].pair) {
3085                 case 0:
3086                     break;
3087                 case 1:
3088                     o = def->args_ct[i].alias_index;
3089                     o2 = def->args_ct[o].pair_index;
3090                     tcg_debug_assert(def->args_ct[o].pair == 1);
3091                     tcg_debug_assert(def->args_ct[o2].pair == 2);
3092                     if (def->args_ct[o2].oalias) {
3093                         /* Case 1a */
3094                         i2 = def->args_ct[o2].alias_index;
3095                         tcg_debug_assert(def->args_ct[i2].pair == 2);
3096                         def->args_ct[i2].pair_index = i;
3097                         def->args_ct[i].pair_index = i2;
3098                     } else {
3099                         /* Case 1b */
3100                         def->args_ct[i].pair_index = i;
3101                     }
3102                     break;
3103                 case 2:
3104                     o = def->args_ct[i].alias_index;
3105                     o2 = def->args_ct[o].pair_index;
3106                     tcg_debug_assert(def->args_ct[o].pair == 2);
3107                     tcg_debug_assert(def->args_ct[o2].pair == 1);
3108                     if (def->args_ct[o2].oalias) {
3109                         /* Case 1a */
3110                         i2 = def->args_ct[o2].alias_index;
3111                         tcg_debug_assert(def->args_ct[i2].pair == 1);
3112                         def->args_ct[i2].pair_index = i;
3113                         def->args_ct[i].pair_index = i2;
3114                     } else {
3115                         /* Case 2 */
3116                         def->args_ct[i].pair = 3;
3117                         def->args_ct[o2].pair = 3;
3118                         def->args_ct[i].pair_index = o2;
3119                         def->args_ct[o2].pair_index = i;
3120                     }
3121                     break;
3122                 default:
3123                     g_assert_not_reached();
3124                 }
3125             }
3126         }
3127 
3128         /* sort the constraints (XXX: this is just a heuristic) */
3129         sort_constraints(def, 0, def->nb_oargs);
3130         sort_constraints(def, def->nb_oargs, def->nb_iargs);
3131     }
3132 }
3133 
3134 static void remove_label_use(TCGOp *op, int idx)
3135 {
3136     TCGLabel *label = arg_label(op->args[idx]);
3137     TCGLabelUse *use;
3138 
3139     QSIMPLEQ_FOREACH(use, &label->branches, next) {
3140         if (use->op == op) {
3141             QSIMPLEQ_REMOVE(&label->branches, use, TCGLabelUse, next);
3142             return;
3143         }
3144     }
3145     g_assert_not_reached();
3146 }
3147 
3148 void tcg_op_remove(TCGContext *s, TCGOp *op)
3149 {
3150     switch (op->opc) {
3151     case INDEX_op_br:
3152         remove_label_use(op, 0);
3153         break;
3154     case INDEX_op_brcond_i32:
3155     case INDEX_op_brcond_i64:
3156         remove_label_use(op, 3);
3157         break;
3158     case INDEX_op_brcond2_i32:
3159         remove_label_use(op, 5);
3160         break;
3161     default:
3162         break;
3163     }
3164 
3165     QTAILQ_REMOVE(&s->ops, op, link);
3166     QTAILQ_INSERT_TAIL(&s->free_ops, op, link);
3167     s->nb_ops--;
3168 }
3169 
3170 void tcg_remove_ops_after(TCGOp *op)
3171 {
3172     TCGContext *s = tcg_ctx;
3173 
3174     while (true) {
3175         TCGOp *last = tcg_last_op();
3176         if (last == op) {
3177             return;
3178         }
3179         tcg_op_remove(s, last);
3180     }
3181 }
3182 
3183 static TCGOp *tcg_op_alloc(TCGOpcode opc, unsigned nargs)
3184 {
3185     TCGContext *s = tcg_ctx;
3186     TCGOp *op = NULL;
3187 
3188     if (unlikely(!QTAILQ_EMPTY(&s->free_ops))) {
3189         QTAILQ_FOREACH(op, &s->free_ops, link) {
3190             if (nargs <= op->nargs) {
3191                 QTAILQ_REMOVE(&s->free_ops, op, link);
3192                 nargs = op->nargs;
3193                 goto found;
3194             }
3195         }
3196     }
3197 
3198     /* Most opcodes have 3 or 4 operands: reduce fragmentation. */
3199     nargs = MAX(4, nargs);
3200     op = tcg_malloc(sizeof(TCGOp) + sizeof(TCGArg) * nargs);
3201 
3202  found:
3203     memset(op, 0, offsetof(TCGOp, link));
3204     op->opc = opc;
3205     op->nargs = nargs;
3206 
3207     /* Check for bitfield overflow. */
3208     tcg_debug_assert(op->nargs == nargs);
3209 
3210     s->nb_ops++;
3211     return op;
3212 }
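/*
 * A sketch of the sizing policy above in isolation (a hypothetical
 * standalone model, not part of the build): any free op whose argument
 * array is at least as large as requested is reused, keeping its real
 * capacity in op->nargs, and fresh allocations are rounded up to four
 * slots so that the common 3- and 4-operand opcodes can reuse them.
 *
 *     static inline size_t op_alloc_size(unsigned nargs)
 *     {
 *         return sizeof(TCGOp) + sizeof(TCGArg) * MAX(4, nargs);
 *     }
 */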
3213 
3214 TCGOp *tcg_emit_op(TCGOpcode opc, unsigned nargs)
3215 {
3216     TCGOp *op = tcg_op_alloc(opc, nargs);
3217     QTAILQ_INSERT_TAIL(&tcg_ctx->ops, op, link);
3218     return op;
3219 }
3220 
3221 TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *old_op,
3222                             TCGOpcode opc, unsigned nargs)
3223 {
3224     TCGOp *new_op = tcg_op_alloc(opc, nargs);
3225     QTAILQ_INSERT_BEFORE(old_op, new_op, link);
3226     return new_op;
3227 }
3228 
3229 TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *old_op,
3230                            TCGOpcode opc, unsigned nargs)
3231 {
3232     TCGOp *new_op = tcg_op_alloc(opc, nargs);
3233     QTAILQ_INSERT_AFTER(&s->ops, old_op, new_op, link);
3234     return new_op;
3235 }
3236 
3237 static void move_label_uses(TCGLabel *to, TCGLabel *from)
3238 {
3239     TCGLabelUse *u;
3240 
3241     QSIMPLEQ_FOREACH(u, &from->branches, next) {
3242         TCGOp *op = u->op;
3243         switch (op->opc) {
3244         case INDEX_op_br:
3245             op->args[0] = label_arg(to);
3246             break;
3247         case INDEX_op_brcond_i32:
3248         case INDEX_op_brcond_i64:
3249             op->args[3] = label_arg(to);
3250             break;
3251         case INDEX_op_brcond2_i32:
3252             op->args[5] = label_arg(to);
3253             break;
3254         default:
3255             g_assert_not_reached();
3256         }
3257     }
3258 
3259     QSIMPLEQ_CONCAT(&to->branches, &from->branches);
3260 }
3261 
3262 /* Reachable analysis : remove unreachable code.  */
3263 static void __attribute__((noinline))
3264 reachable_code_pass(TCGContext *s)
3265 {
3266     TCGOp *op, *op_next, *op_prev;
3267     bool dead = false;
3268 
3269     QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
3270         bool remove = dead;
3271         TCGLabel *label;
3272 
3273         switch (op->opc) {
3274         case INDEX_op_set_label:
3275             label = arg_label(op->args[0]);
3276 
3277             /*
3278              * Note that the first op in the TB is always a load,
3279              * so there is always something before a label.
3280              */
3281             op_prev = QTAILQ_PREV(op, link);
3282 
3283             /*
3284              * If we find two sequential labels, move all branches to
3285              * reference the second label and remove the first label.
3286              * Do this before branch to next optimization, so that the
3287              * middle label is out of the way.
3288              */
3289             if (op_prev->opc == INDEX_op_set_label) {
3290                 move_label_uses(label, arg_label(op_prev->args[0]));
3291                 tcg_op_remove(s, op_prev);
3292                 op_prev = QTAILQ_PREV(op, link);
3293             }
3294 
3295             /*
3296              * Optimization can fold conditional branches to unconditional.
3297              * If we find a label which is preceded by an unconditional
3298              * branch to next, remove the branch.  We couldn't do this when
3299              * processing the branch because any dead code between the branch
3300              * and label had not yet been removed.
3301              */
3302             if (op_prev->opc == INDEX_op_br &&
3303                 label == arg_label(op_prev->args[0])) {
3304                 tcg_op_remove(s, op_prev);
3305                 /* Fall through means insns become live again.  */
3306                 dead = false;
3307             }
3308 
3309             if (QSIMPLEQ_EMPTY(&label->branches)) {
3310                 /*
3311                  * While there is an occasional backward branch, virtually
3312                  * all branches generated by the translators are forward.
3313                  * Which means that by this point we will usually have removed
3314                  * every reference to a label that we are ever going to remove,
3315                  * and there is little to be gained by iterating.
3316                  */
3317                 remove = true;
3318             } else {
3319                 /* Once we see a label, insns become live again.  */
3320                 dead = false;
3321                 remove = false;
3322             }
3323             break;
3324 
3325         case INDEX_op_br:
3326         case INDEX_op_exit_tb:
3327         case INDEX_op_goto_ptr:
3328             /* Unconditional branches; everything following is dead.  */
3329             dead = true;
3330             break;
3331 
3332         case INDEX_op_call:
3333             /* Notice noreturn helper calls, raising exceptions.  */
3334             if (tcg_call_flags(op) & TCG_CALL_NO_RETURN) {
3335                 dead = true;
3336             }
3337             break;
3338 
3339         case INDEX_op_insn_start:
3340             /* Never remove -- we need to keep these for unwind.  */
3341             remove = false;
3342             break;
3343 
3344         default:
3345             break;
3346         }
3347 
3348         if (remove) {
3349             tcg_op_remove(s, op);
3350         }
3351     }
3352 }
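/*
 * Illustrative before/after for the pass above (ops and labels
 * hypothetical).  Given
 *
 *     br $L1
 *     mov_i32 t0, t1      <- unreachable
 *     set_label $L0       <- no users
 *     set_label $L1       <- only user is the br above
 *
 * the mov is removed as dead code, $L0 is removed as unreferenced,
 * the br is removed as a branch-to-next, and, its last use now gone,
 * $L1 is removed as well.
 */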
3353 
3354 #define TS_DEAD  1
3355 #define TS_MEM   2
3356 
3357 #define IS_DEAD_ARG(n)   (arg_life & (DEAD_ARG << (n)))
3358 #define NEED_SYNC_ARG(n) (arg_life & (SYNC_ARG << (n)))
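/*
 * Worked example of the life bitmask: for an op whose output 0 must
 * be synced back to memory and whose argument 2 dies,
 *
 *     arg_life = (SYNC_ARG << 0) | (DEAD_ARG << 2);
 *
 * so that NEED_SYNC_ARG(0) and IS_DEAD_ARG(2) are both true.
 */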
3359 
3360 /* For liveness_pass_1, the register preferences for a given temp.  */
3361 static inline TCGRegSet *la_temp_pref(TCGTemp *ts)
3362 {
3363     return ts->state_ptr;
3364 }
3365 
3366 /* For liveness_pass_1, reset the preferences for a given temp to the
3367  * maximal regset for its type.
3368  */
3369 static inline void la_reset_pref(TCGTemp *ts)
3370 {
3371     *la_temp_pref(ts)
3372         = (ts->state == TS_DEAD ? 0 : tcg_target_available_regs[ts->type]);
3373 }
3374 
3375 /* liveness analysis: end of function: all temps are dead, and globals
3376    should be in memory. */
3377 static void la_func_end(TCGContext *s, int ng, int nt)
3378 {
3379     int i;
3380 
3381     for (i = 0; i < ng; ++i) {
3382         s->temps[i].state = TS_DEAD | TS_MEM;
3383         la_reset_pref(&s->temps[i]);
3384     }
3385     for (i = ng; i < nt; ++i) {
3386         s->temps[i].state = TS_DEAD;
3387         la_reset_pref(&s->temps[i]);
3388     }
3389 }
3390 
3391 /* liveness analysis: end of basic block: all temps are dead, globals
3392    and local temps should be in memory. */
3393 static void la_bb_end(TCGContext *s, int ng, int nt)
3394 {
3395     int i;
3396 
3397     for (i = 0; i < nt; ++i) {
3398         TCGTemp *ts = &s->temps[i];
3399         int state;
3400 
3401         switch (ts->kind) {
3402         case TEMP_FIXED:
3403         case TEMP_GLOBAL:
3404         case TEMP_TB:
3405             state = TS_DEAD | TS_MEM;
3406             break;
3407         case TEMP_EBB:
3408         case TEMP_CONST:
3409             state = TS_DEAD;
3410             break;
3411         default:
3412             g_assert_not_reached();
3413         }
3414         ts->state = state;
3415         la_reset_pref(ts);
3416     }
3417 }
3418 
3419 /* liveness analysis: sync globals back to memory.  */
3420 static void la_global_sync(TCGContext *s, int ng)
3421 {
3422     int i;
3423 
3424     for (i = 0; i < ng; ++i) {
3425         int state = s->temps[i].state;
3426         s->temps[i].state = state | TS_MEM;
3427         if (state == TS_DEAD) {
3428             /* If the global was previously dead, reset prefs.  */
3429             la_reset_pref(&s->temps[i]);
3430         }
3431     }
3432 }
3433 
3434 /*
3435  * liveness analysis: conditional branch: all temps are dead unless
3436  * explicitly live-across-conditional-branch; globals and local temps
3437  * should be synced.
3438  */
3439 static void la_bb_sync(TCGContext *s, int ng, int nt)
3440 {
3441     la_global_sync(s, ng);
3442 
3443     for (int i = ng; i < nt; ++i) {
3444         TCGTemp *ts = &s->temps[i];
3445         int state;
3446 
3447         switch (ts->kind) {
3448         case TEMP_TB:
3449             state = ts->state;
3450             ts->state = state | TS_MEM;
3451             if (state != TS_DEAD) {
3452                 continue;
3453             }
3454             break;
3455         case TEMP_EBB:
3456         case TEMP_CONST:
3457             continue;
3458         default:
3459             g_assert_not_reached();
3460         }
3461         la_reset_pref(&s->temps[i]);
3462     }
3463 }
3464 
3465 /* liveness analysis: sync globals back to memory and kill.  */
3466 static void la_global_kill(TCGContext *s, int ng)
3467 {
3468     int i;
3469 
3470     for (i = 0; i < ng; i++) {
3471         s->temps[i].state = TS_DEAD | TS_MEM;
3472         la_reset_pref(&s->temps[i]);
3473     }
3474 }
3475 
3476 /* liveness analysis: note live globals crossing calls.  */
3477 static void la_cross_call(TCGContext *s, int nt)
3478 {
3479     TCGRegSet mask = ~tcg_target_call_clobber_regs;
3480     int i;
3481 
3482     for (i = 0; i < nt; i++) {
3483         TCGTemp *ts = &s->temps[i];
3484         if (!(ts->state & TS_DEAD)) {
3485             TCGRegSet *pset = la_temp_pref(ts);
3486             TCGRegSet set = *pset;
3487 
3488             set &= mask;
3489             /* If the combination is not possible, restart.  */
3490             if (set == 0) {
3491                 set = tcg_target_available_regs[ts->type] & mask;
3492             }
3493             *pset = set;
3494         }
3495     }
3496 }
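/*
 * Worked example for la_cross_call(): if a live temp's preference set
 * is { R0, R1 } and both registers are call-clobbered, then
 * set & mask == 0 and the preference restarts from every call-saved
 * register available to the temp's type, rather than going empty.
 */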
3497 
3498 /*
3499  * Liveness analysis: Verify the lifetime of TEMP_TB, and reduce
3500  * to TEMP_EBB, if possible.
3501  */
3502 static void __attribute__((noinline))
3503 liveness_pass_0(TCGContext *s)
3504 {
3505     void * const multiple_ebb = (void *)(uintptr_t)-1;
3506     int nb_temps = s->nb_temps;
3507     TCGOp *op, *ebb;
3508 
3509     for (int i = s->nb_globals; i < nb_temps; ++i) {
3510         s->temps[i].state_ptr = NULL;
3511     }
3512 
3513     /*
3514      * Represent each EBB by the op at which it begins.  In the case of
3515      * the first EBB, this is the first op, otherwise it is a label.
3516      * Collect the uses of each TEMP_TB: NULL for unused, EBB for use
3517      * within a single EBB, else MULTIPLE_EBB.
3518      */
3519     ebb = QTAILQ_FIRST(&s->ops);
3520     QTAILQ_FOREACH(op, &s->ops, link) {
3521         const TCGOpDef *def;
3522         int nb_oargs, nb_iargs;
3523 
3524         switch (op->opc) {
3525         case INDEX_op_set_label:
3526             ebb = op;
3527             continue;
3528         case INDEX_op_discard:
3529             continue;
3530         case INDEX_op_call:
3531             nb_oargs = TCGOP_CALLO(op);
3532             nb_iargs = TCGOP_CALLI(op);
3533             break;
3534         default:
3535             def = &tcg_op_defs[op->opc];
3536             nb_oargs = def->nb_oargs;
3537             nb_iargs = def->nb_iargs;
3538             break;
3539         }
3540 
3541         for (int i = 0; i < nb_oargs + nb_iargs; ++i) {
3542             TCGTemp *ts = arg_temp(op->args[i]);
3543 
3544             if (ts->kind != TEMP_TB) {
3545                 continue;
3546             }
3547             if (ts->state_ptr == NULL) {
3548                 ts->state_ptr = ebb;
3549             } else if (ts->state_ptr != ebb) {
3550                 ts->state_ptr = multiple_ebb;
3551             }
3552         }
3553     }
3554 
3555     /*
3556      * For TEMP_TB that turned out not to be used beyond one EBB,
3557      * reduce the liveness to TEMP_EBB.
3558      */
3559     for (int i = s->nb_globals; i < nb_temps; ++i) {
3560         TCGTemp *ts = &s->temps[i];
3561         if (ts->kind == TEMP_TB && ts->state_ptr != multiple_ebb) {
3562             ts->kind = TEMP_EBB;
3563         }
3564     }
3565 }
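/*
 * Example of the effect of liveness_pass_0: a TEMP_TB that is only
 * written and read between one pair of labels records a single ebb in
 * state_ptr and is demoted to TEMP_EBB, allowing later passes to let
 * it die at the end of that extended basic block instead of keeping
 * it synced across the whole TB.
 */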
3566 
3567 /* Liveness analysis: update the opc_arg_life array to tell if a
3568    given input argument is dead. Instructions updating dead
3569    temporaries are removed. */
3570 static void __attribute__((noinline))
3571 liveness_pass_1(TCGContext *s)
3572 {
3573     int nb_globals = s->nb_globals;
3574     int nb_temps = s->nb_temps;
3575     TCGOp *op, *op_prev;
3576     TCGRegSet *prefs;
3577     int i;
3578 
3579     prefs = tcg_malloc(sizeof(TCGRegSet) * nb_temps);
3580     for (i = 0; i < nb_temps; ++i) {
3581         s->temps[i].state_ptr = prefs + i;
3582     }
3583 
3584     /* ??? Should be redundant with the exit_tb that ends the TB.  */
3585     la_func_end(s, nb_globals, nb_temps);
3586 
3587     QTAILQ_FOREACH_REVERSE_SAFE(op, &s->ops, link, op_prev) {
3588         int nb_iargs, nb_oargs;
3589         TCGOpcode opc_new, opc_new2;
3590         bool have_opc_new2;
3591         TCGLifeData arg_life = 0;
3592         TCGTemp *ts;
3593         TCGOpcode opc = op->opc;
3594         const TCGOpDef *def = &tcg_op_defs[opc];
3595 
3596         switch (opc) {
3597         case INDEX_op_call:
3598             {
3599                 const TCGHelperInfo *info = tcg_call_info(op);
3600                 int call_flags = tcg_call_flags(op);
3601 
3602                 nb_oargs = TCGOP_CALLO(op);
3603                 nb_iargs = TCGOP_CALLI(op);
3604 
3605                 /* pure functions can be removed if their result is unused */
3606                 if (call_flags & TCG_CALL_NO_SIDE_EFFECTS) {
3607                     for (i = 0; i < nb_oargs; i++) {
3608                         ts = arg_temp(op->args[i]);
3609                         if (ts->state != TS_DEAD) {
3610                             goto do_not_remove_call;
3611                         }
3612                     }
3613                     goto do_remove;
3614                 }
3615             do_not_remove_call:
3616 
3617                 /* Output args are dead.  */
3618                 for (i = 0; i < nb_oargs; i++) {
3619                     ts = arg_temp(op->args[i]);
3620                     if (ts->state & TS_DEAD) {
3621                         arg_life |= DEAD_ARG << i;
3622                     }
3623                     if (ts->state & TS_MEM) {
3624                         arg_life |= SYNC_ARG << i;
3625                     }
3626                     ts->state = TS_DEAD;
3627                     la_reset_pref(ts);
3628                 }
3629 
3630                 /* Not used -- it will be tcg_target_call_oarg_reg().  */
3631                 memset(op->output_pref, 0, sizeof(op->output_pref));
3632 
3633                 if (!(call_flags & (TCG_CALL_NO_WRITE_GLOBALS |
3634                                     TCG_CALL_NO_READ_GLOBALS))) {
3635                     la_global_kill(s, nb_globals);
3636                 } else if (!(call_flags & TCG_CALL_NO_READ_GLOBALS)) {
3637                     la_global_sync(s, nb_globals);
3638                 }
3639 
3640                 /* Record arguments that die in this helper.  */
3641                 for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
3642                     ts = arg_temp(op->args[i]);
3643                     if (ts->state & TS_DEAD) {
3644                         arg_life |= DEAD_ARG << i;
3645                     }
3646                 }
3647 
3648                 /* For all live registers, remove call-clobbered prefs.  */
3649                 la_cross_call(s, nb_temps);
3650 
3651                 /*
3652                  * Input arguments are live for preceding opcodes.
3653                  *
3654                  * For those arguments that die, and will be allocated in
3655                  * registers, clear the register set for that arg, to be
3656                  * filled in below.  For args that will be on the stack,
3657                  * reset to any available reg.  Process arguments in reverse
3658                  * order so that if a temp is used more than once, the stack
3659                  * reset to max happens before the register reset to 0.
3660                  */
3661                 for (i = nb_iargs - 1; i >= 0; i--) {
3662                     const TCGCallArgumentLoc *loc = &info->in[i];
3663                     ts = arg_temp(op->args[nb_oargs + i]);
3664 
3665                     if (ts->state & TS_DEAD) {
3666                         switch (loc->kind) {
3667                         case TCG_CALL_ARG_NORMAL:
3668                         case TCG_CALL_ARG_EXTEND_U:
3669                         case TCG_CALL_ARG_EXTEND_S:
3670                             if (arg_slot_reg_p(loc->arg_slot)) {
3671                                 *la_temp_pref(ts) = 0;
3672                                 break;
3673                             }
3674                             /* fall through */
3675                         default:
3676                             *la_temp_pref(ts) =
3677                                 tcg_target_available_regs[ts->type];
3678                             break;
3679                         }
3680                         ts->state &= ~TS_DEAD;
3681                     }
3682                 }
3683 
3684                 /*
3685                  * For each input argument, add its input register to prefs.
3686                  * If a temp is used once, this produces a single set bit;
3687                  * if a temp is used multiple times, this produces a set.
3688                  */
3689                 for (i = 0; i < nb_iargs; i++) {
3690                     const TCGCallArgumentLoc *loc = &info->in[i];
3691                     ts = arg_temp(op->args[nb_oargs + i]);
3692 
3693                     switch (loc->kind) {
3694                     case TCG_CALL_ARG_NORMAL:
3695                     case TCG_CALL_ARG_EXTEND_U:
3696                     case TCG_CALL_ARG_EXTEND_S:
3697                         if (arg_slot_reg_p(loc->arg_slot)) {
3698                             tcg_regset_set_reg(*la_temp_pref(ts),
3699                                 tcg_target_call_iarg_regs[loc->arg_slot]);
3700                         }
3701                         break;
3702                     default:
3703                         break;
3704                     }
3705                 }
3706             }
3707             break;
3708         case INDEX_op_insn_start:
3709             break;
3710         case INDEX_op_discard:
3711             /* mark the temporary as dead */
3712             ts = arg_temp(op->args[0]);
3713             ts->state = TS_DEAD;
3714             la_reset_pref(ts);
3715             break;
3716 
3717         case INDEX_op_add2_i32:
3718             opc_new = INDEX_op_add_i32;
3719             goto do_addsub2;
3720         case INDEX_op_sub2_i32:
3721             opc_new = INDEX_op_sub_i32;
3722             goto do_addsub2;
3723         case INDEX_op_add2_i64:
3724             opc_new = INDEX_op_add_i64;
3725             goto do_addsub2;
3726         case INDEX_op_sub2_i64:
3727             opc_new = INDEX_op_sub_i64;
3728         do_addsub2:
3729             nb_iargs = 4;
3730             nb_oargs = 2;
3731             /* Test if the high part of the operation is dead, but not
3732                the low part.  The result can be optimized to a simple
3733                add or sub.  This happens often for x86_64 guests when the
3734                cpu mode is set to 32 bit.  */
3735             if (arg_temp(op->args[1])->state == TS_DEAD) {
3736                 if (arg_temp(op->args[0])->state == TS_DEAD) {
3737                     goto do_remove;
3738                 }
3739                 /* Replace the opcode and adjust the args in place,
3740                    leaving 3 unused args at the end.  */
3741                 op->opc = opc = opc_new;
3742                 op->args[1] = op->args[2];
3743                 op->args[2] = op->args[4];
3744                 /* Fall through and mark the single-word operation live.  */
3745                 nb_iargs = 2;
3746                 nb_oargs = 1;
3747             }
3748             goto do_not_remove;
3749 
3750         case INDEX_op_mulu2_i32:
3751             opc_new = INDEX_op_mul_i32;
3752             opc_new2 = INDEX_op_muluh_i32;
3753             have_opc_new2 = TCG_TARGET_HAS_muluh_i32;
3754             goto do_mul2;
3755         case INDEX_op_muls2_i32:
3756             opc_new = INDEX_op_mul_i32;
3757             opc_new2 = INDEX_op_mulsh_i32;
3758             have_opc_new2 = TCG_TARGET_HAS_mulsh_i32;
3759             goto do_mul2;
3760         case INDEX_op_mulu2_i64:
3761             opc_new = INDEX_op_mul_i64;
3762             opc_new2 = INDEX_op_muluh_i64;
3763             have_opc_new2 = TCG_TARGET_HAS_muluh_i64;
3764             goto do_mul2;
3765         case INDEX_op_muls2_i64:
3766             opc_new = INDEX_op_mul_i64;
3767             opc_new2 = INDEX_op_mulsh_i64;
3768             have_opc_new2 = TCG_TARGET_HAS_mulsh_i64;
3769             goto do_mul2;
3770         do_mul2:
3771             nb_iargs = 2;
3772             nb_oargs = 2;
3773             if (arg_temp(op->args[1])->state == TS_DEAD) {
3774                 if (arg_temp(op->args[0])->state == TS_DEAD) {
3775                     /* Both parts of the operation are dead.  */
3776                     goto do_remove;
3777                 }
3778                 /* The high part of the operation is dead; generate the low. */
3779                 op->opc = opc = opc_new;
3780                 op->args[1] = op->args[2];
3781                 op->args[2] = op->args[3];
3782             } else if (arg_temp(op->args[0])->state == TS_DEAD && have_opc_new2) {
3783                 /* The low part of the operation is dead; generate the high. */
3784                 op->opc = opc = opc_new2;
3785                 op->args[0] = op->args[1];
3786                 op->args[1] = op->args[2];
3787                 op->args[2] = op->args[3];
3788             } else {
3789                 goto do_not_remove;
3790             }
3791             /* Mark the single-word operation live.  */
3792             nb_oargs = 1;
3793             goto do_not_remove;
3794 
3795         default:
3796             /* XXX: optimize by hardcoding common cases (e.g. triadic ops) */
3797             nb_iargs = def->nb_iargs;
3798             nb_oargs = def->nb_oargs;
3799 
3800             /* Test if the operation can be removed because all
3801                its outputs are dead. We assume that nb_oargs == 0
3802                implies side effects */
3803             if (!(def->flags & TCG_OPF_SIDE_EFFECTS) && nb_oargs != 0) {
3804                 for (i = 0; i < nb_oargs; i++) {
3805                     if (arg_temp(op->args[i])->state != TS_DEAD) {
3806                         goto do_not_remove;
3807                     }
3808                 }
3809                 goto do_remove;
3810             }
3811             goto do_not_remove;
3812 
3813         do_remove:
3814             tcg_op_remove(s, op);
3815             break;
3816 
3817         do_not_remove:
3818             for (i = 0; i < nb_oargs; i++) {
3819                 ts = arg_temp(op->args[i]);
3820 
3821                 /* Remember the preference of the uses that followed.  */
3822                 if (i < ARRAY_SIZE(op->output_pref)) {
3823                     op->output_pref[i] = *la_temp_pref(ts);
3824                 }
3825 
3826                 /* Output args are dead.  */
3827                 if (ts->state & TS_DEAD) {
3828                     arg_life |= DEAD_ARG << i;
3829                 }
3830                 if (ts->state & TS_MEM) {
3831                     arg_life |= SYNC_ARG << i;
3832                 }
3833                 ts->state = TS_DEAD;
3834                 la_reset_pref(ts);
3835             }
3836 
3837             /* If end of basic block, update.  */
3838             if (def->flags & TCG_OPF_BB_EXIT) {
3839                 la_func_end(s, nb_globals, nb_temps);
3840             } else if (def->flags & TCG_OPF_COND_BRANCH) {
3841                 la_bb_sync(s, nb_globals, nb_temps);
3842             } else if (def->flags & TCG_OPF_BB_END) {
3843                 la_bb_end(s, nb_globals, nb_temps);
3844             } else if (def->flags & TCG_OPF_SIDE_EFFECTS) {
3845                 la_global_sync(s, nb_globals);
3846                 if (def->flags & TCG_OPF_CALL_CLOBBER) {
3847                     la_cross_call(s, nb_temps);
3848                 }
3849             }
3850 
3851             /* Record arguments that die in this opcode.  */
3852             for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
3853                 ts = arg_temp(op->args[i]);
3854                 if (ts->state & TS_DEAD) {
3855                     arg_life |= DEAD_ARG << i;
3856                 }
3857             }
3858 
3859             /* Input arguments are live for preceding opcodes.  */
3860             for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
3861                 ts = arg_temp(op->args[i]);
3862                 if (ts->state & TS_DEAD) {
3863                     /* For operands that were dead, initially allow
3864                        all regs for the type.  */
3865                     *la_temp_pref(ts) = tcg_target_available_regs[ts->type];
3866                     ts->state &= ~TS_DEAD;
3867                 }
3868             }
3869 
3870             /* Incorporate constraints for this operand.  */
3871             switch (opc) {
3872             case INDEX_op_mov_i32:
3873             case INDEX_op_mov_i64:
3874                 /* Note that these are TCG_OPF_NOT_PRESENT and do not
3875                    have proper constraints.  That said, special-case
3876                    moves to propagate preferences backward.  */
3877                 if (IS_DEAD_ARG(1)) {
3878                     *la_temp_pref(arg_temp(op->args[0]))
3879                         = *la_temp_pref(arg_temp(op->args[1]));
3880                 }
3881                 break;
3882 
3883             default:
3884                 for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
3885                     const TCGArgConstraint *ct = &def->args_ct[i];
3886                     TCGRegSet set, *pset;
3887 
3888                     ts = arg_temp(op->args[i]);
3889                     pset = la_temp_pref(ts);
3890                     set = *pset;
3891 
3892                     set &= ct->regs;
3893                     if (ct->ialias) {
3894                         set &= output_pref(op, ct->alias_index);
3895                     }
3896                     /* If the combination is not possible, restart.  */
3897                     if (set == 0) {
3898                         set = ct->regs;
3899                     }
3900                     *pset = set;
3901                 }
3902                 break;
3903             }
3904             break;
3905         }
3906         op->life = arg_life;
3907     }
3908 }
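/*
 * Illustrative narrowing performed by the pass above: given
 *
 *     add2_i32 lo, hi, al, ah, bl, bh
 *
 * with the "hi" output dead but "lo" live, the op is rewritten in
 * place to
 *
 *     add_i32 lo, al, bl
 *
 * The mulu2/muls2 cases reduce analogously to mul, or to muluh/mulsh
 * when the host provides those opcodes.
 */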
3909 
3910 /* Liveness analysis: Convert indirect regs to direct temporaries.  */
3911 static bool __attribute__((noinline))
3912 liveness_pass_2(TCGContext *s)
3913 {
3914     int nb_globals = s->nb_globals;
3915     int nb_temps, i;
3916     bool changes = false;
3917     TCGOp *op, *op_next;
3918 
3919     /* Create a temporary for each indirect global.  */
3920     for (i = 0; i < nb_globals; ++i) {
3921         TCGTemp *its = &s->temps[i];
3922         if (its->indirect_reg) {
3923             TCGTemp *dts = tcg_temp_alloc(s);
3924             dts->type = its->type;
3925             dts->base_type = its->base_type;
3926             dts->temp_subindex = its->temp_subindex;
3927             dts->kind = TEMP_EBB;
3928             its->state_ptr = dts;
3929         } else {
3930             its->state_ptr = NULL;
3931         }
3932         /* All globals begin dead.  */
3933         its->state = TS_DEAD;
3934     }
3935     for (nb_temps = s->nb_temps; i < nb_temps; ++i) {
3936         TCGTemp *its = &s->temps[i];
3937         its->state_ptr = NULL;
3938         its->state = TS_DEAD;
3939     }
3940 
3941     QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
3942         TCGOpcode opc = op->opc;
3943         const TCGOpDef *def = &tcg_op_defs[opc];
3944         TCGLifeData arg_life = op->life;
3945         int nb_iargs, nb_oargs, call_flags;
3946         TCGTemp *arg_ts, *dir_ts;
3947 
3948         if (opc == INDEX_op_call) {
3949             nb_oargs = TCGOP_CALLO(op);
3950             nb_iargs = TCGOP_CALLI(op);
3951             call_flags = tcg_call_flags(op);
3952         } else {
3953             nb_iargs = def->nb_iargs;
3954             nb_oargs = def->nb_oargs;
3955 
3956             /* Set flags similar to how calls require.  */
3957             if (def->flags & TCG_OPF_COND_BRANCH) {
3958                 /* Like reading globals: sync_globals */
3959                 call_flags = TCG_CALL_NO_WRITE_GLOBALS;
3960             } else if (def->flags & TCG_OPF_BB_END) {
3961                 /* Like writing globals: save_globals */
3962                 call_flags = 0;
3963             } else if (def->flags & TCG_OPF_SIDE_EFFECTS) {
3964                 /* Like reading globals: sync_globals */
3965                 call_flags = TCG_CALL_NO_WRITE_GLOBALS;
3966             } else {
3967                 /* No effect on globals.  */
3968                 call_flags = (TCG_CALL_NO_READ_GLOBALS |
3969                               TCG_CALL_NO_WRITE_GLOBALS);
3970             }
3971         }
3972 
3973         /* Make sure that input arguments are available.  */
3974         for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
3975             arg_ts = arg_temp(op->args[i]);
3976             dir_ts = arg_ts->state_ptr;
3977             if (dir_ts && arg_ts->state == TS_DEAD) {
3978                 TCGOpcode lopc = (arg_ts->type == TCG_TYPE_I32
3979                                   ? INDEX_op_ld_i32
3980                                   : INDEX_op_ld_i64);
3981                 TCGOp *lop = tcg_op_insert_before(s, op, lopc, 3);
3982 
3983                 lop->args[0] = temp_arg(dir_ts);
3984                 lop->args[1] = temp_arg(arg_ts->mem_base);
3985                 lop->args[2] = arg_ts->mem_offset;
3986 
3987                 /* Loaded, but synced with memory.  */
3988                 arg_ts->state = TS_MEM;
3989             }
3990         }
3991 
3992         /* Perform input replacement, and mark inputs that became dead.
3993            No action is required except keeping temp_state up to date
3994            so that we reload when needed.  */
3995         for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
3996             arg_ts = arg_temp(op->args[i]);
3997             dir_ts = arg_ts->state_ptr;
3998             if (dir_ts) {
3999                 op->args[i] = temp_arg(dir_ts);
4000                 changes = true;
4001                 if (IS_DEAD_ARG(i)) {
4002                     arg_ts->state = TS_DEAD;
4003                 }
4004             }
4005         }
4006 
4007         /* Liveness analysis should ensure that the following are
4008            all correct, for call sites and basic block end points.  */
4009         if (call_flags & TCG_CALL_NO_READ_GLOBALS) {
4010             /* Nothing to do */
4011         } else if (call_flags & TCG_CALL_NO_WRITE_GLOBALS) {
4012             for (i = 0; i < nb_globals; ++i) {
4013                 /* Liveness should see that globals are synced back,
4014                    that is, either TS_DEAD or TS_MEM.  */
4015                 arg_ts = &s->temps[i];
4016                 tcg_debug_assert(arg_ts->state_ptr == 0
4017                                  || arg_ts->state != 0);
4018             }
4019         } else {
4020             for (i = 0; i < nb_globals; ++i) {
4021                 /* Liveness should see that globals are saved back,
4022                    that is, TS_DEAD, waiting to be reloaded.  */
4023                 arg_ts = &s->temps[i];
4024                 tcg_debug_assert(arg_ts->state_ptr == 0
4025                                  || arg_ts->state == TS_DEAD);
4026             }
4027         }
4028 
4029         /* Outputs become available.  */
4030         if (opc == INDEX_op_mov_i32 || opc == INDEX_op_mov_i64) {
4031             arg_ts = arg_temp(op->args[0]);
4032             dir_ts = arg_ts->state_ptr;
4033             if (dir_ts) {
4034                 op->args[0] = temp_arg(dir_ts);
4035                 changes = true;
4036 
4037                 /* The output is now live and modified.  */
4038                 arg_ts->state = 0;
4039 
4040                 if (NEED_SYNC_ARG(0)) {
4041                     TCGOpcode sopc = (arg_ts->type == TCG_TYPE_I32
4042                                       ? INDEX_op_st_i32
4043                                       : INDEX_op_st_i64);
4044                     TCGOp *sop = tcg_op_insert_after(s, op, sopc, 3);
4045                     TCGTemp *out_ts = dir_ts;
4046 
4047                     if (IS_DEAD_ARG(0)) {
4048                         out_ts = arg_temp(op->args[1]);
4049                         arg_ts->state = TS_DEAD;
4050                         tcg_op_remove(s, op);
4051                     } else {
4052                         arg_ts->state = TS_MEM;
4053                     }
4054 
4055                     sop->args[0] = temp_arg(out_ts);
4056                     sop->args[1] = temp_arg(arg_ts->mem_base);
4057                     sop->args[2] = arg_ts->mem_offset;
4058                 } else {
4059                     tcg_debug_assert(!IS_DEAD_ARG(0));
4060                 }
4061             }
4062         } else {
4063             for (i = 0; i < nb_oargs; i++) {
4064                 arg_ts = arg_temp(op->args[i]);
4065                 dir_ts = arg_ts->state_ptr;
4066                 if (!dir_ts) {
4067                     continue;
4068                 }
4069                 op->args[i] = temp_arg(dir_ts);
4070                 changes = true;
4071 
4072                 /* The output is now live and modified.  */
4073                 arg_ts->state = 0;
4074 
4075                 /* Sync outputs upon their last write.  */
4076                 if (NEED_SYNC_ARG(i)) {
4077                     TCGOpcode sopc = (arg_ts->type == TCG_TYPE_I32
4078                                       ? INDEX_op_st_i32
4079                                       : INDEX_op_st_i64);
4080                     TCGOp *sop = tcg_op_insert_after(s, op, sopc, 3);
4081 
4082                     sop->args[0] = temp_arg(dir_ts);
4083                     sop->args[1] = temp_arg(arg_ts->mem_base);
4084                     sop->args[2] = arg_ts->mem_offset;
4085 
4086                     arg_ts->state = TS_MEM;
4087                 }
4088                 /* Drop outputs that are dead.  */
4089                 if (IS_DEAD_ARG(i)) {
4090                     arg_ts->state = TS_DEAD;
4091                 }
4092             }
4093         }
4094     }
4095 
4096     return changes;
4097 }
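/*
 * Illustrative before/after for liveness_pass_2, for an indirect
 * global G shadowed by the direct temp D allocated above (names
 * hypothetical):
 *
 *     add_i32 G, G, X    becomes    ld_i32  D, base, $ofs  (D was dead)
 *                                   add_i32 D, D, X
 *                                   st_i32  D, base, $ofs  (sync needed)
 *
 * The load is inserted only when D does not already hold the value,
 * and the store only when liveness marked the output for sync.
 */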
4098 
4099 static void temp_allocate_frame(TCGContext *s, TCGTemp *ts)
4100 {
4101     intptr_t off;
4102     int size, align;
4103 
4104     /* When allocating an object, look at the full type. */
4105     size = tcg_type_size(ts->base_type);
4106     switch (ts->base_type) {
4107     case TCG_TYPE_I32:
4108         align = 4;
4109         break;
4110     case TCG_TYPE_I64:
4111     case TCG_TYPE_V64:
4112         align = 8;
4113         break;
4114     case TCG_TYPE_I128:
4115     case TCG_TYPE_V128:
4116     case TCG_TYPE_V256:
4117         /*
4118          * Note that we do not require aligned storage for V256,
4119          * and that we provide alignment for I128 to match V128,
4120          * even if that's above what the host ABI requires.
4121          */
4122         align = 16;
4123         break;
4124     default:
4125         g_assert_not_reached();
4126     }
4127 
4128     /*
4129      * Assume the stack is sufficiently aligned.
4130      * This affects e.g. ARM NEON, where we have 8 byte stack alignment
4131      * and do not require 16 byte vector alignment.  This seems slightly
4132      * easier than fully parameterizing the above switch statement.
4133      */
4134     align = MIN(TCG_TARGET_STACK_ALIGN, align);
4135     off = ROUND_UP(s->current_frame_offset, align);
4136 
4137     /* If we've exhausted the stack frame, restart with a smaller TB. */
4138     if (off + size > s->frame_end) {
4139         tcg_raise_tb_overflow(s);
4140     }
4141     s->current_frame_offset = off + size;
4142 #if defined(__sparc__)
4143     off += TCG_TARGET_STACK_BIAS;
4144 #endif
4145 
4146     /* If the object was subdivided, assign memory to all the parts. */
4147     if (ts->base_type != ts->type) {
4148         int part_size = tcg_type_size(ts->type);
4149         int part_count = size / part_size;
4150 
4151         /*
4152          * Each part is allocated sequentially in tcg_temp_new_internal.
4153          * Jump back to the first part by subtracting the current index.
4154          */
4155         ts -= ts->temp_subindex;
4156         for (int i = 0; i < part_count; ++i) {
4157             ts[i].mem_offset = off + i * part_size;
4158             ts[i].mem_base = s->frame_temp;
4159             ts[i].mem_allocated = 1;
4160         }
4161     } else {
4162         ts->mem_offset = off;
4163         ts->mem_base = s->frame_temp;
4164         ts->mem_allocated = 1;
4165     }
4166 }
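/*
 * Worked example of the frame arithmetic above: with
 * current_frame_offset == 20 and a TCG_TYPE_V128 temp (size 16,
 * align 16, assuming TCG_TARGET_STACK_ALIGN >= 16), ROUND_UP(20, 16)
 * yields 32, so the temp occupies bytes [32, 48) of the frame and
 * current_frame_offset advances to 48.
 */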
4167 
4168 /* Assign @reg to @ts, and update reg_to_temp[]. */
4169 static void set_temp_val_reg(TCGContext *s, TCGTemp *ts, TCGReg reg)
4170 {
4171     if (ts->val_type == TEMP_VAL_REG) {
4172         TCGReg old = ts->reg;
4173         tcg_debug_assert(s->reg_to_temp[old] == ts);
4174         if (old == reg) {
4175             return;
4176         }
4177         s->reg_to_temp[old] = NULL;
4178     }
4179     tcg_debug_assert(s->reg_to_temp[reg] == NULL);
4180     s->reg_to_temp[reg] = ts;
4181     ts->val_type = TEMP_VAL_REG;
4182     ts->reg = reg;
4183 }
4184 
4185 /* Assign a non-register value type to @ts, and update reg_to_temp[]. */
4186 static void set_temp_val_nonreg(TCGContext *s, TCGTemp *ts, TCGTempVal type)
4187 {
4188     tcg_debug_assert(type != TEMP_VAL_REG);
4189     if (ts->val_type == TEMP_VAL_REG) {
4190         TCGReg reg = ts->reg;
4191         tcg_debug_assert(s->reg_to_temp[reg] == ts);
4192         s->reg_to_temp[reg] = NULL;
4193     }
4194     ts->val_type = type;
4195 }
4196 
4197 static void temp_load(TCGContext *, TCGTemp *, TCGRegSet, TCGRegSet, TCGRegSet);
4198 
4199 /* Mark a temporary as free or dead.  If 'free_or_dead' is negative,
4200    mark it free; otherwise mark it dead.  */
4201 static void temp_free_or_dead(TCGContext *s, TCGTemp *ts, int free_or_dead)
4202 {
4203     TCGTempVal new_type;
4204 
4205     switch (ts->kind) {
4206     case TEMP_FIXED:
4207         return;
4208     case TEMP_GLOBAL:
4209     case TEMP_TB:
4210         new_type = TEMP_VAL_MEM;
4211         break;
4212     case TEMP_EBB:
4213         new_type = free_or_dead < 0 ? TEMP_VAL_MEM : TEMP_VAL_DEAD;
4214         break;
4215     case TEMP_CONST:
4216         new_type = TEMP_VAL_CONST;
4217         break;
4218     default:
4219         g_assert_not_reached();
4220     }
4221     set_temp_val_nonreg(s, ts, new_type);
4222 }
4223 
4224 /* Mark a temporary as dead.  */
4225 static inline void temp_dead(TCGContext *s, TCGTemp *ts)
4226 {
4227     temp_free_or_dead(s, ts, 1);
4228 }
4229 
4230 /* Sync a temporary to memory. 'allocated_regs' is used in case a temporary
4231    register needs to be allocated to store a constant.  If 'free_or_dead'
4232    is non-zero, subsequently release the temporary; if it is positive, the
4233    temp is dead; if it is negative, the temp is free.  */
4234 static void temp_sync(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs,
4235                       TCGRegSet preferred_regs, int free_or_dead)
4236 {
4237     if (!temp_readonly(ts) && !ts->mem_coherent) {
4238         if (!ts->mem_allocated) {
4239             temp_allocate_frame(s, ts);
4240         }
4241         switch (ts->val_type) {
4242         case TEMP_VAL_CONST:
4243             /* If we're going to free the temp immediately, then we won't
4244                require it later in a register, so attempt to store the
4245                constant to memory directly.  */
4246             if (free_or_dead
4247                 && tcg_out_sti(s, ts->type, ts->val,
4248                                ts->mem_base->reg, ts->mem_offset)) {
4249                 break;
4250             }
4251             temp_load(s, ts, tcg_target_available_regs[ts->type],
4252                       allocated_regs, preferred_regs);
4253             /* fallthrough */
4254 
4255         case TEMP_VAL_REG:
4256             tcg_out_st(s, ts->type, ts->reg,
4257                        ts->mem_base->reg, ts->mem_offset);
4258             break;
4259 
4260         case TEMP_VAL_MEM:
4261             break;
4262 
4263         case TEMP_VAL_DEAD:
4264         default:
4265             g_assert_not_reached();
4266         }
4267         ts->mem_coherent = 1;
4268     }
4269     if (free_or_dead) {
4270         temp_free_or_dead(s, ts, free_or_dead);
4271     }
4272 }
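/*
 * Worked trace for temp_sync(): a TEMP_VAL_CONST temp that is being
 * released (free_or_dead != 0) may be stored straight to its slot via
 * tcg_out_sti() when the backend supports that; otherwise it is first
 * materialized into a register by temp_load() and stored from there,
 * after which mem_coherent is set.
 */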
4273 
4274 /* free register 'reg' by spilling the corresponding temporary if necessary */
4275 static void tcg_reg_free(TCGContext *s, TCGReg reg, TCGRegSet allocated_regs)
4276 {
4277     TCGTemp *ts = s->reg_to_temp[reg];
4278     if (ts != NULL) {
4279         temp_sync(s, ts, allocated_regs, 0, -1);
4280     }
4281 }
4282 
4283 /**
4284  * tcg_reg_alloc:
4285  * @required_regs: Set of registers in which we must allocate.
4286  * @allocated_regs: Set of registers which must be avoided.
4287  * @preferred_regs: Set of registers we should prefer.
4288  * @rev: True if we search the registers in "indirect" order.
4289  *
4290  * The allocated register must be in @required_regs & ~@allocated_regs,
4291  * but if we can put it in @preferred_regs we may save a move later.
4292  */
4293 static TCGReg tcg_reg_alloc(TCGContext *s, TCGRegSet required_regs,
4294                             TCGRegSet allocated_regs,
4295                             TCGRegSet preferred_regs, bool rev)
4296 {
4297     int i, j, f, n = ARRAY_SIZE(tcg_target_reg_alloc_order);
4298     TCGRegSet reg_ct[2];
4299     const int *order;
4300 
4301     reg_ct[1] = required_regs & ~allocated_regs;
4302     tcg_debug_assert(reg_ct[1] != 0);
4303     reg_ct[0] = reg_ct[1] & preferred_regs;
4304 
4305     /* Skip the preferred_regs option if it cannot be satisfied,
4306        or if the preference made no difference.  */
4307     f = reg_ct[0] == 0 || reg_ct[0] == reg_ct[1];
4308 
4309     order = rev ? indirect_reg_alloc_order : tcg_target_reg_alloc_order;
4310 
4311     /* Try free registers, preferences first.  */
4312     for (j = f; j < 2; j++) {
4313         TCGRegSet set = reg_ct[j];
4314 
4315         if (tcg_regset_single(set)) {
4316             /* One register in the set.  */
4317             TCGReg reg = tcg_regset_first(set);
4318             if (s->reg_to_temp[reg] == NULL) {
4319                 return reg;
4320             }
4321         } else {
4322             for (i = 0; i < n; i++) {
4323                 TCGReg reg = order[i];
4324                 if (s->reg_to_temp[reg] == NULL &&
4325                     tcg_regset_test_reg(set, reg)) {
4326                     return reg;
4327                 }
4328             }
4329         }
4330     }
4331 
4332     /* We must spill something.  */
4333     for (j = f; j < 2; j++) {
4334         TCGRegSet set = reg_ct[j];
4335 
4336         if (tcg_regset_single(set)) {
4337             /* One register in the set.  */
4338             TCGReg reg = tcg_regset_first(set);
4339             tcg_reg_free(s, reg, allocated_regs);
4340             return reg;
4341         } else {
4342             for (i = 0; i < n; i++) {
4343                 TCGReg reg = order[i];
4344                 if (tcg_regset_test_reg(set, reg)) {
4345                     tcg_reg_free(s, reg, allocated_regs);
4346                     return reg;
4347                 }
4348             }
4349         }
4350     }
4351 
4352     g_assert_not_reached();
4353 }
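/*
 * Worked example for tcg_reg_alloc(): with required = {R0,R1,R2},
 * allocated = {R0} and preferred = {R0,R3}, reg_ct[1] = {R1,R2} and
 * reg_ct[0] is empty, so f == 1 and the preference pass is skipped.
 * The first of R1/R2 that holds no temp is returned; only if both are
 * occupied is one spilled, in allocation order.
 */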
4354 
4355 static TCGReg tcg_reg_alloc_pair(TCGContext *s, TCGRegSet required_regs,
4356                                  TCGRegSet allocated_regs,
4357                                  TCGRegSet preferred_regs, bool rev)
4358 {
4359     int i, j, k, fmin, n = ARRAY_SIZE(tcg_target_reg_alloc_order);
4360     TCGRegSet reg_ct[2];
4361     const int *order;
4362 
4363     /* Keep reg I as a candidate only if neither I nor I+1 is allocated. */
4364     reg_ct[1] = required_regs & ~(allocated_regs | (allocated_regs >> 1));
4365     tcg_debug_assert(reg_ct[1] != 0);
4366     reg_ct[0] = reg_ct[1] & preferred_regs;
4367 
4368     order = rev ? indirect_reg_alloc_order : tcg_target_reg_alloc_order;
4369 
4370     /*
4371      * Skip the preferred_regs option if it cannot be satisfied,
4372      * or if the preference made no difference.
4373      */
4374     k = reg_ct[0] == 0 || reg_ct[0] == reg_ct[1];
4375 
4376     /*
4377      * Minimize the number of flushes by looking for 2 free registers first,
4378      * then a single flush, then two flushes.
4379      */
4380     for (fmin = 2; fmin >= 0; fmin--) {
4381         for (j = k; j < 2; j++) {
4382             TCGRegSet set = reg_ct[j];
4383 
4384             for (i = 0; i < n; i++) {
4385                 TCGReg reg = order[i];
4386 
4387                 if (tcg_regset_test_reg(set, reg)) {
4388                     int f = !s->reg_to_temp[reg] + !s->reg_to_temp[reg + 1];
4389                     if (f >= fmin) {
4390                         tcg_reg_free(s, reg, allocated_regs);
4391                         tcg_reg_free(s, reg + 1, allocated_regs);
4392                         return reg;
4393                     }
4394                 }
4395             }
4396         }
4397     }
4398     g_assert_not_reached();
4399 }
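/*
 * Worked example of the candidate mask in tcg_reg_alloc_pair(): with
 * allocated_regs = 0b0100 (only R2 taken),
 * allocated | (allocated >> 1) == 0b0110, so a pair may not start at
 * R1 (its partner R2 is taken) or at R2 (taken itself), while a pair
 * starting at R0 remains a candidate.
 */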
4400 
4401 /* Make sure the temporary is in a register.  If needed, allocate the register
4402    from DESIRED while avoiding ALLOCATED.  */
4403 static void temp_load(TCGContext *s, TCGTemp *ts, TCGRegSet desired_regs,
4404                       TCGRegSet allocated_regs, TCGRegSet preferred_regs)
4405 {
4406     TCGReg reg;
4407 
4408     switch (ts->val_type) {
4409     case TEMP_VAL_REG:
4410         return;
4411     case TEMP_VAL_CONST:
4412         reg = tcg_reg_alloc(s, desired_regs, allocated_regs,
4413                             preferred_regs, ts->indirect_base);
4414         if (ts->type <= TCG_TYPE_I64) {
4415             tcg_out_movi(s, ts->type, reg, ts->val);
4416         } else {
4417             uint64_t val = ts->val;
4418             MemOp vece = MO_64;
4419 
4420             /*
4421              * Find the minimal vector element that matches the constant.
4422              * The targets will, in general, have to do this search anyway,
4423              * so do it generically here.
4424              */
4425             if (val == dup_const(MO_8, val)) {
4426                 vece = MO_8;
4427             } else if (val == dup_const(MO_16, val)) {
4428                 vece = MO_16;
4429             } else if (val == dup_const(MO_32, val)) {
4430                 vece = MO_32;
4431             }
4432 
4433             tcg_out_dupi_vec(s, ts->type, vece, reg, ts->val);
4434         }
4435         ts->mem_coherent = 0;
4436         break;
4437     case TEMP_VAL_MEM:
4438         reg = tcg_reg_alloc(s, desired_regs, allocated_regs,
4439                             preferred_regs, ts->indirect_base);
4440         tcg_out_ld(s, ts->type, reg, ts->mem_base->reg, ts->mem_offset);
4441         ts->mem_coherent = 1;
4442         break;
4443     case TEMP_VAL_DEAD:
4444     default:
4445         g_assert_not_reached();
4446     }
4447     set_temp_val_reg(s, ts, reg);
4448 }
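/*
 * Worked example of the dup_const() search above: the constant
 * 0x00ff00ff00ff00ffull is not a MO_8 duplicate (replicating its low
 * byte 0xff would give all-ones), but every 16-bit lane equals 0x00ff,
 * so vece becomes MO_16 and the backend can broadcast a 16-bit
 * element.
 */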
4449 
4450 /* Save a temporary to memory. 'allocated_regs' is used in case a
4451    temporary register needs to be allocated to store a constant.  */
4452 static void temp_save(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs)
4453 {
4454     /* The liveness analysis already ensures that globals are back
4455        in memory. Keep a tcg_debug_assert for safety. */
4456     tcg_debug_assert(ts->val_type == TEMP_VAL_MEM || temp_readonly(ts));
4457 }
4458 
4459 /* save globals to their canonical location and assume they can be
4460    modified by the following code. 'allocated_regs' is used in case a
4461    temporary register needs to be allocated to store a constant. */
4462 static void save_globals(TCGContext *s, TCGRegSet allocated_regs)
4463 {
4464     int i, n;
4465 
4466     for (i = 0, n = s->nb_globals; i < n; i++) {
4467         temp_save(s, &s->temps[i], allocated_regs);
4468     }
4469 }
4470 
4471 /* sync globals to their canonical location and assume they can be
4472    read by the following code. 'allocated_regs' is used in case a
4473    temporary register needs to be allocated to store a constant. */
4474 static void sync_globals(TCGContext *s, TCGRegSet allocated_regs)
4475 {
4476     int i, n;
4477 
4478     for (i = 0, n = s->nb_globals; i < n; i++) {
4479         TCGTemp *ts = &s->temps[i];
4480         tcg_debug_assert(ts->val_type != TEMP_VAL_REG
4481                          || ts->kind == TEMP_FIXED
4482                          || ts->mem_coherent);
4483     }
4484 }
4485 
4486 /* at the end of a basic block, we assume all temporaries are dead and
4487    all globals are stored at their canonical location. */
4488 static void tcg_reg_alloc_bb_end(TCGContext *s, TCGRegSet allocated_regs)
4489 {
4490     int i;
4491 
4492     for (i = s->nb_globals; i < s->nb_temps; i++) {
4493         TCGTemp *ts = &s->temps[i];
4494 
4495         switch (ts->kind) {
4496         case TEMP_TB:
4497             temp_save(s, ts, allocated_regs);
4498             break;
4499         case TEMP_EBB:
4500             /* The liveness analysis already ensures that temps are dead.
4501                Keep a tcg_debug_assert for safety. */
4502             tcg_debug_assert(ts->val_type == TEMP_VAL_DEAD);
4503             break;
4504         case TEMP_CONST:
4505             /* Similarly, we should have freed any allocated register. */
4506             tcg_debug_assert(ts->val_type == TEMP_VAL_CONST);
4507             break;
4508         default:
4509             g_assert_not_reached();
4510         }
4511     }
4512 
4513     save_globals(s, allocated_regs);
4514 }
4515 
4516 /*
4517  * At a conditional branch, we assume all temporaries are dead unless
4518  * explicitly live-across-conditional-branch; all globals and local
4519  * temps are synced to their location.
4520  */
4521 static void tcg_reg_alloc_cbranch(TCGContext *s, TCGRegSet allocated_regs)
4522 {
4523     sync_globals(s, allocated_regs);
4524 
4525     for (int i = s->nb_globals; i < s->nb_temps; i++) {
4526         TCGTemp *ts = &s->temps[i];
4527         /*
4528          * The liveness analysis already ensures that temps are dead.
4529          * Keep tcg_debug_asserts for safety.
4530          */
4531         switch (ts->kind) {
4532         case TEMP_TB:
4533             tcg_debug_assert(ts->val_type != TEMP_VAL_REG || ts->mem_coherent);
4534             break;
4535         case TEMP_EBB:
4536         case TEMP_CONST:
4537             break;
4538         default:
4539             g_assert_not_reached();
4540         }
4541     }
4542 }
4543 
4544 /*
4545  * Specialized code generation for INDEX_op_mov_* with a constant.
4546  */
4547 static void tcg_reg_alloc_do_movi(TCGContext *s, TCGTemp *ots,
4548                                   tcg_target_ulong val, TCGLifeData arg_life,
4549                                   TCGRegSet preferred_regs)
4550 {
4551     /* ENV should not be modified.  */
4552     tcg_debug_assert(!temp_readonly(ots));
4553 
4554     /* The movi is not explicitly generated here.  */
4555     set_temp_val_nonreg(s, ots, TEMP_VAL_CONST);
4556     ots->val = val;
4557     ots->mem_coherent = 0;
4558     if (NEED_SYNC_ARG(0)) {
4559         temp_sync(s, ots, s->reserved_regs, preferred_regs, IS_DEAD_ARG(0));
4560     } else if (IS_DEAD_ARG(0)) {
4561         temp_dead(s, ots);
4562     }
4563 }
4564 
4565 /*
4566  * Specialized code generation for INDEX_op_mov_*.
4567  */
4568 static void tcg_reg_alloc_mov(TCGContext *s, const TCGOp *op)
4569 {
4570     const TCGLifeData arg_life = op->life;
4571     TCGRegSet allocated_regs, preferred_regs;
4572     TCGTemp *ts, *ots;
4573     TCGType otype, itype;
4574     TCGReg oreg, ireg;
4575 
4576     allocated_regs = s->reserved_regs;
4577     preferred_regs = output_pref(op, 0);
4578     ots = arg_temp(op->args[0]);
4579     ts = arg_temp(op->args[1]);
4580 
4581     /* ENV should not be modified.  */
4582     tcg_debug_assert(!temp_readonly(ots));
4583 
4584     /* Note that otype != itype for no-op truncation.  */
4585     otype = ots->type;
4586     itype = ts->type;
4587 
4588     if (ts->val_type == TEMP_VAL_CONST) {
4589         /* propagate constant or generate sti */
4590         tcg_target_ulong val = ts->val;
4591         if (IS_DEAD_ARG(1)) {
4592             temp_dead(s, ts);
4593         }
4594         tcg_reg_alloc_do_movi(s, ots, val, arg_life, preferred_regs);
4595         return;
4596     }
4597 
4598     /* If the source value is in memory we're going to be forced
4599        to have it in a register in order to perform the copy.  Copy
4600        the SOURCE value into its own register first, that way we
4601        don't have to reload SOURCE the next time it is used. */
4602     if (ts->val_type == TEMP_VAL_MEM) {
4603         temp_load(s, ts, tcg_target_available_regs[itype],
4604                   allocated_regs, preferred_regs);
4605     }
4606     tcg_debug_assert(ts->val_type == TEMP_VAL_REG);
4607     ireg = ts->reg;
4608 
4609     if (IS_DEAD_ARG(0)) {
4610         /* mov to a non-saved dead register makes no sense (even with
4611            liveness analysis disabled). */
4612         tcg_debug_assert(NEED_SYNC_ARG(0));
4613         if (!ots->mem_allocated) {
4614             temp_allocate_frame(s, ots);
4615         }
4616         tcg_out_st(s, otype, ireg, ots->mem_base->reg, ots->mem_offset);
4617         if (IS_DEAD_ARG(1)) {
4618             temp_dead(s, ts);
4619         }
4620         temp_dead(s, ots);
4621         return;
4622     }
4623 
4624     if (IS_DEAD_ARG(1) && ts->kind != TEMP_FIXED) {
4625         /*
4626          * The mov can be suppressed.  Kill input first, so that it
4627          * is unlinked from reg_to_temp, then set the output to the
4628          * reg that we saved from the input.
4629          */
4630         temp_dead(s, ts);
4631         oreg = ireg;
4632     } else {
4633         if (ots->val_type == TEMP_VAL_REG) {
4634             oreg = ots->reg;
4635         } else {
4636             /* Make sure to not spill the input register during allocation. */
4637             oreg = tcg_reg_alloc(s, tcg_target_available_regs[otype],
4638                                  allocated_regs | ((TCGRegSet)1 << ireg),
4639                                  preferred_regs, ots->indirect_base);
4640         }
4641         if (!tcg_out_mov(s, otype, oreg, ireg)) {
4642             /*
4643              * Cross register class move not supported.
4644              * Store the source register into the destination slot
4645              * and leave the destination temp as TEMP_VAL_MEM.
4646              */
4647             assert(!temp_readonly(ots));
4648             if (!ots->mem_allocated) {
4649                 temp_allocate_frame(s, ots);
4650             }
4651             tcg_out_st(s, ts->type, ireg, ots->mem_base->reg, ots->mem_offset);
4652             set_temp_val_nonreg(s, ots, TEMP_VAL_MEM);
4653             ots->mem_coherent = 1;
4654             return;
4655         }
4656     }
4657     set_temp_val_reg(s, ots, oreg);
4658     ots->mem_coherent = 0;
4659 
4660     if (NEED_SYNC_ARG(0)) {
4661         temp_sync(s, ots, allocated_regs, 0, 0);
4662     }
4663 }
4664 
4665 /*
4666  * Specialized code generation for INDEX_op_dup_vec.
4667  */
4668 static void tcg_reg_alloc_dup(TCGContext *s, const TCGOp *op)
4669 {
4670     const TCGLifeData arg_life = op->life;
4671     TCGRegSet dup_out_regs, dup_in_regs;
4672     TCGTemp *its, *ots;
4673     TCGType itype, vtype;
4674     unsigned vece;
4675     int lowpart_ofs;
4676     bool ok;
4677 
4678     ots = arg_temp(op->args[0]);
4679     its = arg_temp(op->args[1]);
4680 
4681     /* ENV should not be modified.  */
4682     tcg_debug_assert(!temp_readonly(ots));
4683 
4684     itype = its->type;
4685     vece = TCGOP_VECE(op);
4686     vtype = TCGOP_VECL(op) + TCG_TYPE_V64;
4687 
4688     if (its->val_type == TEMP_VAL_CONST) {
4689         /* Propagate constant via movi -> dupi.  */
4690         tcg_target_ulong val = its->val;
4691         if (IS_DEAD_ARG(1)) {
4692             temp_dead(s, its);
4693         }
4694         tcg_reg_alloc_do_movi(s, ots, val, arg_life, output_pref(op, 0));
4695         return;
4696     }
4697 
4698     dup_out_regs = tcg_op_defs[INDEX_op_dup_vec].args_ct[0].regs;
4699     dup_in_regs = tcg_op_defs[INDEX_op_dup_vec].args_ct[1].regs;
4700 
4701     /* Allocate the output register now.  */
4702     if (ots->val_type != TEMP_VAL_REG) {
4703         TCGRegSet allocated_regs = s->reserved_regs;
4704         TCGReg oreg;
4705 
4706         if (!IS_DEAD_ARG(1) && its->val_type == TEMP_VAL_REG) {
4707             /* Make sure to not spill the input register. */
4708             tcg_regset_set_reg(allocated_regs, its->reg);
4709         }
4710         oreg = tcg_reg_alloc(s, dup_out_regs, allocated_regs,
4711                              output_pref(op, 0), ots->indirect_base);
4712         set_temp_val_reg(s, ots, oreg);
4713     }
4714 
4715     switch (its->val_type) {
4716     case TEMP_VAL_REG:
4717         /*
4718          * The dup constraints must be broad, covering all possible VECE.
4719          * However, tcg_out_dup_vec() gets to see the VECE and we allow it
4720          * to fail, indicating that extra moves are required for that case.
4721          */
4722         if (tcg_regset_test_reg(dup_in_regs, its->reg)) {
4723             if (tcg_out_dup_vec(s, vtype, vece, ots->reg, its->reg)) {
4724                 goto done;
4725             }
4726             /* Try again from memory or a vector input register.  */
4727         }
4728         if (!its->mem_coherent) {
4729             /*
4730              * The input register is not synced, and so an extra store
4731              * would be required to use memory.  Attempt an integer-vector
4732              * register move first.  We do not have a TCGRegSet for this.
4733              */
4734             if (tcg_out_mov(s, itype, ots->reg, its->reg)) {
4735                 break;
4736             }
4737             /* Sync the temp back to its slot and load from there.  */
4738             temp_sync(s, its, s->reserved_regs, 0, 0);
4739         }
4740         /* fall through */
4741 
4742     case TEMP_VAL_MEM:
4743         lowpart_ofs = 0;
4744         if (HOST_BIG_ENDIAN) {
4745             lowpart_ofs = tcg_type_size(itype) - (1 << vece);
4746         }
4747         if (tcg_out_dupm_vec(s, vtype, vece, ots->reg, its->mem_base->reg,
4748                              its->mem_offset + lowpart_ofs)) {
4749             goto done;
4750         }
4751         /* Load the input into the destination vector register. */
4752         tcg_out_ld(s, itype, ots->reg, its->mem_base->reg, its->mem_offset);
4753         break;
4754 
4755     default:
4756         g_assert_not_reached();
4757     }
4758 
4759     /* We now have a vector input register, so dup must succeed. */
4760     ok = tcg_out_dup_vec(s, vtype, vece, ots->reg, ots->reg);
4761     tcg_debug_assert(ok);
4762 
4763  done:
4764     ots->mem_coherent = 0;
4765     if (IS_DEAD_ARG(1)) {
4766         temp_dead(s, its);
4767     }
4768     if (NEED_SYNC_ARG(0)) {
4769         temp_sync(s, ots, s->reserved_regs, 0, 0);
4770     }
4771     if (IS_DEAD_ARG(0)) {
4772         temp_dead(s, ots);
4773     }
4774 }
4775 
4776 static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
4777 {
4778     const TCGLifeData arg_life = op->life;
4779     const TCGOpDef * const def = &tcg_op_defs[op->opc];
4780     TCGRegSet i_allocated_regs;
4781     TCGRegSet o_allocated_regs;
4782     int i, k, nb_iargs, nb_oargs;
4783     TCGReg reg;
4784     TCGArg arg;
4785     const TCGArgConstraint *arg_ct;
4786     TCGTemp *ts;
4787     TCGArg new_args[TCG_MAX_OP_ARGS];
4788     int const_args[TCG_MAX_OP_ARGS];
4789 
4790     nb_oargs = def->nb_oargs;
4791     nb_iargs = def->nb_iargs;
4792 
4793     /* copy constants */
4794     memcpy(new_args + nb_oargs + nb_iargs,
4795            op->args + nb_oargs + nb_iargs,
4796            sizeof(TCGArg) * def->nb_cargs);
4797 
4798     i_allocated_regs = s->reserved_regs;
4799     o_allocated_regs = s->reserved_regs;
4800 
4801     /* satisfy input constraints */
4802     for (k = 0; k < nb_iargs; k++) {
4803         TCGRegSet i_preferred_regs, i_required_regs;
4804         bool allocate_new_reg, copyto_new_reg;
4805         TCGTemp *ts2;
4806         int i1, i2;
4807 
4808         i = def->args_ct[nb_oargs + k].sort_index;
4809         arg = op->args[i];
4810         arg_ct = &def->args_ct[i];
4811         ts = arg_temp(arg);
4812 
4813         if (ts->val_type == TEMP_VAL_CONST
4814             && tcg_target_const_match(ts->val, ts->type, arg_ct->ct, TCGOP_VECE(op))) {
4815             /* constant is OK for instruction */
4816             const_args[i] = 1;
4817             new_args[i] = ts->val;
4818             continue;
4819         }
4820 
4821         reg = ts->reg;
4822         i_preferred_regs = 0;
4823         i_required_regs = arg_ct->regs;
4824         allocate_new_reg = false;
4825         copyto_new_reg = false;
4826 
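             /*
              * arg_ct->pair encodes the register-pair constraint on this
              * input: 0 - not paired; 1 - first of a pair, occupying reg
              * and reg + 1; 2 - second of a pair, taking the first's reg
              * plus one; 3 - aliased to a second output with no first
              * input, using the high half of an allocated pair.
              */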
4827         switch (arg_ct->pair) {
4828         case 0: /* not paired */
4829             if (arg_ct->ialias) {
4830                 i_preferred_regs = output_pref(op, arg_ct->alias_index);
4831 
4832                 /*
4833                  * If the input is readonly, then it cannot also be an
4834                  * output and aliased to itself.  If the input is not
4835                  * dead after the instruction, we must allocate a new
4836                  * register and move it.
4837                  */
4838                 if (temp_readonly(ts) || !IS_DEAD_ARG(i)
4839                     || def->args_ct[arg_ct->alias_index].newreg) {
4840                     allocate_new_reg = true;
4841                 } else if (ts->val_type == TEMP_VAL_REG) {
4842                     /*
4843                      * Check if the current register has already been
4844                      * allocated for another input.
4845                      */
4846                     allocate_new_reg =
4847                         tcg_regset_test_reg(i_allocated_regs, reg);
4848                 }
4849             }
4850             if (!allocate_new_reg) {
4851                 temp_load(s, ts, i_required_regs, i_allocated_regs,
4852                           i_preferred_regs);
4853                 reg = ts->reg;
4854                 allocate_new_reg = !tcg_regset_test_reg(i_required_regs, reg);
4855             }
4856             if (allocate_new_reg) {
4857                 /*
4858                  * Allocate a new register matching the constraint
4859                  * and move the temporary register into it.
4860                  */
4861                 temp_load(s, ts, tcg_target_available_regs[ts->type],
4862                           i_allocated_regs, 0);
4863                 reg = tcg_reg_alloc(s, i_required_regs, i_allocated_regs,
4864                                     i_preferred_regs, ts->indirect_base);
4865                 copyto_new_reg = true;
4866             }
4867             break;
4868 
4869         case 1:
4870             /* First of an input pair; if i1 == i2, the second is an output. */
4871             i1 = i;
4872             i2 = arg_ct->pair_index;
4873             ts2 = i1 != i2 ? arg_temp(op->args[i2]) : NULL;
4874 
4875             /*
4876              * It is easier to default to allocating a new pair
4877              * and to identify a few cases where it's not required.
4878              */
4879             if (arg_ct->ialias) {
4880                 i_preferred_regs = output_pref(op, arg_ct->alias_index);
4881                 if (IS_DEAD_ARG(i1) &&
4882                     IS_DEAD_ARG(i2) &&
4883                     !temp_readonly(ts) &&
4884                     ts->val_type == TEMP_VAL_REG &&
4885                     ts->reg < TCG_TARGET_NB_REGS - 1 &&
4886                     tcg_regset_test_reg(i_required_regs, reg) &&
4887                     !tcg_regset_test_reg(i_allocated_regs, reg) &&
4888                     !tcg_regset_test_reg(i_allocated_regs, reg + 1) &&
4889                     (ts2
4890                      ? ts2->val_type == TEMP_VAL_REG &&
4891                        ts2->reg == reg + 1 &&
4892                        !temp_readonly(ts2)
4893                      : s->reg_to_temp[reg + 1] == NULL)) {
4894                     break;
4895                 }
4896             } else {
4897                 /* Without aliasing, the pair must also be an input. */
4898                 tcg_debug_assert(ts2);
4899                 if (ts->val_type == TEMP_VAL_REG &&
4900                     ts2->val_type == TEMP_VAL_REG &&
4901                     ts2->reg == reg + 1 &&
4902                     tcg_regset_test_reg(i_required_regs, reg)) {
4903                     break;
4904                 }
4905             }
4906             reg = tcg_reg_alloc_pair(s, i_required_regs, i_allocated_regs,
4907                                      0, ts->indirect_base);
4908             goto do_pair;
4909 
4910         case 2: /* pair second */
4911             reg = new_args[arg_ct->pair_index] + 1;
4912             goto do_pair;
4913 
4914         case 3: /* ialias with second output, no first input */
4915             tcg_debug_assert(arg_ct->ialias);
4916             i_preferred_regs = output_pref(op, arg_ct->alias_index);
4917 
4918             if (IS_DEAD_ARG(i) &&
4919                 !temp_readonly(ts) &&
4920                 ts->val_type == TEMP_VAL_REG &&
4921                 reg > 0 &&
4922                 s->reg_to_temp[reg - 1] == NULL &&
4923                 tcg_regset_test_reg(i_required_regs, reg) &&
4924                 !tcg_regset_test_reg(i_allocated_regs, reg) &&
4925                 !tcg_regset_test_reg(i_allocated_regs, reg - 1)) {
4926                 tcg_regset_set_reg(i_allocated_regs, reg - 1);
4927                 break;
4928             }
4929             reg = tcg_reg_alloc_pair(s, i_required_regs >> 1,
4930                                      i_allocated_regs, 0,
4931                                      ts->indirect_base);
4932             tcg_regset_set_reg(i_allocated_regs, reg);
4933             reg += 1;
4934             goto do_pair;
4935 
4936         do_pair:
4937             /*
4938              * If an aliased input is not dead after the instruction,
4939              * we must allocate a new register and move it.
4940              */
4941             if (arg_ct->ialias && (!IS_DEAD_ARG(i) || temp_readonly(ts))) {
4942                 TCGRegSet t_allocated_regs = i_allocated_regs;
4943 
4944                 /*
4945                  * Because of the alias, and the continued life, make sure
4946                  * that the temp is somewhere *other* than the reg pair,
4947                  * and we get a copy in reg.
4948                  */
4949                 tcg_regset_set_reg(t_allocated_regs, reg);
4950                 tcg_regset_set_reg(t_allocated_regs, reg + 1);
4951                 if (ts->val_type == TEMP_VAL_REG && ts->reg == reg) {
4952                     /* If ts was already in reg, copy it somewhere else. */
4953                     TCGReg nr;
4954                     bool ok;
4955 
4956                     tcg_debug_assert(ts->kind != TEMP_FIXED);
4957                     nr = tcg_reg_alloc(s, tcg_target_available_regs[ts->type],
4958                                        t_allocated_regs, 0, ts->indirect_base);
4959                     ok = tcg_out_mov(s, ts->type, nr, reg);
4960                     tcg_debug_assert(ok);
4961 
4962                     set_temp_val_reg(s, ts, nr);
4963                 } else {
4964                     temp_load(s, ts, tcg_target_available_regs[ts->type],
4965                               t_allocated_regs, 0);
4966                     copyto_new_reg = true;
4967                 }
4968             } else {
4969                 /* Preferably allocate to reg, otherwise copy. */
4970                 i_required_regs = (TCGRegSet)1 << reg;
4971                 temp_load(s, ts, i_required_regs, i_allocated_regs,
4972                           i_preferred_regs);
4973                 copyto_new_reg = ts->reg != reg;
4974             }
4975             break;
4976 
4977         default:
4978             g_assert_not_reached();
4979         }
4980 
4981         if (copyto_new_reg) {
4982             if (!tcg_out_mov(s, ts->type, reg, ts->reg)) {
4983                 /*
4984                  * Cross register class move not supported.  Sync the
4985                  * temp back to its slot and load from there.
4986                  */
4987                 temp_sync(s, ts, i_allocated_regs, 0, 0);
4988                 tcg_out_ld(s, ts->type, reg,
4989                            ts->mem_base->reg, ts->mem_offset);
4990             }
4991         }
4992         new_args[i] = reg;
4993         const_args[i] = 0;
4994         tcg_regset_set_reg(i_allocated_regs, reg);
4995     }
4996 
4997     /* mark dead temporaries and free the associated registers */
4998     for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
4999         if (IS_DEAD_ARG(i)) {
5000             temp_dead(s, arg_temp(op->args[i]));
5001         }
5002     }
5003 
5004     if (def->flags & TCG_OPF_COND_BRANCH) {
5005         tcg_reg_alloc_cbranch(s, i_allocated_regs);
5006     } else if (def->flags & TCG_OPF_BB_END) {
5007         tcg_reg_alloc_bb_end(s, i_allocated_regs);
5008     } else {
5009         if (def->flags & TCG_OPF_CALL_CLOBBER) {
5010             /* XXX: permit generic clobber register list? */
5011             for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
5012                 if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) {
5013                     tcg_reg_free(s, i, i_allocated_regs);
5014                 }
5015             }
5016         }
5017         if (def->flags & TCG_OPF_SIDE_EFFECTS) {
5018             /* sync globals if the op has side effects and might trigger
5019                an exception. */
5020             sync_globals(s, i_allocated_regs);
5021         }
5022 
5023         /* satisfy the output constraints */
5024         for (k = 0; k < nb_oargs; k++) {
5025             i = def->args_ct[k].sort_index;
5026             arg = op->args[i];
5027             arg_ct = &def->args_ct[i];
5028             ts = arg_temp(arg);
5029 
5030             /* ENV should not be modified.  */
5031             tcg_debug_assert(!temp_readonly(ts));
5032 
5033             switch (arg_ct->pair) {
5034             case 0: /* not paired */
5035                 if (arg_ct->oalias && !const_args[arg_ct->alias_index]) {
5036                     reg = new_args[arg_ct->alias_index];
5037                 } else if (arg_ct->newreg) {
5038                     reg = tcg_reg_alloc(s, arg_ct->regs,
5039                                         i_allocated_regs | o_allocated_regs,
5040                                         output_pref(op, k), ts->indirect_base);
5041                 } else {
5042                     reg = tcg_reg_alloc(s, arg_ct->regs, o_allocated_regs,
5043                                         output_pref(op, k), ts->indirect_base);
5044                 }
5045                 break;
5046 
5047             case 1: /* first of pair */
5048                 if (arg_ct->oalias) {
5049                     reg = new_args[arg_ct->alias_index];
5050                 } else if (arg_ct->newreg) {
5051                     reg = tcg_reg_alloc_pair(s, arg_ct->regs,
5052                                              i_allocated_regs | o_allocated_regs,
5053                                              output_pref(op, k),
5054                                              ts->indirect_base);
5055                 } else {
5056                     reg = tcg_reg_alloc_pair(s, arg_ct->regs, o_allocated_regs,
5057                                              output_pref(op, k),
5058                                              ts->indirect_base);
5059                 }
5060                 break;
5061 
5062             case 2: /* second of pair */
5063                 if (arg_ct->oalias) {
5064                     reg = new_args[arg_ct->alias_index];
5065                 } else {
5066                     reg = new_args[arg_ct->pair_index] + 1;
5067                 }
5068                 break;
5069 
5070             case 3: /* first of pair, aliasing with a second input */
5071                 tcg_debug_assert(!arg_ct->newreg);
5072                 reg = new_args[arg_ct->pair_index] - 1;
5073                 break;
5074 
5075             default:
5076                 g_assert_not_reached();
5077             }
5078             tcg_regset_set_reg(o_allocated_regs, reg);
5079             set_temp_val_reg(s, ts, reg);
5080             ts->mem_coherent = 0;
5081             new_args[i] = reg;
5082         }
5083     }
5084 
5085     /* emit instruction */
5086     switch (op->opc) {
5087     case INDEX_op_ext8s_i32:
5088         tcg_out_ext8s(s, TCG_TYPE_I32, new_args[0], new_args[1]);
5089         break;
5090     case INDEX_op_ext8s_i64:
5091         tcg_out_ext8s(s, TCG_TYPE_I64, new_args[0], new_args[1]);
5092         break;
5093     case INDEX_op_ext8u_i32:
5094     case INDEX_op_ext8u_i64:
5095         tcg_out_ext8u(s, new_args[0], new_args[1]);
5096         break;
5097     case INDEX_op_ext16s_i32:
5098         tcg_out_ext16s(s, TCG_TYPE_I32, new_args[0], new_args[1]);
5099         break;
5100     case INDEX_op_ext16s_i64:
5101         tcg_out_ext16s(s, TCG_TYPE_I64, new_args[0], new_args[1]);
5102         break;
5103     case INDEX_op_ext16u_i32:
5104     case INDEX_op_ext16u_i64:
5105         tcg_out_ext16u(s, new_args[0], new_args[1]);
5106         break;
5107     case INDEX_op_ext32s_i64:
5108         tcg_out_ext32s(s, new_args[0], new_args[1]);
5109         break;
5110     case INDEX_op_ext32u_i64:
5111         tcg_out_ext32u(s, new_args[0], new_args[1]);
5112         break;
5113     case INDEX_op_ext_i32_i64:
5114         tcg_out_exts_i32_i64(s, new_args[0], new_args[1]);
5115         break;
5116     case INDEX_op_extu_i32_i64:
5117         tcg_out_extu_i32_i64(s, new_args[0], new_args[1]);
5118         break;
5119     case INDEX_op_extrl_i64_i32:
5120         tcg_out_extrl_i64_i32(s, new_args[0], new_args[1]);
5121         break;
5122     default:
5123         if (def->flags & TCG_OPF_VECTOR) {
5124             tcg_out_vec_op(s, op->opc, TCGOP_VECL(op), TCGOP_VECE(op),
5125                            new_args, const_args);
5126         } else {
5127             tcg_out_op(s, op->opc, new_args, const_args);
5128         }
5129         break;
5130     }
5131 
5132     /* move the outputs in the correct register if needed */
5133     for (i = 0; i < nb_oargs; i++) {
5134         ts = arg_temp(op->args[i]);
5135 
5136         /* ENV should not be modified.  */
5137         tcg_debug_assert(!temp_readonly(ts));
5138 
5139         if (NEED_SYNC_ARG(i)) {
5140             temp_sync(s, ts, o_allocated_regs, 0, IS_DEAD_ARG(i));
5141         } else if (IS_DEAD_ARG(i)) {
5142             temp_dead(s, ts);
5143         }
5144     }
5145 }
5146 
5147 static bool tcg_reg_alloc_dup2(TCGContext *s, const TCGOp *op)
5148 {
5149     const TCGLifeData arg_life = op->life;
5150     TCGTemp *ots, *itsl, *itsh;
5151     TCGType vtype = TCGOP_VECL(op) + TCG_TYPE_V64;
5152 
5153     /* This opcode is only valid for 32-bit hosts, for 64-bit elements. */
5154     tcg_debug_assert(TCG_TARGET_REG_BITS == 32);
5155     tcg_debug_assert(TCGOP_VECE(op) == MO_64);
5156 
5157     ots = arg_temp(op->args[0]);
5158     itsl = arg_temp(op->args[1]);
5159     itsh = arg_temp(op->args[2]);
5160 
5161     /* ENV should not be modified.  */
5162     tcg_debug_assert(!temp_readonly(ots));
5163 
5164     /* Allocate the output register now.  */
5165     if (ots->val_type != TEMP_VAL_REG) {
5166         TCGRegSet allocated_regs = s->reserved_regs;
5167         TCGRegSet dup_out_regs =
5168             tcg_op_defs[INDEX_op_dup_vec].args_ct[0].regs;
5169         TCGReg oreg;
5170 
5171         /* Make sure to not spill the input registers. */
5172         if (!IS_DEAD_ARG(1) && itsl->val_type == TEMP_VAL_REG) {
5173             tcg_regset_set_reg(allocated_regs, itsl->reg);
5174         }
5175         if (!IS_DEAD_ARG(2) && itsh->val_type == TEMP_VAL_REG) {
5176             tcg_regset_set_reg(allocated_regs, itsh->reg);
5177         }
5178 
5179         oreg = tcg_reg_alloc(s, dup_out_regs, allocated_regs,
5180                              output_pref(op, 0), ots->indirect_base);
5181         set_temp_val_reg(s, ots, oreg);
5182     }
5183 
5184     /* Promote dup2 of immediates to dupi_vec. */
5185     if (itsl->val_type == TEMP_VAL_CONST && itsh->val_type == TEMP_VAL_CONST) {
5186         uint64_t val = deposit64(itsl->val, 32, 32, itsh->val);
5187         MemOp vece = MO_64;
5188 
5189         if (val == dup_const(MO_8, val)) {
5190             vece = MO_8;
5191         } else if (val == dup_const(MO_16, val)) {
5192             vece = MO_16;
5193         } else if (val == dup_const(MO_32, val)) {
5194             vece = MO_32;
5195         }
5196 
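             /*
              * Illustrative example: itsl->val = 0x00010001 and
              * itsh->val = 0x00010001 combine to val = 0x0001000100010001,
              * which equals dup_const(MO_16, 0x0001), so vece narrows to
              * MO_16 and a 16-bit splat is emitted below.
              */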
5197         tcg_out_dupi_vec(s, vtype, vece, ots->reg, val);
5198         goto done;
5199     }
5200 
5201     /* If the two inputs form one 64-bit value, try dupm_vec. */
5202     if (itsl->temp_subindex == HOST_BIG_ENDIAN &&
5203         itsh->temp_subindex == !HOST_BIG_ENDIAN &&
5204         itsl == itsh + (HOST_BIG_ENDIAN ? 1 : -1)) {
5205         TCGTemp *its = itsl - HOST_BIG_ENDIAN;
5206 
5207         temp_sync(s, its + 0, s->reserved_regs, 0, 0);
5208         temp_sync(s, its + 1, s->reserved_regs, 0, 0);
5209 
5210         if (tcg_out_dupm_vec(s, vtype, MO_64, ots->reg,
5211                              its->mem_base->reg, its->mem_offset)) {
5212             goto done;
5213         }
5214     }
5215 
5216     /* Fall back to generic expansion. */
5217     return false;
5218 
5219  done:
5220     ots->mem_coherent = 0;
5221     if (IS_DEAD_ARG(1)) {
5222         temp_dead(s, itsl);
5223     }
5224     if (IS_DEAD_ARG(2)) {
5225         temp_dead(s, itsh);
5226     }
5227     if (NEED_SYNC_ARG(0)) {
5228         temp_sync(s, ots, s->reserved_regs, 0, IS_DEAD_ARG(0));
5229     } else if (IS_DEAD_ARG(0)) {
5230         temp_dead(s, ots);
5231     }
5232     return true;
5233 }
5234 
5235 static void load_arg_reg(TCGContext *s, TCGReg reg, TCGTemp *ts,
5236                          TCGRegSet allocated_regs)
5237 {
5238     if (ts->val_type == TEMP_VAL_REG) {
5239         if (ts->reg != reg) {
5240             tcg_reg_free(s, reg, allocated_regs);
5241             if (!tcg_out_mov(s, ts->type, reg, ts->reg)) {
5242                 /*
5243                  * Cross register class move not supported.  Sync the
5244                  * temp back to its slot and load from there.
5245                  */
5246                 temp_sync(s, ts, allocated_regs, 0, 0);
5247                 tcg_out_ld(s, ts->type, reg,
5248                            ts->mem_base->reg, ts->mem_offset);
5249             }
5250         }
5251     } else {
5252         TCGRegSet arg_set = 0;
5253 
5254         tcg_reg_free(s, reg, allocated_regs);
5255         tcg_regset_set_reg(arg_set, reg);
5256         temp_load(s, ts, arg_set, allocated_regs, 0);
5257     }
5258 }
5259 
5260 static void load_arg_stk(TCGContext *s, unsigned arg_slot, TCGTemp *ts,
5261                          TCGRegSet allocated_regs)
5262 {
5263     /*
5264      * When the destination is on the stack, load up the temp and store.
5265      * If there are many call-saved registers, the temp might live to
5266      * see another use; otherwise it'll be discarded.
5267      */
5268     temp_load(s, ts, tcg_target_available_regs[ts->type], allocated_regs, 0);
5269     tcg_out_st(s, ts->type, ts->reg, TCG_REG_CALL_STACK,
5270                arg_slot_stk_ofs(arg_slot));
5271 }
5272 
5273 static void load_arg_normal(TCGContext *s, const TCGCallArgumentLoc *l,
5274                             TCGTemp *ts, TCGRegSet *allocated_regs)
5275 {
5276     if (arg_slot_reg_p(l->arg_slot)) {
5277         TCGReg reg = tcg_target_call_iarg_regs[l->arg_slot];
5278         load_arg_reg(s, reg, ts, *allocated_regs);
5279         tcg_regset_set_reg(*allocated_regs, reg);
5280     } else {
5281         load_arg_stk(s, l->arg_slot, ts, *allocated_regs);
5282     }
5283 }
5284 
5285 static void load_arg_ref(TCGContext *s, unsigned arg_slot, TCGReg ref_base,
5286                          intptr_t ref_off, TCGRegSet *allocated_regs)
5287 {
5288     TCGReg reg;
5289 
5290     if (arg_slot_reg_p(arg_slot)) {
5291         reg = tcg_target_call_iarg_regs[arg_slot];
5292         tcg_reg_free(s, reg, *allocated_regs);
5293         tcg_out_addi_ptr(s, reg, ref_base, ref_off);
5294         tcg_regset_set_reg(*allocated_regs, reg);
5295     } else {
5296         reg = tcg_reg_alloc(s, tcg_target_available_regs[TCG_TYPE_PTR],
5297                             *allocated_regs, 0, false);
5298         tcg_out_addi_ptr(s, reg, ref_base, ref_off);
5299         tcg_out_st(s, TCG_TYPE_PTR, reg, TCG_REG_CALL_STACK,
5300                    arg_slot_stk_ofs(arg_slot));
5301     }
5302 }
5303 
5304 static void tcg_reg_alloc_call(TCGContext *s, TCGOp *op)
5305 {
5306     const int nb_oargs = TCGOP_CALLO(op);
5307     const int nb_iargs = TCGOP_CALLI(op);
5308     const TCGLifeData arg_life = op->life;
5309     const TCGHelperInfo *info = tcg_call_info(op);
5310     TCGRegSet allocated_regs = s->reserved_regs;
5311     int i;
5312 
5313     /*
5314      * Move inputs into place in reverse order,
5315      * so that we place stacked arguments first.
5316      */
5317     for (i = nb_iargs - 1; i >= 0; --i) {
5318         const TCGCallArgumentLoc *loc = &info->in[i];
5319         TCGTemp *ts = arg_temp(op->args[nb_oargs + i]);
5320 
5321         switch (loc->kind) {
5322         case TCG_CALL_ARG_NORMAL:
5323         case TCG_CALL_ARG_EXTEND_U:
5324         case TCG_CALL_ARG_EXTEND_S:
5325             load_arg_normal(s, loc, ts, &allocated_regs);
5326             break;
5327         case TCG_CALL_ARG_BY_REF:
5328             load_arg_stk(s, loc->ref_slot, ts, allocated_regs);
5329             load_arg_ref(s, loc->arg_slot, TCG_REG_CALL_STACK,
5330                          arg_slot_stk_ofs(loc->ref_slot),
5331                          &allocated_regs);
5332             break;
5333         case TCG_CALL_ARG_BY_REF_N:
5334             load_arg_stk(s, loc->ref_slot, ts, allocated_regs);
5335             break;
5336         default:
5337             g_assert_not_reached();
5338         }
5339     }
5340 
5341     /* Mark dead temporaries and free the associated registers.  */
5342     for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
5343         if (IS_DEAD_ARG(i)) {
5344             temp_dead(s, arg_temp(op->args[i]));
5345         }
5346     }
5347 
5348     /* Clobber call registers.  */
5349     for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
5350         if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) {
5351             tcg_reg_free(s, i, allocated_regs);
5352         }
5353     }
5354 
5355     /*
5356      * Save globals if they might be written by the helper,
5357      * sync them if they might be read.
5358      */
5359     if (info->flags & TCG_CALL_NO_READ_GLOBALS) {
5360         /* Nothing to do */
5361     } else if (info->flags & TCG_CALL_NO_WRITE_GLOBALS) {
5362         sync_globals(s, allocated_regs);
5363     } else {
5364         save_globals(s, allocated_regs);
5365     }
5366 
5367     /*
5368      * If the ABI passes a pointer to the returned struct as the first
5369      * argument, load that now.  Pass a pointer to the output home slot.
5370      */
5371     if (info->out_kind == TCG_CALL_RET_BY_REF) {
5372         TCGTemp *ts = arg_temp(op->args[0]);
5373 
5374         if (!ts->mem_allocated) {
5375             temp_allocate_frame(s, ts);
5376         }
5377         load_arg_ref(s, 0, ts->mem_base->reg, ts->mem_offset, &allocated_regs);
5378     }
5379 
5380     tcg_out_call(s, tcg_call_func(op), info);
5381 
5382     /* Assign output registers and emit moves if needed.  */
5383     switch (info->out_kind) {
5384     case TCG_CALL_RET_NORMAL:
5385         for (i = 0; i < nb_oargs; i++) {
5386             TCGTemp *ts = arg_temp(op->args[i]);
5387             TCGReg reg = tcg_target_call_oarg_reg(TCG_CALL_RET_NORMAL, i);
5388 
5389             /* ENV should not be modified.  */
5390             tcg_debug_assert(!temp_readonly(ts));
5391 
5392             set_temp_val_reg(s, ts, reg);
5393             ts->mem_coherent = 0;
5394         }
5395         break;
5396 
5397     case TCG_CALL_RET_BY_VEC:
5398         {
5399             TCGTemp *ts = arg_temp(op->args[0]);
5400 
5401             tcg_debug_assert(ts->base_type == TCG_TYPE_I128);
5402             tcg_debug_assert(ts->temp_subindex == 0);
5403             if (!ts->mem_allocated) {
5404                 temp_allocate_frame(s, ts);
5405             }
5406             tcg_out_st(s, TCG_TYPE_V128,
5407                        tcg_target_call_oarg_reg(TCG_CALL_RET_BY_VEC, 0),
5408                        ts->mem_base->reg, ts->mem_offset);
5409         }
5410         /* fall through to mark all parts in memory */
5411 
5412     case TCG_CALL_RET_BY_REF:
5413         /* The callee has performed a write through the reference. */
5414         for (i = 0; i < nb_oargs; i++) {
5415             TCGTemp *ts = arg_temp(op->args[i]);
5416             ts->val_type = TEMP_VAL_MEM;
5417         }
5418         break;
5419 
5420     default:
5421         g_assert_not_reached();
5422     }
5423 
5424     /* Flush or discard output registers as needed. */
5425     for (i = 0; i < nb_oargs; i++) {
5426         TCGTemp *ts = arg_temp(op->args[i]);
5427         if (NEED_SYNC_ARG(i)) {
5428             temp_sync(s, ts, s->reserved_regs, 0, IS_DEAD_ARG(i));
5429         } else if (IS_DEAD_ARG(i)) {
5430             temp_dead(s, ts);
5431         }
5432     }
5433 }
5434 
5435 /**
5436  * atom_and_align_for_opc:
5437  * @s: tcg context
5438  * @opc: memory operation code
5439  * @host_atom: MO_ATOM_{IFALIGN,WITHIN16,SUBALIGN} for host operations
5440  * @allow_two_ops: true if we are prepared to issue two operations
5441  *
5442  * Return the alignment and atomicity to use for the inline fast path
5443  * for the given memory operation.  The alignment may be larger than
5444  * that specified in @opc, and the correct alignment will be diagnosed
5445  * by the slow path helper.
5446  *
5447  * If @allow_two_ops, the host is prepared to test for 2x alignment,
5448  * and issue two loads or stores for subalignment.
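      *
      * For example (illustrative): a 16-byte load with MO_ATOM_WITHIN16
      * on a host whose model is MO_ATOM_IFALIGN yields atom = MO_128 with
      * the alignment from @opc unchanged, since a misaligned access cannot
      * be within 16 bytes anyway.  A backend might use the result in its
      * fast path as (sketch):
      *
      *     TCGAtomAlign aa
      *         = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN,
      *                                  (opc & MO_SIZE) == MO_128);
      *     unsigned a_mask = (1u << aa.align) - 1;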
5449  */
5450 static TCGAtomAlign atom_and_align_for_opc(TCGContext *s, MemOp opc,
5451                                            MemOp host_atom, bool allow_two_ops)
5452 {
5453     MemOp align = get_alignment_bits(opc);
5454     MemOp size = opc & MO_SIZE;
5455     MemOp half = size ? size - 1 : 0;
5456     MemOp atom = opc & MO_ATOM_MASK;
5457     MemOp atmax;
5458 
5459     switch (atom) {
5460     case MO_ATOM_NONE:
5461         /* The operation requires no specific atomicity. */
5462         atmax = MO_8;
5463         break;
5464 
5465     case MO_ATOM_IFALIGN:
5466         atmax = size;
5467         break;
5468 
5469     case MO_ATOM_IFALIGN_PAIR:
5470         atmax = half;
5471         break;
5472 
5473     case MO_ATOM_WITHIN16:
5474         atmax = size;
5475         if (size == MO_128) {
5476             /* Misalignment implies !within16, and therefore no atomicity. */
5477         } else if (host_atom != MO_ATOM_WITHIN16) {
5478             /* The host does not implement within16, so require alignment. */
5479             align = MAX(align, size);
5480         }
5481         break;
5482 
5483     case MO_ATOM_WITHIN16_PAIR:
5484         atmax = size;
5485         /*
5486          * Misalignment implies !within16, and therefore half atomicity.
5487          * Any host prepared for two operations can implement this with
5488          * half alignment.
5489          */
5490         if (host_atom != MO_ATOM_WITHIN16 && allow_two_ops) {
5491             align = MAX(align, half);
5492         }
5493         break;
5494 
5495     case MO_ATOM_SUBALIGN:
5496         atmax = size;
5497         if (host_atom != MO_ATOM_SUBALIGN) {
5498             /* If unaligned but not odd, there are subobjects up to half. */
5499             if (allow_two_ops) {
5500                 align = MAX(align, half);
5501             } else {
5502                 align = MAX(align, size);
5503             }
5504         }
5505         break;
5506 
5507     default:
5508         g_assert_not_reached();
5509     }
5510 
5511     return (TCGAtomAlign){ .atom = atmax, .align = align };
5512 }
5513 
5514 /*
5515  * Similarly for qemu_ld/st slow path helpers.
5516  * We must re-implement tcg_gen_callN and tcg_reg_alloc_call simultaneously,
5517  * using only the provided backend tcg_out_* functions.
5518  */
5519 
5520 static int tcg_out_helper_stk_ofs(TCGType type, unsigned slot)
5521 {
5522     int ofs = arg_slot_stk_ofs(slot);
5523 
5524     /*
5525      * Each stack slot is TCG_TARGET_LONG_BITS.  If the host does not
5526      * require extension to uint64_t, adjust the address for uint32_t.
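          *
          * Illustrative example: on a big-endian 64-bit host, a
          * TCG_TYPE_I32 argument occupies the high-addressed half of its
          * 8-byte slot, hence the "ofs += 4" below.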
5527      */
5528     if (HOST_BIG_ENDIAN &&
5529         TCG_TARGET_REG_BITS == 64 &&
5530         type == TCG_TYPE_I32) {
5531         ofs += 4;
5532     }
5533     return ofs;
5534 }
5535 
5536 static void tcg_out_helper_load_slots(TCGContext *s,
5537                                       unsigned nmov, TCGMovExtend *mov,
5538                                       const TCGLdstHelperParam *parm)
5539 {
5540     unsigned i;
5541     TCGReg dst3;
5542 
5543     /*
5544      * Start from the end, storing to the stack first.
5545      * This frees those registers, so we need not consider overlap.
5546      */
5547     for (i = nmov; i-- > 0; ) {
5548         unsigned slot = mov[i].dst;
5549 
5550         if (arg_slot_reg_p(slot)) {
5551             goto found_reg;
5552         }
5553 
5554         TCGReg src = mov[i].src;
5555         TCGType dst_type = mov[i].dst_type;
5556         MemOp dst_mo = dst_type == TCG_TYPE_I32 ? MO_32 : MO_64;
5557 
5558         /* The argument is going onto the stack; extend into scratch. */
5559         if ((mov[i].src_ext & MO_SIZE) != dst_mo) {
5560             tcg_debug_assert(parm->ntmp != 0);
5561             mov[i].dst = src = parm->tmp[0];
5562             tcg_out_movext1(s, &mov[i]);
5563         }
5564 
5565         tcg_out_st(s, dst_type, src, TCG_REG_CALL_STACK,
5566                    tcg_out_helper_stk_ofs(dst_type, slot));
5567     }
5568     return;
5569 
5570  found_reg:
5571     /*
5572      * The remaining arguments are in registers.
5573      * Convert slot numbers to argument registers.
5574      */
5575     nmov = i + 1;
5576     for (i = 0; i < nmov; ++i) {
5577         mov[i].dst = tcg_target_call_iarg_regs[mov[i].dst];
5578     }
5579 
5580     switch (nmov) {
5581     case 4:
5582         /* The backend must have provided enough temps for the worst case. */
5583         tcg_debug_assert(parm->ntmp >= 2);
5584 
5585         dst3 = mov[3].dst;
5586         for (unsigned j = 0; j < 3; ++j) {
5587             if (dst3 == mov[j].src) {
5588                 /*
5589                  * Conflict. Copy the source to a temporary, perform the
5590                  * remaining moves, then the extension from our scratch
5591                  * on the way out.
5592                  */
5593                 TCGReg scratch = parm->tmp[1];
5594 
5595                 tcg_out_mov(s, mov[3].src_type, scratch, mov[3].src);
5596                 tcg_out_movext3(s, mov, mov + 1, mov + 2, parm->tmp[0]);
5597                 tcg_out_movext1_new_src(s, &mov[3], scratch);
5598                 return;
5599             }
5600         }
5601 
5602         /* No conflicts: perform this move and continue. */
5603         tcg_out_movext1(s, &mov[3]);
5604         /* fall through */
5605 
5606     case 3:
5607         tcg_out_movext3(s, mov, mov + 1, mov + 2,
5608                         parm->ntmp ? parm->tmp[0] : -1);
5609         break;
5610     case 2:
5611         tcg_out_movext2(s, mov, mov + 1,
5612                         parm->ntmp ? parm->tmp[0] : -1);
5613         break;
5614     case 1:
5615         tcg_out_movext1(s, mov);
5616         break;
5617     default:
5618         g_assert_not_reached();
5619     }
5620 }
5621 
5622 static void tcg_out_helper_load_imm(TCGContext *s, unsigned slot,
5623                                     TCGType type, tcg_target_long imm,
5624                                     const TCGLdstHelperParam *parm)
5625 {
5626     if (arg_slot_reg_p(slot)) {
5627         tcg_out_movi(s, type, tcg_target_call_iarg_regs[slot], imm);
5628     } else {
5629         int ofs = tcg_out_helper_stk_ofs(type, slot);
5630         if (!tcg_out_sti(s, type, imm, TCG_REG_CALL_STACK, ofs)) {
5631             tcg_debug_assert(parm->ntmp != 0);
5632             tcg_out_movi(s, type, parm->tmp[0], imm);
5633             tcg_out_st(s, type, parm->tmp[0], TCG_REG_CALL_STACK, ofs);
5634         }
5635     }
5636 }
5637 
5638 static void tcg_out_helper_load_common_args(TCGContext *s,
5639                                             const TCGLabelQemuLdst *ldst,
5640                                             const TCGLdstHelperParam *parm,
5641                                             const TCGHelperInfo *info,
5642                                             unsigned next_arg)
5643 {
5644     TCGMovExtend ptr_mov = {
5645         .dst_type = TCG_TYPE_PTR,
5646         .src_type = TCG_TYPE_PTR,
5647         .src_ext = sizeof(void *) == 4 ? MO_32 : MO_64
5648     };
5649     const TCGCallArgumentLoc *loc = &info->in[0];
5650     TCGType type;
5651     unsigned slot;
5652     tcg_target_ulong imm;
5653 
5654     /*
5655      * Handle env, which is always first.
5656      */
5657     ptr_mov.dst = loc->arg_slot;
5658     ptr_mov.src = TCG_AREG0;
5659     tcg_out_helper_load_slots(s, 1, &ptr_mov, parm);
5660 
5661     /*
5662      * Handle oi.
5663      */
5664     imm = ldst->oi;
5665     loc = &info->in[next_arg];
5666     type = TCG_TYPE_I32;
5667     switch (loc->kind) {
5668     case TCG_CALL_ARG_NORMAL:
5669         break;
5670     case TCG_CALL_ARG_EXTEND_U:
5671     case TCG_CALL_ARG_EXTEND_S:
5672         /* No extension required for MemOpIdx. */
5673         tcg_debug_assert(imm <= INT32_MAX);
5674         type = TCG_TYPE_REG;
5675         break;
5676     default:
5677         g_assert_not_reached();
5678     }
5679     tcg_out_helper_load_imm(s, loc->arg_slot, type, imm, parm);
5680     next_arg++;
5681 
5682     /*
5683      * Handle ra.
5684      */
5685     loc = &info->in[next_arg];
5686     slot = loc->arg_slot;
5687     if (parm->ra_gen) {
5688         int arg_reg = -1;
5689         TCGReg ra_reg;
5690 
5691         if (arg_slot_reg_p(slot)) {
5692             arg_reg = tcg_target_call_iarg_regs[slot];
5693         }
5694         ra_reg = parm->ra_gen(s, ldst, arg_reg);
5695 
5696         ptr_mov.dst = slot;
5697         ptr_mov.src = ra_reg;
5698         tcg_out_helper_load_slots(s, 1, &ptr_mov, parm);
5699     } else {
5700         imm = (uintptr_t)ldst->raddr;
5701         tcg_out_helper_load_imm(s, slot, TCG_TYPE_PTR, imm, parm);
5702     }
5703 }
5704 
5705 static unsigned tcg_out_helper_add_mov(TCGMovExtend *mov,
5706                                        const TCGCallArgumentLoc *loc,
5707                                        TCGType dst_type, TCGType src_type,
5708                                        TCGReg lo, TCGReg hi)
5709 {
5710     MemOp reg_mo;
5711 
5712     if (dst_type <= TCG_TYPE_REG) {
5713         MemOp src_ext;
5714 
5715         switch (loc->kind) {
5716         case TCG_CALL_ARG_NORMAL:
5717             src_ext = src_type == TCG_TYPE_I32 ? MO_32 : MO_64;
5718             break;
5719         case TCG_CALL_ARG_EXTEND_U:
5720             dst_type = TCG_TYPE_REG;
5721             src_ext = MO_UL;
5722             break;
5723         case TCG_CALL_ARG_EXTEND_S:
5724             dst_type = TCG_TYPE_REG;
5725             src_ext = MO_SL;
5726             break;
5727         default:
5728             g_assert_not_reached();
5729         }
5730 
5731         mov[0].dst = loc->arg_slot;
5732         mov[0].dst_type = dst_type;
5733         mov[0].src = lo;
5734         mov[0].src_type = src_type;
5735         mov[0].src_ext = src_ext;
5736         return 1;
5737     }
5738 
5739     if (TCG_TARGET_REG_BITS == 32) {
5740         assert(dst_type == TCG_TYPE_I64);
5741         reg_mo = MO_32;
5742     } else {
5743         assert(dst_type == TCG_TYPE_I128);
5744         reg_mo = MO_64;
5745     }
5746 
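         /*
          * Split the value across two adjacent slots: the low half goes
          * to loc[0] on little-endian hosts and to loc[1] on big-endian
          * hosts, matching the in-memory layout of the two halves.
          */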
5747     mov[0].dst = loc[HOST_BIG_ENDIAN].arg_slot;
5748     mov[0].src = lo;
5749     mov[0].dst_type = TCG_TYPE_REG;
5750     mov[0].src_type = TCG_TYPE_REG;
5751     mov[0].src_ext = reg_mo;
5752 
5753     mov[1].dst = loc[!HOST_BIG_ENDIAN].arg_slot;
5754     mov[1].src = hi;
5755     mov[1].dst_type = TCG_TYPE_REG;
5756     mov[1].src_type = TCG_TYPE_REG;
5757     mov[1].src_ext = reg_mo;
5758 
5759     return 2;
5760 }
5761 
5762 static void tcg_out_ld_helper_args(TCGContext *s, const TCGLabelQemuLdst *ldst,
5763                                    const TCGLdstHelperParam *parm)
5764 {
5765     const TCGHelperInfo *info;
5766     const TCGCallArgumentLoc *loc;
5767     TCGMovExtend mov[2];
5768     unsigned next_arg, nmov;
5769     MemOp mop = get_memop(ldst->oi);
5770 
5771     switch (mop & MO_SIZE) {
5772     case MO_8:
5773     case MO_16:
5774     case MO_32:
5775         info = &info_helper_ld32_mmu;
5776         break;
5777     case MO_64:
5778         info = &info_helper_ld64_mmu;
5779         break;
5780     case MO_128:
5781         info = &info_helper_ld128_mmu;
5782         break;
5783     default:
5784         g_assert_not_reached();
5785     }
5786 
5787     /* Defer env argument. */
5788     next_arg = 1;
5789 
5790     loc = &info->in[next_arg];
5791     if (TCG_TARGET_REG_BITS == 32 && s->addr_type == TCG_TYPE_I32) {
5792         /*
5793          * 32-bit host with 32-bit guest: zero-extend the guest address
5794          * to 64-bits for the helper by storing the low part, then
5795          * load a zero for the high part.
5796          */
5797         tcg_out_helper_add_mov(mov, loc + HOST_BIG_ENDIAN,
5798                                TCG_TYPE_I32, TCG_TYPE_I32,
5799                                ldst->addrlo_reg, -1);
5800         tcg_out_helper_load_slots(s, 1, mov, parm);
5801 
5802         tcg_out_helper_load_imm(s, loc[!HOST_BIG_ENDIAN].arg_slot,
5803                                 TCG_TYPE_I32, 0, parm);
5804         next_arg += 2;
5805     } else {
5806         nmov = tcg_out_helper_add_mov(mov, loc, TCG_TYPE_I64, s->addr_type,
5807                                       ldst->addrlo_reg, ldst->addrhi_reg);
5808         tcg_out_helper_load_slots(s, nmov, mov, parm);
5809         next_arg += nmov;
5810     }
5811 
5812     switch (info->out_kind) {
5813     case TCG_CALL_RET_NORMAL:
5814     case TCG_CALL_RET_BY_VEC:
5815         break;
5816     case TCG_CALL_RET_BY_REF:
5817         /*
5818          * The return reference is in the first argument slot.
5819          * We need memory in which to return: re-use the top of stack.
5820          */
5821         {
5822             int ofs_slot0 = TCG_TARGET_CALL_STACK_OFFSET;
5823 
5824             if (arg_slot_reg_p(0)) {
5825                 tcg_out_addi_ptr(s, tcg_target_call_iarg_regs[0],
5826                                  TCG_REG_CALL_STACK, ofs_slot0);
5827             } else {
5828                 tcg_debug_assert(parm->ntmp != 0);
5829                 tcg_out_addi_ptr(s, parm->tmp[0],
5830                                  TCG_REG_CALL_STACK, ofs_slot0);
5831                 tcg_out_st(s, TCG_TYPE_PTR, parm->tmp[0],
5832                            TCG_REG_CALL_STACK, ofs_slot0);
5833             }
5834         }
5835         break;
5836     default:
5837         g_assert_not_reached();
5838     }
5839 
5840     tcg_out_helper_load_common_args(s, ldst, parm, info, next_arg);
5841 }
5842 
5843 static void tcg_out_ld_helper_ret(TCGContext *s, const TCGLabelQemuLdst *ldst,
5844                                   bool load_sign,
5845                                   const TCGLdstHelperParam *parm)
5846 {
5847     MemOp mop = get_memop(ldst->oi);
5848     TCGMovExtend mov[2];
5849     int ofs_slot0;
5850 
5851     switch (ldst->type) {
5852     case TCG_TYPE_I64:
5853         if (TCG_TARGET_REG_BITS == 32) {
5854             break;
5855         }
5856         /* fall through */
5857 
5858     case TCG_TYPE_I32:
5859         mov[0].dst = ldst->datalo_reg;
5860         mov[0].src = tcg_target_call_oarg_reg(TCG_CALL_RET_NORMAL, 0);
5861         mov[0].dst_type = ldst->type;
5862         mov[0].src_type = TCG_TYPE_REG;
5863 
5864         /*
5865          * If load_sign, then we allowed the helper to perform the
5866          * appropriate sign extension to tcg_target_ulong, and all
5867          * we need now is a plain move.
5868          *
5869          * If not, then we expect the relevant extension
5870          * instruction to be no more expensive than a move, and
5871          * we thus save the icache etc by only using one of two
5872          * helper functions.
5873          */
5874         if (load_sign || !(mop & MO_SIGN)) {
5875             if (TCG_TARGET_REG_BITS == 32 || ldst->type == TCG_TYPE_I32) {
5876                 mov[0].src_ext = MO_32;
5877             } else {
5878                 mov[0].src_ext = MO_64;
5879             }
5880         } else {
5881             mov[0].src_ext = mop & MO_SSIZE;
5882         }
5883         tcg_out_movext1(s, mov);
5884         return;
5885 
5886     case TCG_TYPE_I128:
5887         tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
5888         ofs_slot0 = TCG_TARGET_CALL_STACK_OFFSET;
5889         switch (TCG_TARGET_CALL_RET_I128) {
5890         case TCG_CALL_RET_NORMAL:
5891             break;
5892         case TCG_CALL_RET_BY_VEC:
5893             tcg_out_st(s, TCG_TYPE_V128,
5894                        tcg_target_call_oarg_reg(TCG_CALL_RET_BY_VEC, 0),
5895                        TCG_REG_CALL_STACK, ofs_slot0);
5896             /* fall through */
5897         case TCG_CALL_RET_BY_REF:
5898             tcg_out_ld(s, TCG_TYPE_I64, ldst->datalo_reg,
5899                        TCG_REG_CALL_STACK, ofs_slot0 + 8 * HOST_BIG_ENDIAN);
5900             tcg_out_ld(s, TCG_TYPE_I64, ldst->datahi_reg,
5901                        TCG_REG_CALL_STACK, ofs_slot0 + 8 * !HOST_BIG_ENDIAN);
5902             return;
5903         default:
5904             g_assert_not_reached();
5905         }
5906         break;
5907 
5908     default:
5909         g_assert_not_reached();
5910     }
5911 
5912     mov[0].dst = ldst->datalo_reg;
5913     mov[0].src =
5914         tcg_target_call_oarg_reg(TCG_CALL_RET_NORMAL, HOST_BIG_ENDIAN);
5915     mov[0].dst_type = TCG_TYPE_REG;
5916     mov[0].src_type = TCG_TYPE_REG;
5917     mov[0].src_ext = TCG_TARGET_REG_BITS == 32 ? MO_32 : MO_64;
5918 
5919     mov[1].dst = ldst->datahi_reg;
5920     mov[1].src =
5921         tcg_target_call_oarg_reg(TCG_CALL_RET_NORMAL, !HOST_BIG_ENDIAN);
5922     mov[1].dst_type = TCG_TYPE_REG;
5923     mov[1].src_type = TCG_TYPE_REG;
5924     mov[1].src_ext = TCG_TARGET_REG_BITS == 32 ? MO_32 : MO_64;
5925 
5926     tcg_out_movext2(s, mov, mov + 1, parm->ntmp ? parm->tmp[0] : -1);
5927 }
5928 
5929 static void tcg_out_st_helper_args(TCGContext *s, const TCGLabelQemuLdst *ldst,
5930                                    const TCGLdstHelperParam *parm)
5931 {
5932     const TCGHelperInfo *info;
5933     const TCGCallArgumentLoc *loc;
5934     TCGMovExtend mov[4];
5935     TCGType data_type;
5936     unsigned next_arg, nmov, n;
5937     MemOp mop = get_memop(ldst->oi);
5938 
5939     switch (mop & MO_SIZE) {
5940     case MO_8:
5941     case MO_16:
5942     case MO_32:
5943         info = &info_helper_st32_mmu;
5944         data_type = TCG_TYPE_I32;
5945         break;
5946     case MO_64:
5947         info = &info_helper_st64_mmu;
5948         data_type = TCG_TYPE_I64;
5949         break;
5950     case MO_128:
5951         info = &info_helper_st128_mmu;
5952         data_type = TCG_TYPE_I128;
5953         break;
5954     default:
5955         g_assert_not_reached();
5956     }
5957 
5958     /* Defer env argument. */
5959     next_arg = 1;
5960     nmov = 0;
5961 
5962     /* Handle addr argument. */
5963     loc = &info->in[next_arg];
5964     if (TCG_TARGET_REG_BITS == 32 && s->addr_type == TCG_TYPE_I32) {
5965         /*
5966          * 32-bit host with 32-bit guest: zero-extend the guest address
5967          * to 64-bits for the helper by storing the low part.  Later,
5968          * after we have processed the register inputs, we will load a
5969          * zero for the high part.
5970          */
5971         tcg_out_helper_add_mov(mov, loc + HOST_BIG_ENDIAN,
5972                                TCG_TYPE_I32, TCG_TYPE_I32,
5973                                ldst->addrlo_reg, -1);
5974         next_arg += 2;
5975         nmov += 1;
5976     } else {
5977         n = tcg_out_helper_add_mov(mov, loc, TCG_TYPE_I64, s->addr_type,
5978                                    ldst->addrlo_reg, ldst->addrhi_reg);
5979         next_arg += n;
5980         nmov += n;
5981     }
5982 
5983     /* Handle data argument. */
5984     loc = &info->in[next_arg];
5985     switch (loc->kind) {
5986     case TCG_CALL_ARG_NORMAL:
5987     case TCG_CALL_ARG_EXTEND_U:
5988     case TCG_CALL_ARG_EXTEND_S:
5989         n = tcg_out_helper_add_mov(mov + nmov, loc, data_type, ldst->type,
5990                                    ldst->datalo_reg, ldst->datahi_reg);
5991         next_arg += n;
5992         nmov += n;
5993         tcg_out_helper_load_slots(s, nmov, mov, parm);
5994         break;
5995 
5996     case TCG_CALL_ARG_BY_REF:
5997         tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
5998         tcg_debug_assert(data_type == TCG_TYPE_I128);
5999         tcg_out_st(s, TCG_TYPE_I64,
6000                    HOST_BIG_ENDIAN ? ldst->datahi_reg : ldst->datalo_reg,
6001                    TCG_REG_CALL_STACK, arg_slot_stk_ofs(loc[0].ref_slot));
6002         tcg_out_st(s, TCG_TYPE_I64,
6003                    HOST_BIG_ENDIAN ? ldst->datalo_reg : ldst->datahi_reg,
6004                    TCG_REG_CALL_STACK, arg_slot_stk_ofs(loc[1].ref_slot));
6005 
6006         tcg_out_helper_load_slots(s, nmov, mov, parm);
6007 
6008         if (arg_slot_reg_p(loc->arg_slot)) {
6009             tcg_out_addi_ptr(s, tcg_target_call_iarg_regs[loc->arg_slot],
6010                              TCG_REG_CALL_STACK,
6011                              arg_slot_stk_ofs(loc->ref_slot));
6012         } else {
6013             tcg_debug_assert(parm->ntmp != 0);
6014             tcg_out_addi_ptr(s, parm->tmp[0], TCG_REG_CALL_STACK,
6015                              arg_slot_stk_ofs(loc->ref_slot));
6016             tcg_out_st(s, TCG_TYPE_PTR, parm->tmp[0],
6017                        TCG_REG_CALL_STACK, arg_slot_stk_ofs(loc->arg_slot));
6018         }
6019         next_arg += 2;
6020         break;
6021 
6022     default:
6023         g_assert_not_reached();
6024     }
6025 
6026     if (TCG_TARGET_REG_BITS == 32 && s->addr_type == TCG_TYPE_I32) {
6027         /* Zero extend the address by loading a zero for the high part. */
6028         loc = &info->in[1 + !HOST_BIG_ENDIAN];
6029         tcg_out_helper_load_imm(s, loc->arg_slot, TCG_TYPE_I32, 0, parm);
6030     }
6031 
6032     tcg_out_helper_load_common_args(s, ldst, parm, info, next_arg);
6033 }
6034 
6035 int tcg_gen_code(TCGContext *s, TranslationBlock *tb, uint64_t pc_start)
6036 {
6037     int i, start_words, num_insns;
6038     TCGOp *op;
6039 
6040     if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)
6041                  && qemu_log_in_addr_range(pc_start))) {
6042         FILE *logfile = qemu_log_trylock();
6043         if (logfile) {
6044             fprintf(logfile, "OP:\n");
6045             tcg_dump_ops(s, logfile, false);
6046             fprintf(logfile, "\n");
6047             qemu_log_unlock(logfile);
6048         }
6049     }
6050 
6051 #ifdef CONFIG_DEBUG_TCG
6052     /* Ensure all labels referenced have been emitted.  */
6053     {
6054         TCGLabel *l;
6055         bool error = false;
6056 
6057         QSIMPLEQ_FOREACH(l, &s->labels, next) {
6058             if (unlikely(!l->present) && !QSIMPLEQ_EMPTY(&l->branches)) {
6059                 qemu_log_mask(CPU_LOG_TB_OP,
6060                               "$L%d referenced but not present.\n", l->id);
6061                 error = true;
6062             }
6063         }
6064         assert(!error);
6065     }
6066 #endif
6067 
6068     tcg_optimize(s);
6069 
6070     reachable_code_pass(s);
6071     liveness_pass_0(s);
6072     liveness_pass_1(s);
6073 
6074     if (s->nb_indirects > 0) {
6075         if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_IND)
6076                      && qemu_log_in_addr_range(pc_start))) {
6077             FILE *logfile = qemu_log_trylock();
6078             if (logfile) {
6079                 fprintf(logfile, "OP before indirect lowering:\n");
6080                 tcg_dump_ops(s, logfile, false);
6081                 fprintf(logfile, "\n");
6082                 qemu_log_unlock(logfile);
6083             }
6084         }
6085 
6086         /* Replace indirect temps with direct temps.  */
6087         if (liveness_pass_2(s)) {
6088             /* If changes were made, re-run liveness.  */
6089             liveness_pass_1(s);
6090         }
6091     }
6092 
6093     if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_OPT)
6094                  && qemu_log_in_addr_range(pc_start))) {
6095         FILE *logfile = qemu_log_trylock();
6096         if (logfile) {
6097             fprintf(logfile, "OP after optimization and liveness analysis:\n");
6098             tcg_dump_ops(s, logfile, true);
6099             fprintf(logfile, "\n");
6100             qemu_log_unlock(logfile);
6101         }
6102     }
6103 
6104     /* Initialize goto_tb jump offsets. */
6105     tb->jmp_reset_offset[0] = TB_JMP_OFFSET_INVALID;
6106     tb->jmp_reset_offset[1] = TB_JMP_OFFSET_INVALID;
6107     tb->jmp_insn_offset[0] = TB_JMP_OFFSET_INVALID;
6108     tb->jmp_insn_offset[1] = TB_JMP_OFFSET_INVALID;
6109 
6110     tcg_reg_alloc_start(s);
6111 
6112     /*
6113      * Reset the buffer pointers when restarting after overflow.
6114      * TODO: Move this into translate-all.c with the rest of the
6115      * buffer management.  Having only this done here is confusing.
6116      */
6117     s->code_buf = tcg_splitwx_to_rw(tb->tc.ptr);
6118     s->code_ptr = s->code_buf;
6119 
6120 #ifdef TCG_TARGET_NEED_LDST_LABELS
6121     QSIMPLEQ_INIT(&s->ldst_labels);
6122 #endif
6123 #ifdef TCG_TARGET_NEED_POOL_LABELS
6124     s->pool_labels = NULL;
6125 #endif
6126 
6127     start_words = s->insn_start_words;
6128     s->gen_insn_data =
6129         tcg_malloc(sizeof(uint64_t) * s->gen_tb->icount * start_words);
6130 
6131     tcg_out_tb_start(s);
6132 
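    /*
     * Walk the op list and emit host code.  num_insns lags one behind
     * the count of INDEX_op_insn_start markers seen, so that the
     * host-code offset at which each guest instruction's code ends can
     * be recorded in gen_insn_end_off[] when the next marker (or the
     * end of the list) is reached.
     */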
6133     num_insns = -1;
6134     QTAILQ_FOREACH(op, &s->ops, link) {
6135         TCGOpcode opc = op->opc;
6136 
6137         switch (opc) {
6138         case INDEX_op_mov_i32:
6139         case INDEX_op_mov_i64:
6140         case INDEX_op_mov_vec:
6141             tcg_reg_alloc_mov(s, op);
6142             break;
6143         case INDEX_op_dup_vec:
6144             tcg_reg_alloc_dup(s, op);
6145             break;
6146         case INDEX_op_insn_start:
6147             if (num_insns >= 0) {
6148                 size_t off = tcg_current_code_size(s);
6149                 s->gen_insn_end_off[num_insns] = off;
6150                 /* Assert that we do not overflow our stored offset.  */
6151                 assert(s->gen_insn_end_off[num_insns] == off);
6152             }
6153             num_insns++;
6154             for (i = 0; i < start_words; ++i) {
6155                 s->gen_insn_data[num_insns * start_words + i] =
6156                     tcg_get_insn_start_param(op, i);
6157             }
6158             break;
6159         case INDEX_op_discard:
6160             temp_dead(s, arg_temp(op->args[0]));
6161             break;
6162         case INDEX_op_set_label:
6163             tcg_reg_alloc_bb_end(s, s->reserved_regs);
6164             tcg_out_label(s, arg_label(op->args[0]));
6165             break;
6166         case INDEX_op_call:
6167             tcg_reg_alloc_call(s, op);
6168             break;
6169         case INDEX_op_exit_tb:
6170             tcg_out_exit_tb(s, op->args[0]);
6171             break;
6172         case INDEX_op_goto_tb:
6173             tcg_out_goto_tb(s, op->args[0]);
6174             break;
6175         case INDEX_op_dup2_vec:
6176             if (tcg_reg_alloc_dup2(s, op)) {
6177                 break;
6178             }
6179             /* fall through */
6180         default:
6181             /* Sanity check that we've not introduced any unhandled opcodes. */
6182             tcg_debug_assert(tcg_op_supported(opc));
6183             /* Note: code generation would be faster with specialized
6184                register allocator functions for some common argument
6185                patterns.  */
6186             tcg_reg_alloc_op(s, op);
6187             break;
6188         }
6189         /* Test for (pending) buffer overflow.  The assumption is that any
6190            one operation beginning below the high water mark cannot overrun
6191            the buffer completely.  Thus we can test for overflow after
6192            generating code without having to check during generation.  */
6193         if (unlikely((void *)s->code_ptr > s->code_gen_highwater)) {
6194             return -1;
6195         }
6196         /* Test for TB overflow, as seen by gen_insn_end_off.  */
6197         if (unlikely(tcg_current_code_size(s) > UINT16_MAX)) {
6198             return -2;
6199         }
6200     }
6201     tcg_debug_assert(num_insns + 1 == s->gen_tb->icount);
6202     s->gen_insn_end_off[num_insns] = tcg_current_code_size(s);
6203 
6204     /* Generate TB finalization at the end of block */
6205 #ifdef TCG_TARGET_NEED_LDST_LABELS
6206     i = tcg_out_ldst_finalize(s);
6207     if (i < 0) {
6208         return i;
6209     }
6210 #endif
6211 #ifdef TCG_TARGET_NEED_POOL_LABELS
6212     i = tcg_out_pool_finalize(s);
6213     if (i < 0) {
6214         return i;
6215     }
6216 #endif
6217     if (!tcg_resolve_relocs(s)) {
6218         return -2;
6219     }
6220 
6221 #ifndef CONFIG_TCG_INTERPRETER
6222     /* flush instruction cache */
6223     flush_idcache_range((uintptr_t)tcg_splitwx_to_rx(s->code_buf),
6224                         (uintptr_t)s->code_buf,
6225                         tcg_ptr_byte_diff(s->code_ptr, s->code_buf));
6226 #endif
6227 
6228     return tcg_current_code_size(s);
6229 }
6230 
6231 #ifdef ELF_HOST_MACHINE
6232 /* To use this feature, the backend must do three things:
6233 
6234    (1) Define ELF_HOST_MACHINE to indicate both what value to
6235        put into the ELF image and to indicate support for the feature.
6236 
6237    (2) Define tcg_register_jit.  This should create a buffer containing
6238        the contents of a .debug_frame section that describes the post-
6239        prologue unwind info for the tcg machine.
6240 
6241    (3) Call tcg_register_jit_int, with the constructed .debug_frame.
6242 */
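
/* As a hedged illustration (the DebugFrame layout below is hypothetical;
   each backend defines its own packed structure in tcg-target.c.inc),
   steps (2) and (3) typically reduce to:

       static const DebugFrame debug_frame = {
           ... CIE/FDE data describing the post-prologue frame ...
       };

       void tcg_register_jit(const void *buf, size_t buf_size)
       {
           tcg_register_jit_int(buf, buf_size,
                                &debug_frame, sizeof(debug_frame));
       }
*/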
6243 
6244 /* Begin GDB interface.  THE FOLLOWING MUST MATCH GDB DOCS.  */
6245 typedef enum {
6246     JIT_NOACTION = 0,
6247     JIT_REGISTER_FN,
6248     JIT_UNREGISTER_FN
6249 } jit_actions_t;
6250 
6251 struct jit_code_entry {
6252     struct jit_code_entry *next_entry;
6253     struct jit_code_entry *prev_entry;
6254     const void *symfile_addr;
6255     uint64_t symfile_size;
6256 };
6257 
6258 struct jit_descriptor {
6259     uint32_t version;
6260     uint32_t action_flag;
6261     struct jit_code_entry *relevant_entry;
6262     struct jit_code_entry *first_entry;
6263 };
6264 
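/* GDB plants a breakpoint in this deliberately empty function; calling
   it after updating __jit_debug_descriptor notifies the debugger that a
   JIT symbol file has been registered.  The empty asm prevents the call
   from being optimized away.  */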
6265 void __jit_debug_register_code(void) __attribute__((noinline));
6266 void __jit_debug_register_code(void)
6267 {
6268     asm("");
6269 }
6270 
6271 /* Must statically initialize the version, because GDB may check
6272    the version before we can set it.  */
6273 struct jit_descriptor __jit_debug_descriptor = { 1, 0, 0, 0 };
6274 
6275 /* End GDB interface.  */
6276 
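/* Return the offset of @str within the NUL-separated string table
   @strtab.  Callers only look up strings known to be present; there is
   no bounds check, so a missing string would walk off the table.  */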
6277 static int find_string(const char *strtab, const char *str)
6278 {
6279     const char *p = strtab + 1;
6280 
6281     while (1) {
6282         if (strcmp(p, str) == 0) {
6283             return p - strtab;
6284         }
6285         p += strlen(p) + 1;
6286     }
6287 }
6288 
6289 static void tcg_register_jit_int(const void *buf_ptr, size_t buf_size,
6290                                  const void *debug_frame,
6291                                  size_t debug_frame_size)
6292 {
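    /*
     * A hand-rolled DWARF .debug_info section: a compile-unit DIE and a
     * single subprogram DIE covering code_gen_buffer, laid out to match
     * the abbreviation table in img->da below.
     */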
6293     struct __attribute__((packed)) DebugInfo {
6294         uint32_t  len;
6295         uint16_t  version;
6296         uint32_t  abbrev;
6297         uint8_t   ptr_size;
6298         uint8_t   cu_die;
6299         uint16_t  cu_lang;
6300         uintptr_t cu_low_pc;
6301         uintptr_t cu_high_pc;
6302         uint8_t   fn_die;
6303         char      fn_name[16];
6304         uintptr_t fn_low_pc;
6305         uintptr_t fn_high_pc;
6306         uint8_t   cu_eoc;
6307     };
6308 
6309     struct ElfImage {
6310         ElfW(Ehdr) ehdr;
6311         ElfW(Phdr) phdr;
6312         ElfW(Shdr) shdr[7];
6313         ElfW(Sym)  sym[2];
6314         struct DebugInfo di;
6315         uint8_t    da[24];
6316         char       str[80];
6317     };
6318 
6319     struct ElfImage *img;
6320 
6321     static const struct ElfImage img_template = {
6322         .ehdr = {
6323             .e_ident[EI_MAG0] = ELFMAG0,
6324             .e_ident[EI_MAG1] = ELFMAG1,
6325             .e_ident[EI_MAG2] = ELFMAG2,
6326             .e_ident[EI_MAG3] = ELFMAG3,
6327             .e_ident[EI_CLASS] = ELF_CLASS,
6328             .e_ident[EI_DATA] = ELF_DATA,
6329             .e_ident[EI_VERSION] = EV_CURRENT,
6330             .e_type = ET_EXEC,
6331             .e_machine = ELF_HOST_MACHINE,
6332             .e_version = EV_CURRENT,
6333             .e_phoff = offsetof(struct ElfImage, phdr),
6334             .e_shoff = offsetof(struct ElfImage, shdr),
6335             .e_ehsize = sizeof(ElfW(Ehdr)),  /* size of the ELF header itself */
6336             .e_phentsize = sizeof(ElfW(Phdr)),
6337             .e_phnum = 1,
6338             .e_shentsize = sizeof(ElfW(Shdr)),
6339             .e_shnum = ARRAY_SIZE(img->shdr),
6340             .e_shstrndx = ARRAY_SIZE(img->shdr) - 1,
6341 #ifdef ELF_HOST_FLAGS
6342             .e_flags = ELF_HOST_FLAGS,
6343 #endif
6344 #ifdef ELF_OSABI
6345             .e_ident[EI_OSABI] = ELF_OSABI,
6346 #endif
6347         },
6348         .phdr = {
6349             .p_type = PT_LOAD,
6350             .p_flags = PF_X,
6351         },
6352         .shdr = {
6353             [0] = { .sh_type = SHT_NULL },
6354             /* Trick: The contents of code_gen_buffer are not present in
6355                this fake ELF file; that got allocated elsewhere.  Therefore
6356                we mark .text as SHT_NOBITS (similar to .bss) so that readers
6357                will not look for contents.  We can record any address.  */
6358             [1] = { /* .text */
6359                 .sh_type = SHT_NOBITS,
6360                 .sh_flags = SHF_EXECINSTR | SHF_ALLOC,
6361             },
6362             [2] = { /* .debug_info */
6363                 .sh_type = SHT_PROGBITS,
6364                 .sh_offset = offsetof(struct ElfImage, di),
6365                 .sh_size = sizeof(struct DebugInfo),
6366             },
6367             [3] = { /* .debug_abbrev */
6368                 .sh_type = SHT_PROGBITS,
6369                 .sh_offset = offsetof(struct ElfImage, da),
6370                 .sh_size = sizeof(img->da),
6371             },
6372             [4] = { /* .debug_frame */
6373                 .sh_type = SHT_PROGBITS,
6374                 .sh_offset = sizeof(struct ElfImage),
6375             },
6376             [5] = { /* .symtab */
6377                 .sh_type = SHT_SYMTAB,
6378                 .sh_offset = offsetof(struct ElfImage, sym),
6379                 .sh_size = sizeof(img->sym),
6380                 .sh_info = 1,
6381                 .sh_link = ARRAY_SIZE(img->shdr) - 1,
6382                 .sh_entsize = sizeof(ElfW(Sym)),
6383             },
6384             [6] = { /* .strtab */
6385                 .sh_type = SHT_STRTAB,
6386                 .sh_offset = offsetof(struct ElfImage, str),
6387                 .sh_size = sizeof(img->str),
6388             }
6389         },
6390         .sym = {
6391             [1] = { /* code_gen_buffer */
6392                 .st_info = ELF_ST_INFO(STB_GLOBAL, STT_FUNC),
6393                 .st_shndx = 1,
6394             }
6395         },
6396         .di = {
6397             .len = sizeof(struct DebugInfo) - 4,
6398             .version = 2,
6399             .ptr_size = sizeof(void *),
6400             .cu_die = 1,
6401             .cu_lang = 0x8001,  /* DW_LANG_Mips_Assembler */
6402             .fn_die = 2,
6403             .fn_name = "code_gen_buffer"
6404         },
6405         .da = {
6406             1,          /* abbrev number (the cu) */
6407             0x11, 1,    /* DW_TAG_compile_unit, has children */
6408             0x13, 0x5,  /* DW_AT_language, DW_FORM_data2 */
6409             0x11, 0x1,  /* DW_AT_low_pc, DW_FORM_addr */
6410             0x12, 0x1,  /* DW_AT_high_pc, DW_FORM_addr */
6411             0, 0,       /* end of abbrev */
6412             2,          /* abbrev number (the fn) */
6413             0x2e, 0,    /* DW_TAG_subprogram, no children */
6414             0x3, 0x8,   /* DW_AT_name, DW_FORM_string */
6415             0x11, 0x1,  /* DW_AT_low_pc, DW_FORM_addr */
6416             0x12, 0x1,  /* DW_AT_high_pc, DW_FORM_addr */
6417             0, 0,       /* end of abbrev */
6418             0           /* no more abbrev */
6419         },
6420         .str = "\0" ".text\0" ".debug_info\0" ".debug_abbrev\0"
6421                ".debug_frame\0" ".symtab\0" ".strtab\0" "code_gen_buffer",
6422     };
6423 
6424     /* We only need a single jit entry; statically allocate it.  */
6425     static struct jit_code_entry one_entry;
6426 
6427     uintptr_t buf = (uintptr_t)buf_ptr;
6428     size_t img_size = sizeof(struct ElfImage) + debug_frame_size;
6429     DebugFrameHeader *dfh;
6430 
6431     img = g_malloc(img_size);
6432     *img = img_template;
6433 
6434     img->phdr.p_vaddr = buf;
6435     img->phdr.p_paddr = buf;
6436     img->phdr.p_memsz = buf_size;
6437 
6438     img->shdr[1].sh_name = find_string(img->str, ".text");
6439     img->shdr[1].sh_addr = buf;
6440     img->shdr[1].sh_size = buf_size;
6441 
6442     img->shdr[2].sh_name = find_string(img->str, ".debug_info");
6443     img->shdr[3].sh_name = find_string(img->str, ".debug_abbrev");
6444 
6445     img->shdr[4].sh_name = find_string(img->str, ".debug_frame");
6446     img->shdr[4].sh_size = debug_frame_size;
6447 
6448     img->shdr[5].sh_name = find_string(img->str, ".symtab");
6449     img->shdr[6].sh_name = find_string(img->str, ".strtab");
6450 
6451     img->sym[1].st_name = find_string(img->str, "code_gen_buffer");
6452     img->sym[1].st_value = buf;
6453     img->sym[1].st_size = buf_size;
6454 
6455     img->di.cu_low_pc = buf;
6456     img->di.cu_high_pc = buf + buf_size;
6457     img->di.fn_low_pc = buf;
6458     img->di.fn_high_pc = buf + buf_size;
6459 
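    /* Copy the backend's .debug_frame to just past the fixed ElfImage
       (matching shdr[4].sh_offset above) and patch its FDE to cover the
       whole of code_gen_buffer.  */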
6460     dfh = (DebugFrameHeader *)(img + 1);
6461     memcpy(dfh, debug_frame, debug_frame_size);
6462     dfh->fde.func_start = buf;
6463     dfh->fde.func_len = buf_size;
6464 
6465 #ifdef DEBUG_JIT
6466     /* Define DEBUG_JIT to write the ELF image to a file, where it can be
6467        inspected with readelf, objdump, or other utilities.  */
6468     {
6469         g_autofree char *jit = g_strdup_printf("%s/qemu.jit", g_get_tmp_dir());
6470         FILE *f = fopen(jit, "w+b");
6471         if (f) {
6472             if (fwrite(img, img_size, 1, f) != 1) {
6473                 /* Checked only to silence fwrite's unused-result warning. */
6474             }
6475             fclose(f);
6476         }
6477     }
6478 #endif
6479 
6480     one_entry.symfile_addr = img;
6481     one_entry.symfile_size = img_size;
6482 
6483     __jit_debug_descriptor.action_flag = JIT_REGISTER_FN;
6484     __jit_debug_descriptor.relevant_entry = &one_entry;
6485     __jit_debug_descriptor.first_entry = &one_entry;
6486     __jit_debug_register_code();
6487 }
6488 #else
6489 /* No support for the feature.  Provide the entry point expected by exec.c,
6490    and implement the internal function we declared earlier.  */
6491 
6492 static void tcg_register_jit_int(const void *buf, size_t size,
6493                                  const void *debug_frame,
6494                                  size_t debug_frame_size)
6495 {
6496 }
6497 
6498 void tcg_register_jit(const void *buf, size_t buf_size)
6499 {
6500 }
6501 #endif /* ELF_HOST_MACHINE */
6502 
6503 #if !TCG_TARGET_MAYBE_vec
6504 void tcg_expand_vec_op(TCGOpcode o, TCGType t, unsigned e, TCGArg a0, ...)
6505 {
6506     g_assert_not_reached();
6507 }
6508 #endif
6509