/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* define it to use liveness analysis (better code) */
#define USE_TCG_OPTIMIZATIONS

#include "qemu/osdep.h"

/* Define to dump the ELF file used to communicate with GDB.  */
#undef DEBUG_JIT

#include "qemu/error-report.h"
#include "qemu/cutils.h"
#include "qemu/host-utils.h"
#include "qemu/qemu-print.h"
#include "qemu/timer.h"
#include "qemu/cacheflush.h"

/* Note: the long-term plan is to reduce the dependencies on the QEMU
   CPU definitions.  Currently they are used for qemu_ld/st
   instructions. */
#define NO_CPU_IO_DEFS

#include "exec/exec-all.h"
#include "tcg/tcg-op.h"

#if UINTPTR_MAX == UINT32_MAX
# define ELF_CLASS  ELFCLASS32
#else
# define ELF_CLASS  ELFCLASS64
#endif
#ifdef HOST_WORDS_BIGENDIAN
# define ELF_DATA   ELFDATA2MSB
#else
# define ELF_DATA   ELFDATA2LSB
#endif

#include "elf.h"
#include "exec/log.h"
#include "tcg-internal.h"

/* Forward declarations for functions declared in tcg-target.c.inc and
   used here. */
static void tcg_target_init(TCGContext *s);
static void tcg_target_qemu_prologue(TCGContext *s);
static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend);

/* The CIE and FDE header definitions will be common to all hosts.  */
typedef struct {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t id;
    uint8_t version;
    char augmentation[1];
    uint8_t code_align;
    uint8_t data_align;
    uint8_t return_column;
} DebugFrameCIE;

typedef struct QEMU_PACKED {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t cie_offset;
    uintptr_t func_start;
    uintptr_t func_len;
} DebugFrameFDEHeader;

typedef struct QEMU_PACKED {
    DebugFrameCIE cie;
    DebugFrameFDEHeader fde;
} DebugFrameHeader;

static void tcg_register_jit_int(const void *buf, size_t size,
                                 const void *debug_frame,
                                 size_t debug_frame_size)
    __attribute__((unused));

/* Forward declarations for functions declared and used in tcg-target.c.inc. */
static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1,
                       intptr_t arg2);
static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg);
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS]);
#if TCG_TARGET_MAYBE_vec
static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
                            TCGReg dst, TCGReg src);
static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg dst, TCGReg base, intptr_t offset);
static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg dst, int64_t arg);
static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
                           unsigned vecl, unsigned vece,
                           const TCGArg args[TCG_MAX_OP_ARGS],
                           const int const_args[TCG_MAX_OP_ARGS]);
#else
static inline bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
                                   TCGReg dst, TCGReg src)
{
    g_assert_not_reached();
}
static inline bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
                                    TCGReg dst, TCGReg base, intptr_t offset)
{
    g_assert_not_reached();
}
static inline void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
                                    TCGReg dst, int64_t arg)
{
    g_assert_not_reached();
}
static inline void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
                                  unsigned vecl, unsigned vece,
                                  const TCGArg args[TCG_MAX_OP_ARGS],
                                  const int const_args[TCG_MAX_OP_ARGS])
{
    g_assert_not_reached();
}
#endif
static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1,
                       intptr_t arg2);
static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                        TCGReg base, intptr_t ofs);
static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target);
static bool tcg_target_const_match(int64_t val, TCGType type, int ct);
#ifdef TCG_TARGET_NEED_LDST_LABELS
static int tcg_out_ldst_finalize(TCGContext *s);
#endif

TCGContext tcg_init_ctx;
__thread TCGContext *tcg_ctx;

TCGContext **tcg_ctxs;
unsigned int tcg_cur_ctxs;
unsigned int tcg_max_ctxs;
TCGv_env cpu_env = 0;
const void *tcg_code_gen_epilogue;
uintptr_t tcg_splitwx_diff;

#ifndef CONFIG_TCG_INTERPRETER
tcg_prologue_fn *tcg_qemu_tb_exec;
#endif

static TCGRegSet tcg_target_available_regs[TCG_TYPE_COUNT];
static TCGRegSet tcg_target_call_clobber_regs;

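/*
 * Code emission helpers.  tcg_outN() appends an N-bit value to the code
 * buffer and tcg_patchN() overwrites one in place; when N matches
 * TCG_TARGET_INSN_UNIT_SIZE the value is stored as a whole insn unit,
 * otherwise it is spread over several smaller units via memcpy.
 */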
#if TCG_TARGET_INSN_UNIT_SIZE == 1
static __attribute__((unused)) inline void tcg_out8(TCGContext *s, uint8_t v)
{
    *s->code_ptr++ = v;
}

static __attribute__((unused)) inline void tcg_patch8(tcg_insn_unit *p,
                                                      uint8_t v)
{
    *p = v;
}
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 2
static __attribute__((unused)) inline void tcg_out16(TCGContext *s, uint16_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (2 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch16(tcg_insn_unit *p,
                                                       uint16_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 4
static __attribute__((unused)) inline void tcg_out32(TCGContext *s, uint32_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (4 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch32(tcg_insn_unit *p,
                                                       uint32_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 8
static __attribute__((unused)) inline void tcg_out64(TCGContext *s, uint64_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (8 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch64(tcg_insn_unit *p,
                                                       uint64_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif

/* label relocation processing */

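/*
 * A branch to a label that has not been emitted yet records a relocation
 * on the label's list.  tcg_out_label() later fixes the label's address,
 * and tcg_resolve_relocs() walks every list, patching each recorded
 * instruction through the target's patch_reloc() hook.
 */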
static void tcg_out_reloc(TCGContext *s, tcg_insn_unit *code_ptr, int type,
                          TCGLabel *l, intptr_t addend)
{
    TCGRelocation *r = tcg_malloc(sizeof(TCGRelocation));

    r->type = type;
    r->ptr = code_ptr;
    r->addend = addend;
    QSIMPLEQ_INSERT_TAIL(&l->relocs, r, next);
}

static void tcg_out_label(TCGContext *s, TCGLabel *l)
{
    tcg_debug_assert(!l->has_value);
    l->has_value = 1;
    l->u.value_ptr = tcg_splitwx_to_rx(s->code_ptr);
}

TCGLabel *gen_new_label(void)
{
    TCGContext *s = tcg_ctx;
    TCGLabel *l = tcg_malloc(sizeof(TCGLabel));

    memset(l, 0, sizeof(TCGLabel));
    l->id = s->nb_labels++;
    QSIMPLEQ_INIT(&l->relocs);

    QSIMPLEQ_INSERT_TAIL(&s->labels, l, next);

    return l;
}

static bool tcg_resolve_relocs(TCGContext *s)
{
    TCGLabel *l;

    QSIMPLEQ_FOREACH(l, &s->labels, next) {
        TCGRelocation *r;
        uintptr_t value = l->u.value;

        QSIMPLEQ_FOREACH(r, &l->relocs, next) {
            if (!patch_reloc(r->ptr, r->type, value, r->addend)) {
                return false;
            }
        }
    }
    return true;
}

static void set_jmp_reset_offset(TCGContext *s, int which)
{
    /*
     * We will check for overflow at the end of the opcode loop in
     * tcg_gen_code, where we bound tcg_current_code_size to UINT16_MAX.
     */
    s->tb_jmp_reset_offset[which] = tcg_current_code_size(s);
}

/* Signal overflow, starting over with fewer guest insns. */
static void QEMU_NORETURN tcg_raise_tb_overflow(TCGContext *s)
{
    siglongjmp(s->jmp_trans, -2);
}

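/*
 * The C_PFXn macros below paste operand-constraint letters into a single
 * identifier.  As an illustration (the constraint letters themselves are
 * target-specific, so this particular set is only an example):
 * C_O1_I2(r, r, ri) expands to the enumerator c_o1_i2_r_r_ri on the first
 * inclusion of tcg-target-con-set.h, and to the initializer
 * { .args_ct_str = { "r", "r", "ri" } } on the second.
 */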
#define C_PFX1(P, A)                    P##A
#define C_PFX2(P, A, B)                 P##A##_##B
#define C_PFX3(P, A, B, C)              P##A##_##B##_##C
#define C_PFX4(P, A, B, C, D)           P##A##_##B##_##C##_##D
#define C_PFX5(P, A, B, C, D, E)        P##A##_##B##_##C##_##D##_##E
#define C_PFX6(P, A, B, C, D, E, F)     P##A##_##B##_##C##_##D##_##E##_##F

/* Define an enumeration for the various combinations. */

#define C_O0_I1(I1)                     C_PFX1(c_o0_i1_, I1),
#define C_O0_I2(I1, I2)                 C_PFX2(c_o0_i2_, I1, I2),
#define C_O0_I3(I1, I2, I3)             C_PFX3(c_o0_i3_, I1, I2, I3),
#define C_O0_I4(I1, I2, I3, I4)         C_PFX4(c_o0_i4_, I1, I2, I3, I4),

#define C_O1_I1(O1, I1)                 C_PFX2(c_o1_i1_, O1, I1),
#define C_O1_I2(O1, I1, I2)             C_PFX3(c_o1_i2_, O1, I1, I2),
#define C_O1_I3(O1, I1, I2, I3)         C_PFX4(c_o1_i3_, O1, I1, I2, I3),
#define C_O1_I4(O1, I1, I2, I3, I4)     C_PFX5(c_o1_i4_, O1, I1, I2, I3, I4),

#define C_N1_I2(O1, I1, I2)             C_PFX3(c_n1_i2_, O1, I1, I2),

#define C_O2_I1(O1, O2, I1)             C_PFX3(c_o2_i1_, O1, O2, I1),
#define C_O2_I2(O1, O2, I1, I2)         C_PFX4(c_o2_i2_, O1, O2, I1, I2),
#define C_O2_I3(O1, O2, I1, I2, I3)     C_PFX5(c_o2_i3_, O1, O2, I1, I2, I3),
#define C_O2_I4(O1, O2, I1, I2, I3, I4) C_PFX6(c_o2_i4_, O1, O2, I1, I2, I3, I4),

typedef enum {
#include "tcg-target-con-set.h"
} TCGConstraintSetIndex;

static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode);

#undef C_O0_I1
#undef C_O0_I2
#undef C_O0_I3
#undef C_O0_I4
#undef C_O1_I1
#undef C_O1_I2
#undef C_O1_I3
#undef C_O1_I4
#undef C_N1_I2
#undef C_O2_I1
#undef C_O2_I2
#undef C_O2_I3
#undef C_O2_I4

/* Put all of the constraint sets into an array, indexed by the enum. */

#define C_O0_I1(I1)                     { .args_ct_str = { #I1 } },
#define C_O0_I2(I1, I2)                 { .args_ct_str = { #I1, #I2 } },
#define C_O0_I3(I1, I2, I3)             { .args_ct_str = { #I1, #I2, #I3 } },
#define C_O0_I4(I1, I2, I3, I4)         { .args_ct_str = { #I1, #I2, #I3, #I4 } },

#define C_O1_I1(O1, I1)                 { .args_ct_str = { #O1, #I1 } },
#define C_O1_I2(O1, I1, I2)             { .args_ct_str = { #O1, #I1, #I2 } },
#define C_O1_I3(O1, I1, I2, I3)         { .args_ct_str = { #O1, #I1, #I2, #I3 } },
#define C_O1_I4(O1, I1, I2, I3, I4)     { .args_ct_str = { #O1, #I1, #I2, #I3, #I4 } },

#define C_N1_I2(O1, I1, I2)             { .args_ct_str = { "&" #O1, #I1, #I2 } },

#define C_O2_I1(O1, O2, I1)             { .args_ct_str = { #O1, #O2, #I1 } },
#define C_O2_I2(O1, O2, I1, I2)         { .args_ct_str = { #O1, #O2, #I1, #I2 } },
#define C_O2_I3(O1, O2, I1, I2, I3)     { .args_ct_str = { #O1, #O2, #I1, #I2, #I3 } },
#define C_O2_I4(O1, O2, I1, I2, I3, I4) { .args_ct_str = { #O1, #O2, #I1, #I2, #I3, #I4 } },

static const TCGTargetOpDef constraint_sets[] = {
#include "tcg-target-con-set.h"
};


#undef C_O0_I1
#undef C_O0_I2
#undef C_O0_I3
#undef C_O0_I4
#undef C_O1_I1
#undef C_O1_I2
#undef C_O1_I3
#undef C_O1_I4
#undef C_N1_I2
#undef C_O2_I1
#undef C_O2_I2
#undef C_O2_I3
#undef C_O2_I4

/* Expand the enumerator to be returned from tcg_target_op_def(). */

#define C_O0_I1(I1)                     C_PFX1(c_o0_i1_, I1)
#define C_O0_I2(I1, I2)                 C_PFX2(c_o0_i2_, I1, I2)
#define C_O0_I3(I1, I2, I3)             C_PFX3(c_o0_i3_, I1, I2, I3)
#define C_O0_I4(I1, I2, I3, I4)         C_PFX4(c_o0_i4_, I1, I2, I3, I4)

#define C_O1_I1(O1, I1)                 C_PFX2(c_o1_i1_, O1, I1)
#define C_O1_I2(O1, I1, I2)             C_PFX3(c_o1_i2_, O1, I1, I2)
#define C_O1_I3(O1, I1, I2, I3)         C_PFX4(c_o1_i3_, O1, I1, I2, I3)
#define C_O1_I4(O1, I1, I2, I3, I4)     C_PFX5(c_o1_i4_, O1, I1, I2, I3, I4)

#define C_N1_I2(O1, I1, I2)             C_PFX3(c_n1_i2_, O1, I1, I2)

#define C_O2_I1(O1, O2, I1)             C_PFX3(c_o2_i1_, O1, O2, I1)
#define C_O2_I2(O1, O2, I1, I2)         C_PFX4(c_o2_i2_, O1, O2, I1, I2)
#define C_O2_I3(O1, O2, I1, I2, I3)     C_PFX5(c_o2_i3_, O1, O2, I1, I2, I3)
#define C_O2_I4(O1, O2, I1, I2, I3, I4) C_PFX6(c_o2_i4_, O1, O2, I1, I2, I3, I4)

#include "tcg-target.c.inc"

static void alloc_tcg_plugin_context(TCGContext *s)
{
#ifdef CONFIG_PLUGIN
    s->plugin_tb = g_new0(struct qemu_plugin_tb, 1);
    s->plugin_tb->insns =
        g_ptr_array_new_with_free_func(qemu_plugin_insn_cleanup_fn);
#endif
}

/*
 * All TCG threads except the parent (i.e. the one that called tcg_context_init
 * and registered the target's TCG globals) must register with this function
 * before initiating translation.
 *
 * In user-mode we just point tcg_ctx to tcg_init_ctx. See the documentation
 * of tcg_region_init() for the reasoning behind this.
 *
 * In softmmu each caller registers its context in tcg_ctxs[]. Note that in
 * softmmu tcg_ctxs[] does not track tcg_init_ctx, since the initial context
 * is not used anymore for translation once this function is called.
 *
 * Not tracking tcg_init_ctx in tcg_ctxs[] in softmmu keeps code that iterates
 * over the array (e.g. tcg_code_size()) the same for both softmmu and
 * user-mode.
 */
#ifdef CONFIG_USER_ONLY
void tcg_register_thread(void)
{
    tcg_ctx = &tcg_init_ctx;
}
#else
void tcg_register_thread(void)
{
    TCGContext *s = g_malloc(sizeof(*s));
    unsigned int i, n;

    *s = tcg_init_ctx;

    /* Relink mem_base.  */
    for (i = 0, n = tcg_init_ctx.nb_globals; i < n; ++i) {
        if (tcg_init_ctx.temps[i].mem_base) {
            ptrdiff_t b = tcg_init_ctx.temps[i].mem_base - tcg_init_ctx.temps;
            tcg_debug_assert(b >= 0 && b < n);
            s->temps[i].mem_base = &s->temps[b];
        }
    }

    /* Claim an entry in tcg_ctxs */
    n = qatomic_fetch_inc(&tcg_cur_ctxs);
    g_assert(n < tcg_max_ctxs);
    qatomic_set(&tcg_ctxs[n], s);

    if (n > 0) {
        alloc_tcg_plugin_context(s);
        tcg_region_initial_alloc(s);
    }

    tcg_ctx = s;
}
#endif /* !CONFIG_USER_ONLY */

/* pool based memory allocation */
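/*
 * tcg_malloc() is a bump allocator over per-context pool chunks: requests
 * larger than TCG_POOL_CHUNK_SIZE get a dedicated chunk on the
 * pool_first_large list, while small requests are carved out of reusable
 * fixed-size chunks.  tcg_pool_reset() frees only the large chunks and
 * rewinds the small ones, so per-TB allocations are cheap to discard.
 */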
void *tcg_malloc_internal(TCGContext *s, int size)
{
    TCGPool *p;
    int pool_size;

    if (size > TCG_POOL_CHUNK_SIZE) {
        /* big malloc: insert a new pool (XXX: could optimize) */
        p = g_malloc(sizeof(TCGPool) + size);
        p->size = size;
        p->next = s->pool_first_large;
        s->pool_first_large = p;
        return p->data;
    } else {
        p = s->pool_current;
        if (!p) {
            p = s->pool_first;
            if (!p) {
                goto new_pool;
            }
        } else {
            if (!p->next) {
            new_pool:
                pool_size = TCG_POOL_CHUNK_SIZE;
                p = g_malloc(sizeof(TCGPool) + pool_size);
                p->size = pool_size;
                p->next = NULL;
                if (s->pool_current) {
                    s->pool_current->next = p;
                } else {
                    s->pool_first = p;
                }
            } else {
                p = p->next;
            }
        }
    }
    s->pool_current = p;
    s->pool_cur = p->data + size;
    s->pool_end = p->data + p->size;
    return p->data;
}

void tcg_pool_reset(TCGContext *s)
{
    TCGPool *p, *t;
    for (p = s->pool_first_large; p; p = t) {
        t = p->next;
        g_free(p);
    }
    s->pool_first_large = NULL;
    s->pool_cur = s->pool_end = NULL;
    s->pool_current = NULL;
}

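/*
 * Descriptor for one helper function, looked up by entry point in
 * helper_table.  flags holds the TCG_CALL_* attributes, and sizemask
 * encodes, two bits per slot, whether the return value (slot 0) and each
 * argument are 64-bit and/or signed; see tcg_gen_callN() below.
 */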
typedef struct TCGHelperInfo {
    void *func;
    const char *name;
    unsigned flags;
    unsigned sizemask;
} TCGHelperInfo;

#include "exec/helper-proto.h"

static const TCGHelperInfo all_helpers[] = {
#include "exec/helper-tcg.h"
};
static GHashTable *helper_table;

static int indirect_reg_alloc_order[ARRAY_SIZE(tcg_target_reg_alloc_order)];
static void process_op_defs(TCGContext *s);
static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type,
                                            TCGReg reg, const char *name);

static void tcg_context_init(unsigned max_cpus)
{
    TCGContext *s = &tcg_init_ctx;
    int op, total_args, n, i;
    TCGOpDef *def;
    TCGArgConstraint *args_ct;
    TCGTemp *ts;

    memset(s, 0, sizeof(*s));
    s->nb_globals = 0;

    /* Count total number of arguments and allocate the corresponding
       space */
    total_args = 0;
    for (op = 0; op < NB_OPS; op++) {
        def = &tcg_op_defs[op];
        n = def->nb_iargs + def->nb_oargs;
        total_args += n;
    }

    args_ct = g_new0(TCGArgConstraint, total_args);

    for (op = 0; op < NB_OPS; op++) {
        def = &tcg_op_defs[op];
        def->args_ct = args_ct;
        n = def->nb_iargs + def->nb_oargs;
        args_ct += n;
    }

    /* Register helpers.  */
    /* Use g_direct_hash/equal for direct pointer comparisons on func.  */
    helper_table = g_hash_table_new(NULL, NULL);

    for (i = 0; i < ARRAY_SIZE(all_helpers); ++i) {
        g_hash_table_insert(helper_table, (gpointer)all_helpers[i].func,
                            (gpointer)&all_helpers[i]);
    }

    tcg_target_init(s);
    process_op_defs(s);

    /* Reverse the order of the saved registers, assuming they're all at
       the start of tcg_target_reg_alloc_order.  */
    for (n = 0; n < ARRAY_SIZE(tcg_target_reg_alloc_order); ++n) {
        int r = tcg_target_reg_alloc_order[n];
        if (tcg_regset_test_reg(tcg_target_call_clobber_regs, r)) {
            break;
        }
    }
    for (i = 0; i < n; ++i) {
        indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[n - 1 - i];
    }
    for (; i < ARRAY_SIZE(tcg_target_reg_alloc_order); ++i) {
        indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[i];
    }

    alloc_tcg_plugin_context(s);

    tcg_ctx = s;
    /*
     * In user-mode we simply share the init context among threads, since we
     * use a single region. See the documentation of tcg_region_init() for
     * the reasoning behind this.
     * In softmmu we will have at most max_cpus TCG threads.
     */
#ifdef CONFIG_USER_ONLY
    tcg_ctxs = &tcg_ctx;
    tcg_cur_ctxs = 1;
    tcg_max_ctxs = 1;
#else
    tcg_max_ctxs = max_cpus;
    tcg_ctxs = g_new0(TCGContext *, max_cpus);
#endif

    tcg_debug_assert(!tcg_regset_test_reg(s->reserved_regs, TCG_AREG0));
    ts = tcg_global_reg_new_internal(s, TCG_TYPE_PTR, TCG_AREG0, "env");
    cpu_env = temp_tcgv_ptr(ts);
}

void tcg_init(size_t tb_size, int splitwx, unsigned max_cpus)
{
    tcg_context_init(max_cpus);
    tcg_region_init(tb_size, splitwx, max_cpus);
}

/*
 * Allocate TBs right before their corresponding translated code, making
 * sure that TBs and code are on different cache lines.
 */
TranslationBlock *tcg_tb_alloc(TCGContext *s)
{
    uintptr_t align = qemu_icache_linesize;
    TranslationBlock *tb;
    void *next;

 retry:
    tb = (void *)ROUND_UP((uintptr_t)s->code_gen_ptr, align);
    next = (void *)ROUND_UP((uintptr_t)(tb + 1), align);

    if (unlikely(next > s->code_gen_highwater)) {
        if (tcg_region_alloc(s)) {
            return NULL;
        }
        goto retry;
    }
    qatomic_set(&s->code_gen_ptr, next);
    s->data_gen_ptr = NULL;
    return tb;
}

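/*
 * Generate the host prologue and epilogue at the start of the code buffer.
 * Afterwards tcg_qemu_tb_exec points at the (executable view of the)
 * prologue, which is how the main loop enters generated code.
 */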
void tcg_prologue_init(TCGContext *s)
{
    size_t prologue_size;

    s->code_ptr = s->code_gen_ptr;
    s->code_buf = s->code_gen_ptr;
    s->data_gen_ptr = NULL;

#ifndef CONFIG_TCG_INTERPRETER
    tcg_qemu_tb_exec = (tcg_prologue_fn *)tcg_splitwx_to_rx(s->code_ptr);
#endif

#ifdef TCG_TARGET_NEED_POOL_LABELS
    s->pool_labels = NULL;
#endif

    qemu_thread_jit_write();
    /* Generate the prologue.  */
    tcg_target_qemu_prologue(s);

#ifdef TCG_TARGET_NEED_POOL_LABELS
    /* Allow the prologue to put e.g. guest_base into a pool entry.  */
    {
        int result = tcg_out_pool_finalize(s);
        tcg_debug_assert(result == 0);
    }
#endif

    prologue_size = tcg_current_code_size(s);

#ifndef CONFIG_TCG_INTERPRETER
    flush_idcache_range((uintptr_t)tcg_splitwx_to_rx(s->code_buf),
                        (uintptr_t)s->code_buf, prologue_size);
#endif

    tcg_region_prologue_set(s);

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
        FILE *logfile = qemu_log_lock();
        qemu_log("PROLOGUE: [size=%zu]\n", prologue_size);
        if (s->data_gen_ptr) {
            size_t code_size = s->data_gen_ptr - s->code_gen_ptr;
            size_t data_size = prologue_size - code_size;
            size_t i;

            log_disas(s->code_gen_ptr, code_size);

            for (i = 0; i < data_size; i += sizeof(tcg_target_ulong)) {
                if (sizeof(tcg_target_ulong) == 8) {
                    qemu_log("0x%08" PRIxPTR ":  .quad  0x%016" PRIx64 "\n",
                             (uintptr_t)s->data_gen_ptr + i,
                             *(uint64_t *)(s->data_gen_ptr + i));
                } else {
                    qemu_log("0x%08" PRIxPTR ":  .long  0x%08x\n",
                             (uintptr_t)s->data_gen_ptr + i,
                             *(uint32_t *)(s->data_gen_ptr + i));
                }
            }
        } else {
            log_disas(s->code_gen_ptr, prologue_size);
        }
        qemu_log("\n");
        qemu_log_flush();
        qemu_log_unlock(logfile);
    }
#endif

    /* Assert that goto_ptr is implemented completely.  */
    if (TCG_TARGET_HAS_goto_ptr) {
        tcg_debug_assert(tcg_code_gen_epilogue != NULL);
    }
}

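/* Reset all per-translation state before starting a new TB. */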
void tcg_func_start(TCGContext *s)
{
    tcg_pool_reset(s);
    s->nb_temps = s->nb_globals;

    /* No temps have been previously allocated for size or locality.  */
    memset(s->free_temps, 0, sizeof(s->free_temps));

    /* No constant temps have been previously allocated. */
    for (int i = 0; i < TCG_TYPE_COUNT; ++i) {
        if (s->const_table[i]) {
            g_hash_table_remove_all(s->const_table[i]);
        }
    }

    s->nb_ops = 0;
    s->nb_labels = 0;
    s->current_frame_offset = s->frame_start;

#ifdef CONFIG_DEBUG_TCG
    s->goto_tb_issue_mask = 0;
#endif

    QTAILQ_INIT(&s->ops);
    QTAILQ_INIT(&s->free_ops);
    QSIMPLEQ_INIT(&s->labels);
}

static TCGTemp *tcg_temp_alloc(TCGContext *s)
{
    int n = s->nb_temps++;

    if (n >= TCG_MAX_TEMPS) {
        tcg_raise_tb_overflow(s);
    }
    return memset(&s->temps[n], 0, sizeof(TCGTemp));
}

static TCGTemp *tcg_global_alloc(TCGContext *s)
{
    TCGTemp *ts;

    tcg_debug_assert(s->nb_globals == s->nb_temps);
    tcg_debug_assert(s->nb_globals < TCG_MAX_TEMPS);
    s->nb_globals++;
    ts = tcg_temp_alloc(s);
    ts->kind = TEMP_GLOBAL;

    return ts;
}

static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type,
                                            TCGReg reg, const char *name)
{
    TCGTemp *ts;

    if (TCG_TARGET_REG_BITS == 32 && type != TCG_TYPE_I32) {
        tcg_abort();
    }

    ts = tcg_global_alloc(s);
    ts->base_type = type;
    ts->type = type;
    ts->kind = TEMP_FIXED;
    ts->reg = reg;
    ts->name = name;
    tcg_regset_set_reg(s->reserved_regs, reg);

    return ts;
}

void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size)
{
    s->frame_start = start;
    s->frame_end = start + size;
    s->frame_temp
        = tcg_global_reg_new_internal(s, TCG_TYPE_PTR, reg, "_frame");
}

TCGTemp *tcg_global_mem_new_internal(TCGType type, TCGv_ptr base,
                                     intptr_t offset, const char *name)
{
    TCGContext *s = tcg_ctx;
    TCGTemp *base_ts = tcgv_ptr_temp(base);
    TCGTemp *ts = tcg_global_alloc(s);
    int indirect_reg = 0, bigendian = 0;
#ifdef HOST_WORDS_BIGENDIAN
    bigendian = 1;
#endif

    switch (base_ts->kind) {
    case TEMP_FIXED:
        break;
    case TEMP_GLOBAL:
        /* We do not support double-indirect registers.  */
        tcg_debug_assert(!base_ts->indirect_reg);
        base_ts->indirect_base = 1;
        s->nb_indirects += (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64
                            ? 2 : 1);
        indirect_reg = 1;
        break;
    default:
        g_assert_not_reached();
    }

    if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
        TCGTemp *ts2 = tcg_global_alloc(s);
        char buf[64];

        ts->base_type = TCG_TYPE_I64;
        ts->type = TCG_TYPE_I32;
        ts->indirect_reg = indirect_reg;
        ts->mem_allocated = 1;
        ts->mem_base = base_ts;
        ts->mem_offset = offset + bigendian * 4;
        pstrcpy(buf, sizeof(buf), name);
        pstrcat(buf, sizeof(buf), "_0");
        ts->name = strdup(buf);

        tcg_debug_assert(ts2 == ts + 1);
        ts2->base_type = TCG_TYPE_I64;
        ts2->type = TCG_TYPE_I32;
        ts2->indirect_reg = indirect_reg;
        ts2->mem_allocated = 1;
        ts2->mem_base = base_ts;
        ts2->mem_offset = offset + (1 - bigendian) * 4;
        pstrcpy(buf, sizeof(buf), name);
        pstrcat(buf, sizeof(buf), "_1");
        ts2->name = strdup(buf);
    } else {
        ts->base_type = type;
        ts->type = type;
        ts->indirect_reg = indirect_reg;
        ts->mem_allocated = 1;
        ts->mem_base = base_ts;
        ts->mem_offset = offset;
        ts->name = name;
    }
    return ts;
}

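/*
 * Allocate a translation-time temporary, reusing a previously freed one
 * when possible.  Free temps are tracked per base type in free_temps[],
 * with local temps offset by TCG_TYPE_COUNT so the two kinds never mix.
 */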
TCGTemp *tcg_temp_new_internal(TCGType type, bool temp_local)
{
    TCGContext *s = tcg_ctx;
    TCGTempKind kind = temp_local ? TEMP_LOCAL : TEMP_NORMAL;
    TCGTemp *ts;
    int idx, k;

    k = type + (temp_local ? TCG_TYPE_COUNT : 0);
    idx = find_first_bit(s->free_temps[k].l, TCG_MAX_TEMPS);
    if (idx < TCG_MAX_TEMPS) {
        /* There is already an available temp with the right type.  */
        clear_bit(idx, s->free_temps[k].l);

        ts = &s->temps[idx];
        ts->temp_allocated = 1;
        tcg_debug_assert(ts->base_type == type);
        tcg_debug_assert(ts->kind == kind);
    } else {
        ts = tcg_temp_alloc(s);
        if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
            TCGTemp *ts2 = tcg_temp_alloc(s);

            ts->base_type = type;
            ts->type = TCG_TYPE_I32;
            ts->temp_allocated = 1;
            ts->kind = kind;

            tcg_debug_assert(ts2 == ts + 1);
            ts2->base_type = TCG_TYPE_I64;
            ts2->type = TCG_TYPE_I32;
            ts2->temp_allocated = 1;
            ts2->kind = kind;
        } else {
            ts->base_type = type;
            ts->type = type;
            ts->temp_allocated = 1;
            ts->kind = kind;
        }
    }

#if defined(CONFIG_DEBUG_TCG)
    s->temps_in_use++;
#endif
    return ts;
}

TCGv_vec tcg_temp_new_vec(TCGType type)
{
    TCGTemp *t;

#ifdef CONFIG_DEBUG_TCG
    switch (type) {
    case TCG_TYPE_V64:
        assert(TCG_TARGET_HAS_v64);
        break;
    case TCG_TYPE_V128:
        assert(TCG_TARGET_HAS_v128);
        break;
    case TCG_TYPE_V256:
        assert(TCG_TARGET_HAS_v256);
        break;
    default:
        g_assert_not_reached();
    }
#endif

    t = tcg_temp_new_internal(type, 0);
    return temp_tcgv_vec(t);
}

/* Create a new temp of the same type as an existing temp.  */
TCGv_vec tcg_temp_new_vec_matching(TCGv_vec match)
{
    TCGTemp *t = tcgv_vec_temp(match);

    tcg_debug_assert(t->temp_allocated != 0);

    t = tcg_temp_new_internal(t->base_type, 0);
    return temp_tcgv_vec(t);
}

void tcg_temp_free_internal(TCGTemp *ts)
{
    TCGContext *s = tcg_ctx;
    int k, idx;

    /* In order to simplify users of tcg_constant_*, silently ignore free. */
    if (ts->kind == TEMP_CONST) {
        return;
    }

#if defined(CONFIG_DEBUG_TCG)
    s->temps_in_use--;
    if (s->temps_in_use < 0) {
        fprintf(stderr, "More temporaries freed than allocated!\n");
    }
#endif

    tcg_debug_assert(ts->kind < TEMP_GLOBAL);
    tcg_debug_assert(ts->temp_allocated != 0);
    ts->temp_allocated = 0;

    idx = temp_idx(ts);
    k = ts->base_type + (ts->kind == TEMP_NORMAL ? 0 : TCG_TYPE_COUNT);
    set_bit(idx, s->free_temps[k].l);
}

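/*
 * Return the canonical read-only temp holding VAL.  Constants are
 * interned in a per-type hash table keyed by value, so repeated requests
 * for the same constant share one temp; such temps are never freed.
 */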
TCGTemp *tcg_constant_internal(TCGType type, int64_t val)
{
    TCGContext *s = tcg_ctx;
    GHashTable *h = s->const_table[type];
    TCGTemp *ts;

    if (h == NULL) {
        h = g_hash_table_new(g_int64_hash, g_int64_equal);
        s->const_table[type] = h;
    }

    ts = g_hash_table_lookup(h, &val);
    if (ts == NULL) {
        ts = tcg_temp_alloc(s);

        if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
            TCGTemp *ts2 = tcg_temp_alloc(s);

            ts->base_type = TCG_TYPE_I64;
            ts->type = TCG_TYPE_I32;
            ts->kind = TEMP_CONST;
            ts->temp_allocated = 1;
            /*
             * Retain the full value of the 64-bit constant in the low
             * part, so that the hash table works.  Actual uses will
             * truncate the value to the low part.
             */
            ts->val = val;

            tcg_debug_assert(ts2 == ts + 1);
            ts2->base_type = TCG_TYPE_I64;
            ts2->type = TCG_TYPE_I32;
            ts2->kind = TEMP_CONST;
            ts2->temp_allocated = 1;
            ts2->val = val >> 32;
        } else {
            ts->base_type = type;
            ts->type = type;
            ts->kind = TEMP_CONST;
            ts->temp_allocated = 1;
            ts->val = val;
        }
        g_hash_table_insert(h, &ts->val, ts);
    }

    return ts;
}

TCGv_vec tcg_constant_vec(TCGType type, unsigned vece, int64_t val)
{
    val = dup_const(vece, val);
    return temp_tcgv_vec(tcg_constant_internal(type, val));
}

TCGv_vec tcg_constant_vec_matching(TCGv_vec match, unsigned vece, int64_t val)
{
    TCGTemp *t = tcgv_vec_temp(match);

    tcg_debug_assert(t->temp_allocated != 0);
    return tcg_constant_vec(t->base_type, vece, val);
}

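/*
 * Unlike the interned tcg_constant_*() temps above, tcg_const_*() return
 * a fresh, mutable temporary initialized to VAL, which the caller must
 * eventually free.
 */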
TCGv_i32 tcg_const_i32(int32_t val)
{
    TCGv_i32 t0;
    t0 = tcg_temp_new_i32();
    tcg_gen_movi_i32(t0, val);
    return t0;
}

TCGv_i64 tcg_const_i64(int64_t val)
{
    TCGv_i64 t0;
    t0 = tcg_temp_new_i64();
    tcg_gen_movi_i64(t0, val);
    return t0;
}

TCGv_i32 tcg_const_local_i32(int32_t val)
{
    TCGv_i32 t0;
    t0 = tcg_temp_local_new_i32();
    tcg_gen_movi_i32(t0, val);
    return t0;
}

TCGv_i64 tcg_const_local_i64(int64_t val)
{
    TCGv_i64 t0;
    t0 = tcg_temp_local_new_i64();
    tcg_gen_movi_i64(t0, val);
    return t0;
}

#if defined(CONFIG_DEBUG_TCG)
void tcg_clear_temp_count(void)
{
    TCGContext *s = tcg_ctx;
    s->temps_in_use = 0;
}

int tcg_check_temp_count(void)
{
    TCGContext *s = tcg_ctx;
    if (s->temps_in_use) {
        /* Clear the count so that we don't give another
         * warning immediately next time around.
         */
        s->temps_in_use = 0;
        return 1;
    }
    return 0;
}
#endif

/* Return true if OP may appear in the opcode stream.
   Test the runtime variable that controls each opcode.  */
bool tcg_op_supported(TCGOpcode op)
{
    const bool have_vec
        = TCG_TARGET_HAS_v64 | TCG_TARGET_HAS_v128 | TCG_TARGET_HAS_v256;

    switch (op) {
    case INDEX_op_discard:
    case INDEX_op_set_label:
    case INDEX_op_call:
    case INDEX_op_br:
    case INDEX_op_mb:
    case INDEX_op_insn_start:
    case INDEX_op_exit_tb:
    case INDEX_op_goto_tb:
    case INDEX_op_qemu_ld_i32:
    case INDEX_op_qemu_st_i32:
    case INDEX_op_qemu_ld_i64:
    case INDEX_op_qemu_st_i64:
        return true;

    case INDEX_op_qemu_st8_i32:
        return TCG_TARGET_HAS_qemu_st8_i32;

    case INDEX_op_goto_ptr:
        return TCG_TARGET_HAS_goto_ptr;

    case INDEX_op_mov_i32:
    case INDEX_op_setcond_i32:
    case INDEX_op_brcond_i32:
    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
    case INDEX_op_add_i32:
    case INDEX_op_sub_i32:
    case INDEX_op_mul_i32:
    case INDEX_op_and_i32:
    case INDEX_op_or_i32:
    case INDEX_op_xor_i32:
    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
        return true;

    case INDEX_op_movcond_i32:
        return TCG_TARGET_HAS_movcond_i32;
    case INDEX_op_div_i32:
    case INDEX_op_divu_i32:
        return TCG_TARGET_HAS_div_i32;
    case INDEX_op_rem_i32:
    case INDEX_op_remu_i32:
        return TCG_TARGET_HAS_rem_i32;
    case INDEX_op_div2_i32:
    case INDEX_op_divu2_i32:
        return TCG_TARGET_HAS_div2_i32;
    case INDEX_op_rotl_i32:
    case INDEX_op_rotr_i32:
        return TCG_TARGET_HAS_rot_i32;
    case INDEX_op_deposit_i32:
        return TCG_TARGET_HAS_deposit_i32;
    case INDEX_op_extract_i32:
        return TCG_TARGET_HAS_extract_i32;
    case INDEX_op_sextract_i32:
        return TCG_TARGET_HAS_sextract_i32;
    case INDEX_op_extract2_i32:
        return TCG_TARGET_HAS_extract2_i32;
    case INDEX_op_add2_i32:
        return TCG_TARGET_HAS_add2_i32;
    case INDEX_op_sub2_i32:
        return TCG_TARGET_HAS_sub2_i32;
    case INDEX_op_mulu2_i32:
        return TCG_TARGET_HAS_mulu2_i32;
    case INDEX_op_muls2_i32:
        return TCG_TARGET_HAS_muls2_i32;
    case INDEX_op_muluh_i32:
        return TCG_TARGET_HAS_muluh_i32;
    case INDEX_op_mulsh_i32:
        return TCG_TARGET_HAS_mulsh_i32;
    case INDEX_op_ext8s_i32:
        return TCG_TARGET_HAS_ext8s_i32;
    case INDEX_op_ext16s_i32:
        return TCG_TARGET_HAS_ext16s_i32;
    case INDEX_op_ext8u_i32:
        return TCG_TARGET_HAS_ext8u_i32;
    case INDEX_op_ext16u_i32:
        return TCG_TARGET_HAS_ext16u_i32;
    case INDEX_op_bswap16_i32:
        return TCG_TARGET_HAS_bswap16_i32;
    case INDEX_op_bswap32_i32:
        return TCG_TARGET_HAS_bswap32_i32;
    case INDEX_op_not_i32:
        return TCG_TARGET_HAS_not_i32;
    case INDEX_op_neg_i32:
        return TCG_TARGET_HAS_neg_i32;
    case INDEX_op_andc_i32:
        return TCG_TARGET_HAS_andc_i32;
    case INDEX_op_orc_i32:
        return TCG_TARGET_HAS_orc_i32;
    case INDEX_op_eqv_i32:
        return TCG_TARGET_HAS_eqv_i32;
    case INDEX_op_nand_i32:
        return TCG_TARGET_HAS_nand_i32;
    case INDEX_op_nor_i32:
        return TCG_TARGET_HAS_nor_i32;
    case INDEX_op_clz_i32:
        return TCG_TARGET_HAS_clz_i32;
    case INDEX_op_ctz_i32:
        return TCG_TARGET_HAS_ctz_i32;
    case INDEX_op_ctpop_i32:
        return TCG_TARGET_HAS_ctpop_i32;

    case INDEX_op_brcond2_i32:
    case INDEX_op_setcond2_i32:
        return TCG_TARGET_REG_BITS == 32;

    case INDEX_op_mov_i64:
    case INDEX_op_setcond_i64:
    case INDEX_op_brcond_i64:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
    case INDEX_op_add_i64:
    case INDEX_op_sub_i64:
    case INDEX_op_mul_i64:
    case INDEX_op_and_i64:
    case INDEX_op_or_i64:
    case INDEX_op_xor_i64:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
        return TCG_TARGET_REG_BITS == 64;

    case INDEX_op_movcond_i64:
        return TCG_TARGET_HAS_movcond_i64;
    case INDEX_op_div_i64:
    case INDEX_op_divu_i64:
        return TCG_TARGET_HAS_div_i64;
    case INDEX_op_rem_i64:
    case INDEX_op_remu_i64:
        return TCG_TARGET_HAS_rem_i64;
    case INDEX_op_div2_i64:
    case INDEX_op_divu2_i64:
        return TCG_TARGET_HAS_div2_i64;
    case INDEX_op_rotl_i64:
    case INDEX_op_rotr_i64:
        return TCG_TARGET_HAS_rot_i64;
    case INDEX_op_deposit_i64:
        return TCG_TARGET_HAS_deposit_i64;
    case INDEX_op_extract_i64:
        return TCG_TARGET_HAS_extract_i64;
    case INDEX_op_sextract_i64:
        return TCG_TARGET_HAS_sextract_i64;
    case INDEX_op_extract2_i64:
        return TCG_TARGET_HAS_extract2_i64;
    case INDEX_op_extrl_i64_i32:
        return TCG_TARGET_HAS_extrl_i64_i32;
    case INDEX_op_extrh_i64_i32:
        return TCG_TARGET_HAS_extrh_i64_i32;
    case INDEX_op_ext8s_i64:
        return TCG_TARGET_HAS_ext8s_i64;
    case INDEX_op_ext16s_i64:
        return TCG_TARGET_HAS_ext16s_i64;
    case INDEX_op_ext32s_i64:
        return TCG_TARGET_HAS_ext32s_i64;
    case INDEX_op_ext8u_i64:
        return TCG_TARGET_HAS_ext8u_i64;
    case INDEX_op_ext16u_i64:
        return TCG_TARGET_HAS_ext16u_i64;
    case INDEX_op_ext32u_i64:
        return TCG_TARGET_HAS_ext32u_i64;
    case INDEX_op_bswap16_i64:
        return TCG_TARGET_HAS_bswap16_i64;
    case INDEX_op_bswap32_i64:
        return TCG_TARGET_HAS_bswap32_i64;
    case INDEX_op_bswap64_i64:
        return TCG_TARGET_HAS_bswap64_i64;
    case INDEX_op_not_i64:
        return TCG_TARGET_HAS_not_i64;
    case INDEX_op_neg_i64:
        return TCG_TARGET_HAS_neg_i64;
    case INDEX_op_andc_i64:
        return TCG_TARGET_HAS_andc_i64;
    case INDEX_op_orc_i64:
        return TCG_TARGET_HAS_orc_i64;
    case INDEX_op_eqv_i64:
        return TCG_TARGET_HAS_eqv_i64;
    case INDEX_op_nand_i64:
        return TCG_TARGET_HAS_nand_i64;
    case INDEX_op_nor_i64:
        return TCG_TARGET_HAS_nor_i64;
    case INDEX_op_clz_i64:
        return TCG_TARGET_HAS_clz_i64;
    case INDEX_op_ctz_i64:
        return TCG_TARGET_HAS_ctz_i64;
    case INDEX_op_ctpop_i64:
        return TCG_TARGET_HAS_ctpop_i64;
    case INDEX_op_add2_i64:
        return TCG_TARGET_HAS_add2_i64;
    case INDEX_op_sub2_i64:
        return TCG_TARGET_HAS_sub2_i64;
    case INDEX_op_mulu2_i64:
        return TCG_TARGET_HAS_mulu2_i64;
    case INDEX_op_muls2_i64:
        return TCG_TARGET_HAS_muls2_i64;
    case INDEX_op_muluh_i64:
        return TCG_TARGET_HAS_muluh_i64;
    case INDEX_op_mulsh_i64:
        return TCG_TARGET_HAS_mulsh_i64;

    case INDEX_op_mov_vec:
    case INDEX_op_dup_vec:
    case INDEX_op_dupm_vec:
    case INDEX_op_ld_vec:
    case INDEX_op_st_vec:
    case INDEX_op_add_vec:
    case INDEX_op_sub_vec:
    case INDEX_op_and_vec:
    case INDEX_op_or_vec:
    case INDEX_op_xor_vec:
    case INDEX_op_cmp_vec:
        return have_vec;
    case INDEX_op_dup2_vec:
        return have_vec && TCG_TARGET_REG_BITS == 32;
    case INDEX_op_not_vec:
        return have_vec && TCG_TARGET_HAS_not_vec;
    case INDEX_op_neg_vec:
        return have_vec && TCG_TARGET_HAS_neg_vec;
    case INDEX_op_abs_vec:
        return have_vec && TCG_TARGET_HAS_abs_vec;
    case INDEX_op_andc_vec:
        return have_vec && TCG_TARGET_HAS_andc_vec;
    case INDEX_op_orc_vec:
        return have_vec && TCG_TARGET_HAS_orc_vec;
    case INDEX_op_mul_vec:
        return have_vec && TCG_TARGET_HAS_mul_vec;
    case INDEX_op_shli_vec:
    case INDEX_op_shri_vec:
    case INDEX_op_sari_vec:
        return have_vec && TCG_TARGET_HAS_shi_vec;
    case INDEX_op_shls_vec:
    case INDEX_op_shrs_vec:
    case INDEX_op_sars_vec:
        return have_vec && TCG_TARGET_HAS_shs_vec;
    case INDEX_op_shlv_vec:
    case INDEX_op_shrv_vec:
    case INDEX_op_sarv_vec:
        return have_vec && TCG_TARGET_HAS_shv_vec;
    case INDEX_op_rotli_vec:
        return have_vec && TCG_TARGET_HAS_roti_vec;
    case INDEX_op_rotls_vec:
        return have_vec && TCG_TARGET_HAS_rots_vec;
    case INDEX_op_rotlv_vec:
    case INDEX_op_rotrv_vec:
        return have_vec && TCG_TARGET_HAS_rotv_vec;
    case INDEX_op_ssadd_vec:
    case INDEX_op_usadd_vec:
    case INDEX_op_sssub_vec:
    case INDEX_op_ussub_vec:
        return have_vec && TCG_TARGET_HAS_sat_vec;
    case INDEX_op_smin_vec:
    case INDEX_op_umin_vec:
    case INDEX_op_smax_vec:
    case INDEX_op_umax_vec:
        return have_vec && TCG_TARGET_HAS_minmax_vec;
    case INDEX_op_bitsel_vec:
        return have_vec && TCG_TARGET_HAS_bitsel_vec;
    case INDEX_op_cmpsel_vec:
        return have_vec && TCG_TARGET_HAS_cmpsel_vec;

    default:
        tcg_debug_assert(op > INDEX_op_last_generic && op < NB_OPS);
        return true;
    }
}

/* Note: we convert the 64-bit args to 32-bit and do some alignment
   and endian swap.  Maybe it would be better to do the alignment
   and endian swap in tcg_reg_alloc_call(). */
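/*
 * sizemask layout, as used below (two bits per slot, slot 0 being the
 * return value and slot i+1 being argument i):
 *   bit 2*slot:     the value in that slot is 64-bit
 *   bit 2*slot + 1: the value in that slot is signed
 */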
void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args)
{
    int i, real_args, nb_rets, pi;
    unsigned sizemask, flags;
    TCGHelperInfo *info;
    TCGOp *op;

    info = g_hash_table_lookup(helper_table, (gpointer)func);
    flags = info->flags;
    sizemask = info->sizemask;

#ifdef CONFIG_PLUGIN
    /* detect non-plugin helpers */
    if (tcg_ctx->plugin_insn && unlikely(strncmp(info->name, "plugin_", 7))) {
        tcg_ctx->plugin_insn->calls_helpers = true;
    }
#endif

#if defined(__sparc__) && !defined(__arch64__) \
    && !defined(CONFIG_TCG_INTERPRETER)
    /* We have 64-bit values in one register, but need to pass as two
       separate parameters.  Split them.  */
    int orig_sizemask = sizemask;
    int orig_nargs = nargs;
    TCGv_i64 retl, reth;
    TCGTemp *split_args[MAX_OPC_PARAM];

    retl = NULL;
    reth = NULL;
    if (sizemask != 0) {
        for (i = real_args = 0; i < nargs; ++i) {
            int is_64bit = sizemask & (1 << (i+1)*2);
            if (is_64bit) {
                TCGv_i64 orig = temp_tcgv_i64(args[i]);
                TCGv_i32 h = tcg_temp_new_i32();
                TCGv_i32 l = tcg_temp_new_i32();
                tcg_gen_extr_i64_i32(l, h, orig);
                split_args[real_args++] = tcgv_i32_temp(h);
                split_args[real_args++] = tcgv_i32_temp(l);
            } else {
                split_args[real_args++] = args[i];
            }
        }
        nargs = real_args;
        args = split_args;
        sizemask = 0;
    }
#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
    for (i = 0; i < nargs; ++i) {
        int is_64bit = sizemask & (1 << (i+1)*2);
        int is_signed = sizemask & (2 << (i+1)*2);
        if (!is_64bit) {
            TCGv_i64 temp = tcg_temp_new_i64();
            TCGv_i64 orig = temp_tcgv_i64(args[i]);
            if (is_signed) {
                tcg_gen_ext32s_i64(temp, orig);
            } else {
                tcg_gen_ext32u_i64(temp, orig);
            }
            args[i] = tcgv_i64_temp(temp);
        }
    }
#endif /* TCG_TARGET_EXTEND_ARGS */

    op = tcg_emit_op(INDEX_op_call);

    pi = 0;
    if (ret != NULL) {
#if defined(__sparc__) && !defined(__arch64__) \
    && !defined(CONFIG_TCG_INTERPRETER)
        if (orig_sizemask & 1) {
            /* The 32-bit ABI is going to return the 64-bit value in
               the %o0/%o1 register pair.  Prepare for this by using
               two return temporaries, and reassemble below.  */
            retl = tcg_temp_new_i64();
            reth = tcg_temp_new_i64();
            op->args[pi++] = tcgv_i64_arg(reth);
            op->args[pi++] = tcgv_i64_arg(retl);
            nb_rets = 2;
        } else {
            op->args[pi++] = temp_arg(ret);
            nb_rets = 1;
        }
#else
        if (TCG_TARGET_REG_BITS < 64 && (sizemask & 1)) {
#ifdef HOST_WORDS_BIGENDIAN
            op->args[pi++] = temp_arg(ret + 1);
            op->args[pi++] = temp_arg(ret);
#else
            op->args[pi++] = temp_arg(ret);
            op->args[pi++] = temp_arg(ret + 1);
#endif
            nb_rets = 2;
        } else {
            op->args[pi++] = temp_arg(ret);
            nb_rets = 1;
        }
#endif
    } else {
        nb_rets = 0;
    }
    TCGOP_CALLO(op) = nb_rets;

    real_args = 0;
    for (i = 0; i < nargs; i++) {
        int is_64bit = sizemask & (1 << (i+1)*2);
        if (TCG_TARGET_REG_BITS < 64 && is_64bit) {
#ifdef TCG_TARGET_CALL_ALIGN_ARGS
            /* some targets want aligned 64 bit args */
            if (real_args & 1) {
                op->args[pi++] = TCG_CALL_DUMMY_ARG;
                real_args++;
            }
#endif
           /* If stack grows up, then we will be placing successive
              arguments at lower addresses, which means we need to
              reverse the order compared to how we would normally
              treat either big or little-endian.  For those arguments
              that will wind up in registers, this still works for
              HPPA (the only current STACK_GROWSUP target) since the
              argument registers are *also* allocated in decreasing
              order.  If another such target is added, this logic may
              have to get more complicated to differentiate between
              stack arguments and register arguments.  */
#if defined(HOST_WORDS_BIGENDIAN) != defined(TCG_TARGET_STACK_GROWSUP)
            op->args[pi++] = temp_arg(args[i] + 1);
            op->args[pi++] = temp_arg(args[i]);
#else
            op->args[pi++] = temp_arg(args[i]);
            op->args[pi++] = temp_arg(args[i] + 1);
#endif
            real_args += 2;
            continue;
        }

        op->args[pi++] = temp_arg(args[i]);
        real_args++;
    }
    op->args[pi++] = (uintptr_t)func;
    op->args[pi++] = flags;
    TCGOP_CALLI(op) = real_args;

    /* Make sure the fields didn't overflow.  */
    tcg_debug_assert(TCGOP_CALLI(op) == real_args);
    tcg_debug_assert(pi <= ARRAY_SIZE(op->args));

#if defined(__sparc__) && !defined(__arch64__) \
    && !defined(CONFIG_TCG_INTERPRETER)
    /* Free all of the parts we allocated above.  */
    for (i = real_args = 0; i < orig_nargs; ++i) {
        int is_64bit = orig_sizemask & (1 << (i+1)*2);
        if (is_64bit) {
            tcg_temp_free_internal(args[real_args++]);
            tcg_temp_free_internal(args[real_args++]);
        } else {
            real_args++;
        }
    }
    if (orig_sizemask & 1) {
        /* The 32-bit ABI returned two 32-bit pieces.  Re-assemble them.
           Note that describing these as TCGv_i64 eliminates an unnecessary
           zero-extension that tcg_gen_concat_i32_i64 would create.  */
        tcg_gen_concat32_i64(temp_tcgv_i64(ret), retl, reth);
        tcg_temp_free_i64(retl);
        tcg_temp_free_i64(reth);
    }
#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
    for (i = 0; i < nargs; ++i) {
        int is_64bit = sizemask & (1 << (i+1)*2);
        if (!is_64bit) {
            tcg_temp_free_internal(args[i]);
        }
    }
#endif /* TCG_TARGET_EXTEND_ARGS */
}

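/*
 * Set the initial value location of every temp before register
 * allocation: constants are TEMP_VAL_CONST, fixed temps live in their
 * reserved register, globals and local temps start in memory, and
 * normal temps start out dead.
 */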
static void tcg_reg_alloc_start(TCGContext *s)
{
    int i, n;

    for (i = 0, n = s->nb_temps; i < n; i++) {
        TCGTemp *ts = &s->temps[i];
        TCGTempVal val = TEMP_VAL_MEM;

        switch (ts->kind) {
        case TEMP_CONST:
            val = TEMP_VAL_CONST;
            break;
        case TEMP_FIXED:
            val = TEMP_VAL_REG;
            break;
        case TEMP_GLOBAL:
            break;
        case TEMP_NORMAL:
            val = TEMP_VAL_DEAD;
            /* fall through */
        case TEMP_LOCAL:
            ts->mem_allocated = 0;
            break;
        default:
            g_assert_not_reached();
        }
        ts->val_type = val;
    }

    memset(s->reg_to_temp, 0, sizeof(s->reg_to_temp));
}

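/*
 * Format one temp for the opcode dump: fixed and global temps print by
 * name, locals and normals by index, and constants by value.
 */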
1603 static char *tcg_get_arg_str_ptr(TCGContext *s, char *buf, int buf_size,
1604                                  TCGTemp *ts)
1605 {
1606     int idx = temp_idx(ts);
1607 
1608     switch (ts->kind) {
1609     case TEMP_FIXED:
1610     case TEMP_GLOBAL:
1611         pstrcpy(buf, buf_size, ts->name);
1612         break;
1613     case TEMP_LOCAL:
1614         snprintf(buf, buf_size, "loc%d", idx - s->nb_globals);
1615         break;
1616     case TEMP_NORMAL:
1617         snprintf(buf, buf_size, "tmp%d", idx - s->nb_globals);
1618         break;
1619     case TEMP_CONST:
1620         switch (ts->type) {
1621         case TCG_TYPE_I32:
1622             snprintf(buf, buf_size, "$0x%x", (int32_t)ts->val);
1623             break;
1624 #if TCG_TARGET_REG_BITS > 32
1625         case TCG_TYPE_I64:
1626             snprintf(buf, buf_size, "$0x%" PRIx64, ts->val);
1627             break;
1628 #endif
1629         case TCG_TYPE_V64:
1630         case TCG_TYPE_V128:
1631         case TCG_TYPE_V256:
1632             snprintf(buf, buf_size, "v%d$0x%" PRIx64,
1633                      64 << (ts->type - TCG_TYPE_V64), ts->val);
1634             break;
1635         default:
1636             g_assert_not_reached();
1637         }
1638         break;
1639     }
1640     return buf;
1641 }
1642 
1643 static char *tcg_get_arg_str(TCGContext *s, char *buf,
1644                              int buf_size, TCGArg arg)
1645 {
1646     return tcg_get_arg_str_ptr(s, buf, buf_size, arg_temp(arg));
1647 }
1648 
1649 /* Find helper name.  */
1650 static inline const char *tcg_find_helper(TCGContext *s, uintptr_t val)
1651 {
1652     const char *ret = NULL;
1653     if (helper_table) {
1654         TCGHelperInfo *info = g_hash_table_lookup(helper_table, (gpointer)val);
1655         if (info) {
1656             ret = info->name;
1657         }
1658     }
1659     return ret;
1660 }
1661 
1662 static const char * const cond_name[] =
1663 {
1664     [TCG_COND_NEVER] = "never",
1665     [TCG_COND_ALWAYS] = "always",
1666     [TCG_COND_EQ] = "eq",
1667     [TCG_COND_NE] = "ne",
1668     [TCG_COND_LT] = "lt",
1669     [TCG_COND_GE] = "ge",
1670     [TCG_COND_LE] = "le",
1671     [TCG_COND_GT] = "gt",
1672     [TCG_COND_LTU] = "ltu",
1673     [TCG_COND_GEU] = "geu",
1674     [TCG_COND_LEU] = "leu",
1675     [TCG_COND_GTU] = "gtu"
1676 };
1677 
1678 static const char * const ldst_name[] =
1679 {
1680     [MO_UB]   = "ub",
1681     [MO_SB]   = "sb",
1682     [MO_LEUW] = "leuw",
1683     [MO_LESW] = "lesw",
1684     [MO_LEUL] = "leul",
1685     [MO_LESL] = "lesl",
1686     [MO_LEQ]  = "leq",
1687     [MO_BEUW] = "beuw",
1688     [MO_BESW] = "besw",
1689     [MO_BEUL] = "beul",
1690     [MO_BESL] = "besl",
1691     [MO_BEQ]  = "beq",
1692 };
1693 
1694 static const char * const alignment_name[(MO_AMASK >> MO_ASHIFT) + 1] = {
1695 #ifdef TARGET_ALIGNED_ONLY
1696     [MO_UNALN >> MO_ASHIFT]    = "un+",
1697     [MO_ALIGN >> MO_ASHIFT]    = "",
1698 #else
1699     [MO_UNALN >> MO_ASHIFT]    = "",
1700     [MO_ALIGN >> MO_ASHIFT]    = "al+",
1701 #endif
1702     [MO_ALIGN_2 >> MO_ASHIFT]  = "al2+",
1703     [MO_ALIGN_4 >> MO_ASHIFT]  = "al4+",
1704     [MO_ALIGN_8 >> MO_ASHIFT]  = "al8+",
1705     [MO_ALIGN_16 >> MO_ASHIFT] = "al16+",
1706     [MO_ALIGN_32 >> MO_ASHIFT] = "al32+",
1707     [MO_ALIGN_64 >> MO_ASHIFT] = "al64+",
1708 };
1709 
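/*
 * Together with ldst_name above, a qemu_ld/st memop argument is
 * dumped as "<align><op>,<mmuidx>".  As an invented example, a
 * little-endian 32-bit load that must be 4-byte aligned, using mmu
 * index 1, prints as "al4+leul,1", while the build's default
 * alignment case prints no prefix at all.
 */
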
1710 static inline bool tcg_regset_single(TCGRegSet d)
1711 {
1712     return (d & (d - 1)) == 0;
1713 }
1714 
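/*
 * The d & (d - 1) test is the standard bit trick: subtracting one
 * clears the lowest set bit and sets all bits below it, so ANDing
 * with the original erases exactly that lowest bit.  Worked example:
 * d = 0b0110 gives d - 1 = 0b0101 and d & (d - 1) = 0b0100 (two bits
 * set, not single), while d = 0b0100 gives d & (d - 1) = 0 (single).
 * Note that d == 0 also reports "single"; callers only apply this to
 * known non-empty sets.
 */
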
1715 static inline TCGReg tcg_regset_first(TCGRegSet d)
1716 {
1717     if (TCG_TARGET_NB_REGS <= 32) {
1718         return ctz32(d);
1719     } else {
1720         return ctz64(d);
1721     }
1722 }
1723 
1724 static void tcg_dump_ops(TCGContext *s, bool have_prefs)
1725 {
1726     char buf[128];
1727     TCGOp *op;
1728 
1729     QTAILQ_FOREACH(op, &s->ops, link) {
1730         int i, k, nb_oargs, nb_iargs, nb_cargs;
1731         const TCGOpDef *def;
1732         TCGOpcode c;
1733         int col = 0;
1734 
1735         c = op->opc;
1736         def = &tcg_op_defs[c];
1737 
1738         if (c == INDEX_op_insn_start) {
1739             nb_oargs = 0;
1740             col += qemu_log("\n ----");
1741 
1742             for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
1743                 target_ulong a;
1744 #if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
1745                 a = deposit64(op->args[i * 2], 32, 32, op->args[i * 2 + 1]);
1746 #else
1747                 a = op->args[i];
1748 #endif
1749                 col += qemu_log(" " TARGET_FMT_lx, a);
1750             }
1751         } else if (c == INDEX_op_call) {
1752             /* variable number of arguments */
1753             nb_oargs = TCGOP_CALLO(op);
1754             nb_iargs = TCGOP_CALLI(op);
1755             nb_cargs = def->nb_cargs;
1756 
1757             /* function name, flags, out args */
1758             col += qemu_log(" %s %s,$0x%" TCG_PRIlx ",$%d", def->name,
1759                             tcg_find_helper(s, op->args[nb_oargs + nb_iargs]),
1760                             op->args[nb_oargs + nb_iargs + 1], nb_oargs);
1761             for (i = 0; i < nb_oargs; i++) {
1762                 col += qemu_log(",%s", tcg_get_arg_str(s, buf, sizeof(buf),
1763                                                        op->args[i]));
1764             }
1765             for (i = 0; i < nb_iargs; i++) {
1766                 TCGArg arg = op->args[nb_oargs + i];
1767                 const char *t = "<dummy>";
1768                 if (arg != TCG_CALL_DUMMY_ARG) {
1769                     t = tcg_get_arg_str(s, buf, sizeof(buf), arg);
1770                 }
1771                 col += qemu_log(",%s", t);
1772             }
1773         } else {
1774             col += qemu_log(" %s ", def->name);
1775 
1776             nb_oargs = def->nb_oargs;
1777             nb_iargs = def->nb_iargs;
1778             nb_cargs = def->nb_cargs;
1779 
1780             if (def->flags & TCG_OPF_VECTOR) {
1781                 col += qemu_log("v%d,e%d,", 64 << TCGOP_VECL(op),
1782                                 8 << TCGOP_VECE(op));
1783             }
1784 
1785             k = 0;
1786             for (i = 0; i < nb_oargs; i++) {
1787                 if (k != 0) {
1788                     col += qemu_log(",");
1789                 }
1790                 col += qemu_log("%s", tcg_get_arg_str(s, buf, sizeof(buf),
1791                                                       op->args[k++]));
1792             }
1793             for (i = 0; i < nb_iargs; i++) {
1794                 if (k != 0) {
1795                     col += qemu_log(",");
1796                 }
1797                 col += qemu_log("%s", tcg_get_arg_str(s, buf, sizeof(buf),
1798                                                       op->args[k++]));
1799             }
1800             switch (c) {
1801             case INDEX_op_brcond_i32:
1802             case INDEX_op_setcond_i32:
1803             case INDEX_op_movcond_i32:
1804             case INDEX_op_brcond2_i32:
1805             case INDEX_op_setcond2_i32:
1806             case INDEX_op_brcond_i64:
1807             case INDEX_op_setcond_i64:
1808             case INDEX_op_movcond_i64:
1809             case INDEX_op_cmp_vec:
1810             case INDEX_op_cmpsel_vec:
1811                 if (op->args[k] < ARRAY_SIZE(cond_name)
1812                     && cond_name[op->args[k]]) {
1813                     col += qemu_log(",%s", cond_name[op->args[k++]]);
1814                 } else {
1815                     col += qemu_log(",$0x%" TCG_PRIlx, op->args[k++]);
1816                 }
1817                 i = 1;
1818                 break;
1819             case INDEX_op_qemu_ld_i32:
1820             case INDEX_op_qemu_st_i32:
1821             case INDEX_op_qemu_st8_i32:
1822             case INDEX_op_qemu_ld_i64:
1823             case INDEX_op_qemu_st_i64:
1824                 {
1825                     TCGMemOpIdx oi = op->args[k++];
1826                     MemOp mop = get_memop(oi);
1827                     unsigned ix = get_mmuidx(oi);
1828 
1829                     if (mop & ~(MO_AMASK | MO_BSWAP | MO_SSIZE)) {
1830                         col += qemu_log(",$0x%x,%u", mop, ix);
1831                     } else {
1832                         const char *s_al, *s_op;
1833                         s_al = alignment_name[(mop & MO_AMASK) >> MO_ASHIFT];
1834                         s_op = ldst_name[mop & (MO_BSWAP | MO_SSIZE)];
1835                         col += qemu_log(",%s%s,%u", s_al, s_op, ix);
1836                     }
1837                     i = 1;
1838                 }
1839                 break;
1840             default:
1841                 i = 0;
1842                 break;
1843             }
1844             switch (c) {
1845             case INDEX_op_set_label:
1846             case INDEX_op_br:
1847             case INDEX_op_brcond_i32:
1848             case INDEX_op_brcond_i64:
1849             case INDEX_op_brcond2_i32:
1850                 col += qemu_log("%s$L%d", k ? "," : "",
1851                                 arg_label(op->args[k])->id);
1852                 i++, k++;
1853                 break;
1854             default:
1855                 break;
1856             }
1857             for (; i < nb_cargs; i++, k++) {
1858                 col += qemu_log("%s$0x%" TCG_PRIlx, k ? "," : "", op->args[k]);
1859             }
1860         }
1861 
1862         if (have_prefs || op->life) {
1863 
1864             QemuLogFile *logfile;
1865 
1866             rcu_read_lock();
1867             logfile = qatomic_rcu_read(&qemu_logfile);
1868             if (logfile) {
1869                 for (; col < 40; ++col) {
1870                     putc(' ', logfile->fd);
1871                 }
1872             }
1873             rcu_read_unlock();
1874         }
1875 
1876         if (op->life) {
1877             unsigned life = op->life;
1878 
1879             if (life & (SYNC_ARG * 3)) {
1880                 qemu_log("  sync:");
1881                 for (i = 0; i < 2; ++i) {
1882                     if (life & (SYNC_ARG << i)) {
1883                         qemu_log(" %d", i);
1884                     }
1885                 }
1886             }
1887             life /= DEAD_ARG;
1888             if (life) {
1889                 qemu_log("  dead:");
1890                 for (i = 0; life; ++i, life >>= 1) {
1891                     if (life & 1) {
1892                         qemu_log(" %d", i);
1893                     }
1894                 }
1895             }
1896         }
1897 
1898         if (have_prefs) {
1899             for (i = 0; i < nb_oargs; ++i) {
1900                 TCGRegSet set = op->output_pref[i];
1901 
1902                 if (i == 0) {
1903                     qemu_log("  pref=");
1904                 } else {
1905                     qemu_log(",");
1906                 }
1907                 if (set == 0) {
1908                     qemu_log("none");
1909                 } else if (set == MAKE_64BIT_MASK(0, TCG_TARGET_NB_REGS)) {
1910                     qemu_log("all");
1911 #ifdef CONFIG_DEBUG_TCG
1912                 } else if (tcg_regset_single(set)) {
1913                     TCGReg reg = tcg_regset_first(set);
1914                     qemu_log("%s", tcg_target_reg_names[reg]);
1915 #endif
1916                 } else if (TCG_TARGET_NB_REGS <= 32) {
1917                     qemu_log("%#x", (uint32_t)set);
1918                 } else {
1919                     qemu_log("%#" PRIx64, (uint64_t)set);
1920                 }
1921             }
1922         }
1923 
1924         qemu_log("\n");
1925     }
1926 }
1927 
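/*
 * For orientation, the output of this function looks roughly like the
 * invented excerpt below: an insn_start marker, then one line per op
 * with outputs, inputs and constants comma-separated, padded toward
 * column 40 before the liveness ("sync:"/"dead:") and register
 * preference ("pref=") annotations:
 *
 *  ---- 0000000000401000 0000000000000000
 *  mov_i32 tmp2,var_a                      dead: 1  pref=all
 *  add_i32 tmp2,tmp2,$0x1                  pref=0xffff
 *  brcond_i32 tmp2,$0x0,ne,$L1             dead: 0
 */
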
1928 /* we give more priority to constraints with fewer registers */
1929 static int get_constraint_priority(const TCGOpDef *def, int k)
1930 {
1931     const TCGArgConstraint *arg_ct = &def->args_ct[k];
1932     int n;
1933 
1934     if (arg_ct->oalias) {
1935         /* an alias is equivalent to a single register */
1936         n = 1;
1937     } else {
1938         n = ctpop64(arg_ct->regs);
1939     }
1940     return TCG_TARGET_NB_REGS - n + 1;
1941 }
1942 
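/*
 * Worked example: with 16 allocatable registers, a constraint
 * accepting any register yields priority 16 - 16 + 1 = 1, while a
 * single-register constraint (or an alias, which counts as one
 * register) yields 16 - 1 + 1 = 16, so sort_constraints() below
 * places the most restrictive operands first.
 */
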
1943 /* sort from highest priority to lowest */
1944 static void sort_constraints(TCGOpDef *def, int start, int n)
1945 {
1946     int i, j;
1947     TCGArgConstraint *a = def->args_ct;
1948 
1949     for (i = 0; i < n; i++) {
1950         a[start + i].sort_index = start + i;
1951     }
1952     if (n <= 1) {
1953         return;
1954     }
1955     for (i = 0; i < n - 1; i++) {
1956         for (j = i + 1; j < n; j++) {
1957             int p1 = get_constraint_priority(def, a[start + i].sort_index);
1958             int p2 = get_constraint_priority(def, a[start + j].sort_index);
1959             if (p1 < p2) {
1960                 int tmp = a[start + i].sort_index;
1961                 a[start + i].sort_index = a[start + j].sort_index;
1962                 a[start + j].sort_index = tmp;
1963             }
1964         }
1965     }
1966 }
1967 
1968 static void process_op_defs(TCGContext *s)
1969 {
1970     TCGOpcode op;
1971 
1972     for (op = 0; op < NB_OPS; op++) {
1973         TCGOpDef *def = &tcg_op_defs[op];
1974         const TCGTargetOpDef *tdefs;
1975         int i, nb_args;
1976 
1977         if (def->flags & TCG_OPF_NOT_PRESENT) {
1978             continue;
1979         }
1980 
1981         nb_args = def->nb_iargs + def->nb_oargs;
1982         if (nb_args == 0) {
1983             continue;
1984         }
1985 
1986         /*
1987          * Macro magic should make it impossible, but double-check that
1988          * the array index is in range.  Since the signedness of an enum
1989          * is implementation defined, force the result to unsigned.
1990          */
1991         unsigned con_set = tcg_target_op_def(op);
1992         tcg_debug_assert(con_set < ARRAY_SIZE(constraint_sets));
1993         tdefs = &constraint_sets[con_set];
1994 
1995         for (i = 0; i < nb_args; i++) {
1996             const char *ct_str = tdefs->args_ct_str[i];
1997             /* Incomplete TCGTargetOpDef entry. */
1998             tcg_debug_assert(ct_str != NULL);
1999 
2000             while (*ct_str != '\0') {
2001                 switch (*ct_str) {
2002                 case '0' ... '9':
2003                     {
2004                         int oarg = *ct_str - '0';
2005                         tcg_debug_assert(ct_str == tdefs->args_ct_str[i]);
2006                         tcg_debug_assert(oarg < def->nb_oargs);
2007                         tcg_debug_assert(def->args_ct[oarg].regs != 0);
2008                         def->args_ct[i] = def->args_ct[oarg];
2009                         /* The output sets oalias.  */
2010                         def->args_ct[oarg].oalias = true;
2011                         def->args_ct[oarg].alias_index = i;
2012                         /* The input sets ialias. */
2013                         def->args_ct[i].ialias = true;
2014                         def->args_ct[i].alias_index = oarg;
2015                     }
2016                     ct_str++;
2017                     break;
2018                 case '&':
2019                     def->args_ct[i].newreg = true;
2020                     ct_str++;
2021                     break;
2022                 case 'i':
2023                     def->args_ct[i].ct |= TCG_CT_CONST;
2024                     ct_str++;
2025                     break;
2026 
2027                 /* Include all of the target-specific constraints. */
2028 
2029 #undef CONST
2030 #define CONST(CASE, MASK) \
2031     case CASE: def->args_ct[i].ct |= MASK; ct_str++; break;
2032 #define REGS(CASE, MASK) \
2033     case CASE: def->args_ct[i].regs |= MASK; ct_str++; break;
2034 
2035 #include "tcg-target-con-str.h"
2036 
2037 #undef REGS
2038 #undef CONST
2039                 default:
2040                     /* Typo in TCGTargetOpDef constraint. */
2041                     g_assert_not_reached();
2042                 }
2043             }
2044         }
2045 
2046         /* TCGTargetOpDef entry with too much information? */
2047         tcg_debug_assert(i == TCG_MAX_OP_ARGS || tdefs->args_ct_str[i] == NULL);
2048 
2049         /* sort the constraints (XXX: this is just a heuristic) */
2050         sort_constraints(def, 0, def->nb_oargs);
2051         sort_constraints(def, def->nb_oargs, def->nb_iargs);
2052     }
2053 }
2054 
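/*
 * As a concrete (hypothetical) illustration of the parser above: for
 * a one-output, two-input opcode whose constraint set is the string
 * triple { "r", "0", "ri" }, output 0 may live in any register of the
 * target's 'r' set, input 1 ("0") is aliased to output 0 (setting
 * oalias/ialias and alias_index on both sides), and input 2 ("ri")
 * accepts either a register or, via TCG_CT_CONST, an immediate.
 */
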
2055 void tcg_op_remove(TCGContext *s, TCGOp *op)
2056 {
2057     TCGLabel *label;
2058 
2059     switch (op->opc) {
2060     case INDEX_op_br:
2061         label = arg_label(op->args[0]);
2062         label->refs--;
2063         break;
2064     case INDEX_op_brcond_i32:
2065     case INDEX_op_brcond_i64:
2066         label = arg_label(op->args[3]);
2067         label->refs--;
2068         break;
2069     case INDEX_op_brcond2_i32:
2070         label = arg_label(op->args[5]);
2071         label->refs--;
2072         break;
2073     default:
2074         break;
2075     }
2076 
2077     QTAILQ_REMOVE(&s->ops, op, link);
2078     QTAILQ_INSERT_TAIL(&s->free_ops, op, link);
2079     s->nb_ops--;
2080 
2081 #ifdef CONFIG_PROFILER
2082     qatomic_set(&s->prof.del_op_count, s->prof.del_op_count + 1);
2083 #endif
2084 }
2085 
2086 void tcg_remove_ops_after(TCGOp *op)
2087 {
2088     TCGContext *s = tcg_ctx;
2089 
2090     while (true) {
2091         TCGOp *last = tcg_last_op();
2092         if (last == op) {
2093             return;
2094         }
2095         tcg_op_remove(s, last);
2096     }
2097 }
2098 
2099 static TCGOp *tcg_op_alloc(TCGOpcode opc)
2100 {
2101     TCGContext *s = tcg_ctx;
2102     TCGOp *op;
2103 
2104     if (likely(QTAILQ_EMPTY(&s->free_ops))) {
2105         op = tcg_malloc(sizeof(TCGOp));
2106     } else {
2107         op = QTAILQ_FIRST(&s->free_ops);
2108         QTAILQ_REMOVE(&s->free_ops, op, link);
2109     }
2110     memset(op, 0, offsetof(TCGOp, link));
2111     op->opc = opc;
2112     s->nb_ops++;
2113 
2114     return op;
2115 }
2116 
2117 TCGOp *tcg_emit_op(TCGOpcode opc)
2118 {
2119     TCGOp *op = tcg_op_alloc(opc);
2120     QTAILQ_INSERT_TAIL(&tcg_ctx->ops, op, link);
2121     return op;
2122 }
2123 
2124 TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *old_op, TCGOpcode opc)
2125 {
2126     TCGOp *new_op = tcg_op_alloc(opc);
2127     QTAILQ_INSERT_BEFORE(old_op, new_op, link);
2128     return new_op;
2129 }
2130 
2131 TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *old_op, TCGOpcode opc)
2132 {
2133     TCGOp *new_op = tcg_op_alloc(opc);
2134     QTAILQ_INSERT_AFTER(&s->ops, old_op, new_op, link);
2135     return new_op;
2136 }
2137 
2138 /* Reachability analysis: remove unreachable code.  */
2139 static void reachable_code_pass(TCGContext *s)
2140 {
2141     TCGOp *op, *op_next;
2142     bool dead = false;
2143 
2144     QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
2145         bool remove = dead;
2146         TCGLabel *label;
2147         int call_flags;
2148 
2149         switch (op->opc) {
2150         case INDEX_op_set_label:
2151             label = arg_label(op->args[0]);
2152             if (label->refs == 0) {
2153                 /*
2154                  * While there is an occasional backward branch, virtually
2155                  * all branches generated by the translators are forward.
2156                  * This means that generally we will have already removed
2157                  * all references to the label there will ever be, and
2158                  * there is little to be gained by iterating.
2159                  */
2160                 remove = true;
2161             } else {
2162                 /* Once we see a label, insns become live again.  */
2163                 dead = false;
2164                 remove = false;
2165 
2166                 /*
2167                  * Optimization can fold conditional branches to unconditional.
2168                  * If we find a label with one reference which is preceded by
2169                  * an unconditional branch to it, remove both.  This needed to
2170                  * wait until the dead code in between them was removed.
2171                  */
2172                 if (label->refs == 1) {
2173                     TCGOp *op_prev = QTAILQ_PREV(op, link);
2174                     if (op_prev->opc == INDEX_op_br &&
2175                         label == arg_label(op_prev->args[0])) {
2176                         tcg_op_remove(s, op_prev);
2177                         remove = true;
2178                     }
2179                 }
2180             }
2181             break;
2182 
2183         case INDEX_op_br:
2184         case INDEX_op_exit_tb:
2185         case INDEX_op_goto_ptr:
2186             /* Unconditional branches; everything following is dead.  */
2187             dead = true;
2188             break;
2189 
2190         case INDEX_op_call:
2191             /* Notice noreturn helper calls, raising exceptions.  */
2192             call_flags = op->args[TCGOP_CALLO(op) + TCGOP_CALLI(op) + 1];
2193             if (call_flags & TCG_CALL_NO_RETURN) {
2194                 dead = true;
2195             }
2196             break;
2197 
2198         case INDEX_op_insn_start:
2199             /* Never remove -- we need to keep these for unwind.  */
2200             remove = false;
2201             break;
2202 
2203         default:
2204             break;
2205         }
2206 
2207         if (remove) {
2208             tcg_op_remove(s, op);
2209         }
2210     }
2211 }
2212 
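/*
 * Small worked example (labels and temps invented).  In the stream
 *
 *     brcond_i32 t0,$0x0,ne,$L1
 *     exit_tb $0x0
 *     mov_i32 t1,t2
 *     set_label $L1
 *
 * the exit_tb marks everything up to the next label dead, so the mov
 * is removed, while set_label $L1 (still referenced by the brcond)
 * makes the stream live again.  Independently, a "br $L0" that
 * directly precedes "set_label $L0" with refs == 1 is deleted
 * together with the label.
 */
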
2213 #define TS_DEAD  1
2214 #define TS_MEM   2
2215 
2216 #define IS_DEAD_ARG(n)   (arg_life & (DEAD_ARG << (n)))
2217 #define NEED_SYNC_ARG(n) (arg_life & (SYNC_ARG << (n)))
2218 
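/*
 * op->life packs the results of liveness_pass_1 into one word:
 * SYNC_ARG << n is set when output n must be written back to memory,
 * and DEAD_ARG << n when argument n (outputs first, then inputs)
 * makes its final appearance at this op.  For example, a one-output,
 * two-input op whose second input dies here carries DEAD_ARG << 2.
 */
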
2219 /* For liveness_pass_1, the register preferences for a given temp.  */
2220 static inline TCGRegSet *la_temp_pref(TCGTemp *ts)
2221 {
2222     return ts->state_ptr;
2223 }
2224 
2225 /* For liveness_pass_1, reset the preferences for a given temp to the
2226  * maximal regset for its type.
2227  */
2228 static inline void la_reset_pref(TCGTemp *ts)
2229 {
2230     *la_temp_pref(ts)
2231         = (ts->state == TS_DEAD ? 0 : tcg_target_available_regs[ts->type]);
2232 }
2233 
2234 /* liveness analysis: end of function: all temps are dead, and globals
2235    should be in memory. */
2236 static void la_func_end(TCGContext *s, int ng, int nt)
2237 {
2238     int i;
2239 
2240     for (i = 0; i < ng; ++i) {
2241         s->temps[i].state = TS_DEAD | TS_MEM;
2242         la_reset_pref(&s->temps[i]);
2243     }
2244     for (i = ng; i < nt; ++i) {
2245         s->temps[i].state = TS_DEAD;
2246         la_reset_pref(&s->temps[i]);
2247     }
2248 }
2249 
2250 /* liveness analysis: end of basic block: all temps are dead, globals
2251    and local temps should be in memory. */
2252 static void la_bb_end(TCGContext *s, int ng, int nt)
2253 {
2254     int i;
2255 
2256     for (i = 0; i < nt; ++i) {
2257         TCGTemp *ts = &s->temps[i];
2258         int state;
2259 
2260         switch (ts->kind) {
2261         case TEMP_FIXED:
2262         case TEMP_GLOBAL:
2263         case TEMP_LOCAL:
2264             state = TS_DEAD | TS_MEM;
2265             break;
2266         case TEMP_NORMAL:
2267         case TEMP_CONST:
2268             state = TS_DEAD;
2269             break;
2270         default:
2271             g_assert_not_reached();
2272         }
2273         ts->state = state;
2274         la_reset_pref(ts);
2275     }
2276 }
2277 
2278 /* liveness analysis: sync globals back to memory.  */
2279 static void la_global_sync(TCGContext *s, int ng)
2280 {
2281     int i;
2282 
2283     for (i = 0; i < ng; ++i) {
2284         int state = s->temps[i].state;
2285         s->temps[i].state = state | TS_MEM;
2286         if (state == TS_DEAD) {
2287             /* If the global was previously dead, reset prefs.  */
2288             la_reset_pref(&s->temps[i]);
2289         }
2290     }
2291 }
2292 
2293 /*
2294  * liveness analysis: conditional branch: all temps are dead,
2295  * globals and local temps should be synced.
2296  */
2297 static void la_bb_sync(TCGContext *s, int ng, int nt)
2298 {
2299     la_global_sync(s, ng);
2300 
2301     for (int i = ng; i < nt; ++i) {
2302         TCGTemp *ts = &s->temps[i];
2303         int state;
2304 
2305         switch (ts->kind) {
2306         case TEMP_LOCAL:
2307             state = ts->state;
2308             ts->state = state | TS_MEM;
2309             if (state != TS_DEAD) {
2310                 continue;
2311             }
2312             break;
2313         case TEMP_NORMAL:
2314             s->temps[i].state = TS_DEAD;
2315             break;
2316         case TEMP_CONST:
2317             continue;
2318         default:
2319             g_assert_not_reached();
2320         }
2321         la_reset_pref(&s->temps[i]);
2322     }
2323 }
2324 
2325 /* liveness analysis: sync globals back to memory and kill.  */
2326 static void la_global_kill(TCGContext *s, int ng)
2327 {
2328     int i;
2329 
2330     for (i = 0; i < ng; i++) {
2331         s->temps[i].state = TS_DEAD | TS_MEM;
2332         la_reset_pref(&s->temps[i]);
2333     }
2334 }
2335 
2336 /* liveness analysis: note live globals crossing calls.  */
2337 static void la_cross_call(TCGContext *s, int nt)
2338 {
2339     TCGRegSet mask = ~tcg_target_call_clobber_regs;
2340     int i;
2341 
2342     for (i = 0; i < nt; i++) {
2343         TCGTemp *ts = &s->temps[i];
2344         if (!(ts->state & TS_DEAD)) {
2345             TCGRegSet *pset = la_temp_pref(ts);
2346             TCGRegSet set = *pset;
2347 
2348             set &= mask;
2349             /* If the combination is not possible, restart.  */
2350             if (set == 0) {
2351                 set = tcg_target_available_regs[ts->type] & mask;
2352             }
2353             *pset = set;
2354         }
2355     }
2356 }
2357 
2358 /* Liveness analysis: update the opc_arg_life array to tell if a
2359    given input argument is dead. Instructions updating dead
2360    temporaries are removed. */
2361 static void liveness_pass_1(TCGContext *s)
2362 {
2363     int nb_globals = s->nb_globals;
2364     int nb_temps = s->nb_temps;
2365     TCGOp *op, *op_prev;
2366     TCGRegSet *prefs;
2367     int i;
2368 
2369     prefs = tcg_malloc(sizeof(TCGRegSet) * nb_temps);
2370     for (i = 0; i < nb_temps; ++i) {
2371         s->temps[i].state_ptr = prefs + i;
2372     }
2373 
2374     /* ??? Should be redundant with the exit_tb that ends the TB.  */
2375     la_func_end(s, nb_globals, nb_temps);
2376 
2377     QTAILQ_FOREACH_REVERSE_SAFE(op, &s->ops, link, op_prev) {
2378         int nb_iargs, nb_oargs;
2379         TCGOpcode opc_new, opc_new2;
2380         bool have_opc_new2;
2381         TCGLifeData arg_life = 0;
2382         TCGTemp *ts;
2383         TCGOpcode opc = op->opc;
2384         const TCGOpDef *def = &tcg_op_defs[opc];
2385 
2386         switch (opc) {
2387         case INDEX_op_call:
2388             {
2389                 int call_flags;
2390                 int nb_call_regs;
2391 
2392                 nb_oargs = TCGOP_CALLO(op);
2393                 nb_iargs = TCGOP_CALLI(op);
2394                 call_flags = op->args[nb_oargs + nb_iargs + 1];
2395 
2396                 /* pure functions can be removed if their result is unused */
2397                 if (call_flags & TCG_CALL_NO_SIDE_EFFECTS) {
2398                     for (i = 0; i < nb_oargs; i++) {
2399                         ts = arg_temp(op->args[i]);
2400                         if (ts->state != TS_DEAD) {
2401                             goto do_not_remove_call;
2402                         }
2403                     }
2404                     goto do_remove;
2405                 }
2406             do_not_remove_call:
2407 
2408                 /* Output args are dead.  */
2409                 for (i = 0; i < nb_oargs; i++) {
2410                     ts = arg_temp(op->args[i]);
2411                     if (ts->state & TS_DEAD) {
2412                         arg_life |= DEAD_ARG << i;
2413                     }
2414                     if (ts->state & TS_MEM) {
2415                         arg_life |= SYNC_ARG << i;
2416                     }
2417                     ts->state = TS_DEAD;
2418                     la_reset_pref(ts);
2419 
2420                     /* Not used -- it will be tcg_target_call_oarg_regs[i].  */
2421                     op->output_pref[i] = 0;
2422                 }
2423 
2424                 if (!(call_flags & (TCG_CALL_NO_WRITE_GLOBALS |
2425                                     TCG_CALL_NO_READ_GLOBALS))) {
2426                     la_global_kill(s, nb_globals);
2427                 } else if (!(call_flags & TCG_CALL_NO_READ_GLOBALS)) {
2428                     la_global_sync(s, nb_globals);
2429                 }
2430 
2431                 /* Record arguments that die in this helper.  */
2432                 for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
2433                     ts = arg_temp(op->args[i]);
2434                     if (ts && ts->state & TS_DEAD) {
2435                         arg_life |= DEAD_ARG << i;
2436                     }
2437                 }
2438 
2439                 /* For all live registers, remove call-clobbered prefs.  */
2440                 la_cross_call(s, nb_temps);
2441 
2442                 nb_call_regs = ARRAY_SIZE(tcg_target_call_iarg_regs);
2443 
2444                 /* Input arguments are live for preceding opcodes.  */
2445                 for (i = 0; i < nb_iargs; i++) {
2446                     ts = arg_temp(op->args[i + nb_oargs]);
2447                     if (ts && ts->state & TS_DEAD) {
2448                         /* For those arguments that die, and will be allocated
2449                          * in registers, clear the register set for that arg,
2450                          * to be filled in below.  For args that will be on
2451                          * the stack, reset to any available reg.
2452                          */
2453                         *la_temp_pref(ts)
2454                             = (i < nb_call_regs ? 0 :
2455                                tcg_target_available_regs[ts->type]);
2456                         ts->state &= ~TS_DEAD;
2457                     }
2458                 }
2459 
2460                 /* For each input argument, add its input register to prefs.
2461                    If a temp is used once, this produces a single set bit.  */
2462                 for (i = 0; i < MIN(nb_call_regs, nb_iargs); i++) {
2463                     ts = arg_temp(op->args[i + nb_oargs]);
2464                     if (ts) {
2465                         tcg_regset_set_reg(*la_temp_pref(ts),
2466                                            tcg_target_call_iarg_regs[i]);
2467                     }
2468                 }
2469             }
2470             break;
2471         case INDEX_op_insn_start:
2472             break;
2473         case INDEX_op_discard:
2474             /* mark the temporary as dead */
2475             ts = arg_temp(op->args[0]);
2476             ts->state = TS_DEAD;
2477             la_reset_pref(ts);
2478             break;
2479 
2480         case INDEX_op_add2_i32:
2481             opc_new = INDEX_op_add_i32;
2482             goto do_addsub2;
2483         case INDEX_op_sub2_i32:
2484             opc_new = INDEX_op_sub_i32;
2485             goto do_addsub2;
2486         case INDEX_op_add2_i64:
2487             opc_new = INDEX_op_add_i64;
2488             goto do_addsub2;
2489         case INDEX_op_sub2_i64:
2490             opc_new = INDEX_op_sub_i64;
2491         do_addsub2:
2492             nb_iargs = 4;
2493             nb_oargs = 2;
2494             /* Test if the high part of the operation is dead, but not
2495                the low part.  The result can be optimized to a simple
2496                add or sub.  This happens often for x86_64 guest when the
2497                cpu mode is set to 32 bit.  */
2498             if (arg_temp(op->args[1])->state == TS_DEAD) {
2499                 if (arg_temp(op->args[0])->state == TS_DEAD) {
2500                     goto do_remove;
2501                 }
2502                 /* Replace the opcode and adjust the args in place,
2503                    leaving 3 unused args at the end.  */
2504                 op->opc = opc = opc_new;
2505                 op->args[1] = op->args[2];
2506                 op->args[2] = op->args[4];
2507                 /* Fall through and mark the single-word operation live.  */
2508                 nb_iargs = 2;
2509                 nb_oargs = 1;
2510             }
2511             goto do_not_remove;
2512 
2513         case INDEX_op_mulu2_i32:
2514             opc_new = INDEX_op_mul_i32;
2515             opc_new2 = INDEX_op_muluh_i32;
2516             have_opc_new2 = TCG_TARGET_HAS_muluh_i32;
2517             goto do_mul2;
2518         case INDEX_op_muls2_i32:
2519             opc_new = INDEX_op_mul_i32;
2520             opc_new2 = INDEX_op_mulsh_i32;
2521             have_opc_new2 = TCG_TARGET_HAS_mulsh_i32;
2522             goto do_mul2;
2523         case INDEX_op_mulu2_i64:
2524             opc_new = INDEX_op_mul_i64;
2525             opc_new2 = INDEX_op_muluh_i64;
2526             have_opc_new2 = TCG_TARGET_HAS_muluh_i64;
2527             goto do_mul2;
2528         case INDEX_op_muls2_i64:
2529             opc_new = INDEX_op_mul_i64;
2530             opc_new2 = INDEX_op_mulsh_i64;
2531             have_opc_new2 = TCG_TARGET_HAS_mulsh_i64;
2532             goto do_mul2;
2533         do_mul2:
2534             nb_iargs = 2;
2535             nb_oargs = 2;
2536             if (arg_temp(op->args[1])->state == TS_DEAD) {
2537                 if (arg_temp(op->args[0])->state == TS_DEAD) {
2538                     /* Both parts of the operation are dead.  */
2539                     goto do_remove;
2540                 }
2541                 /* The high part of the operation is dead; generate the low. */
2542                 op->opc = opc = opc_new;
2543                 op->args[1] = op->args[2];
2544                 op->args[2] = op->args[3];
2545             } else if (arg_temp(op->args[0])->state == TS_DEAD && have_opc_new2) {
2546                 /* The low part of the operation is dead; generate the high. */
2547                 op->opc = opc = opc_new2;
2548                 op->args[0] = op->args[1];
2549                 op->args[1] = op->args[2];
2550                 op->args[2] = op->args[3];
2551             } else {
2552                 goto do_not_remove;
2553             }
2554             /* Mark the single-word operation live.  */
2555             nb_oargs = 1;
2556             goto do_not_remove;
2557 
2558         default:
2559             /* XXX: optimize by hardcoding common cases (e.g. triadic ops) */
2560             nb_iargs = def->nb_iargs;
2561             nb_oargs = def->nb_oargs;
2562 
2563             /* Test if the operation can be removed because all
2564                its outputs are dead. We assume that nb_oargs == 0
2565                implies side effects */
2566             if (!(def->flags & TCG_OPF_SIDE_EFFECTS) && nb_oargs != 0) {
2567                 for (i = 0; i < nb_oargs; i++) {
2568                     if (arg_temp(op->args[i])->state != TS_DEAD) {
2569                         goto do_not_remove;
2570                     }
2571                 }
2572                 goto do_remove;
2573             }
2574             goto do_not_remove;
2575 
2576         do_remove:
2577             tcg_op_remove(s, op);
2578             break;
2579 
2580         do_not_remove:
2581             for (i = 0; i < nb_oargs; i++) {
2582                 ts = arg_temp(op->args[i]);
2583 
2584                 /* Remember the preference of the uses that followed.  */
2585                 op->output_pref[i] = *la_temp_pref(ts);
2586 
2587                 /* Output args are dead.  */
2588                 if (ts->state & TS_DEAD) {
2589                     arg_life |= DEAD_ARG << i;
2590                 }
2591                 if (ts->state & TS_MEM) {
2592                     arg_life |= SYNC_ARG << i;
2593                 }
2594                 ts->state = TS_DEAD;
2595                 la_reset_pref(ts);
2596             }
2597 
2598             /* If end of basic block, update.  */
2599             if (def->flags & TCG_OPF_BB_EXIT) {
2600                 la_func_end(s, nb_globals, nb_temps);
2601             } else if (def->flags & TCG_OPF_COND_BRANCH) {
2602                 la_bb_sync(s, nb_globals, nb_temps);
2603             } else if (def->flags & TCG_OPF_BB_END) {
2604                 la_bb_end(s, nb_globals, nb_temps);
2605             } else if (def->flags & TCG_OPF_SIDE_EFFECTS) {
2606                 la_global_sync(s, nb_globals);
2607                 if (def->flags & TCG_OPF_CALL_CLOBBER) {
2608                     la_cross_call(s, nb_temps);
2609                 }
2610             }
2611 
2612             /* Record arguments that die in this opcode.  */
2613             for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
2614                 ts = arg_temp(op->args[i]);
2615                 if (ts->state & TS_DEAD) {
2616                     arg_life |= DEAD_ARG << i;
2617                 }
2618             }
2619 
2620             /* Input arguments are live for preceding opcodes.  */
2621             for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
2622                 ts = arg_temp(op->args[i]);
2623                 if (ts->state & TS_DEAD) {
2624                     /* For operands that were dead, initially allow
2625                        all regs for the type.  */
2626                     *la_temp_pref(ts) = tcg_target_available_regs[ts->type];
2627                     ts->state &= ~TS_DEAD;
2628                 }
2629             }
2630 
2631             /* Incorporate constraints for this operand.  */
2632             switch (opc) {
2633             case INDEX_op_mov_i32:
2634             case INDEX_op_mov_i64:
2635                 /* Note that these are TCG_OPF_NOT_PRESENT and do not
2636                    have proper constraints.  That said, special case
2637                    moves to propagate preferences backward.  */
2638                 if (IS_DEAD_ARG(1)) {
2639                     *la_temp_pref(arg_temp(op->args[0]))
2640                         = *la_temp_pref(arg_temp(op->args[1]));
2641                 }
2642                 break;
2643 
2644             default:
2645                 for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
2646                     const TCGArgConstraint *ct = &def->args_ct[i];
2647                     TCGRegSet set, *pset;
2648 
2649                     ts = arg_temp(op->args[i]);
2650                     pset = la_temp_pref(ts);
2651                     set = *pset;
2652 
2653                     set &= ct->regs;
2654                     if (ct->ialias) {
2655                         set &= op->output_pref[ct->alias_index];
2656                     }
2657                     /* If the combination is not possible, restart.  */
2658                     if (set == 0) {
2659                         set = ct->regs;
2660                     }
2661                     *pset = set;
2662                 }
2663                 break;
2664             }
2665             break;
2666         }
2667         op->life = arg_life;
2668     }
2669 }
2670 
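/*
 * Worked example of the do_addsub2 narrowing above.  A 32-bit guest
 * addition emitted as
 *
 *     add2_i32 lo,hi,al,ah,bl,bh
 *
 * whose high output "hi" is found dead while "lo" is live is
 * rewritten in place to
 *
 *     add_i32 lo,al,bl
 *
 * by replacing the opcode and shifting args[2] and args[4] down,
 * leaving three unused arguments at the end.  The mul2/muls2 cases
 * work the same way, keeping only the live half as a mul or a
 * muluh/mulsh.
 */
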
2671 /* Liveness analysis: Convert indirect regs to direct temporaries.  */
2672 static bool liveness_pass_2(TCGContext *s)
2673 {
2674     int nb_globals = s->nb_globals;
2675     int nb_temps, i;
2676     bool changes = false;
2677     TCGOp *op, *op_next;
2678 
2679     /* Create a temporary for each indirect global.  */
2680     for (i = 0; i < nb_globals; ++i) {
2681         TCGTemp *its = &s->temps[i];
2682         if (its->indirect_reg) {
2683             TCGTemp *dts = tcg_temp_alloc(s);
2684             dts->type = its->type;
2685             dts->base_type = its->base_type;
2686             its->state_ptr = dts;
2687         } else {
2688             its->state_ptr = NULL;
2689         }
2690         /* All globals begin dead.  */
2691         its->state = TS_DEAD;
2692     }
2693     for (nb_temps = s->nb_temps; i < nb_temps; ++i) {
2694         TCGTemp *its = &s->temps[i];
2695         its->state_ptr = NULL;
2696         its->state = TS_DEAD;
2697     }
2698 
2699     QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
2700         TCGOpcode opc = op->opc;
2701         const TCGOpDef *def = &tcg_op_defs[opc];
2702         TCGLifeData arg_life = op->life;
2703         int nb_iargs, nb_oargs, call_flags;
2704         TCGTemp *arg_ts, *dir_ts;
2705 
2706         if (opc == INDEX_op_call) {
2707             nb_oargs = TCGOP_CALLO(op);
2708             nb_iargs = TCGOP_CALLI(op);
2709             call_flags = op->args[nb_oargs + nb_iargs + 1];
2710         } else {
2711             nb_iargs = def->nb_iargs;
2712             nb_oargs = def->nb_oargs;
2713 
2714             /* Set flags similar to how calls require.  */
2715             if (def->flags & TCG_OPF_COND_BRANCH) {
2716                 /* Like reading globals: sync_globals */
2717                 call_flags = TCG_CALL_NO_WRITE_GLOBALS;
2718             } else if (def->flags & TCG_OPF_BB_END) {
2719                 /* Like writing globals: save_globals */
2720                 call_flags = 0;
2721             } else if (def->flags & TCG_OPF_SIDE_EFFECTS) {
2722                 /* Like reading globals: sync_globals */
2723                 call_flags = TCG_CALL_NO_WRITE_GLOBALS;
2724             } else {
2725                 /* No effect on globals.  */
2726                 call_flags = (TCG_CALL_NO_READ_GLOBALS |
2727                               TCG_CALL_NO_WRITE_GLOBALS);
2728             }
2729         }
2730 
2731         /* Make sure that input arguments are available.  */
2732         for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
2733             arg_ts = arg_temp(op->args[i]);
2734             if (arg_ts) {
2735                 dir_ts = arg_ts->state_ptr;
2736                 if (dir_ts && arg_ts->state == TS_DEAD) {
2737                     TCGOpcode lopc = (arg_ts->type == TCG_TYPE_I32
2738                                       ? INDEX_op_ld_i32
2739                                       : INDEX_op_ld_i64);
2740                     TCGOp *lop = tcg_op_insert_before(s, op, lopc);
2741 
2742                     lop->args[0] = temp_arg(dir_ts);
2743                     lop->args[1] = temp_arg(arg_ts->mem_base);
2744                     lop->args[2] = arg_ts->mem_offset;
2745 
2746                     /* Loaded, but synced with memory.  */
2747                     arg_ts->state = TS_MEM;
2748                 }
2749             }
2750         }
2751 
2752         /* Perform input replacement, and mark inputs that became dead.
2753            No action is required except keeping temp_state up to date
2754            so that we reload when needed.  */
2755         for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
2756             arg_ts = arg_temp(op->args[i]);
2757             if (arg_ts) {
2758                 dir_ts = arg_ts->state_ptr;
2759                 if (dir_ts) {
2760                     op->args[i] = temp_arg(dir_ts);
2761                     changes = true;
2762                     if (IS_DEAD_ARG(i)) {
2763                         arg_ts->state = TS_DEAD;
2764                     }
2765                 }
2766             }
2767         }
2768 
2769         /* Liveness analysis should ensure that the following are
2770            all correct, for call sites and basic block end points.  */
2771         if (call_flags & TCG_CALL_NO_READ_GLOBALS) {
2772             /* Nothing to do */
2773         } else if (call_flags & TCG_CALL_NO_WRITE_GLOBALS) {
2774             for (i = 0; i < nb_globals; ++i) {
2775                 /* Liveness should see that globals are synced back,
2776                    that is, either TS_DEAD or TS_MEM.  */
2777                 arg_ts = &s->temps[i];
2778                 tcg_debug_assert(arg_ts->state_ptr == 0
2779                                  || arg_ts->state != 0);
2780             }
2781         } else {
2782             for (i = 0; i < nb_globals; ++i) {
2783                 /* Liveness should see that globals are saved back,
2784                    that is, TS_DEAD, waiting to be reloaded.  */
2785                 arg_ts = &s->temps[i];
2786                 tcg_debug_assert(arg_ts->state_ptr == 0
2787                                  || arg_ts->state == TS_DEAD);
2788             }
2789         }
2790 
2791         /* Outputs become available.  */
2792         if (opc == INDEX_op_mov_i32 || opc == INDEX_op_mov_i64) {
2793             arg_ts = arg_temp(op->args[0]);
2794             dir_ts = arg_ts->state_ptr;
2795             if (dir_ts) {
2796                 op->args[0] = temp_arg(dir_ts);
2797                 changes = true;
2798 
2799                 /* The output is now live and modified.  */
2800                 arg_ts->state = 0;
2801 
2802                 if (NEED_SYNC_ARG(0)) {
2803                     TCGOpcode sopc = (arg_ts->type == TCG_TYPE_I32
2804                                       ? INDEX_op_st_i32
2805                                       : INDEX_op_st_i64);
2806                     TCGOp *sop = tcg_op_insert_after(s, op, sopc);
2807                     TCGTemp *out_ts = dir_ts;
2808 
2809                     if (IS_DEAD_ARG(0)) {
2810                         out_ts = arg_temp(op->args[1]);
2811                         arg_ts->state = TS_DEAD;
2812                         tcg_op_remove(s, op);
2813                     } else {
2814                         arg_ts->state = TS_MEM;
2815                     }
2816 
2817                     sop->args[0] = temp_arg(out_ts);
2818                     sop->args[1] = temp_arg(arg_ts->mem_base);
2819                     sop->args[2] = arg_ts->mem_offset;
2820                 } else {
2821                     tcg_debug_assert(!IS_DEAD_ARG(0));
2822                 }
2823             }
2824         } else {
2825             for (i = 0; i < nb_oargs; i++) {
2826                 arg_ts = arg_temp(op->args[i]);
2827                 dir_ts = arg_ts->state_ptr;
2828                 if (!dir_ts) {
2829                     continue;
2830                 }
2831                 op->args[i] = temp_arg(dir_ts);
2832                 changes = true;
2833 
2834                 /* The output is now live and modified.  */
2835                 arg_ts->state = 0;
2836 
2837                 /* Sync outputs upon their last write.  */
2838                 if (NEED_SYNC_ARG(i)) {
2839                     TCGOpcode sopc = (arg_ts->type == TCG_TYPE_I32
2840                                       ? INDEX_op_st_i32
2841                                       : INDEX_op_st_i64);
2842                     TCGOp *sop = tcg_op_insert_after(s, op, sopc);
2843 
2844                     sop->args[0] = temp_arg(dir_ts);
2845                     sop->args[1] = temp_arg(arg_ts->mem_base);
2846                     sop->args[2] = arg_ts->mem_offset;
2847 
2848                     arg_ts->state = TS_MEM;
2849                 }
2850                 /* Drop outputs that are dead.  */
2851                 if (IS_DEAD_ARG(i)) {
2852                     arg_ts->state = TS_DEAD;
2853                 }
2854             }
2855         }
2856     }
2857 
2858     return changes;
2859 }
2860 
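/*
 * Sketch of the rewrite this pass performs for one indirect global
 * (all names invented).  If "g" lives in memory at env + off and "d"
 * is the direct temp created for it above, then an op stream using g
 *
 *     add_i32 t0,g,t1
 *
 * is rewritten to
 *
 *     ld_i32 d,env,$off        <- inserted only while g is TS_DEAD
 *     add_i32 t0,d,t1
 *
 * and a write to g gets a matching st_i32 emitted after the defining
 * op whenever liveness flagged NEED_SYNC_ARG for that output.
 */
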
2861 #ifdef CONFIG_DEBUG_TCG
2862 static void dump_regs(TCGContext *s)
2863 {
2864     TCGTemp *ts;
2865     int i;
2866     char buf[64];
2867 
2868     for (i = 0; i < s->nb_temps; i++) {
2869         ts = &s->temps[i];
2870         printf("  %10s: ", tcg_get_arg_str_ptr(s, buf, sizeof(buf), ts));
2871         switch (ts->val_type) {
2872         case TEMP_VAL_REG:
2873             printf("%s", tcg_target_reg_names[ts->reg]);
2874             break;
2875         case TEMP_VAL_MEM:
2876             printf("%d(%s)", (int)ts->mem_offset,
2877                    tcg_target_reg_names[ts->mem_base->reg]);
2878             break;
2879         case TEMP_VAL_CONST:
2880             printf("$0x%" PRIx64, ts->val);
2881             break;
2882         case TEMP_VAL_DEAD:
2883             printf("D");
2884             break;
2885         default:
2886             printf("???");
2887             break;
2888         }
2889         printf("\n");
2890     }
2891 
2892     for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
2893         if (s->reg_to_temp[i] != NULL) {
2894             printf("%s: %s\n",
2895                    tcg_target_reg_names[i],
2896                    tcg_get_arg_str_ptr(s, buf, sizeof(buf), s->reg_to_temp[i]));
2897         }
2898     }
2899 }
2900 
2901 static void check_regs(TCGContext *s)
2902 {
2903     int reg;
2904     int k;
2905     TCGTemp *ts;
2906     char buf[64];
2907 
2908     for (reg = 0; reg < TCG_TARGET_NB_REGS; reg++) {
2909         ts = s->reg_to_temp[reg];
2910         if (ts != NULL) {
2911             if (ts->val_type != TEMP_VAL_REG || ts->reg != reg) {
2912                 printf("Inconsistency for register %s:\n",
2913                        tcg_target_reg_names[reg]);
2914                 goto fail;
2915             }
2916         }
2917     }
2918     for (k = 0; k < s->nb_temps; k++) {
2919         ts = &s->temps[k];
2920         if (ts->val_type == TEMP_VAL_REG
2921             && ts->kind != TEMP_FIXED
2922             && s->reg_to_temp[ts->reg] != ts) {
2923             printf("Inconsistency for temp %s:\n",
2924                    tcg_get_arg_str_ptr(s, buf, sizeof(buf), ts));
2925         fail:
2926             printf("reg state:\n");
2927             dump_regs(s);
2928             tcg_abort();
2929         }
2930     }
2931 }
2932 #endif
2933 
2934 static void temp_allocate_frame(TCGContext *s, TCGTemp *ts)
2935 {
2936 #if !(defined(__sparc__) && TCG_TARGET_REG_BITS == 64)
2937     /* Sparc64 stack is accessed with offset of 2047 */
2938     s->current_frame_offset = (s->current_frame_offset +
2939                                (tcg_target_long)sizeof(tcg_target_long) - 1) &
2940         ~(sizeof(tcg_target_long) - 1);
2941 #endif
2942     if (s->current_frame_offset + (tcg_target_long)sizeof(tcg_target_long) >
2943         s->frame_end) {
2944         tcg_abort();
2945     }
2946     ts->mem_offset = s->current_frame_offset;
2947     ts->mem_base = s->frame_temp;
2948     ts->mem_allocated = 1;
2949     s->current_frame_offset += sizeof(tcg_target_long);
2950 }
2951 
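/*
 * The expression above is the usual align-up idiom.  For a 64-bit
 * host (sizeof(tcg_target_long) == 8), a current offset of 12 becomes
 * (12 + 7) & ~7 == 16 before the new slot is carved out, keeping
 * every spill slot naturally aligned.
 */
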
2952 static void temp_load(TCGContext *, TCGTemp *, TCGRegSet, TCGRegSet, TCGRegSet);
2953 
2954 /* Mark a temporary as free or dead.  If 'free_or_dead' is negative,
2955    mark it free; otherwise mark it dead.  */
2956 static void temp_free_or_dead(TCGContext *s, TCGTemp *ts, int free_or_dead)
2957 {
2958     TCGTempVal new_type;
2959 
2960     switch (ts->kind) {
2961     case TEMP_FIXED:
2962         return;
2963     case TEMP_GLOBAL:
2964     case TEMP_LOCAL:
2965         new_type = TEMP_VAL_MEM;
2966         break;
2967     case TEMP_NORMAL:
2968         new_type = free_or_dead < 0 ? TEMP_VAL_MEM : TEMP_VAL_DEAD;
2969         break;
2970     case TEMP_CONST:
2971         new_type = TEMP_VAL_CONST;
2972         break;
2973     default:
2974         g_assert_not_reached();
2975     }
2976     if (ts->val_type == TEMP_VAL_REG) {
2977         s->reg_to_temp[ts->reg] = NULL;
2978     }
2979     ts->val_type = new_type;
2980 }
2981 
2982 /* Mark a temporary as dead.  */
2983 static inline void temp_dead(TCGContext *s, TCGTemp *ts)
2984 {
2985     temp_free_or_dead(s, ts, 1);
2986 }
2987 
2988 /* Sync a temporary to memory. 'allocated_regs' is used in case a temporary
2989    register needs to be allocated to store a constant.  If 'free_or_dead'
2990    is non-zero, subsequently release the temporary; if it is positive, the
2991    temp is dead; if it is negative, the temp is free.  */
2992 static void temp_sync(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs,
2993                       TCGRegSet preferred_regs, int free_or_dead)
2994 {
2995     if (!temp_readonly(ts) && !ts->mem_coherent) {
2996         if (!ts->mem_allocated) {
2997             temp_allocate_frame(s, ts);
2998         }
2999         switch (ts->val_type) {
3000         case TEMP_VAL_CONST:
3001             /* If we're going to free the temp immediately, then we won't
3002                require it later in a register, so attempt to store the
3003                constant to memory directly.  */
3004             if (free_or_dead
3005                 && tcg_out_sti(s, ts->type, ts->val,
3006                                ts->mem_base->reg, ts->mem_offset)) {
3007                 break;
3008             }
3009             temp_load(s, ts, tcg_target_available_regs[ts->type],
3010                       allocated_regs, preferred_regs);
3011             /* fallthrough */
3012 
3013         case TEMP_VAL_REG:
3014             tcg_out_st(s, ts->type, ts->reg,
3015                        ts->mem_base->reg, ts->mem_offset);
3016             break;
3017 
3018         case TEMP_VAL_MEM:
3019             break;
3020 
3021         case TEMP_VAL_DEAD:
3022         default:
3023             tcg_abort();
3024         }
3025         ts->mem_coherent = 1;
3026     }
3027     if (free_or_dead) {
3028         temp_free_or_dead(s, ts, free_or_dead);
3029     }
3030 }
3031 
3032 /* free register 'reg' by spilling the corresponding temporary if necessary */
3033 static void tcg_reg_free(TCGContext *s, TCGReg reg, TCGRegSet allocated_regs)
3034 {
3035     TCGTemp *ts = s->reg_to_temp[reg];
3036     if (ts != NULL) {
3037         temp_sync(s, ts, allocated_regs, 0, -1);
3038     }
3039 }
3040 
3041 /**
3042  * tcg_reg_alloc:
3043  * @required_regs: Set of registers in which we must allocate.
3044  * @allocated_regs: Set of registers which must be avoided.
3045  * @preferred_regs: Set of registers we should prefer.
3046  * @rev: True if we search the registers in "indirect" order.
3047  *
3048  * The allocated register must be in @required_regs & ~@allocated_regs,
3049  * but if we can put it in @preferred_regs we may save a move later.
3050  */
3051 static TCGReg tcg_reg_alloc(TCGContext *s, TCGRegSet required_regs,
3052                             TCGRegSet allocated_regs,
3053                             TCGRegSet preferred_regs, bool rev)
3054 {
3055     int i, j, f, n = ARRAY_SIZE(tcg_target_reg_alloc_order);
3056     TCGRegSet reg_ct[2];
3057     const int *order;
3058 
3059     reg_ct[1] = required_regs & ~allocated_regs;
3060     tcg_debug_assert(reg_ct[1] != 0);
3061     reg_ct[0] = reg_ct[1] & preferred_regs;
3062 
3063     /* Skip the preferred_regs option if it cannot be satisfied,
3064        or if the preference made no difference.  */
3065     f = reg_ct[0] == 0 || reg_ct[0] == reg_ct[1];
3066 
3067     order = rev ? indirect_reg_alloc_order : tcg_target_reg_alloc_order;
3068 
3069     /* Try free registers, preferences first.  */
3070     for (j = f; j < 2; j++) {
3071         TCGRegSet set = reg_ct[j];
3072 
3073         if (tcg_regset_single(set)) {
3074             /* One register in the set.  */
3075             TCGReg reg = tcg_regset_first(set);
3076             if (s->reg_to_temp[reg] == NULL) {
3077                 return reg;
3078             }
3079         } else {
3080             for (i = 0; i < n; i++) {
3081                 TCGReg reg = order[i];
3082                 if (s->reg_to_temp[reg] == NULL &&
3083                     tcg_regset_test_reg(set, reg)) {
3084                     return reg;
3085                 }
3086             }
3087         }
3088     }
3089 
3090     /* We must spill something.  */
3091     for (j = f; j < 2; j++) {
3092         TCGRegSet set = reg_ct[j];
3093 
3094         if (tcg_regset_single(set)) {
3095             /* One register in the set.  */
3096             TCGReg reg = tcg_regset_first(set);
3097             tcg_reg_free(s, reg, allocated_regs);
3098             return reg;
3099         } else {
3100             for (i = 0; i < n; i++) {
3101                 TCGReg reg = order[i];
3102                 if (tcg_regset_test_reg(set, reg)) {
3103                     tcg_reg_free(s, reg, allocated_regs);
3104                     return reg;
3105                 }
3106             }
3107         }
3108     }
3109 
3110     tcg_abort();
3111 }
3112 
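/*
 * Worked example with abstract registers r0..r2: for required_regs =
 * {r0,r1,r2}, allocated_regs = {r0} and preferred_regs = {r1}, we get
 * reg_ct[1] = {r1,r2} and reg_ct[0] = {r1}.  If r1 is free it is
 * returned by the preference pass; if only r2 is free, the wider pass
 * returns it; if nothing is free, r1 is spilled with tcg_reg_free()
 * and returned.
 */
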
3113 /* Make sure the temporary is in a register.  If needed, allocate the register
3114    from DESIRED while avoiding ALLOCATED.  */
3115 static void temp_load(TCGContext *s, TCGTemp *ts, TCGRegSet desired_regs,
3116                       TCGRegSet allocated_regs, TCGRegSet preferred_regs)
3117 {
3118     TCGReg reg;
3119 
3120     switch (ts->val_type) {
3121     case TEMP_VAL_REG:
3122         return;
3123     case TEMP_VAL_CONST:
3124         reg = tcg_reg_alloc(s, desired_regs, allocated_regs,
3125                             preferred_regs, ts->indirect_base);
3126         if (ts->type <= TCG_TYPE_I64) {
3127             tcg_out_movi(s, ts->type, reg, ts->val);
3128         } else {
3129             uint64_t val = ts->val;
3130             MemOp vece = MO_64;
3131 
3132             /*
3133              * Find the minimal vector element that matches the constant.
3134              * The targets will, in general, have to do this search anyway,
3135              * so do it generically here.
3136              */
3137             if (val == dup_const(MO_8, val)) {
3138                 vece = MO_8;
3139             } else if (val == dup_const(MO_16, val)) {
3140                 vece = MO_16;
3141             } else if (val == dup_const(MO_32, val)) {
3142                 vece = MO_32;
3143             }
3144 
3145             tcg_out_dupi_vec(s, ts->type, vece, reg, ts->val);
3146         }
3147         ts->mem_coherent = 0;
3148         break;
3149     case TEMP_VAL_MEM:
3150         reg = tcg_reg_alloc(s, desired_regs, allocated_regs,
3151                             preferred_regs, ts->indirect_base);
3152         tcg_out_ld(s, ts->type, reg, ts->mem_base->reg, ts->mem_offset);
3153         ts->mem_coherent = 1;
3154         break;
3155     case TEMP_VAL_DEAD:
3156     default:
3157         tcg_abort();
3158     }
3159     ts->reg = reg;
3160     ts->val_type = TEMP_VAL_REG;
3161     s->reg_to_temp[reg] = ts;
3162 }
3163 
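/*
 * Example of the element-size search above: 0x4242424242424242
 * equals dup_const(MO_8, 0x42), so such a vector constant can be
 * materialized by broadcasting a single byte; 0x0001000100010001
 * first matches at MO_16; an arbitrary bit pattern falls through
 * to MO_64.
 */
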
3164 /* Save a temporary to memory. 'allocated_regs' is used in case a
3165    temporary register needs to be allocated to store a constant.  */
3166 static void temp_save(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs)
3167 {
3168     /* The liveness analysis already ensures that globals are back
3169        in memory. Keep a tcg_debug_assert for safety. */
3170     tcg_debug_assert(ts->val_type == TEMP_VAL_MEM || temp_readonly(ts));
3171 }
3172 
3173 /* Save globals to their canonical location and assume they can be
3174    modified by the following code. 'allocated_regs' is used in case a
3175    temporary register needs to be allocated to store a constant. */
3176 static void save_globals(TCGContext *s, TCGRegSet allocated_regs)
3177 {
3178     int i, n;
3179 
3180     for (i = 0, n = s->nb_globals; i < n; i++) {
3181         temp_save(s, &s->temps[i], allocated_regs);
3182     }
3183 }
3184 
3185 /* Sync globals to their canonical location and assume they can be
3186    read by the following code. 'allocated_regs' is used in case a
3187    temporary register needs to be allocated to store a constant. */
3188 static void sync_globals(TCGContext *s, TCGRegSet allocated_regs)
3189 {
3190     int i, n;
3191 
3192     for (i = 0, n = s->nb_globals; i < n; i++) {
3193         TCGTemp *ts = &s->temps[i];
3194         tcg_debug_assert(ts->val_type != TEMP_VAL_REG
3195                          || ts->kind == TEMP_FIXED
3196                          || ts->mem_coherent);
3197     }
3198 }
3199 
3200 /* At the end of a basic block, we assume all temporaries are dead and
3201    all globals are stored at their canonical location. */
3202 static void tcg_reg_alloc_bb_end(TCGContext *s, TCGRegSet allocated_regs)
3203 {
3204     int i;
3205 
3206     for (i = s->nb_globals; i < s->nb_temps; i++) {
3207         TCGTemp *ts = &s->temps[i];
3208 
3209         switch (ts->kind) {
3210         case TEMP_LOCAL:
3211             temp_save(s, ts, allocated_regs);
3212             break;
3213         case TEMP_NORMAL:
3214             /* The liveness analysis already ensures that temps are dead.
3215                Keep a tcg_debug_assert for safety. */
3216             tcg_debug_assert(ts->val_type == TEMP_VAL_DEAD);
3217             break;
3218         case TEMP_CONST:
3219             /* Similarly, we should have freed any allocated register. */
3220             tcg_debug_assert(ts->val_type == TEMP_VAL_CONST);
3221             break;
3222         default:
3223             g_assert_not_reached();
3224         }
3225     }
3226 
3227     save_globals(s, allocated_regs);
3228 }
3229 
3230 /*
3231  * At a conditional branch, we assume all temporaries are dead and
3232  * all globals and local temps are synced to their location.
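 * Unlike tcg_reg_alloc_bb_end(), no register is freed here: both arms
 * of the branch may still read these values, so they remain live in
 * their registers and only coherence with memory is enforced.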
3233  */
3234 static void tcg_reg_alloc_cbranch(TCGContext *s, TCGRegSet allocated_regs)
3235 {
3236     sync_globals(s, allocated_regs);
3237 
3238     for (int i = s->nb_globals; i < s->nb_temps; i++) {
3239         TCGTemp *ts = &s->temps[i];
3240         /*
3241          * The liveness analysis already ensures that temps are dead.
3242          * Keep tcg_debug_asserts for safety.
3243          */
3244         switch (ts->kind) {
3245         case TEMP_LOCAL:
3246             tcg_debug_assert(ts->val_type != TEMP_VAL_REG || ts->mem_coherent);
3247             break;
3248         case TEMP_NORMAL:
3249             tcg_debug_assert(ts->val_type == TEMP_VAL_DEAD);
3250             break;
3251         case TEMP_CONST:
3252             break;
3253         default:
3254             g_assert_not_reached();
3255         }
3256     }
3257 }
3258 
3259 /*
3260  * Specialized code generation for INDEX_op_mov_* with a constant.
3261  */
3262 static void tcg_reg_alloc_do_movi(TCGContext *s, TCGTemp *ots,
3263                                   tcg_target_ulong val, TCGLifeData arg_life,
3264                                   TCGRegSet preferred_regs)
3265 {
3266     /* ENV should not be modified.  */
3267     tcg_debug_assert(!temp_readonly(ots));
3268 
3269     /* The movi is not explicitly generated here.  */
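    /* If a later op needs the value in a register, temp_load() will
       rematerialize it on demand via its TEMP_VAL_CONST case.  */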
3270     if (ots->val_type == TEMP_VAL_REG) {
3271         s->reg_to_temp[ots->reg] = NULL;
3272     }
3273     ots->val_type = TEMP_VAL_CONST;
3274     ots->val = val;
3275     ots->mem_coherent = 0;
3276     if (NEED_SYNC_ARG(0)) {
3277         temp_sync(s, ots, s->reserved_regs, preferred_regs, IS_DEAD_ARG(0));
3278     } else if (IS_DEAD_ARG(0)) {
3279         temp_dead(s, ots);
3280     }
3281 }
3282 
3283 /*
3284  * Specialized code generation for INDEX_op_mov_*.
3285  */
3286 static void tcg_reg_alloc_mov(TCGContext *s, const TCGOp *op)
3287 {
3288     const TCGLifeData arg_life = op->life;
3289     TCGRegSet allocated_regs, preferred_regs;
3290     TCGTemp *ts, *ots;
3291     TCGType otype, itype;
3292 
3293     allocated_regs = s->reserved_regs;
3294     preferred_regs = op->output_pref[0];
3295     ots = arg_temp(op->args[0]);
3296     ts = arg_temp(op->args[1]);
3297 
3298     /* ENV should not be modified.  */
3299     tcg_debug_assert(!temp_readonly(ots));
3300 
3301     /* Note that otype != itype for no-op truncation.  */
3302     otype = ots->type;
3303     itype = ts->type;
3304 
3305     if (ts->val_type == TEMP_VAL_CONST) {
3306         /* propagate constant or generate sti (store immediate) */
3307         tcg_target_ulong val = ts->val;
3308         if (IS_DEAD_ARG(1)) {
3309             temp_dead(s, ts);
3310         }
3311         tcg_reg_alloc_do_movi(s, ots, val, arg_life, preferred_regs);
3312         return;
3313     }
3314 
3315     /* If the source value is in memory we're going to be forced
3316        to have it in a register in order to perform the copy.  Copy
3317        the SOURCE value into its own register first; that way we
3318        don't have to reload SOURCE the next time it is used. */
3319     if (ts->val_type == TEMP_VAL_MEM) {
3320         temp_load(s, ts, tcg_target_available_regs[itype],
3321                   allocated_regs, preferred_regs);
3322     }
3323 
3324     tcg_debug_assert(ts->val_type == TEMP_VAL_REG);
3325     if (IS_DEAD_ARG(0)) {
3326         /* mov to a non-saved dead register makes no sense (even with
3327            liveness analysis disabled). */
3328         tcg_debug_assert(NEED_SYNC_ARG(0));
3329         if (!ots->mem_allocated) {
3330             temp_allocate_frame(s, ots);
3331         }
3332         tcg_out_st(s, otype, ts->reg, ots->mem_base->reg, ots->mem_offset);
3333         if (IS_DEAD_ARG(1)) {
3334             temp_dead(s, ts);
3335         }
3336         temp_dead(s, ots);
3337     } else {
3338         if (IS_DEAD_ARG(1) && ts->kind != TEMP_FIXED) {
3339             /* the mov can be suppressed */
3340             if (ots->val_type == TEMP_VAL_REG) {
3341                 s->reg_to_temp[ots->reg] = NULL;
3342             }
3343             ots->reg = ts->reg;
3344             temp_dead(s, ts);
3345         } else {
3346             if (ots->val_type != TEMP_VAL_REG) {
3347                 /* When allocating a new register, make sure to not spill the
3348                    input one. */
3349                 tcg_regset_set_reg(allocated_regs, ts->reg);
3350                 ots->reg = tcg_reg_alloc(s, tcg_target_available_regs[otype],
3351                                          allocated_regs, preferred_regs,
3352                                          ots->indirect_base);
3353             }
3354             if (!tcg_out_mov(s, otype, ots->reg, ts->reg)) {
3355                 /*
3356                  * Cross register class move not supported.
3357                  * Store the source register into the destination slot
3358                  * and leave the destination temp as TEMP_VAL_MEM.
3359                  */
3360                 assert(!temp_readonly(ots));
3361                 if (!ots->mem_allocated) {
3362                     temp_allocate_frame(s, ots);
3363                 }
3364                 tcg_out_st(s, ts->type, ts->reg,
3365                            ots->mem_base->reg, ots->mem_offset);
3366                 ots->mem_coherent = 1;
3367                 temp_free_or_dead(s, ots, -1);
3368                 return;
3369             }
3370         }
3371         ots->val_type = TEMP_VAL_REG;
3372         ots->mem_coherent = 0;
3373         s->reg_to_temp[ots->reg] = ots;
3374         if (NEED_SYNC_ARG(0)) {
3375             temp_sync(s, ots, allocated_regs, 0, 0);
3376         }
3377     }
3378 }
3379 
3380 /*
3381  * Specialized code generation for INDEX_op_dup_vec.
3382  */
3383 static void tcg_reg_alloc_dup(TCGContext *s, const TCGOp *op)
3384 {
3385     const TCGLifeData arg_life = op->life;
3386     TCGRegSet dup_out_regs, dup_in_regs;
3387     TCGTemp *its, *ots;
3388     TCGType itype, vtype;
3389     intptr_t endian_fixup;
3390     unsigned vece;
3391     bool ok;
3392 
3393     ots = arg_temp(op->args[0]);
3394     its = arg_temp(op->args[1]);
3395 
3396     /* ENV should not be modified.  */
3397     tcg_debug_assert(!temp_readonly(ots));
3398 
3399     itype = its->type;
3400     vece = TCGOP_VECE(op);
3401     vtype = TCGOP_VECL(op) + TCG_TYPE_V64;
3402 
3403     if (its->val_type == TEMP_VAL_CONST) {
3404         /* Propagate constant via movi -> dupi.  */
3405         tcg_target_ulong val = its->val;
3406         if (IS_DEAD_ARG(1)) {
3407             temp_dead(s, its);
3408         }
3409         tcg_reg_alloc_do_movi(s, ots, val, arg_life, op->output_pref[0]);
3410         return;
3411     }
3412 
3413     dup_out_regs = tcg_op_defs[INDEX_op_dup_vec].args_ct[0].regs;
3414     dup_in_regs = tcg_op_defs[INDEX_op_dup_vec].args_ct[1].regs;
3415 
3416     /* Allocate the output register now.  */
3417     if (ots->val_type != TEMP_VAL_REG) {
3418         TCGRegSet allocated_regs = s->reserved_regs;
3419 
3420         if (!IS_DEAD_ARG(1) && its->val_type == TEMP_VAL_REG) {
3421             /* Make sure to not spill the input register. */
3422             tcg_regset_set_reg(allocated_regs, its->reg);
3423         }
3424         ots->reg = tcg_reg_alloc(s, dup_out_regs, allocated_regs,
3425                                  op->output_pref[0], ots->indirect_base);
3426         ots->val_type = TEMP_VAL_REG;
3427         ots->mem_coherent = 0;
3428         s->reg_to_temp[ots->reg] = ots;
3429     }
3430 
3431     switch (its->val_type) {
3432     case TEMP_VAL_REG:
3433         /*
3434          * The dup constraints must be broad, covering all possible VECE.
3435          * However, tcg_out_dup_vec() gets to see the VECE and we allow it
3436          * to fail, indicating that extra moves are required for that case.
3437          */
3438         if (tcg_regset_test_reg(dup_in_regs, its->reg)) {
3439             if (tcg_out_dup_vec(s, vtype, vece, ots->reg, its->reg)) {
3440                 goto done;
3441             }
3442             /* Try again from memory or a vector input register.  */
3443         }
3444         if (!its->mem_coherent) {
3445             /*
3446              * The input register is not synced, and so an extra store
3447              * would be required to use memory.  Attempt an integer-vector
3448              * register move first.  We do not have a TCGRegSet for this.
3449              */
3450             if (tcg_out_mov(s, itype, ots->reg, its->reg)) {
3451                 break;
3452             }
3453             /* Sync the temp back to its slot and load from there.  */
3454             temp_sync(s, its, s->reserved_regs, 0, 0);
3455         }
3456         /* fall through */
3457 
3458     case TEMP_VAL_MEM:
3459 #ifdef HOST_WORDS_BIGENDIAN
3460         endian_fixup = itype == TCG_TYPE_I32 ? 4 : 8;
3461         endian_fixup -= 1 << vece;
3462 #else
3463         endian_fixup = 0;
3464 #endif
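        /*
         * For example, on a big-endian host with itype == TCG_TYPE_I64
         * and vece == MO_8 the fixup is 8 - 1 = 7, since the least
         * significant byte lives at the highest address of the slot.
         */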
3465         if (tcg_out_dupm_vec(s, vtype, vece, ots->reg, its->mem_base->reg,
3466                              its->mem_offset + endian_fixup)) {
3467             goto done;
3468         }
3469         tcg_out_ld(s, itype, ots->reg, its->mem_base->reg, its->mem_offset);
3470         break;
3471 
3472     default:
3473         g_assert_not_reached();
3474     }
3475 
3476     /* We now have a vector input register, so dup must succeed. */
3477     ok = tcg_out_dup_vec(s, vtype, vece, ots->reg, ots->reg);
3478     tcg_debug_assert(ok);
3479 
3480  done:
3481     if (IS_DEAD_ARG(1)) {
3482         temp_dead(s, its);
3483     }
3484     if (NEED_SYNC_ARG(0)) {
3485         temp_sync(s, ots, s->reserved_regs, 0, 0);
3486     }
3487     if (IS_DEAD_ARG(0)) {
3488         temp_dead(s, ots);
3489     }
3490 }
3491 
3492 static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
3493 {
3494     const TCGLifeData arg_life = op->life;
3495     const TCGOpDef * const def = &tcg_op_defs[op->opc];
3496     TCGRegSet i_allocated_regs;
3497     TCGRegSet o_allocated_regs;
3498     int i, k, nb_iargs, nb_oargs;
3499     TCGReg reg;
3500     TCGArg arg;
3501     const TCGArgConstraint *arg_ct;
3502     TCGTemp *ts;
3503     TCGArg new_args[TCG_MAX_OP_ARGS];
3504     int const_args[TCG_MAX_OP_ARGS];
3505 
3506     nb_oargs = def->nb_oargs;
3507     nb_iargs = def->nb_iargs;
3508 
3509     /* copy constants */
3510     memcpy(new_args + nb_oargs + nb_iargs,
3511            op->args + nb_oargs + nb_iargs,
3512            sizeof(TCGArg) * def->nb_cargs);
3513 
3514     i_allocated_regs = s->reserved_regs;
3515     o_allocated_regs = s->reserved_regs;
3516 
3517     /* satisfy input constraints */
3518     for (k = 0; k < nb_iargs; k++) {
3519         TCGRegSet i_preferred_regs, o_preferred_regs;
3520 
3521         i = def->args_ct[nb_oargs + k].sort_index;
3522         arg = op->args[i];
3523         arg_ct = &def->args_ct[i];
3524         ts = arg_temp(arg);
3525 
3526         if (ts->val_type == TEMP_VAL_CONST
3527             && tcg_target_const_match(ts->val, ts->type, arg_ct->ct)) {
3528             /* constant is OK for instruction */
3529             const_args[i] = 1;
3530             new_args[i] = ts->val;
3531             continue;
3532         }
3533 
3534         i_preferred_regs = o_preferred_regs = 0;
3535         if (arg_ct->ialias) {
3536             o_preferred_regs = op->output_pref[arg_ct->alias_index];
3537 
3538             /*
3539              * If the input is readonly, then it cannot also be an
3540              * output and aliased to itself.  If the input is not
3541              * dead after the instruction, we must allocate a new
3542              * register and move it.
3543              */
3544             if (temp_readonly(ts) || !IS_DEAD_ARG(i)) {
3545                 goto allocate_in_reg;
3546             }
3547 
3548             /*
3549              * Check if the current register has already been allocated
3550              * for another input aliased to an output.
3551              */
3552             if (ts->val_type == TEMP_VAL_REG) {
3553                 reg = ts->reg;
3554                 for (int k2 = 0; k2 < k; k2++) {
3555                     int i2 = def->args_ct[nb_oargs + k2].sort_index;
3556                     if (def->args_ct[i2].ialias && reg == new_args[i2]) {
3557                         goto allocate_in_reg;
3558                     }
3559                 }
3560             }
3561             i_preferred_regs = o_preferred_regs;
3562         }
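        /*
         * For example, a two-operand host ALU op aliases output 0 to an
         * input operand: if that input dies here it can be clobbered in
         * place, otherwise allocate_in_reg below copies it to a fresh
         * register first.
         */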
3563 
3564         temp_load(s, ts, arg_ct->regs, i_allocated_regs, i_preferred_regs);
3565         reg = ts->reg;
3566 
3567         if (!tcg_regset_test_reg(arg_ct->regs, reg)) {
3568  allocate_in_reg:
3569             /*
3570              * Allocate a new register matching the constraint
3571              * and move the temporary register into it.
3572              */
3573             temp_load(s, ts, tcg_target_available_regs[ts->type],
3574                       i_allocated_regs, 0);
3575             reg = tcg_reg_alloc(s, arg_ct->regs, i_allocated_regs,
3576                                 o_preferred_regs, ts->indirect_base);
3577             if (!tcg_out_mov(s, ts->type, reg, ts->reg)) {
3578                 /*
3579                  * Cross register class move not supported.  Sync the
3580                  * temp back to its slot and load from there.
3581                  */
3582                 temp_sync(s, ts, i_allocated_regs, 0, 0);
3583                 tcg_out_ld(s, ts->type, reg,
3584                            ts->mem_base->reg, ts->mem_offset);
3585             }
3586         }
3587         new_args[i] = reg;
3588         const_args[i] = 0;
3589         tcg_regset_set_reg(i_allocated_regs, reg);
3590     }
3591 
3592     /* mark dead temporaries and free the associated registers */
3593     for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
3594         if (IS_DEAD_ARG(i)) {
3595             temp_dead(s, arg_temp(op->args[i]));
3596         }
3597     }
3598 
3599     if (def->flags & TCG_OPF_COND_BRANCH) {
3600         tcg_reg_alloc_cbranch(s, i_allocated_regs);
3601     } else if (def->flags & TCG_OPF_BB_END) {
3602         tcg_reg_alloc_bb_end(s, i_allocated_regs);
3603     } else {
3604         if (def->flags & TCG_OPF_CALL_CLOBBER) {
3605             /* XXX: permit a generic clobber register list? */
3606             for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
3607                 if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) {
3608                     tcg_reg_free(s, i, i_allocated_regs);
3609                 }
3610             }
3611         }
3612         if (def->flags & TCG_OPF_SIDE_EFFECTS) {
3613             /* sync globals if the op has side effects and might trigger
3614                an exception. */
3615             sync_globals(s, i_allocated_regs);
3616         }
3617 
3618         /* satisfy the output constraints */
3619         for (k = 0; k < nb_oargs; k++) {
3620             i = def->args_ct[k].sort_index;
3621             arg = op->args[i];
3622             arg_ct = &def->args_ct[i];
3623             ts = arg_temp(arg);
3624 
3625             /* ENV should not be modified.  */
3626             tcg_debug_assert(!temp_readonly(ts));
3627 
3628             if (arg_ct->oalias && !const_args[arg_ct->alias_index]) {
3629                 reg = new_args[arg_ct->alias_index];
3630             } else if (arg_ct->newreg) {
3631                 reg = tcg_reg_alloc(s, arg_ct->regs,
3632                                     i_allocated_regs | o_allocated_regs,
3633                                     op->output_pref[k], ts->indirect_base);
3634             } else {
3635                 reg = tcg_reg_alloc(s, arg_ct->regs, o_allocated_regs,
3636                                     op->output_pref[k], ts->indirect_base);
3637             }
3638             tcg_regset_set_reg(o_allocated_regs, reg);
3639             if (ts->val_type == TEMP_VAL_REG) {
3640                 s->reg_to_temp[ts->reg] = NULL;
3641             }
3642             ts->val_type = TEMP_VAL_REG;
3643             ts->reg = reg;
3644             /*
3645              * Temp value is modified, so the value kept in memory is
3646              * potentially not the same.
3647              */
3648             ts->mem_coherent = 0;
3649             s->reg_to_temp[reg] = ts;
3650             new_args[i] = reg;
3651         }
3652     }
3653 
3654     /* emit instruction */
3655     if (def->flags & TCG_OPF_VECTOR) {
3656         tcg_out_vec_op(s, op->opc, TCGOP_VECL(op), TCGOP_VECE(op),
3657                        new_args, const_args);
3658     } else {
3659         tcg_out_op(s, op->opc, new_args, const_args);
3660     }
3661 
3662     /* move the outputs in the correct register if needed */
3663     for (i = 0; i < nb_oargs; i++) {
3664         ts = arg_temp(op->args[i]);
3665 
3666         /* ENV should not be modified.  */
3667         tcg_debug_assert(!temp_readonly(ts));
3668 
3669         if (NEED_SYNC_ARG(i)) {
3670             temp_sync(s, ts, o_allocated_regs, 0, IS_DEAD_ARG(i));
3671         } else if (IS_DEAD_ARG(i)) {
3672             temp_dead(s, ts);
3673         }
3674     }
3675 }
3676 
3677 static bool tcg_reg_alloc_dup2(TCGContext *s, const TCGOp *op)
3678 {
3679     const TCGLifeData arg_life = op->life;
3680     TCGTemp *ots, *itsl, *itsh;
3681     TCGType vtype = TCGOP_VECL(op) + TCG_TYPE_V64;
3682 
3683     /* This opcode is only valid for 32-bit hosts, for 64-bit elements. */
3684     tcg_debug_assert(TCG_TARGET_REG_BITS == 32);
3685     tcg_debug_assert(TCGOP_VECE(op) == MO_64);
3686 
3687     ots = arg_temp(op->args[0]);
3688     itsl = arg_temp(op->args[1]);
3689     itsh = arg_temp(op->args[2]);
3690 
3691     /* ENV should not be modified.  */
3692     tcg_debug_assert(!temp_readonly(ots));
3693 
3694     /* Allocate the output register now.  */
3695     if (ots->val_type != TEMP_VAL_REG) {
3696         TCGRegSet allocated_regs = s->reserved_regs;
3697         TCGRegSet dup_out_regs =
3698             tcg_op_defs[INDEX_op_dup_vec].args_ct[0].regs;
3699 
3700         /* Make sure to not spill the input registers. */
3701         if (!IS_DEAD_ARG(1) && itsl->val_type == TEMP_VAL_REG) {
3702             tcg_regset_set_reg(allocated_regs, itsl->reg);
3703         }
3704         if (!IS_DEAD_ARG(2) && itsh->val_type == TEMP_VAL_REG) {
3705             tcg_regset_set_reg(allocated_regs, itsh->reg);
3706         }
3707 
3708         ots->reg = tcg_reg_alloc(s, dup_out_regs, allocated_regs,
3709                                  op->output_pref[0], ots->indirect_base);
3710         ots->val_type = TEMP_VAL_REG;
3711         ots->mem_coherent = 0;
3712         s->reg_to_temp[ots->reg] = ots;
3713     }
3714 
3715     /* Promote dup2 of immediates to dupi_vec. */
3716     if (itsl->val_type == TEMP_VAL_CONST && itsh->val_type == TEMP_VAL_CONST) {
3717         uint64_t val = deposit64(itsl->val, 32, 32, itsh->val);
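        /* E.g. itsl->val = 0x89abcdef and itsh->val = 0x01234567 give
           val = 0x0123456789abcdef: the low input supplies bits [31:0],
           the high input bits [63:32]. */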
3718         MemOp vece = MO_64;
3719 
3720         if (val == dup_const(MO_8, val)) {
3721             vece = MO_8;
3722         } else if (val == dup_const(MO_16, val)) {
3723             vece = MO_16;
3724         } else if (val == dup_const(MO_32, val)) {
3725             vece = MO_32;
3726         }
3727 
3728         tcg_out_dupi_vec(s, vtype, vece, ots->reg, val);
3729         goto done;
3730     }
3731 
3732     /* If the two inputs form one 64-bit value, try dupm_vec. */
3733     if (itsl + 1 == itsh && itsl->base_type == TCG_TYPE_I64) {
3734         if (!itsl->mem_coherent) {
3735             temp_sync(s, itsl, s->reserved_regs, 0, 0);
3736         }
3737         if (!itsh->mem_coherent) {
3738             temp_sync(s, itsh, s->reserved_regs, 0, 0);
3739         }
3740 #ifdef HOST_WORDS_BIGENDIAN
3741         TCGTemp *its = itsh;
3742 #else
3743         TCGTemp *its = itsl;
3744 #endif
3745         if (tcg_out_dupm_vec(s, vtype, MO_64, ots->reg,
3746                              its->mem_base->reg, its->mem_offset)) {
3747             goto done;
3748         }
3749     }
3750 
3751     /* Fall back to generic expansion. */
3752     return false;
3753 
3754  done:
3755     if (IS_DEAD_ARG(1)) {
3756         temp_dead(s, itsl);
3757     }
3758     if (IS_DEAD_ARG(2)) {
3759         temp_dead(s, itsh);
3760     }
3761     if (NEED_SYNC_ARG(0)) {
3762         temp_sync(s, ots, s->reserved_regs, 0, IS_DEAD_ARG(0));
3763     } else if (IS_DEAD_ARG(0)) {
3764         temp_dead(s, ots);
3765     }
3766     return true;
3767 }
3768 
3769 #ifdef TCG_TARGET_STACK_GROWSUP
3770 #define STACK_DIR(x) (-(x))
3771 #else
3772 #define STACK_DIR(x) (x)
3773 #endif
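/*
 * On hosts where the stack grows upward (TCG_TARGET_STACK_GROWSUP),
 * argument slots advance in the negative direction; STACK_DIR() folds
 * that sign flip into offset computations.
 */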
3774 
3775 static void tcg_reg_alloc_call(TCGContext *s, TCGOp *op)
3776 {
3777     const int nb_oargs = TCGOP_CALLO(op);
3778     const int nb_iargs = TCGOP_CALLI(op);
3779     const TCGLifeData arg_life = op->life;
3780     int flags, nb_regs, i;
3781     TCGReg reg;
3782     TCGArg arg;
3783     TCGTemp *ts;
3784     intptr_t stack_offset;
3785     size_t call_stack_size;
3786     tcg_insn_unit *func_addr;
3787     int allocate_args;
3788     TCGRegSet allocated_regs;
3789 
3790     func_addr = (tcg_insn_unit *)(intptr_t)op->args[nb_oargs + nb_iargs];
3791     flags = op->args[nb_oargs + nb_iargs + 1];
3792 
3793     nb_regs = ARRAY_SIZE(tcg_target_call_iarg_regs);
3794     if (nb_regs > nb_iargs) {
3795         nb_regs = nb_iargs;
3796     }
3797 
3798     /* assign stack slots first */
3799     call_stack_size = (nb_iargs - nb_regs) * sizeof(tcg_target_long);
3800     call_stack_size = (call_stack_size + TCG_TARGET_STACK_ALIGN - 1) &
3801         ~(TCG_TARGET_STACK_ALIGN - 1);
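    /* The expression above rounds up to a multiple of the (power of 2)
       TCG_TARGET_STACK_ALIGN: e.g. with 16-byte alignment,
       (20 + 15) & ~15 == 32.  */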
3802     allocate_args = (call_stack_size > TCG_STATIC_CALL_ARGS_SIZE);
3803     if (allocate_args) {
3804         /* XXX: if more than TCG_STATIC_CALL_ARGS_SIZE is needed,
3805            preallocate call stack */
3806         tcg_abort();
3807     }
3808 
3809     stack_offset = TCG_TARGET_CALL_STACK_OFFSET;
3810     for (i = nb_regs; i < nb_iargs; i++) {
3811         arg = op->args[nb_oargs + i];
3812 #ifdef TCG_TARGET_STACK_GROWSUP
3813         stack_offset -= sizeof(tcg_target_long);
3814 #endif
3815         if (arg != TCG_CALL_DUMMY_ARG) {
3816             ts = arg_temp(arg);
3817             temp_load(s, ts, tcg_target_available_regs[ts->type],
3818                       s->reserved_regs, 0);
3819             tcg_out_st(s, ts->type, ts->reg, TCG_REG_CALL_STACK, stack_offset);
3820         }
3821 #ifndef TCG_TARGET_STACK_GROWSUP
3822         stack_offset += sizeof(tcg_target_long);
3823 #endif
3824     }
3825 
3826     /* assign input registers */
3827     allocated_regs = s->reserved_regs;
3828     for (i = 0; i < nb_regs; i++) {
3829         arg = op->args[nb_oargs + i];
3830         if (arg != TCG_CALL_DUMMY_ARG) {
3831             ts = arg_temp(arg);
3832             reg = tcg_target_call_iarg_regs[i];
3833 
3834             if (ts->val_type == TEMP_VAL_REG) {
3835                 if (ts->reg != reg) {
3836                     tcg_reg_free(s, reg, allocated_regs);
3837                     if (!tcg_out_mov(s, ts->type, reg, ts->reg)) {
3838                         /*
3839                          * Cross register class move not supported.  Sync the
3840                          * temp back to its slot and load from there.
3841                          */
3842                         temp_sync(s, ts, allocated_regs, 0, 0);
3843                         tcg_out_ld(s, ts->type, reg,
3844                                    ts->mem_base->reg, ts->mem_offset);
3845                     }
3846                 }
3847             } else {
3848                 TCGRegSet arg_set = 0;
3849 
3850                 tcg_reg_free(s, reg, allocated_regs);
3851                 tcg_regset_set_reg(arg_set, reg);
3852                 temp_load(s, ts, arg_set, allocated_regs, 0);
3853             }
3854 
3855             tcg_regset_set_reg(allocated_regs, reg);
3856         }
3857     }
3858 
3859     /* mark dead temporaries and free the associated registers */
3860     for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
3861         if (IS_DEAD_ARG(i)) {
3862             temp_dead(s, arg_temp(op->args[i]));
3863         }
3864     }
3865 
3866     /* clobber call registers */
3867     for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
3868         if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) {
3869             tcg_reg_free(s, i, allocated_regs);
3870         }
3871     }
3872 
3873     /* Save globals if they might be written by the helper, sync them if
3874        they might be read. */
3875     if (flags & TCG_CALL_NO_READ_GLOBALS) {
3876         /* Nothing to do */
3877     } else if (flags & TCG_CALL_NO_WRITE_GLOBALS) {
3878         sync_globals(s, allocated_regs);
3879     } else {
3880         save_globals(s, allocated_regs);
3881     }
3882 
3883     tcg_out_call(s, func_addr);
3884 
3885     /* assign output registers and emit moves if needed */
3886     for (i = 0; i < nb_oargs; i++) {
3887         arg = op->args[i];
3888         ts = arg_temp(arg);
3889 
3890         /* ENV should not be modified.  */
3891         tcg_debug_assert(!temp_readonly(ts));
3892 
3893         reg = tcg_target_call_oarg_regs[i];
3894         tcg_debug_assert(s->reg_to_temp[reg] == NULL);
3895         if (ts->val_type == TEMP_VAL_REG) {
3896             s->reg_to_temp[ts->reg] = NULL;
3897         }
3898         ts->val_type = TEMP_VAL_REG;
3899         ts->reg = reg;
3900         ts->mem_coherent = 0;
3901         s->reg_to_temp[reg] = ts;
3902         if (NEED_SYNC_ARG(i)) {
3903             temp_sync(s, ts, allocated_regs, 0, IS_DEAD_ARG(i));
3904         } else if (IS_DEAD_ARG(i)) {
3905             temp_dead(s, ts);
3906         }
3907     }
3908 }
3909 
3910 #ifdef CONFIG_PROFILER
3911 
3912 /* avoid copy/paste errors */
3913 #define PROF_ADD(to, from, field)                       \
3914     do {                                                \
3915         (to)->field += qatomic_read(&((from)->field));  \
3916     } while (0)
3917 
3918 #define PROF_MAX(to, from, field)                                       \
3919     do {                                                                \
3920         typeof((from)->field) val__ = qatomic_read(&((from)->field));   \
3921         if (val__ > (to)->field) {                                      \
3922             (to)->field = val__;                                        \
3923         }                                                               \
3924     } while (0)
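/*
 * Both macros use the do { ... } while (0) wrapper so that they expand
 * to a single statement and compose safely with if/else, and they read
 * each counter with qatomic_read() because vCPU threads update the
 * fields of their own TCGContext concurrently.
 */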
3925 
3926 /* Pass in a zeroed @prof */
3927 static inline
3928 void tcg_profile_snapshot(TCGProfile *prof, bool counters, bool table)
3929 {
3930     unsigned int n_ctxs = qatomic_read(&tcg_cur_ctxs);
3931     unsigned int i;
3932 
3933     for (i = 0; i < n_ctxs; i++) {
3934         TCGContext *s = qatomic_read(&tcg_ctxs[i]);
3935         const TCGProfile *orig = &s->prof;
3936 
3937         if (counters) {
3938             PROF_ADD(prof, orig, cpu_exec_time);
3939             PROF_ADD(prof, orig, tb_count1);
3940             PROF_ADD(prof, orig, tb_count);
3941             PROF_ADD(prof, orig, op_count);
3942             PROF_MAX(prof, orig, op_count_max);
3943             PROF_ADD(prof, orig, temp_count);
3944             PROF_MAX(prof, orig, temp_count_max);
3945             PROF_ADD(prof, orig, del_op_count);
3946             PROF_ADD(prof, orig, code_in_len);
3947             PROF_ADD(prof, orig, code_out_len);
3948             PROF_ADD(prof, orig, search_out_len);
3949             PROF_ADD(prof, orig, interm_time);
3950             PROF_ADD(prof, orig, code_time);
3951             PROF_ADD(prof, orig, la_time);
3952             PROF_ADD(prof, orig, opt_time);
3953             PROF_ADD(prof, orig, restore_count);
3954             PROF_ADD(prof, orig, restore_time);
3955         }
3956         if (table) {
3957             int i;
3958 
3959             for (i = 0; i < NB_OPS; i++) {
3960                 PROF_ADD(prof, orig, table_op_count[i]);
3961             }
3962         }
3963     }
3964 }
3965 
3966 #undef PROF_ADD
3967 #undef PROF_MAX
3968 
3969 static void tcg_profile_snapshot_counters(TCGProfile *prof)
3970 {
3971     tcg_profile_snapshot(prof, true, false);
3972 }
3973 
3974 static void tcg_profile_snapshot_table(TCGProfile *prof)
3975 {
3976     tcg_profile_snapshot(prof, false, true);
3977 }
3978 
3979 void tcg_dump_op_count(void)
3980 {
3981     TCGProfile prof = {};
3982     int i;
3983 
3984     tcg_profile_snapshot_table(&prof);
3985     for (i = 0; i < NB_OPS; i++) {
3986         qemu_printf("%s %" PRId64 "\n", tcg_op_defs[i].name,
3987                     prof.table_op_count[i]);
3988     }
3989 }
3990 
3991 int64_t tcg_cpu_exec_time(void)
3992 {
3993     unsigned int n_ctxs = qatomic_read(&tcg_cur_ctxs);
3994     unsigned int i;
3995     int64_t ret = 0;
3996 
3997     for (i = 0; i < n_ctxs; i++) {
3998         const TCGContext *s = qatomic_read(&tcg_ctxs[i]);
3999         const TCGProfile *prof = &s->prof;
4000 
4001         ret += qatomic_read(&prof->cpu_exec_time);
4002     }
4003     return ret;
4004 }
4005 #else
4006 void tcg_dump_op_count(void)
4007 {
4008     qemu_printf("[TCG profiler not compiled]\n");
4009 }
4010 
4011 int64_t tcg_cpu_exec_time(void)
4012 {
4013     error_report("%s: TCG profiler not compiled", __func__);
4014     exit(EXIT_FAILURE);
4015 }
4016 #endif
4017 
4018 
4019 int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
4020 {
4021 #ifdef CONFIG_PROFILER
4022     TCGProfile *prof = &s->prof;
4023 #endif
4024     int i, num_insns;
4025     TCGOp *op;
4026 
4027 #ifdef CONFIG_PROFILER
4028     {
4029         int n = 0;
4030 
4031         QTAILQ_FOREACH(op, &s->ops, link) {
4032             n++;
4033         }
4034         qatomic_set(&prof->op_count, prof->op_count + n);
4035         if (n > prof->op_count_max) {
4036             qatomic_set(&prof->op_count_max, n);
4037         }
4038 
4039         n = s->nb_temps;
4040         qatomic_set(&prof->temp_count, prof->temp_count + n);
4041         if (n > prof->temp_count_max) {
4042             qatomic_set(&prof->temp_count_max, n);
4043         }
4044     }
4045 #endif
4046 
4047 #ifdef DEBUG_DISAS
4048     if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)
4049                  && qemu_log_in_addr_range(tb->pc))) {
4050         FILE *logfile = qemu_log_lock();
4051         qemu_log("OP:\n");
4052         tcg_dump_ops(s, false);
4053         qemu_log("\n");
4054         qemu_log_unlock(logfile);
4055     }
4056 #endif
4057 
4058 #ifdef CONFIG_DEBUG_TCG
4059     /* Ensure all labels referenced have been emitted.  */
4060     {
4061         TCGLabel *l;
4062         bool error = false;
4063 
4064         QSIMPLEQ_FOREACH(l, &s->labels, next) {
4065             if (unlikely(!l->present) && l->refs) {
4066                 qemu_log_mask(CPU_LOG_TB_OP,
4067                               "$L%d referenced but not present.\n", l->id);
4068                 error = true;
4069             }
4070         }
4071         assert(!error);
4072     }
4073 #endif
4074 
4075 #ifdef CONFIG_PROFILER
4076     qatomic_set(&prof->opt_time, prof->opt_time - profile_getclock());
4077 #endif
4078 
4079 #ifdef USE_TCG_OPTIMIZATIONS
4080     tcg_optimize(s);
4081 #endif
4082 
4083 #ifdef CONFIG_PROFILER
4084     qatomic_set(&prof->opt_time, prof->opt_time + profile_getclock());
4085     qatomic_set(&prof->la_time, prof->la_time - profile_getclock());
4086 #endif
4087 
4088     reachable_code_pass(s);
4089     liveness_pass_1(s);
4090 
4091     if (s->nb_indirects > 0) {
4092 #ifdef DEBUG_DISAS
4093         if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_IND)
4094                      && qemu_log_in_addr_range(tb->pc))) {
4095             FILE *logfile = qemu_log_lock();
4096             qemu_log("OP before indirect lowering:\n");
4097             tcg_dump_ops(s, false);
4098             qemu_log("\n");
4099             qemu_log_unlock(logfile);
4100         }
4101 #endif
4102         /* Replace indirect temps with direct temps.  */
4103         if (liveness_pass_2(s)) {
4104             /* If changes were made, re-run liveness.  */
4105             liveness_pass_1(s);
4106         }
4107     }
4108 
4109 #ifdef CONFIG_PROFILER
4110     qatomic_set(&prof->la_time, prof->la_time + profile_getclock());
4111 #endif
4112 
4113 #ifdef DEBUG_DISAS
4114     if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_OPT)
4115                  && qemu_log_in_addr_range(tb->pc))) {
4116         FILE *logfile = qemu_log_lock();
4117         qemu_log("OP after optimization and liveness analysis:\n");
4118         tcg_dump_ops(s, true);
4119         qemu_log("\n");
4120         qemu_log_unlock(logfile);
4121     }
4122 #endif
4123 
4124     tcg_reg_alloc_start(s);
4125 
4126     /*
4127      * Reset the buffer pointers when restarting after overflow.
4128      * TODO: Move this into translate-all.c with the rest of the
4129      * buffer management.  Having only this done here is confusing.
4130      */
4131     s->code_buf = tcg_splitwx_to_rw(tb->tc.ptr);
4132     s->code_ptr = s->code_buf;
4133 
4134 #ifdef TCG_TARGET_NEED_LDST_LABELS
4135     QSIMPLEQ_INIT(&s->ldst_labels);
4136 #endif
4137 #ifdef TCG_TARGET_NEED_POOL_LABELS
4138     s->pool_labels = NULL;
4139 #endif
4140 
4141     num_insns = -1;
4142     QTAILQ_FOREACH(op, &s->ops, link) {
4143         TCGOpcode opc = op->opc;
4144 
4145 #ifdef CONFIG_PROFILER
4146         qatomic_set(&prof->table_op_count[opc], prof->table_op_count[opc] + 1);
4147 #endif
4148 
4149         switch (opc) {
4150         case INDEX_op_mov_i32:
4151         case INDEX_op_mov_i64:
4152         case INDEX_op_mov_vec:
4153             tcg_reg_alloc_mov(s, op);
4154             break;
4155         case INDEX_op_dup_vec:
4156             tcg_reg_alloc_dup(s, op);
4157             break;
4158         case INDEX_op_insn_start:
4159             if (num_insns >= 0) {
4160                 size_t off = tcg_current_code_size(s);
4161                 s->gen_insn_end_off[num_insns] = off;
4162                 /* Assert that we do not overflow our stored offset.  */
4163                 assert(s->gen_insn_end_off[num_insns] == off);
4164             }
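            /* The gen_insn_end_off[] entries are narrower than size_t
               (16 bits), hence the assert above and the return of -2
               below once the TB grows past UINT16_MAX bytes.  */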
4165             num_insns++;
4166             for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
4167                 target_ulong a;
4168 #if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
4169                 a = deposit64(op->args[i * 2], 32, 32, op->args[i * 2 + 1]);
4170 #else
4171                 a = op->args[i];
4172 #endif
4173                 s->gen_insn_data[num_insns][i] = a;
4174             }
4175             break;
4176         case INDEX_op_discard:
4177             temp_dead(s, arg_temp(op->args[0]));
4178             break;
4179         case INDEX_op_set_label:
4180             tcg_reg_alloc_bb_end(s, s->reserved_regs);
4181             tcg_out_label(s, arg_label(op->args[0]));
4182             break;
4183         case INDEX_op_call:
4184             tcg_reg_alloc_call(s, op);
4185             break;
4186         case INDEX_op_dup2_vec:
4187             if (tcg_reg_alloc_dup2(s, op)) {
4188                 break;
4189             }
4190             /* fall through */
4191         default:
4192             /* Sanity check that we've not introduced any unhandled opcodes. */
4193             tcg_debug_assert(tcg_op_supported(opc));
4194             /* Note: it would be much faster to have specialized
4195                register allocator functions for some common argument
4196                patterns. */
4197             tcg_reg_alloc_op(s, op);
4198             break;
4199         }
4200 #ifdef CONFIG_DEBUG_TCG
4201         check_regs(s);
4202 #endif
4203         /* Test for (pending) buffer overflow.  The assumption is that any
4204            one operation beginning below the high water mark cannot overrun
4205            the buffer completely.  Thus we can test for overflow after
4206            generating code without having to check during generation.  */
4207         if (unlikely((void *)s->code_ptr > s->code_gen_highwater)) {
4208             return -1;
4209         }
4210         /* Test for TB overflow, as seen by gen_insn_end_off.  */
4211         if (unlikely(tcg_current_code_size(s) > UINT16_MAX)) {
4212             return -2;
4213         }
4214     }
4215     tcg_debug_assert(num_insns >= 0);
4216     s->gen_insn_end_off[num_insns] = tcg_current_code_size(s);
4217 
4218     /* Generate TB finalization at the end of block */
4219 #ifdef TCG_TARGET_NEED_LDST_LABELS
4220     i = tcg_out_ldst_finalize(s);
4221     if (i < 0) {
4222         return i;
4223     }
4224 #endif
4225 #ifdef TCG_TARGET_NEED_POOL_LABELS
4226     i = tcg_out_pool_finalize(s);
4227     if (i < 0) {
4228         return i;
4229     }
4230 #endif
4231     if (!tcg_resolve_relocs(s)) {
4232         return -2;
4233     }
4234 
4235 #ifndef CONFIG_TCG_INTERPRETER
4236     /* flush instruction cache */
4237     flush_idcache_range((uintptr_t)tcg_splitwx_to_rx(s->code_buf),
4238                         (uintptr_t)s->code_buf,
4239                         tcg_ptr_byte_diff(s->code_ptr, s->code_buf));
4240 #endif
4241 
4242     return tcg_current_code_size(s);
4243 }
4244 
4245 #ifdef CONFIG_PROFILER
4246 void tcg_dump_info(void)
4247 {
4248     TCGProfile prof = {};
4249     const TCGProfile *s;
4250     int64_t tb_count;
4251     int64_t tb_div_count;
4252     int64_t tot;
4253 
4254     tcg_profile_snapshot_counters(&prof);
4255     s = &prof;
4256     tb_count = s->tb_count;
4257     tb_div_count = tb_count ? tb_count : 1;
4258     tot = s->interm_time + s->code_time;
4259 
4260     qemu_printf("JIT cycles          %" PRId64 " (%0.3f s at 2.4 GHz)\n",
4261                 tot, tot / 2.4e9);
4262     qemu_printf("translated TBs      %" PRId64 " (aborted=%" PRId64
4263                 " %0.1f%%)\n",
4264                 tb_count, s->tb_count1 - tb_count,
4265                 (double)(s->tb_count1 - s->tb_count)
4266                 / (s->tb_count1 ? s->tb_count1 : 1) * 100.0);
4267     qemu_printf("avg ops/TB          %0.1f max=%d\n",
4268                 (double)s->op_count / tb_div_count, s->op_count_max);
4269     qemu_printf("deleted ops/TB      %0.2f\n",
4270                 (double)s->del_op_count / tb_div_count);
4271     qemu_printf("avg temps/TB        %0.2f max=%d\n",
4272                 (double)s->temp_count / tb_div_count, s->temp_count_max);
4273     qemu_printf("avg host code/TB    %0.1f\n",
4274                 (double)s->code_out_len / tb_div_count);
4275     qemu_printf("avg search data/TB  %0.1f\n",
4276                 (double)s->search_out_len / tb_div_count);
4277 
4278     qemu_printf("cycles/op           %0.1f\n",
4279                 s->op_count ? (double)tot / s->op_count : 0);
4280     qemu_printf("cycles/in byte      %0.1f\n",
4281                 s->code_in_len ? (double)tot / s->code_in_len : 0);
4282     qemu_printf("cycles/out byte     %0.1f\n",
4283                 s->code_out_len ? (double)tot / s->code_out_len : 0);
4284     qemu_printf("cycles/search byte     %0.1f\n",
4285                 s->search_out_len ? (double)tot / s->search_out_len : 0);
4286     if (tot == 0) {
4287         tot = 1;
4288     }
4289     qemu_printf("  gen_interm time   %0.1f%%\n",
4290                 (double)s->interm_time / tot * 100.0);
4291     qemu_printf("  gen_code time     %0.1f%%\n",
4292                 (double)s->code_time / tot * 100.0);
4293     qemu_printf("optim./code time    %0.1f%%\n",
4294                 (double)s->opt_time / (s->code_time ? s->code_time : 1)
4295                 * 100.0);
4296     qemu_printf("liveness/code time  %0.1f%%\n",
4297                 (double)s->la_time / (s->code_time ? s->code_time : 1) * 100.0);
4298     qemu_printf("cpu_restore count   %" PRId64 "\n",
4299                 s->restore_count);
4300     qemu_printf("  avg cycles        %0.1f\n",
4301                 s->restore_count ? (double)s->restore_time / s->restore_count : 0);
4302 }
4303 #else
4304 void tcg_dump_info(void)
4305 {
4306     qemu_printf("[TCG profiler not compiled]\n");
4307 }
4308 #endif
4309 
4310 #ifdef ELF_HOST_MACHINE
4311 /* In order to use this feature, the backend needs to do three things:
4312 
4313    (1) Define ELF_HOST_MACHINE to indicate both what value to
4314        put into the ELF image and to indicate support for the feature.
4315 
4316    (2) Define tcg_register_jit.  This should create a buffer containing
4317        the contents of a .debug_frame section that describes the post-
4318        prologue unwind info for the tcg machine.
4319 
4320    (3) Call tcg_register_jit_int, with the constructed .debug_frame.
4321 */
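/* An illustrative sketch of steps (2) and (3); the CIE/FDE contents and
   the exact frame layout are host-specific, see tcg/<host>/tcg-target.c.inc
   for the real tables:

       static const DebugFrameHeader debug_frame = {
           ...  // CIE describing the prologue, FDE for the JIT region;
                // func_start/func_len are patched by tcg_register_jit_int
       };

       void tcg_register_jit(const void *buf, size_t buf_size)
       {
           tcg_register_jit_int(buf, buf_size,
                                &debug_frame, sizeof(debug_frame));
       }
*/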
4322 
4323 /* Begin GDB interface.  THE FOLLOWING MUST MATCH GDB DOCS.  */
4324 typedef enum {
4325     JIT_NOACTION = 0,
4326     JIT_REGISTER_FN,
4327     JIT_UNREGISTER_FN
4328 } jit_actions_t;
4329 
4330 struct jit_code_entry {
4331     struct jit_code_entry *next_entry;
4332     struct jit_code_entry *prev_entry;
4333     const void *symfile_addr;
4334     uint64_t symfile_size;
4335 };
4336 
4337 struct jit_descriptor {
4338     uint32_t version;
4339     uint32_t action_flag;
4340     struct jit_code_entry *relevant_entry;
4341     struct jit_code_entry *first_entry;
4342 };
4343 
4344 void __jit_debug_register_code(void) __attribute__((noinline));
4345 void __jit_debug_register_code(void)
4346 {
4347     asm("");
4348 }
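/* GDB sets a breakpoint in this function; the noinline attribute and
   the empty asm keep the call from being inlined or optimized away, so
   the breakpoint reliably fires whenever the descriptor is updated.  */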
4349 
4350 /* Must statically initialize the version, because GDB may check
4351    the version before we can set it.  */
4352 struct jit_descriptor __jit_debug_descriptor = { 1, 0, 0, 0 };
4353 
4354 /* End GDB interface.  */
4355 
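/* Return the offset of STR within STRTAB.  STR must be present: the
   loop has no terminating bound, so an absent string would run off the
   end of the table.  */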
4356 static int find_string(const char *strtab, const char *str)
4357 {
4358     const char *p = strtab + 1;
4359 
4360     while (1) {
4361         if (strcmp(p, str) == 0) {
4362             return p - strtab;
4363         }
4364         p += strlen(p) + 1;
4365     }
4366 }
4367 
4368 static void tcg_register_jit_int(const void *buf_ptr, size_t buf_size,
4369                                  const void *debug_frame,
4370                                  size_t debug_frame_size)
4371 {
4372     struct __attribute__((packed)) DebugInfo {
4373         uint32_t  len;
4374         uint16_t  version;
4375         uint32_t  abbrev;
4376         uint8_t   ptr_size;
4377         uint8_t   cu_die;
4378         uint16_t  cu_lang;
4379         uintptr_t cu_low_pc;
4380         uintptr_t cu_high_pc;
4381         uint8_t   fn_die;
4382         char      fn_name[16];
4383         uintptr_t fn_low_pc;
4384         uintptr_t fn_high_pc;
4385         uint8_t   cu_eoc;
4386     };
4387 
4388     struct ElfImage {
4389         ElfW(Ehdr) ehdr;
4390         ElfW(Phdr) phdr;
4391         ElfW(Shdr) shdr[7];
4392         ElfW(Sym)  sym[2];
4393         struct DebugInfo di;
4394         uint8_t    da[24];
4395         char       str[80];
4396     };
4397 
4398     struct ElfImage *img;
4399 
4400     static const struct ElfImage img_template = {
4401         .ehdr = {
4402             .e_ident[EI_MAG0] = ELFMAG0,
4403             .e_ident[EI_MAG1] = ELFMAG1,
4404             .e_ident[EI_MAG2] = ELFMAG2,
4405             .e_ident[EI_MAG3] = ELFMAG3,
4406             .e_ident[EI_CLASS] = ELF_CLASS,
4407             .e_ident[EI_DATA] = ELF_DATA,
4408             .e_ident[EI_VERSION] = EV_CURRENT,
4409             .e_type = ET_EXEC,
4410             .e_machine = ELF_HOST_MACHINE,
4411             .e_version = EV_CURRENT,
4412             .e_phoff = offsetof(struct ElfImage, phdr),
4413             .e_shoff = offsetof(struct ElfImage, shdr),
4414             .e_ehsize = sizeof(ElfW(Ehdr)),
4415             .e_phentsize = sizeof(ElfW(Phdr)),
4416             .e_phnum = 1,
4417             .e_shentsize = sizeof(ElfW(Shdr)),
4418             .e_shnum = ARRAY_SIZE(img->shdr),
4419             .e_shstrndx = ARRAY_SIZE(img->shdr) - 1,
4420 #ifdef ELF_HOST_FLAGS
4421             .e_flags = ELF_HOST_FLAGS,
4422 #endif
4423 #ifdef ELF_OSABI
4424             .e_ident[EI_OSABI] = ELF_OSABI,
4425 #endif
4426         },
4427         .phdr = {
4428             .p_type = PT_LOAD,
4429             .p_flags = PF_X,
4430         },
4431         .shdr = {
4432             [0] = { .sh_type = SHT_NULL },
4433             /* Trick: The contents of code_gen_buffer are not present in
4434                this fake ELF file; that got allocated elsewhere.  Therefore
4435                we mark .text as SHT_NOBITS (similar to .bss) so that readers
4436                will not look for contents.  We can record any address.  */
4437             [1] = { /* .text */
4438                 .sh_type = SHT_NOBITS,
4439                 .sh_flags = SHF_EXECINSTR | SHF_ALLOC,
4440             },
4441             [2] = { /* .debug_info */
4442                 .sh_type = SHT_PROGBITS,
4443                 .sh_offset = offsetof(struct ElfImage, di),
4444                 .sh_size = sizeof(struct DebugInfo),
4445             },
4446             [3] = { /* .debug_abbrev */
4447                 .sh_type = SHT_PROGBITS,
4448                 .sh_offset = offsetof(struct ElfImage, da),
4449                 .sh_size = sizeof(img->da),
4450             },
4451             [4] = { /* .debug_frame */
4452                 .sh_type = SHT_PROGBITS,
4453                 .sh_offset = sizeof(struct ElfImage),
4454             },
4455             [5] = { /* .symtab */
4456                 .sh_type = SHT_SYMTAB,
4457                 .sh_offset = offsetof(struct ElfImage, sym),
4458                 .sh_size = sizeof(img->sym),
4459                 .sh_info = 1,
4460                 .sh_link = ARRAY_SIZE(img->shdr) - 1,
4461                 .sh_entsize = sizeof(ElfW(Sym)),
4462             },
4463             [6] = { /* .strtab */
4464                 .sh_type = SHT_STRTAB,
4465                 .sh_offset = offsetof(struct ElfImage, str),
4466                 .sh_size = sizeof(img->str),
4467             }
4468         },
4469         .sym = {
4470             [1] = { /* code_gen_buffer */
4471                 .st_info = ELF_ST_INFO(STB_GLOBAL, STT_FUNC),
4472                 .st_shndx = 1,
4473             }
4474         },
4475         .di = {
4476             .len = sizeof(struct DebugInfo) - 4,
4477             .version = 2,
4478             .ptr_size = sizeof(void *),
4479             .cu_die = 1,
4480             .cu_lang = 0x8001,  /* DW_LANG_Mips_Assembler */
4481             .fn_die = 2,
4482             .fn_name = "code_gen_buffer"
4483         },
4484         .da = {
4485             1,          /* abbrev number (the cu) */
4486             0x11, 1,    /* DW_TAG_compile_unit, has children */
4487             0x13, 0x5,  /* DW_AT_language, DW_FORM_data2 */
4488             0x11, 0x1,  /* DW_AT_low_pc, DW_FORM_addr */
4489             0x12, 0x1,  /* DW_AT_high_pc, DW_FORM_addr */
4490             0, 0,       /* end of abbrev */
4491             2,          /* abbrev number (the fn) */
4492             0x2e, 0,    /* DW_TAG_subprogram, no children */
4493             0x3, 0x8,   /* DW_AT_name, DW_FORM_string */
4494             0x11, 0x1,  /* DW_AT_low_pc, DW_FORM_addr */
4495             0x12, 0x1,  /* DW_AT_high_pc, DW_FORM_addr */
4496             0, 0,       /* end of abbrev */
4497             0           /* no more abbrev */
4498         },
4499         .str = "\0" ".text\0" ".debug_info\0" ".debug_abbrev\0"
4500                ".debug_frame\0" ".symtab\0" ".strtab\0" "code_gen_buffer",
4501     };
4502 
4503     /* We only need a single jit entry; statically allocate it.  */
4504     static struct jit_code_entry one_entry;
4505 
4506     uintptr_t buf = (uintptr_t)buf_ptr;
4507     size_t img_size = sizeof(struct ElfImage) + debug_frame_size;
4508     DebugFrameHeader *dfh;
4509 
4510     img = g_malloc(img_size);
4511     *img = img_template;
4512 
4513     img->phdr.p_vaddr = buf;
4514     img->phdr.p_paddr = buf;
4515     img->phdr.p_memsz = buf_size;
4516 
4517     img->shdr[1].sh_name = find_string(img->str, ".text");
4518     img->shdr[1].sh_addr = buf;
4519     img->shdr[1].sh_size = buf_size;
4520 
4521     img->shdr[2].sh_name = find_string(img->str, ".debug_info");
4522     img->shdr[3].sh_name = find_string(img->str, ".debug_abbrev");
4523 
4524     img->shdr[4].sh_name = find_string(img->str, ".debug_frame");
4525     img->shdr[4].sh_size = debug_frame_size;
4526 
4527     img->shdr[5].sh_name = find_string(img->str, ".symtab");
4528     img->shdr[6].sh_name = find_string(img->str, ".strtab");
4529 
4530     img->sym[1].st_name = find_string(img->str, "code_gen_buffer");
4531     img->sym[1].st_value = buf;
4532     img->sym[1].st_size = buf_size;
4533 
4534     img->di.cu_low_pc = buf;
4535     img->di.cu_high_pc = buf + buf_size;
4536     img->di.fn_low_pc = buf;
4537     img->di.fn_high_pc = buf + buf_size;
4538 
4539     dfh = (DebugFrameHeader *)(img + 1);
4540     memcpy(dfh, debug_frame, debug_frame_size);
4541     dfh->fde.func_start = buf;
4542     dfh->fde.func_len = buf_size;
4543 
4544 #ifdef DEBUG_JIT
4545     /* Enable this block to debug the creation of the ELF image file.
4546        One can use readelf, objdump, or other inspection utilities.  */
4547     {
4548         FILE *f = fopen("/tmp/qemu.jit", "w+b");
4549         if (f) {
4550             if (fwrite(img, img_size, 1, f) != 1) {
4551                 /* Avoid stupid unused return value warning for fwrite.  */
4552             }
4553             fclose(f);
4554         }
4555     }
4556 #endif
4557 
4558     one_entry.symfile_addr = img;
4559     one_entry.symfile_size = img_size;
4560 
4561     __jit_debug_descriptor.action_flag = JIT_REGISTER_FN;
4562     __jit_debug_descriptor.relevant_entry = &one_entry;
4563     __jit_debug_descriptor.first_entry = &one_entry;
4564     __jit_debug_register_code();
4565 }
4566 #else
4567 /* No support for the feature.  Provide the entry point expected by exec.c,
4568    and implement the internal function we declared earlier.  */
4569 
4570 static void tcg_register_jit_int(const void *buf, size_t size,
4571                                  const void *debug_frame,
4572                                  size_t debug_frame_size)
4573 {
4574 }
4575 
4576 void tcg_register_jit(const void *buf, size_t buf_size)
4577 {
4578 }
4579 #endif /* ELF_HOST_MACHINE */
4580 
4581 #if !TCG_TARGET_MAYBE_vec
4582 void tcg_expand_vec_op(TCGOpcode o, TCGType t, unsigned e, TCGArg a0, ...)
4583 {
4584     g_assert_not_reached();
4585 }
4586 #endif
4587