xref: /openbmc/qemu/tcg/tcg.c (revision 89de4b91)
1 /*
2  * Tiny Code Generator for QEMU
3  *
4  * Copyright (c) 2008 Fabrice Bellard
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to deal
8  * in the Software without restriction, including without limitation the rights
9  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10  * copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22  * THE SOFTWARE.
23  */
24 
25 /* define it to use liveness analysis (better code) */
26 #define USE_TCG_OPTIMIZATIONS
27 
28 #include "qemu/osdep.h"
29 
30 /* Define to jump the ELF file used to communicate with GDB.  */
31 #undef DEBUG_JIT
32 
33 #include "qemu/cutils.h"
34 #include "qemu/host-utils.h"
35 #include "qemu/timer.h"
36 
37 /* Note: the long term plan is to reduce the dependencies on the QEMU
38    CPU definitions. Currently they are used for qemu_ld/st
39    instructions */
40 #define NO_CPU_IO_DEFS
41 #include "cpu.h"
42 
43 #include "exec/cpu-common.h"
44 #include "exec/exec-all.h"
45 
46 #include "tcg-op.h"
47 
48 #if UINTPTR_MAX == UINT32_MAX
49 # define ELF_CLASS  ELFCLASS32
50 #else
51 # define ELF_CLASS  ELFCLASS64
52 #endif
53 #ifdef HOST_WORDS_BIGENDIAN
54 # define ELF_DATA   ELFDATA2MSB
55 #else
56 # define ELF_DATA   ELFDATA2LSB
57 #endif
58 
59 #include "elf.h"
60 #include "exec/log.h"
61 
62 /* Forward declarations for functions declared in tcg-target.inc.c and
63    used here. */
64 static void tcg_target_init(TCGContext *s);
65 static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode);
66 static void tcg_target_qemu_prologue(TCGContext *s);
67 static void patch_reloc(tcg_insn_unit *code_ptr, int type,
68                         intptr_t value, intptr_t addend);
69 
70 /* The CIE and FDE header definitions will be common to all hosts.  */
/* Common Information Entry header for the synthetic .debug_frame we hand
   to GDB.  Single-byte fields stand in for the (U)LEB128 encodings, which
   is sufficient for the small values emitted here.  */
typedef struct {
    /* Record length, not counting this field; pointer-aligned so the
       host-specific unwind data that follows stays aligned.  */
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t id;             /* distinguishes CIE records from FDEs */
    uint8_t version;         /* CFI format version */
    char augmentation[1];    /* NUL-terminated augmentation string */
    uint8_t code_align;      /* code alignment factor */
    uint8_t data_align;      /* data alignment factor */
    uint8_t return_column;   /* register column holding the return address */
} DebugFrameCIE;

/* Frame Description Entry header covering one contiguous code range.  */
typedef struct QEMU_PACKED {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t cie_offset;     /* offset back to the governing CIE */
    uintptr_t func_start;    /* start address of the covered code */
    uintptr_t func_len;      /* length in bytes of the covered code */
} DebugFrameFDEHeader;

/* A CIE immediately followed by one FDE, as emitted per host target.  */
typedef struct QEMU_PACKED {
    DebugFrameCIE cie;
    DebugFrameFDEHeader fde;
} DebugFrameHeader;
92 
93 static void tcg_register_jit_int(void *buf, size_t size,
94                                  const void *debug_frame,
95                                  size_t debug_frame_size)
96     __attribute__((unused));
97 
98 /* Forward declarations for functions declared and used in tcg-target.inc.c. */
99 static const char *target_parse_constraint(TCGArgConstraint *ct,
100                                            const char *ct_str, TCGType type);
101 static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1,
102                        intptr_t arg2);
103 static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
104 static void tcg_out_movi(TCGContext *s, TCGType type,
105                          TCGReg ret, tcg_target_long arg);
106 static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
107                        const int *const_args);
108 static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1,
109                        intptr_t arg2);
110 static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
111                         TCGReg base, intptr_t ofs);
112 static void tcg_out_call(TCGContext *s, tcg_insn_unit *target);
113 static int tcg_target_const_match(tcg_target_long val, TCGType type,
114                                   const TCGArgConstraint *arg_ct);
115 #ifdef TCG_TARGET_NEED_LDST_LABELS
116 static bool tcg_out_ldst_finalize(TCGContext *s);
117 #endif
118 
/* Register sets filled in by tcg_target_init(); available_regs is indexed
   by value type (presumably TCG_TYPE_I32/I64 — confirm in tcg-target.inc.c).  */
static TCGRegSet tcg_target_available_regs[2];
static TCGRegSet tcg_target_call_clobber_regs;
121 
#if TCG_TARGET_INSN_UNIT_SIZE == 1
/* Append one byte to the generated-code stream.  */
static __attribute__((unused)) inline void tcg_out8(TCGContext *s, uint8_t v)
{
    *s->code_ptr++ = v;
}

/* Overwrite one previously emitted byte in place.  */
static __attribute__((unused)) inline void tcg_patch8(tcg_insn_unit *p,
                                                      uint8_t v)
{
    *p = v;
}
#endif
134 
#if TCG_TARGET_INSN_UNIT_SIZE <= 2
/* Append a 16-bit value to the generated-code stream.  When the insn
   unit is smaller than 16 bits, emit via memcpy and advance code_ptr by
   the equivalent number of units.  */
static __attribute__((unused)) inline void tcg_out16(TCGContext *s, uint16_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (2 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

/* Overwrite a previously emitted 16-bit value in place.  */
static __attribute__((unused)) inline void tcg_patch16(tcg_insn_unit *p,
                                                       uint16_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif
157 
#if TCG_TARGET_INSN_UNIT_SIZE <= 4
/* Append a 32-bit value to the generated-code stream; memcpy path covers
   insn units smaller than 32 bits.  */
static __attribute__((unused)) inline void tcg_out32(TCGContext *s, uint32_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (4 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

/* Overwrite a previously emitted 32-bit value in place.  */
static __attribute__((unused)) inline void tcg_patch32(tcg_insn_unit *p,
                                                       uint32_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif
180 
#if TCG_TARGET_INSN_UNIT_SIZE <= 8
/* Append a 64-bit value to the generated-code stream; memcpy path covers
   insn units smaller than 64 bits.  */
static __attribute__((unused)) inline void tcg_out64(TCGContext *s, uint64_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (8 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

/* Overwrite a previously emitted 64-bit value in place.  */
static __attribute__((unused)) inline void tcg_patch64(tcg_insn_unit *p,
                                                       uint64_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif
203 
204 /* label relocation processing */
205 
/* Apply or record a relocation of kind TYPE against label L for the
   instruction at CODE_PTR.  A resolved label is patched immediately;
   otherwise a TCGRelocation is pushed onto the label's pending list,
   to be applied by tcg_out_label() when the label gets a value.  */
static void tcg_out_reloc(TCGContext *s, tcg_insn_unit *code_ptr, int type,
                          TCGLabel *l, intptr_t addend)
{
    TCGRelocation *r;

    if (l->has_value) {
        /* FIXME: This may break relocations on RISC targets that
           modify instruction fields in place.  The caller may not have
           written the initial value.  */
        patch_reloc(code_ptr, type, l->u.value, addend);
    } else {
        /* add a new relocation entry; note the allocation comes from the
           per-translation pool and is never individually freed */
        r = tcg_malloc(sizeof(TCGRelocation));
        r->type = type;
        r->ptr = code_ptr;
        r->addend = addend;
        r->next = l->u.first_reloc;
        l->u.first_reloc = r;
    }
}
226 
/* Resolve label L to the code address PTR, applying every relocation
   queued against it.  A label may be resolved only once.  Note the
   pending-reloc list and the value share a union, so the list must be
   walked before value_ptr is stored.  */
static void tcg_out_label(TCGContext *s, TCGLabel *l, tcg_insn_unit *ptr)
{
    intptr_t value = (intptr_t)ptr;
    TCGRelocation *r;

    tcg_debug_assert(!l->has_value);

    for (r = l->u.first_reloc; r != NULL; r = r->next) {
        patch_reloc(r->ptr, r->type, value, r->addend);
    }

    l->has_value = 1;
    l->u.value_ptr = ptr;
}
241 
242 TCGLabel *gen_new_label(void)
243 {
244     TCGContext *s = &tcg_ctx;
245     TCGLabel *l = tcg_malloc(sizeof(TCGLabel));
246 
247     *l = (TCGLabel){
248         .id = s->nb_labels++
249     };
250 
251     return l;
252 }
253 
254 #include "tcg-target.inc.c"
255 
256 /* pool based memory allocation */
257 void *tcg_malloc_internal(TCGContext *s, int size)
258 {
259     TCGPool *p;
260     int pool_size;
261 
262     if (size > TCG_POOL_CHUNK_SIZE) {
263         /* big malloc: insert a new pool (XXX: could optimize) */
264         p = g_malloc(sizeof(TCGPool) + size);
265         p->size = size;
266         p->next = s->pool_first_large;
267         s->pool_first_large = p;
268         return p->data;
269     } else {
270         p = s->pool_current;
271         if (!p) {
272             p = s->pool_first;
273             if (!p)
274                 goto new_pool;
275         } else {
276             if (!p->next) {
277             new_pool:
278                 pool_size = TCG_POOL_CHUNK_SIZE;
279                 p = g_malloc(sizeof(TCGPool) + pool_size);
280                 p->size = pool_size;
281                 p->next = NULL;
282                 if (s->pool_current)
283                     s->pool_current->next = p;
284                 else
285                     s->pool_first = p;
286             } else {
287                 p = p->next;
288             }
289         }
290     }
291     s->pool_current = p;
292     s->pool_cur = p->data + size;
293     s->pool_end = p->data + p->size;
294     return p->data;
295 }
296 
297 void tcg_pool_reset(TCGContext *s)
298 {
299     TCGPool *p, *t;
300     for (p = s->pool_first_large; p; p = t) {
301         t = p->next;
302         g_free(p);
303     }
304     s->pool_first_large = NULL;
305     s->pool_cur = s->pool_end = NULL;
306     s->pool_current = NULL;
307 }
308 
/* Metadata for one TCG helper, looked up by function pointer from the
   s->helpers hash table in tcg_gen_callN().  */
typedef struct TCGHelperInfo {
    void *func;           /* host address of the helper */
    const char *name;     /* helper name for logging/dumps */
    unsigned flags;       /* call flags consumed by tcg_gen_callN */
    unsigned sizemask;    /* packed 64-bit/signedness bits per arg and return */
} TCGHelperInfo;
315 
316 #include "exec/helper-proto.h"
317 
/* One TCGHelperInfo entry per declared helper; the initializer list is
   expanded from exec/helper-tcg.h.  */
static const TCGHelperInfo all_helpers[] = {
#include "exec/helper-tcg.h"
};
321 
/* Alternate register allocation order computed by tcg_context_init():
   the leading run of call-saved registers is reversed, the rest copied.  */
static int indirect_reg_alloc_order[ARRAY_SIZE(tcg_target_reg_alloc_order)];
static void process_op_defs(TCGContext *s);
324 
/* One-time initialization of the TCG context: size and slice the shared
   constraint arrays for every opcode, register all helpers, run the
   host-target setup, and derive the indirect register allocation order.  */
void tcg_context_init(TCGContext *s)
{
    int op, total_args, n, i;
    TCGOpDef *def;
    TCGArgConstraint *args_ct;
    int *sorted_args;
    GHashTable *helper_table;

    memset(s, 0, sizeof(*s));
    s->nb_globals = 0;

    /* Count total number of arguments and allocate the corresponding
       space */
    total_args = 0;
    for(op = 0; op < NB_OPS; op++) {
        def = &tcg_op_defs[op];
        n = def->nb_iargs + def->nb_oargs;
        total_args += n;
    }

    args_ct = g_malloc(sizeof(TCGArgConstraint) * total_args);
    sorted_args = g_malloc(sizeof(int) * total_args);

    /* Hand each opcode its slice of the two shared arrays; the pointers
       advance by that opcode's argument count.  */
    for(op = 0; op < NB_OPS; op++) {
        def = &tcg_op_defs[op];
        def->args_ct = args_ct;
        def->sorted_args = sorted_args;
        n = def->nb_iargs + def->nb_oargs;
        sorted_args += n;
        args_ct += n;
    }

    /* Register helpers.  */
    /* Use g_direct_hash/equal for direct pointer comparisons on func.  */
    s->helpers = helper_table = g_hash_table_new(NULL, NULL);

    for (i = 0; i < ARRAY_SIZE(all_helpers); ++i) {
        g_hash_table_insert(helper_table, (gpointer)all_helpers[i].func,
                            (gpointer)&all_helpers[i]);
    }

    tcg_target_init(s);
    process_op_defs(s);

    /* Reverse the order of the saved registers, assuming they're all at
       the start of tcg_target_reg_alloc_order.  */
    for (n = 0; n < ARRAY_SIZE(tcg_target_reg_alloc_order); ++n) {
        int r = tcg_target_reg_alloc_order[n];
        if (tcg_regset_test_reg(tcg_target_call_clobber_regs, r)) {
            break;
        }
    }
    /* n is now the count of leading call-saved registers (or the whole
       array if none are clobbered).  */
    for (i = 0; i < n; ++i) {
        indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[n - 1 - i];
    }
    for (; i < ARRAY_SIZE(tcg_target_reg_alloc_order); ++i) {
        indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[i];
    }
}
384 
/*
 * Allocate TBs right before their corresponding translated code, making
 * sure that TBs and code are on different cache lines.
 */
TranslationBlock *tcg_tb_alloc(TCGContext *s)
{
    uintptr_t align = qemu_icache_linesize;
    TranslationBlock *tb;
    void *next;

    /* Place the TB at the next cache-line boundary; the translated code
       will start at the boundary after the TB itself.  */
    tb = (void *)ROUND_UP((uintptr_t)s->code_gen_ptr, align);
    next = (void *)ROUND_UP((uintptr_t)(tb + 1), align);

    /* NULL signals the caller that the buffer is full and a flush is
       required.  */
    if (unlikely(next > s->code_gen_highwater)) {
        return NULL;
    }
    s->code_gen_ptr = next;
    s->data_gen_ptr = NULL;
    return tb;
}
405 
/* Generate the host prologue at the start of code_gen_buffer, then shrink
   the buffer so translation begins right after it.  Also registers the
   buffer with the JIT-debug interface and computes the high-water mark
   used to trigger buffer flushes.  */
void tcg_prologue_init(TCGContext *s)
{
    size_t prologue_size, total_size;
    void *buf0, *buf1;

    /* Put the prologue at the beginning of code_gen_buffer.  */
    buf0 = s->code_gen_buffer;
    s->code_ptr = buf0;
    s->code_buf = buf0;
    s->code_gen_prologue = buf0;

    /* Generate the prologue.  */
    tcg_target_qemu_prologue(s);
    buf1 = s->code_ptr;
    flush_icache_range((uintptr_t)buf0, (uintptr_t)buf1);

    /* Deduct the prologue from the buffer.  */
    prologue_size = tcg_current_code_size(s);
    s->code_gen_ptr = buf1;
    s->code_gen_buffer = buf1;
    s->code_buf = buf1;
    total_size = s->code_gen_buffer_size - prologue_size;
    s->code_gen_buffer_size = total_size;

    /* Compute a high-water mark, at which we voluntarily flush the buffer
       and start over.  The size here is arbitrary, significantly larger
       than we expect the code generation for any one opcode to require.  */
    s->code_gen_highwater = s->code_gen_buffer + (total_size - 1024);

    tcg_register_jit(s->code_gen_buffer, total_size);

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
        qemu_log_lock();
        qemu_log("PROLOGUE: [size=%zu]\n", prologue_size);
        log_disas(buf0, prologue_size);
        qemu_log("\n");
        qemu_log_flush();
        qemu_log_unlock();
    }
#endif

    /* Assert that goto_ptr is implemented completely.  */
    if (TCG_TARGET_HAS_goto_ptr) {
        tcg_debug_assert(s->code_gen_epilogue != NULL);
    }
}
453 
/* Reset per-translation state before generating ops for a new TB:
   rewind the memory pool, drop all non-global temps, clear labels and
   the frame offset, and reinitialize the op/parameter buffers.  */
void tcg_func_start(TCGContext *s)
{
    tcg_pool_reset(s);
    s->nb_temps = s->nb_globals;

    /* No temps have been previously allocated for size or locality.  */
    memset(s->free_temps, 0, sizeof(s->free_temps));

    s->nb_labels = 0;
    s->current_frame_offset = s->frame_start;

#ifdef CONFIG_DEBUG_TCG
    s->goto_tb_issue_mask = 0;
#endif

    /* Slot 0 is the list sentinel; real ops start at index 1.  */
    s->gen_op_buf[0].next = 1;
    s->gen_op_buf[0].prev = 0;
    s->gen_next_op_idx = 1;
    s->gen_next_parm_idx = 0;
}
474 
475 static inline int temp_idx(TCGContext *s, TCGTemp *ts)
476 {
477     ptrdiff_t n = ts - s->temps;
478     tcg_debug_assert(n >= 0 && n < s->nb_temps);
479     return n;
480 }
481 
/* Take the next slot from s->temps[], zero-initialized.  */
static inline TCGTemp *tcg_temp_alloc(TCGContext *s)
{
    int n = s->nb_temps++;
    tcg_debug_assert(n < TCG_MAX_TEMPS);
    return memset(&s->temps[n], 0, sizeof(TCGTemp));
}
488 
/* Allocate a temp slot for a global; valid only while no non-global
   temps exist yet (globals occupy the front of the temps array).  */
static inline TCGTemp *tcg_global_alloc(TCGContext *s)
{
    tcg_debug_assert(s->nb_globals == s->nb_temps);
    s->nb_globals++;
    return tcg_temp_alloc(s);
}
495 
/* Create a global that lives permanently in host register REG, marking
   the register reserved so the allocator never reuses it.  Returns the
   temp index of the new global.  */
static int tcg_global_reg_new_internal(TCGContext *s, TCGType type,
                                       TCGReg reg, const char *name)
{
    TCGTemp *ts;

    /* A 32-bit host cannot hold a 64-bit value in a single register.  */
    if (TCG_TARGET_REG_BITS == 32 && type != TCG_TYPE_I32) {
        tcg_abort();
    }

    ts = tcg_global_alloc(s);
    ts->base_type = type;
    ts->type = type;
    ts->fixed_reg = 1;
    ts->reg = reg;
    ts->name = name;
    tcg_regset_set_reg(s->reserved_regs, reg);

    return temp_idx(s, ts);
}
515 
516 void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size)
517 {
518     int idx;
519     s->frame_start = start;
520     s->frame_end = start + size;
521     idx = tcg_global_reg_new_internal(s, TCG_TYPE_PTR, reg, "_frame");
522     s->frame_temp = &s->temps[idx];
523 }
524 
525 TCGv_i32 tcg_global_reg_new_i32(TCGReg reg, const char *name)
526 {
527     TCGContext *s = &tcg_ctx;
528     int idx;
529 
530     if (tcg_regset_test_reg(s->reserved_regs, reg)) {
531         tcg_abort();
532     }
533     idx = tcg_global_reg_new_internal(s, TCG_TYPE_I32, reg, name);
534     return MAKE_TCGV_I32(idx);
535 }
536 
537 TCGv_i64 tcg_global_reg_new_i64(TCGReg reg, const char *name)
538 {
539     TCGContext *s = &tcg_ctx;
540     int idx;
541 
542     if (tcg_regset_test_reg(s->reserved_regs, reg)) {
543         tcg_abort();
544     }
545     idx = tcg_global_reg_new_internal(s, TCG_TYPE_I64, reg, name);
546     return MAKE_TCGV_I64(idx);
547 }
548 
/* Create a global backed by memory at BASE + OFFSET rather than a fixed
   register.  On a 32-bit host a 64-bit global becomes two adjacent
   32-bit temps ("name_0"/"name_1"), laid out so that _0 is the low half
   in guest-visible terms (hence the bigendian offset swap).  Returns the
   temp index of the (first) new global.  */
int tcg_global_mem_new_internal(TCGType type, TCGv_ptr base,
                                intptr_t offset, const char *name)
{
    TCGContext *s = &tcg_ctx;
    TCGTemp *base_ts = &s->temps[GET_TCGV_PTR(base)];
    TCGTemp *ts = tcg_global_alloc(s);
    int indirect_reg = 0, bigendian = 0;
#ifdef HOST_WORDS_BIGENDIAN
    bigendian = 1;
#endif

    /* If the base itself is not pinned to a register, this global must be
       accessed indirectly (load the base first).  */
    if (!base_ts->fixed_reg) {
        /* We do not support double-indirect registers.  */
        tcg_debug_assert(!base_ts->indirect_reg);
        base_ts->indirect_base = 1;
        s->nb_indirects += (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64
                            ? 2 : 1);
        indirect_reg = 1;
    }

    if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
        TCGTemp *ts2 = tcg_global_alloc(s);
        char buf[64];

        /* Low-half temp: offset picks the low word per host endianness.  */
        ts->base_type = TCG_TYPE_I64;
        ts->type = TCG_TYPE_I32;
        ts->indirect_reg = indirect_reg;
        ts->mem_allocated = 1;
        ts->mem_base = base_ts;
        ts->mem_offset = offset + bigendian * 4;
        pstrcpy(buf, sizeof(buf), name);
        pstrcat(buf, sizeof(buf), "_0");
        ts->name = strdup(buf);

        /* High-half temp must directly follow the low half.  */
        tcg_debug_assert(ts2 == ts + 1);
        ts2->base_type = TCG_TYPE_I64;
        ts2->type = TCG_TYPE_I32;
        ts2->indirect_reg = indirect_reg;
        ts2->mem_allocated = 1;
        ts2->mem_base = base_ts;
        ts2->mem_offset = offset + (1 - bigendian) * 4;
        pstrcpy(buf, sizeof(buf), name);
        pstrcat(buf, sizeof(buf), "_1");
        ts2->name = strdup(buf);
    } else {
        ts->base_type = type;
        ts->type = type;
        ts->indirect_reg = indirect_reg;
        ts->mem_allocated = 1;
        ts->mem_base = base_ts;
        ts->mem_offset = offset;
        ts->name = name;
    }
    return temp_idx(s, ts);
}
604 
/* Allocate (or recycle from the free list) a temporary of the given TYPE.
   TEMP_LOCAL selects a "local" temp, which survives across basic-block
   boundaries.  On a 32-bit host a 64-bit temp occupies two adjacent
   32-bit slots.  Returns the temp index.  */
static int tcg_temp_new_internal(TCGType type, int temp_local)
{
    TCGContext *s = &tcg_ctx;
    TCGTemp *ts;
    int idx, k;

    /* Free lists are segregated by (type, locality).  */
    k = type + (temp_local ? TCG_TYPE_COUNT : 0);
    idx = find_first_bit(s->free_temps[k].l, TCG_MAX_TEMPS);
    if (idx < TCG_MAX_TEMPS) {
        /* There is already an available temp with the right type.  */
        clear_bit(idx, s->free_temps[k].l);

        ts = &s->temps[idx];
        ts->temp_allocated = 1;
        tcg_debug_assert(ts->base_type == type);
        tcg_debug_assert(ts->temp_local == temp_local);
    } else {
        ts = tcg_temp_alloc(s);
        if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
            TCGTemp *ts2 = tcg_temp_alloc(s);

            ts->base_type = type;
            ts->type = TCG_TYPE_I32;
            ts->temp_allocated = 1;
            ts->temp_local = temp_local;

            /* The second half must directly follow the first.  */
            tcg_debug_assert(ts2 == ts + 1);
            ts2->base_type = TCG_TYPE_I64;
            ts2->type = TCG_TYPE_I32;
            ts2->temp_allocated = 1;
            ts2->temp_local = temp_local;
        } else {
            ts->base_type = type;
            ts->type = type;
            ts->temp_allocated = 1;
            ts->temp_local = temp_local;
        }
        idx = temp_idx(s, ts);
    }

#if defined(CONFIG_DEBUG_TCG)
    s->temps_in_use++;
#endif
    return idx;
}
650 
651 TCGv_i32 tcg_temp_new_internal_i32(int temp_local)
652 {
653     int idx;
654 
655     idx = tcg_temp_new_internal(TCG_TYPE_I32, temp_local);
656     return MAKE_TCGV_I32(idx);
657 }
658 
659 TCGv_i64 tcg_temp_new_internal_i64(int temp_local)
660 {
661     int idx;
662 
663     idx = tcg_temp_new_internal(TCG_TYPE_I64, temp_local);
664     return MAKE_TCGV_I64(idx);
665 }
666 
/* Return the temp at IDX to the free list keyed by its (type, locality),
   so a later tcg_temp_new_internal can recycle it.  Globals may not be
   freed.  */
static void tcg_temp_free_internal(int idx)
{
    TCGContext *s = &tcg_ctx;
    TCGTemp *ts;
    int k;

#if defined(CONFIG_DEBUG_TCG)
    s->temps_in_use--;
    if (s->temps_in_use < 0) {
        fprintf(stderr, "More temporaries freed than allocated!\n");
    }
#endif

    tcg_debug_assert(idx >= s->nb_globals && idx < s->nb_temps);
    ts = &s->temps[idx];
    tcg_debug_assert(ts->temp_allocated != 0);
    ts->temp_allocated = 0;

    /* Same free-list key scheme as tcg_temp_new_internal.  */
    k = ts->base_type + (ts->temp_local ? TCG_TYPE_COUNT : 0);
    set_bit(idx, s->free_temps[k].l);
}
688 
/* Release a 32-bit temporary for reuse.  */
void tcg_temp_free_i32(TCGv_i32 arg)
{
    tcg_temp_free_internal(GET_TCGV_I32(arg));
}
693 
/* Release a 64-bit temporary for reuse.  */
void tcg_temp_free_i64(TCGv_i64 arg)
{
    tcg_temp_free_internal(GET_TCGV_I64(arg));
}
698 
699 TCGv_i32 tcg_const_i32(int32_t val)
700 {
701     TCGv_i32 t0;
702     t0 = tcg_temp_new_i32();
703     tcg_gen_movi_i32(t0, val);
704     return t0;
705 }
706 
707 TCGv_i64 tcg_const_i64(int64_t val)
708 {
709     TCGv_i64 t0;
710     t0 = tcg_temp_new_i64();
711     tcg_gen_movi_i64(t0, val);
712     return t0;
713 }
714 
715 TCGv_i32 tcg_const_local_i32(int32_t val)
716 {
717     TCGv_i32 t0;
718     t0 = tcg_temp_local_new_i32();
719     tcg_gen_movi_i32(t0, val);
720     return t0;
721 }
722 
723 TCGv_i64 tcg_const_local_i64(int64_t val)
724 {
725     TCGv_i64 t0;
726     t0 = tcg_temp_local_new_i64();
727     tcg_gen_movi_i64(t0, val);
728     return t0;
729 }
730 
#if defined(CONFIG_DEBUG_TCG)
/* Reset the outstanding-temp counter (debug leak detection).  */
void tcg_clear_temp_count(void)
{
    TCGContext *s = &tcg_ctx;
    s->temps_in_use = 0;
}

/* Return nonzero if any temps were leaked since the last check, and
   reset the counter either way.  */
int tcg_check_temp_count(void)
{
    TCGContext *s = &tcg_ctx;
    if (s->temps_in_use) {
        /* Clear the count so that we don't give another
         * warning immediately next time around.
         */
        s->temps_in_use = 0;
        return 1;
    }
    return 0;
}
#endif
751 
/* Return true if OP may appear in the opcode stream.
   Test the runtime variable that controls each opcode.  */
bool tcg_op_supported(TCGOpcode op)
{
    switch (op) {
    /* Structural and memory-access ops every backend must provide.  */
    case INDEX_op_discard:
    case INDEX_op_set_label:
    case INDEX_op_call:
    case INDEX_op_br:
    case INDEX_op_mb:
    case INDEX_op_insn_start:
    case INDEX_op_exit_tb:
    case INDEX_op_goto_tb:
    case INDEX_op_qemu_ld_i32:
    case INDEX_op_qemu_st_i32:
    case INDEX_op_qemu_ld_i64:
    case INDEX_op_qemu_st_i64:
        return true;

    case INDEX_op_goto_ptr:
        return TCG_TARGET_HAS_goto_ptr;

    /* Mandatory 32-bit arithmetic, logic, and load/store ops.  */
    case INDEX_op_mov_i32:
    case INDEX_op_movi_i32:
    case INDEX_op_setcond_i32:
    case INDEX_op_brcond_i32:
    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
    case INDEX_op_add_i32:
    case INDEX_op_sub_i32:
    case INDEX_op_mul_i32:
    case INDEX_op_and_i32:
    case INDEX_op_or_i32:
    case INDEX_op_xor_i32:
    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
        return true;

    /* Optional 32-bit ops, gated by per-target TCG_TARGET_HAS_* flags.  */
    case INDEX_op_movcond_i32:
        return TCG_TARGET_HAS_movcond_i32;
    case INDEX_op_div_i32:
    case INDEX_op_divu_i32:
        return TCG_TARGET_HAS_div_i32;
    case INDEX_op_rem_i32:
    case INDEX_op_remu_i32:
        return TCG_TARGET_HAS_rem_i32;
    case INDEX_op_div2_i32:
    case INDEX_op_divu2_i32:
        return TCG_TARGET_HAS_div2_i32;
    case INDEX_op_rotl_i32:
    case INDEX_op_rotr_i32:
        return TCG_TARGET_HAS_rot_i32;
    case INDEX_op_deposit_i32:
        return TCG_TARGET_HAS_deposit_i32;
    case INDEX_op_extract_i32:
        return TCG_TARGET_HAS_extract_i32;
    case INDEX_op_sextract_i32:
        return TCG_TARGET_HAS_sextract_i32;
    case INDEX_op_add2_i32:
        return TCG_TARGET_HAS_add2_i32;
    case INDEX_op_sub2_i32:
        return TCG_TARGET_HAS_sub2_i32;
    case INDEX_op_mulu2_i32:
        return TCG_TARGET_HAS_mulu2_i32;
    case INDEX_op_muls2_i32:
        return TCG_TARGET_HAS_muls2_i32;
    case INDEX_op_muluh_i32:
        return TCG_TARGET_HAS_muluh_i32;
    case INDEX_op_mulsh_i32:
        return TCG_TARGET_HAS_mulsh_i32;
    case INDEX_op_ext8s_i32:
        return TCG_TARGET_HAS_ext8s_i32;
    case INDEX_op_ext16s_i32:
        return TCG_TARGET_HAS_ext16s_i32;
    case INDEX_op_ext8u_i32:
        return TCG_TARGET_HAS_ext8u_i32;
    case INDEX_op_ext16u_i32:
        return TCG_TARGET_HAS_ext16u_i32;
    case INDEX_op_bswap16_i32:
        return TCG_TARGET_HAS_bswap16_i32;
    case INDEX_op_bswap32_i32:
        return TCG_TARGET_HAS_bswap32_i32;
    case INDEX_op_not_i32:
        return TCG_TARGET_HAS_not_i32;
    case INDEX_op_neg_i32:
        return TCG_TARGET_HAS_neg_i32;
    case INDEX_op_andc_i32:
        return TCG_TARGET_HAS_andc_i32;
    case INDEX_op_orc_i32:
        return TCG_TARGET_HAS_orc_i32;
    case INDEX_op_eqv_i32:
        return TCG_TARGET_HAS_eqv_i32;
    case INDEX_op_nand_i32:
        return TCG_TARGET_HAS_nand_i32;
    case INDEX_op_nor_i32:
        return TCG_TARGET_HAS_nor_i32;
    case INDEX_op_clz_i32:
        return TCG_TARGET_HAS_clz_i32;
    case INDEX_op_ctz_i32:
        return TCG_TARGET_HAS_ctz_i32;
    case INDEX_op_ctpop_i32:
        return TCG_TARGET_HAS_ctpop_i32;

    /* Double-word compare ops exist only on 32-bit hosts.  */
    case INDEX_op_brcond2_i32:
    case INDEX_op_setcond2_i32:
        return TCG_TARGET_REG_BITS == 32;

    /* Basic 64-bit ops require a 64-bit host.  */
    case INDEX_op_mov_i64:
    case INDEX_op_movi_i64:
    case INDEX_op_setcond_i64:
    case INDEX_op_brcond_i64:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
    case INDEX_op_add_i64:
    case INDEX_op_sub_i64:
    case INDEX_op_mul_i64:
    case INDEX_op_and_i64:
    case INDEX_op_or_i64:
    case INDEX_op_xor_i64:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
        return TCG_TARGET_REG_BITS == 64;

    /* Optional 64-bit ops, gated by per-target TCG_TARGET_HAS_* flags.  */
    case INDEX_op_movcond_i64:
        return TCG_TARGET_HAS_movcond_i64;
    case INDEX_op_div_i64:
    case INDEX_op_divu_i64:
        return TCG_TARGET_HAS_div_i64;
    case INDEX_op_rem_i64:
    case INDEX_op_remu_i64:
        return TCG_TARGET_HAS_rem_i64;
    case INDEX_op_div2_i64:
    case INDEX_op_divu2_i64:
        return TCG_TARGET_HAS_div2_i64;
    case INDEX_op_rotl_i64:
    case INDEX_op_rotr_i64:
        return TCG_TARGET_HAS_rot_i64;
    case INDEX_op_deposit_i64:
        return TCG_TARGET_HAS_deposit_i64;
    case INDEX_op_extract_i64:
        return TCG_TARGET_HAS_extract_i64;
    case INDEX_op_sextract_i64:
        return TCG_TARGET_HAS_sextract_i64;
    case INDEX_op_extrl_i64_i32:
        return TCG_TARGET_HAS_extrl_i64_i32;
    case INDEX_op_extrh_i64_i32:
        return TCG_TARGET_HAS_extrh_i64_i32;
    case INDEX_op_ext8s_i64:
        return TCG_TARGET_HAS_ext8s_i64;
    case INDEX_op_ext16s_i64:
        return TCG_TARGET_HAS_ext16s_i64;
    case INDEX_op_ext32s_i64:
        return TCG_TARGET_HAS_ext32s_i64;
    case INDEX_op_ext8u_i64:
        return TCG_TARGET_HAS_ext8u_i64;
    case INDEX_op_ext16u_i64:
        return TCG_TARGET_HAS_ext16u_i64;
    case INDEX_op_ext32u_i64:
        return TCG_TARGET_HAS_ext32u_i64;
    case INDEX_op_bswap16_i64:
        return TCG_TARGET_HAS_bswap16_i64;
    case INDEX_op_bswap32_i64:
        return TCG_TARGET_HAS_bswap32_i64;
    case INDEX_op_bswap64_i64:
        return TCG_TARGET_HAS_bswap64_i64;
    case INDEX_op_not_i64:
        return TCG_TARGET_HAS_not_i64;
    case INDEX_op_neg_i64:
        return TCG_TARGET_HAS_neg_i64;
    case INDEX_op_andc_i64:
        return TCG_TARGET_HAS_andc_i64;
    case INDEX_op_orc_i64:
        return TCG_TARGET_HAS_orc_i64;
    case INDEX_op_eqv_i64:
        return TCG_TARGET_HAS_eqv_i64;
    case INDEX_op_nand_i64:
        return TCG_TARGET_HAS_nand_i64;
    case INDEX_op_nor_i64:
        return TCG_TARGET_HAS_nor_i64;
    case INDEX_op_clz_i64:
        return TCG_TARGET_HAS_clz_i64;
    case INDEX_op_ctz_i64:
        return TCG_TARGET_HAS_ctz_i64;
    case INDEX_op_ctpop_i64:
        return TCG_TARGET_HAS_ctpop_i64;
    case INDEX_op_add2_i64:
        return TCG_TARGET_HAS_add2_i64;
    case INDEX_op_sub2_i64:
        return TCG_TARGET_HAS_sub2_i64;
    case INDEX_op_mulu2_i64:
        return TCG_TARGET_HAS_mulu2_i64;
    case INDEX_op_muls2_i64:
        return TCG_TARGET_HAS_muls2_i64;
    case INDEX_op_muluh_i64:
        return TCG_TARGET_HAS_muluh_i64;
    case INDEX_op_mulsh_i64:
        return TCG_TARGET_HAS_mulsh_i64;

    case NB_OPS:
        break;
    }
    /* Every real opcode is handled above; reaching here is a bug.  */
    g_assert_not_reached();
}
974 
/* Note: we convert the 64 bit args to 32 bit and do some alignment
   and endian swap. Maybe it would be better to do the alignment
   and endian swap in tcg_reg_alloc_call(). */
/*
 * Emit an INDEX_op_call op that invokes helper 'func' with 'nargs'
 * arguments taken from args[], placing the result (if any) in 'ret'
 * (TCG_CALL_DUMMY_ARG for void helpers).  The helper's flags and sizemask
 * are fetched from s->helpers, where it must have been registered
 * beforehand.  NOTE(review): a lookup miss would dereference NULL below --
 * registration is assumed, confirm with callers.
 */
void tcg_gen_callN(TCGContext *s, void *func, TCGArg ret,
                   int nargs, TCGArg *args)
{
    int i, real_args, nb_rets, pi, pi_first;
    unsigned sizemask, flags;
    TCGHelperInfo *info;

    info = g_hash_table_lookup(s->helpers, (gpointer)func);
    flags = info->flags;
    sizemask = info->sizemask;

#if defined(__sparc__) && !defined(__arch64__) \
    && !defined(CONFIG_TCG_INTERPRETER)
    /* We have 64-bit values in one register, but need to pass as two
       separate parameters.  Split them.  */
    int orig_sizemask = sizemask;
    int orig_nargs = nargs;
    TCGv_i64 retl, reth;

    TCGV_UNUSED_I64(retl);
    TCGV_UNUSED_I64(reth);
    if (sizemask != 0) {
        /* Worst case: every argument splits into two 32-bit halves.  */
        TCGArg *split_args = __builtin_alloca(sizeof(TCGArg) * nargs * 2);
        for (i = real_args = 0; i < nargs; ++i) {
            /* Bit (i+1)*2 of sizemask marks argument i as 64-bit.  */
            int is_64bit = sizemask & (1 << (i+1)*2);
            if (is_64bit) {
                TCGv_i64 orig = MAKE_TCGV_I64(args[i]);
                TCGv_i32 h = tcg_temp_new_i32();
                TCGv_i32 l = tcg_temp_new_i32();
                tcg_gen_extr_i64_i32(l, h, orig);
                split_args[real_args++] = GET_TCGV_I32(h);
                split_args[real_args++] = GET_TCGV_I32(l);
            } else {
                split_args[real_args++] = args[i];
            }
        }
        nargs = real_args;
        args = split_args;
        /* All entries are now 32-bit; the original mask is kept in
           orig_sizemask for the reassembly pass below.  */
        sizemask = 0;
    }
#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
    /* Hosts that require it get 32-bit arguments explicitly sign- or
       zero-extended to 64 bits in fresh temporaries.  */
    for (i = 0; i < nargs; ++i) {
        int is_64bit = sizemask & (1 << (i+1)*2);
        int is_signed = sizemask & (2 << (i+1)*2);
        if (!is_64bit) {
            TCGv_i64 temp = tcg_temp_new_i64();
            TCGv_i64 orig = MAKE_TCGV_I64(args[i]);
            if (is_signed) {
                tcg_gen_ext32s_i64(temp, orig);
            } else {
                tcg_gen_ext32u_i64(temp, orig);
            }
            args[i] = GET_TCGV_I64(temp);
        }
    }
#endif /* TCG_TARGET_EXTEND_ARGS */

    /* Parameters are appended to gen_opparam_buf starting at pi_first:
       returns first, then inputs, then function pointer and flags.  */
    pi_first = pi = s->gen_next_parm_idx;
    if (ret != TCG_CALL_DUMMY_ARG) {
#if defined(__sparc__) && !defined(__arch64__) \
    && !defined(CONFIG_TCG_INTERPRETER)
        if (orig_sizemask & 1) {
            /* The 32-bit ABI is going to return the 64-bit value in
               the %o0/%o1 register pair.  Prepare for this by using
               two return temporaries, and reassemble below.  */
            retl = tcg_temp_new_i64();
            reth = tcg_temp_new_i64();
            s->gen_opparam_buf[pi++] = GET_TCGV_I64(reth);
            s->gen_opparam_buf[pi++] = GET_TCGV_I64(retl);
            nb_rets = 2;
        } else {
            s->gen_opparam_buf[pi++] = ret;
            nb_rets = 1;
        }
#else
        /* Bit 0 of sizemask marks the return value as 64-bit.  On 32-bit
           hosts it occupies two temporaries, ordered by host endianness. */
        if (TCG_TARGET_REG_BITS < 64 && (sizemask & 1)) {
#ifdef HOST_WORDS_BIGENDIAN
            s->gen_opparam_buf[pi++] = ret + 1;
            s->gen_opparam_buf[pi++] = ret;
#else
            s->gen_opparam_buf[pi++] = ret;
            s->gen_opparam_buf[pi++] = ret + 1;
#endif
            nb_rets = 2;
        } else {
            s->gen_opparam_buf[pi++] = ret;
            nb_rets = 1;
        }
#endif
    } else {
        nb_rets = 0;
    }
    real_args = 0;
    for (i = 0; i < nargs; i++) {
        int is_64bit = sizemask & (1 << (i+1)*2);
        if (TCG_TARGET_REG_BITS < 64 && is_64bit) {
#ifdef TCG_TARGET_CALL_ALIGN_ARGS
            /* some targets want aligned 64 bit args */
            if (real_args & 1) {
                s->gen_opparam_buf[pi++] = TCG_CALL_DUMMY_ARG;
                real_args++;
            }
#endif
           /* If stack grows up, then we will be placing successive
              arguments at lower addresses, which means we need to
              reverse the order compared to how we would normally
              treat either big or little-endian.  For those arguments
              that will wind up in registers, this still works for
              HPPA (the only current STACK_GROWSUP target) since the
              argument registers are *also* allocated in decreasing
              order.  If another such target is added, this logic may
              have to get more complicated to differentiate between
              stack arguments and register arguments.  */
#if defined(HOST_WORDS_BIGENDIAN) != defined(TCG_TARGET_STACK_GROWSUP)
            s->gen_opparam_buf[pi++] = args[i] + 1;
            s->gen_opparam_buf[pi++] = args[i];
#else
            s->gen_opparam_buf[pi++] = args[i];
            s->gen_opparam_buf[pi++] = args[i] + 1;
#endif
            real_args += 2;
            continue;
        }

        s->gen_opparam_buf[pi++] = args[i];
        real_args++;
    }
    /* Trailing parameters: the callee address and the call flags.  */
    s->gen_opparam_buf[pi++] = (uintptr_t)func;
    s->gen_opparam_buf[pi++] = flags;

    i = s->gen_next_op_idx;
    tcg_debug_assert(i < OPC_BUF_SIZE);
    tcg_debug_assert(pi <= OPPARAM_BUF_SIZE);

    /* Set links for sequential allocation during translation.  */
    s->gen_op_buf[i] = (TCGOp){
        .opc = INDEX_op_call,
        .callo = nb_rets,
        .calli = real_args,
        .args = pi_first,
        .prev = i - 1,
        .next = i + 1
    };

    /* Make sure the calli field didn't overflow.  */
    tcg_debug_assert(s->gen_op_buf[i].calli == real_args);

    s->gen_op_buf[0].prev = i;
    s->gen_next_op_idx = i + 1;
    s->gen_next_parm_idx = pi;

#if defined(__sparc__) && !defined(__arch64__) \
    && !defined(CONFIG_TCG_INTERPRETER)
    /* Free all of the parts we allocated above.  */
    for (i = real_args = 0; i < orig_nargs; ++i) {
        int is_64bit = orig_sizemask & (1 << (i+1)*2);
        if (is_64bit) {
            TCGv_i32 h = MAKE_TCGV_I32(args[real_args++]);
            TCGv_i32 l = MAKE_TCGV_I32(args[real_args++]);
            tcg_temp_free_i32(h);
            tcg_temp_free_i32(l);
        } else {
            real_args++;
        }
    }
    if (orig_sizemask & 1) {
        /* The 32-bit ABI returned two 32-bit pieces.  Re-assemble them.
           Note that describing these as TCGv_i64 eliminates an unnecessary
           zero-extension that tcg_gen_concat_i32_i64 would create.  */
        tcg_gen_concat32_i64(MAKE_TCGV_I64(ret), retl, reth);
        tcg_temp_free_i64(retl);
        tcg_temp_free_i64(reth);
    }
#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
    /* Release the extension temporaries allocated before the call.  */
    for (i = 0; i < nargs; ++i) {
        int is_64bit = sizemask & (1 << (i+1)*2);
        if (!is_64bit) {
            TCGv_i64 temp = MAKE_TCGV_I64(args[i]);
            tcg_temp_free_i64(temp);
        }
    }
#endif /* TCG_TARGET_EXTEND_ARGS */
}
1161 
1162 static void tcg_reg_alloc_start(TCGContext *s)
1163 {
1164     int i;
1165     TCGTemp *ts;
1166     for(i = 0; i < s->nb_globals; i++) {
1167         ts = &s->temps[i];
1168         if (ts->fixed_reg) {
1169             ts->val_type = TEMP_VAL_REG;
1170         } else {
1171             ts->val_type = TEMP_VAL_MEM;
1172         }
1173     }
1174     for(i = s->nb_globals; i < s->nb_temps; i++) {
1175         ts = &s->temps[i];
1176         if (ts->temp_local) {
1177             ts->val_type = TEMP_VAL_MEM;
1178         } else {
1179             ts->val_type = TEMP_VAL_DEAD;
1180         }
1181         ts->mem_allocated = 0;
1182         ts->fixed_reg = 0;
1183     }
1184 
1185     memset(s->reg_to_temp, 0, sizeof(s->reg_to_temp));
1186 }
1187 
1188 static char *tcg_get_arg_str_ptr(TCGContext *s, char *buf, int buf_size,
1189                                  TCGTemp *ts)
1190 {
1191     int idx = temp_idx(s, ts);
1192 
1193     if (idx < s->nb_globals) {
1194         pstrcpy(buf, buf_size, ts->name);
1195     } else if (ts->temp_local) {
1196         snprintf(buf, buf_size, "loc%d", idx - s->nb_globals);
1197     } else {
1198         snprintf(buf, buf_size, "tmp%d", idx - s->nb_globals);
1199     }
1200     return buf;
1201 }
1202 
1203 static char *tcg_get_arg_str_idx(TCGContext *s, char *buf,
1204                                  int buf_size, int idx)
1205 {
1206     tcg_debug_assert(idx >= 0 && idx < s->nb_temps);
1207     return tcg_get_arg_str_ptr(s, buf, buf_size, &s->temps[idx]);
1208 }
1209 
1210 /* Find helper name.  */
1211 static inline const char *tcg_find_helper(TCGContext *s, uintptr_t val)
1212 {
1213     const char *ret = NULL;
1214     if (s->helpers) {
1215         TCGHelperInfo *info = g_hash_table_lookup(s->helpers, (gpointer)val);
1216         if (info) {
1217             ret = info->name;
1218         }
1219     }
1220     return ret;
1221 }
1222 
/* Printable names for comparison conditions, indexed by TCGCond;
   used by tcg_dump_ops when logging brcond/setcond/movcond ops. */
static const char * const cond_name[] =
{
    [TCG_COND_NEVER] = "never",
    [TCG_COND_ALWAYS] = "always",
    [TCG_COND_EQ] = "eq",
    [TCG_COND_NE] = "ne",
    [TCG_COND_LT] = "lt",
    [TCG_COND_GE] = "ge",
    [TCG_COND_LE] = "le",
    [TCG_COND_GT] = "gt",
    [TCG_COND_LTU] = "ltu",
    [TCG_COND_GEU] = "geu",
    [TCG_COND_LEU] = "leu",
    [TCG_COND_GTU] = "gtu"
};
1238 
/* Printable names for memory-op size/sign/endianness combinations,
   indexed by the MO_BSWAP | MO_SSIZE bits of a TCGMemOp; used by
   tcg_dump_ops when logging qemu_ld/qemu_st ops. */
static const char * const ldst_name[] =
{
    [MO_UB]   = "ub",
    [MO_SB]   = "sb",
    [MO_LEUW] = "leuw",
    [MO_LESW] = "lesw",
    [MO_LEUL] = "leul",
    [MO_LESL] = "lesl",
    [MO_LEQ]  = "leq",
    [MO_BEUW] = "beuw",
    [MO_BESW] = "besw",
    [MO_BEUL] = "beul",
    [MO_BESL] = "besl",
    [MO_BEQ]  = "beq",
};
1254 
/* Printable prefixes for memory-op alignment requirements, indexed by
   the MO_AMASK bits shifted down by MO_ASHIFT.  The label for the
   default case is empty so ordinary accesses print without noise:
   with ALIGNED_ONLY the default is aligned, otherwise unaligned. */
static const char * const alignment_name[(MO_AMASK >> MO_ASHIFT) + 1] = {
#ifdef ALIGNED_ONLY
    [MO_UNALN >> MO_ASHIFT]    = "un+",
    [MO_ALIGN >> MO_ASHIFT]    = "",
#else
    [MO_UNALN >> MO_ASHIFT]    = "",
    [MO_ALIGN >> MO_ASHIFT]    = "al+",
#endif
    [MO_ALIGN_2 >> MO_ASHIFT]  = "al2+",
    [MO_ALIGN_4 >> MO_ASHIFT]  = "al4+",
    [MO_ALIGN_8 >> MO_ASHIFT]  = "al8+",
    [MO_ALIGN_16 >> MO_ASHIFT] = "al16+",
    [MO_ALIGN_32 >> MO_ASHIFT] = "al32+",
    [MO_ALIGN_64 >> MO_ASHIFT] = "al64+",
};
1270 
/*
 * Log the current TCG op stream to the qemu log, one op per line,
 * walking the doubly-linked op list from gen_op_buf[0].next.  Calls,
 * insn_start markers, conditions, memory ops and labels each get
 * specialised formatting; if liveness data is present (op->life),
 * sync/dead annotations are appended in a right-hand column.
 */
void tcg_dump_ops(TCGContext *s)
{
    char buf[128];
    TCGOp *op;
    int oi;

    for (oi = s->gen_op_buf[0].next; oi != 0; oi = op->next) {
        int i, k, nb_oargs, nb_iargs, nb_cargs;
        const TCGOpDef *def;
        const TCGArg *args;
        TCGOpcode c;
        int col = 0;            /* output column, for aligning life info */

        op = &s->gen_op_buf[oi];
        c = op->opc;
        def = &tcg_op_defs[c];
        args = &s->gen_opparam_buf[op->args];

        if (c == INDEX_op_insn_start) {
            /* Guest-instruction boundary: print the start words, with a
               blank line before every marker except the very first. */
            col += qemu_log("%s ----", oi != s->gen_op_buf[0].next ? "\n" : "");

            for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
                target_ulong a;
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
                /* Each start word is stored as two 32-bit halves.  */
                a = ((target_ulong)args[i * 2 + 1] << 32) | args[i * 2];
#else
                a = args[i];
#endif
                col += qemu_log(" " TARGET_FMT_lx, a);
            }
        } else if (c == INDEX_op_call) {
            /* variable number of arguments */
            nb_oargs = op->callo;
            nb_iargs = op->calli;
            nb_cargs = def->nb_cargs;

            /* function name, flags, out args */
            col += qemu_log(" %s %s,$0x%" TCG_PRIlx ",$%d", def->name,
                            tcg_find_helper(s, args[nb_oargs + nb_iargs]),
                            args[nb_oargs + nb_iargs + 1], nb_oargs);
            for (i = 0; i < nb_oargs; i++) {
                col += qemu_log(",%s", tcg_get_arg_str_idx(s, buf, sizeof(buf),
                                                           args[i]));
            }
            for (i = 0; i < nb_iargs; i++) {
                TCGArg arg = args[nb_oargs + i];
                const char *t = "<dummy>";
                if (arg != TCG_CALL_DUMMY_ARG) {
                    t = tcg_get_arg_str_idx(s, buf, sizeof(buf), arg);
                }
                col += qemu_log(",%s", t);
            }
        } else {
            col += qemu_log(" %s ", def->name);

            nb_oargs = def->nb_oargs;
            nb_iargs = def->nb_iargs;
            nb_cargs = def->nb_cargs;

            /* k indexes args[]; outputs first, then inputs, then consts. */
            k = 0;
            for (i = 0; i < nb_oargs; i++) {
                if (k != 0) {
                    col += qemu_log(",");
                }
                col += qemu_log("%s", tcg_get_arg_str_idx(s, buf, sizeof(buf),
                                                          args[k++]));
            }
            for (i = 0; i < nb_iargs; i++) {
                if (k != 0) {
                    col += qemu_log(",");
                }
                col += qemu_log("%s", tcg_get_arg_str_idx(s, buf, sizeof(buf),
                                                          args[k++]));
            }
            /* First constant argument may deserve special decoding; i is
               set to the number of constants already consumed. */
            switch (c) {
            case INDEX_op_brcond_i32:
            case INDEX_op_setcond_i32:
            case INDEX_op_movcond_i32:
            case INDEX_op_brcond2_i32:
            case INDEX_op_setcond2_i32:
            case INDEX_op_brcond_i64:
            case INDEX_op_setcond_i64:
            case INDEX_op_movcond_i64:
                /* Print the condition symbolically when it is in range. */
                if (args[k] < ARRAY_SIZE(cond_name) && cond_name[args[k]]) {
                    col += qemu_log(",%s", cond_name[args[k++]]);
                } else {
                    col += qemu_log(",$0x%" TCG_PRIlx, args[k++]);
                }
                i = 1;
                break;
            case INDEX_op_qemu_ld_i32:
            case INDEX_op_qemu_st_i32:
            case INDEX_op_qemu_ld_i64:
            case INDEX_op_qemu_st_i64:
                {
                    /* Decode the memop index: alignment + size/endian
                       names when recognised, raw hex otherwise. */
                    TCGMemOpIdx oi = args[k++];
                    TCGMemOp op = get_memop(oi);
                    unsigned ix = get_mmuidx(oi);

                    if (op & ~(MO_AMASK | MO_BSWAP | MO_SSIZE)) {
                        col += qemu_log(",$0x%x,%u", op, ix);
                    } else {
                        const char *s_al, *s_op;
                        s_al = alignment_name[(op & MO_AMASK) >> MO_ASHIFT];
                        s_op = ldst_name[op & (MO_BSWAP | MO_SSIZE)];
                        col += qemu_log(",%s%s,%u", s_al, s_op, ix);
                    }
                    i = 1;
                }
                break;
            default:
                i = 0;
                break;
            }
            /* Branch targets are printed as $Ln labels.  */
            switch (c) {
            case INDEX_op_set_label:
            case INDEX_op_br:
            case INDEX_op_brcond_i32:
            case INDEX_op_brcond_i64:
            case INDEX_op_brcond2_i32:
                col += qemu_log("%s$L%d", k ? "," : "", arg_label(args[k])->id);
                i++, k++;
                break;
            default:
                break;
            }
            /* Remaining constant arguments in raw hex.  */
            for (; i < nb_cargs; i++, k++) {
                col += qemu_log("%s$0x%" TCG_PRIlx, k ? "," : "", args[k]);
            }
        }
        if (op->life) {
            unsigned life = op->life;

            /* Pad to a fixed column so life annotations line up.  */
            for (; col < 48; ++col) {
                putc(' ', qemu_logfile);
            }

            /* Low bits: outputs that must be synced to memory.  */
            if (life & (SYNC_ARG * 3)) {
                qemu_log("  sync:");
                for (i = 0; i < 2; ++i) {
                    if (life & (SYNC_ARG << i)) {
                        qemu_log(" %d", i);
                    }
                }
            }
            /* Remaining bits: arguments that die at this op.  */
            life /= DEAD_ARG;
            if (life) {
                qemu_log("  dead:");
                for (i = 0; life; ++i, life >>= 1) {
                    if (life & 1) {
                        qemu_log(" %d", i);
                    }
                }
            }
        }
        qemu_log("\n");
    }
}
1429 
1430 /* we give more priority to constraints with less registers */
1431 static int get_constraint_priority(const TCGOpDef *def, int k)
1432 {
1433     const TCGArgConstraint *arg_ct;
1434 
1435     int i, n;
1436     arg_ct = &def->args_ct[k];
1437     if (arg_ct->ct & TCG_CT_ALIAS) {
1438         /* an alias is equivalent to a single register */
1439         n = 1;
1440     } else {
1441         if (!(arg_ct->ct & TCG_CT_REG))
1442             return 0;
1443         n = 0;
1444         for(i = 0; i < TCG_TARGET_NB_REGS; i++) {
1445             if (tcg_regset_test_reg(arg_ct->u.regs, i))
1446                 n++;
1447         }
1448     }
1449     return TCG_TARGET_NB_REGS - n + 1;
1450 }
1451 
1452 /* sort from highest priority to lowest */
1453 static void sort_constraints(TCGOpDef *def, int start, int n)
1454 {
1455     int i, j, p1, p2, tmp;
1456 
1457     for(i = 0; i < n; i++)
1458         def->sorted_args[start + i] = start + i;
1459     if (n <= 1)
1460         return;
1461     for(i = 0; i < n - 1; i++) {
1462         for(j = i + 1; j < n; j++) {
1463             p1 = get_constraint_priority(def, def->sorted_args[start + i]);
1464             p2 = get_constraint_priority(def, def->sorted_args[start + j]);
1465             if (p1 < p2) {
1466                 tmp = def->sorted_args[start + i];
1467                 def->sorted_args[start + i] = def->sorted_args[start + j];
1468                 def->sorted_args[start + j] = tmp;
1469             }
1470         }
1471     }
1472 }
1473 
/*
 * Fill in the argument constraints of every TCG opcode from the
 * backend's TCGTargetOpDef tables: parse each constraint string into
 * args_ct[] (register sets, constants, aliases), then sort output and
 * input arguments by allocation priority.  Run once at startup.
 */
static void process_op_defs(TCGContext *s)
{
    TCGOpcode op;

    for (op = 0; op < NB_OPS; op++) {
        TCGOpDef *def = &tcg_op_defs[op];
        const TCGTargetOpDef *tdefs;
        TCGType type;
        int i, nb_args;

        if (def->flags & TCG_OPF_NOT_PRESENT) {
            continue;
        }

        nb_args = def->nb_iargs + def->nb_oargs;
        if (nb_args == 0) {
            continue;
        }

        tdefs = tcg_target_op_def(op);
        /* Missing TCGTargetOpDef entry. */
        tcg_debug_assert(tdefs != NULL);

        /* Constraint letters are interpreted at the op's natural width. */
        type = (def->flags & TCG_OPF_64BIT ? TCG_TYPE_I64 : TCG_TYPE_I32);
        for (i = 0; i < nb_args; i++) {
            const char *ct_str = tdefs->args_ct_str[i];
            /* Incomplete TCGTargetOpDef entry. */
            tcg_debug_assert(ct_str != NULL);

            def->args_ct[i].u.regs = 0;
            def->args_ct[i].ct = 0;
            while (*ct_str != '\0') {
                switch(*ct_str) {
                case '0' ... '9':
                    {
                        /* Digit N: this input aliases output argument N. */
                        int oarg = *ct_str - '0';
                        /* A digit must be the whole constraint string. */
                        tcg_debug_assert(ct_str == tdefs->args_ct_str[i]);
                        tcg_debug_assert(oarg < def->nb_oargs);
                        tcg_debug_assert(def->args_ct[oarg].ct & TCG_CT_REG);
                        /* TCG_CT_ALIAS is for the output arguments.
                           The input is tagged with TCG_CT_IALIAS. */
                        def->args_ct[i] = def->args_ct[oarg];
                        def->args_ct[oarg].ct |= TCG_CT_ALIAS;
                        def->args_ct[oarg].alias_index = i;
                        def->args_ct[i].ct |= TCG_CT_IALIAS;
                        def->args_ct[i].alias_index = oarg;
                    }
                    ct_str++;
                    break;
                case '&':
                    /* Output must not overlap any input register. */
                    def->args_ct[i].ct |= TCG_CT_NEWREG;
                    ct_str++;
                    break;
                case 'i':
                    /* Argument may be an immediate constant. */
                    def->args_ct[i].ct |= TCG_CT_CONST;
                    ct_str++;
                    break;
                default:
                    /* Backend-specific constraint letter. */
                    ct_str = target_parse_constraint(&def->args_ct[i],
                                                     ct_str, type);
                    /* Typo in TCGTargetOpDef constraint. */
                    tcg_debug_assert(ct_str != NULL);
                }
            }
        }

        /* TCGTargetOpDef entry with too much information? */
        tcg_debug_assert(i == TCG_MAX_OP_ARGS || tdefs->args_ct_str[i] == NULL);

        /* sort the constraints (XXX: this is just an heuristic) */
        sort_constraints(def, 0, def->nb_oargs);
        sort_constraints(def, def->nb_oargs, def->nb_iargs);
    }
}
1548 
1549 void tcg_op_remove(TCGContext *s, TCGOp *op)
1550 {
1551     int next = op->next;
1552     int prev = op->prev;
1553 
1554     /* We should never attempt to remove the list terminator.  */
1555     tcg_debug_assert(op != &s->gen_op_buf[0]);
1556 
1557     s->gen_op_buf[next].prev = prev;
1558     s->gen_op_buf[prev].next = next;
1559 
1560     memset(op, 0, sizeof(*op));
1561 
1562 #ifdef CONFIG_PROFILER
1563     s->del_op_count++;
1564 #endif
1565 }
1566 
1567 TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *old_op,
1568                             TCGOpcode opc, int nargs)
1569 {
1570     int oi = s->gen_next_op_idx;
1571     int pi = s->gen_next_parm_idx;
1572     int prev = old_op->prev;
1573     int next = old_op - s->gen_op_buf;
1574     TCGOp *new_op;
1575 
1576     tcg_debug_assert(oi < OPC_BUF_SIZE);
1577     tcg_debug_assert(pi + nargs <= OPPARAM_BUF_SIZE);
1578     s->gen_next_op_idx = oi + 1;
1579     s->gen_next_parm_idx = pi + nargs;
1580 
1581     new_op = &s->gen_op_buf[oi];
1582     *new_op = (TCGOp){
1583         .opc = opc,
1584         .args = pi,
1585         .prev = prev,
1586         .next = next
1587     };
1588     s->gen_op_buf[prev].next = oi;
1589     old_op->prev = oi;
1590 
1591     return new_op;
1592 }
1593 
1594 TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *old_op,
1595                            TCGOpcode opc, int nargs)
1596 {
1597     int oi = s->gen_next_op_idx;
1598     int pi = s->gen_next_parm_idx;
1599     int prev = old_op - s->gen_op_buf;
1600     int next = old_op->next;
1601     TCGOp *new_op;
1602 
1603     tcg_debug_assert(oi < OPC_BUF_SIZE);
1604     tcg_debug_assert(pi + nargs <= OPPARAM_BUF_SIZE);
1605     s->gen_next_op_idx = oi + 1;
1606     s->gen_next_parm_idx = pi + nargs;
1607 
1608     new_op = &s->gen_op_buf[oi];
1609     *new_op = (TCGOp){
1610         .opc = opc,
1611         .args = pi,
1612         .prev = prev,
1613         .next = next
1614     };
1615     s->gen_op_buf[next].prev = oi;
1616     old_op->next = oi;
1617 
1618     return new_op;
1619 }
1620 
/* Per-temp liveness state bits used by the liveness passes:
   TS_DEAD - the temp's value is not needed by any later op;
   TS_MEM  - the temp must (also) be present in its memory slot. */
#define TS_DEAD  1
#define TS_MEM   2

/* Query the per-op life mask: argument N dies at / must be synced at
   this op.  'arg_life' must be in scope at the point of use. */
#define IS_DEAD_ARG(n)   (arg_life & (DEAD_ARG << (n)))
#define NEED_SYNC_ARG(n) (arg_life & (SYNC_ARG << (n)))
1626 
1627 /* liveness analysis: end of function: all temps are dead, and globals
1628    should be in memory. */
1629 static inline void tcg_la_func_end(TCGContext *s, uint8_t *temp_state)
1630 {
1631     memset(temp_state, TS_DEAD | TS_MEM, s->nb_globals);
1632     memset(temp_state + s->nb_globals, TS_DEAD, s->nb_temps - s->nb_globals);
1633 }
1634 
1635 /* liveness analysis: end of basic block: all temps are dead, globals
1636    and local temps should be in memory. */
1637 static inline void tcg_la_bb_end(TCGContext *s, uint8_t *temp_state)
1638 {
1639     int i, n;
1640 
1641     tcg_la_func_end(s, temp_state);
1642     for (i = s->nb_globals, n = s->nb_temps; i < n; i++) {
1643         if (s->temps[i].temp_local) {
1644             temp_state[i] |= TS_MEM;
1645         }
1646     }
1647 }
1648 
1649 /* Liveness analysis : update the opc_arg_life array to tell if a
1650    given input arguments is dead. Instructions updating dead
1651    temporaries are removed. */
1652 static void liveness_pass_1(TCGContext *s, uint8_t *temp_state)
1653 {
1654     int nb_globals = s->nb_globals;
1655     int oi, oi_prev;
1656 
1657     tcg_la_func_end(s, temp_state);
1658 
1659     for (oi = s->gen_op_buf[0].prev; oi != 0; oi = oi_prev) {
1660         int i, nb_iargs, nb_oargs;
1661         TCGOpcode opc_new, opc_new2;
1662         bool have_opc_new2;
1663         TCGLifeData arg_life = 0;
1664         TCGArg arg;
1665 
1666         TCGOp * const op = &s->gen_op_buf[oi];
1667         TCGArg * const args = &s->gen_opparam_buf[op->args];
1668         TCGOpcode opc = op->opc;
1669         const TCGOpDef *def = &tcg_op_defs[opc];
1670 
1671         oi_prev = op->prev;
1672 
1673         switch (opc) {
1674         case INDEX_op_call:
1675             {
1676                 int call_flags;
1677 
1678                 nb_oargs = op->callo;
1679                 nb_iargs = op->calli;
1680                 call_flags = args[nb_oargs + nb_iargs + 1];
1681 
1682                 /* pure functions can be removed if their result is unused */
1683                 if (call_flags & TCG_CALL_NO_SIDE_EFFECTS) {
1684                     for (i = 0; i < nb_oargs; i++) {
1685                         arg = args[i];
1686                         if (temp_state[arg] != TS_DEAD) {
1687                             goto do_not_remove_call;
1688                         }
1689                     }
1690                     goto do_remove;
1691                 } else {
1692                 do_not_remove_call:
1693 
1694                     /* output args are dead */
1695                     for (i = 0; i < nb_oargs; i++) {
1696                         arg = args[i];
1697                         if (temp_state[arg] & TS_DEAD) {
1698                             arg_life |= DEAD_ARG << i;
1699                         }
1700                         if (temp_state[arg] & TS_MEM) {
1701                             arg_life |= SYNC_ARG << i;
1702                         }
1703                         temp_state[arg] = TS_DEAD;
1704                     }
1705 
1706                     if (!(call_flags & (TCG_CALL_NO_WRITE_GLOBALS |
1707                                         TCG_CALL_NO_READ_GLOBALS))) {
1708                         /* globals should go back to memory */
1709                         memset(temp_state, TS_DEAD | TS_MEM, nb_globals);
1710                     } else if (!(call_flags & TCG_CALL_NO_READ_GLOBALS)) {
1711                         /* globals should be synced to memory */
1712                         for (i = 0; i < nb_globals; i++) {
1713                             temp_state[i] |= TS_MEM;
1714                         }
1715                     }
1716 
1717                     /* record arguments that die in this helper */
1718                     for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
1719                         arg = args[i];
1720                         if (arg != TCG_CALL_DUMMY_ARG) {
1721                             if (temp_state[arg] & TS_DEAD) {
1722                                 arg_life |= DEAD_ARG << i;
1723                             }
1724                         }
1725                     }
1726                     /* input arguments are live for preceding opcodes */
1727                     for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
1728                         arg = args[i];
1729                         if (arg != TCG_CALL_DUMMY_ARG) {
1730                             temp_state[arg] &= ~TS_DEAD;
1731                         }
1732                     }
1733                 }
1734             }
1735             break;
1736         case INDEX_op_insn_start:
1737             break;
1738         case INDEX_op_discard:
1739             /* mark the temporary as dead */
1740             temp_state[args[0]] = TS_DEAD;
1741             break;
1742 
1743         case INDEX_op_add2_i32:
1744             opc_new = INDEX_op_add_i32;
1745             goto do_addsub2;
1746         case INDEX_op_sub2_i32:
1747             opc_new = INDEX_op_sub_i32;
1748             goto do_addsub2;
1749         case INDEX_op_add2_i64:
1750             opc_new = INDEX_op_add_i64;
1751             goto do_addsub2;
1752         case INDEX_op_sub2_i64:
1753             opc_new = INDEX_op_sub_i64;
1754         do_addsub2:
1755             nb_iargs = 4;
1756             nb_oargs = 2;
1757             /* Test if the high part of the operation is dead, but not
1758                the low part.  The result can be optimized to a simple
1759                add or sub.  This happens often for x86_64 guest when the
1760                cpu mode is set to 32 bit.  */
1761             if (temp_state[args[1]] == TS_DEAD) {
1762                 if (temp_state[args[0]] == TS_DEAD) {
1763                     goto do_remove;
1764                 }
1765                 /* Replace the opcode and adjust the args in place,
1766                    leaving 3 unused args at the end.  */
1767                 op->opc = opc = opc_new;
1768                 args[1] = args[2];
1769                 args[2] = args[4];
1770                 /* Fall through and mark the single-word operation live.  */
1771                 nb_iargs = 2;
1772                 nb_oargs = 1;
1773             }
1774             goto do_not_remove;
1775 
1776         case INDEX_op_mulu2_i32:
1777             opc_new = INDEX_op_mul_i32;
1778             opc_new2 = INDEX_op_muluh_i32;
1779             have_opc_new2 = TCG_TARGET_HAS_muluh_i32;
1780             goto do_mul2;
1781         case INDEX_op_muls2_i32:
1782             opc_new = INDEX_op_mul_i32;
1783             opc_new2 = INDEX_op_mulsh_i32;
1784             have_opc_new2 = TCG_TARGET_HAS_mulsh_i32;
1785             goto do_mul2;
1786         case INDEX_op_mulu2_i64:
1787             opc_new = INDEX_op_mul_i64;
1788             opc_new2 = INDEX_op_muluh_i64;
1789             have_opc_new2 = TCG_TARGET_HAS_muluh_i64;
1790             goto do_mul2;
1791         case INDEX_op_muls2_i64:
1792             opc_new = INDEX_op_mul_i64;
1793             opc_new2 = INDEX_op_mulsh_i64;
1794             have_opc_new2 = TCG_TARGET_HAS_mulsh_i64;
1795             goto do_mul2;
1796         do_mul2:
1797             nb_iargs = 2;
1798             nb_oargs = 2;
1799             if (temp_state[args[1]] == TS_DEAD) {
1800                 if (temp_state[args[0]] == TS_DEAD) {
1801                     /* Both parts of the operation are dead.  */
1802                     goto do_remove;
1803                 }
1804                 /* The high part of the operation is dead; generate the low. */
1805                 op->opc = opc = opc_new;
1806                 args[1] = args[2];
1807                 args[2] = args[3];
1808             } else if (temp_state[args[0]] == TS_DEAD && have_opc_new2) {
1809                 /* The low part of the operation is dead; generate the high. */
1810                 op->opc = opc = opc_new2;
1811                 args[0] = args[1];
1812                 args[1] = args[2];
1813                 args[2] = args[3];
1814             } else {
1815                 goto do_not_remove;
1816             }
1817             /* Mark the single-word operation live.  */
1818             nb_oargs = 1;
1819             goto do_not_remove;
1820 
1821         default:
1822             /* XXX: optimize by hardcoding common cases (e.g. triadic ops) */
1823             nb_iargs = def->nb_iargs;
1824             nb_oargs = def->nb_oargs;
1825 
1826             /* Test if the operation can be removed because all
1827                its outputs are dead. We assume that nb_oargs == 0
1828                implies side effects */
1829             if (!(def->flags & TCG_OPF_SIDE_EFFECTS) && nb_oargs != 0) {
1830                 for (i = 0; i < nb_oargs; i++) {
1831                     if (temp_state[args[i]] != TS_DEAD) {
1832                         goto do_not_remove;
1833                     }
1834                 }
1835             do_remove:
1836                 tcg_op_remove(s, op);
1837             } else {
1838             do_not_remove:
1839                 /* output args are dead */
1840                 for (i = 0; i < nb_oargs; i++) {
1841                     arg = args[i];
1842                     if (temp_state[arg] & TS_DEAD) {
1843                         arg_life |= DEAD_ARG << i;
1844                     }
1845                     if (temp_state[arg] & TS_MEM) {
1846                         arg_life |= SYNC_ARG << i;
1847                     }
1848                     temp_state[arg] = TS_DEAD;
1849                 }
1850 
1851                 /* if end of basic block, update */
1852                 if (def->flags & TCG_OPF_BB_END) {
1853                     tcg_la_bb_end(s, temp_state);
1854                 } else if (def->flags & TCG_OPF_SIDE_EFFECTS) {
1855                     /* globals should be synced to memory */
1856                     for (i = 0; i < nb_globals; i++) {
1857                         temp_state[i] |= TS_MEM;
1858                     }
1859                 }
1860 
1861                 /* record arguments that die in this opcode */
1862                 for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
1863                     arg = args[i];
1864                     if (temp_state[arg] & TS_DEAD) {
1865                         arg_life |= DEAD_ARG << i;
1866                     }
1867                 }
1868                 /* input arguments are live for preceding opcodes */
1869                 for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
1870                     temp_state[args[i]] &= ~TS_DEAD;
1871                 }
1872             }
1873             break;
1874         }
1875         op->life = arg_life;
1876     }
1877 }
1878 
/* Liveness analysis: Convert indirect regs to direct temporaries.
   Walks the op list forward; each indirect global gets a shadow
   "direct" temp, references are rewritten to the shadow, and explicit
   ld/st ops are inserted around uses/last-writes.  Returns true if any
   opcode argument was rewritten.  */
static bool liveness_pass_2(TCGContext *s, uint8_t *temp_state)
{
    int nb_globals = s->nb_globals;
    int16_t *dir_temps;
    int i, oi, oi_next;
    bool changes = false;

    /* dir_temps[i] is the temp index of the shadow for global i,
       or 0 when global i is not indirect.  */
    dir_temps = tcg_malloc(nb_globals * sizeof(int16_t));
    memset(dir_temps, 0, nb_globals * sizeof(int16_t));

    /* Create a temporary for each indirect global.  */
    for (i = 0; i < nb_globals; ++i) {
        TCGTemp *its = &s->temps[i];
        if (its->indirect_reg) {
            TCGTemp *dts = tcg_temp_alloc(s);
            dts->type = its->type;
            dts->base_type = its->base_type;
            dir_temps[i] = temp_idx(s, dts);
        }
    }

    /* All shadow temps start out not loaded (TS_DEAD).  */
    memset(temp_state, TS_DEAD, nb_globals);

    for (oi = s->gen_op_buf[0].next; oi != 0; oi = oi_next) {
        TCGOp *op = &s->gen_op_buf[oi];
        TCGArg *args = &s->gen_opparam_buf[op->args];
        TCGOpcode opc = op->opc;
        const TCGOpDef *def = &tcg_op_defs[opc];
        TCGLifeData arg_life = op->life;
        int nb_iargs, nb_oargs, call_flags;
        TCGArg arg, dir;

        /* Fetch next now, since we may insert ops after the current one.  */
        oi_next = op->next;

        if (opc == INDEX_op_call) {
            nb_oargs = op->callo;
            nb_iargs = op->calli;
            call_flags = args[nb_oargs + nb_iargs + 1];
        } else {
            nb_iargs = def->nb_iargs;
            nb_oargs = def->nb_oargs;

            /* Set flags similar to how calls require.  */
            if (def->flags & TCG_OPF_BB_END) {
                /* Like writing globals: save_globals */
                call_flags = 0;
            } else if (def->flags & TCG_OPF_SIDE_EFFECTS) {
                /* Like reading globals: sync_globals */
                call_flags = TCG_CALL_NO_WRITE_GLOBALS;
            } else {
                /* No effect on globals.  */
                call_flags = (TCG_CALL_NO_READ_GLOBALS |
                              TCG_CALL_NO_WRITE_GLOBALS);
            }
        }

        /* Make sure that input arguments are available.  */
        for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
            arg = args[i];
            /* Note this unsigned test catches TCG_CALL_ARG_DUMMY too.  */
            if (arg < nb_globals) {
                dir = dir_temps[arg];
                if (dir != 0 && temp_state[arg] == TS_DEAD) {
                    /* Shadow not yet loaded: insert a load from the
                       global's canonical slot before this op.  */
                    TCGTemp *its = &s->temps[arg];
                    TCGOpcode lopc = (its->type == TCG_TYPE_I32
                                      ? INDEX_op_ld_i32
                                      : INDEX_op_ld_i64);
                    TCGOp *lop = tcg_op_insert_before(s, op, lopc, 3);
                    TCGArg *largs = &s->gen_opparam_buf[lop->args];

                    largs[0] = dir;
                    largs[1] = temp_idx(s, its->mem_base);
                    largs[2] = its->mem_offset;

                    /* Loaded, but synced with memory.  */
                    temp_state[arg] = TS_MEM;
                }
            }
        }

        /* Perform input replacement, and mark inputs that became dead.
           No action is required except keeping temp_state up to date
           so that we reload when needed.  */
        for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
            arg = args[i];
            if (arg < nb_globals) {
                dir = dir_temps[arg];
                if (dir != 0) {
                    args[i] = dir;
                    changes = true;
                    if (IS_DEAD_ARG(i)) {
                        temp_state[arg] = TS_DEAD;
                    }
                }
            }
        }

        /* Liveness analysis should ensure that the following are
           all correct, for call sites and basic block end points.  */
        if (call_flags & TCG_CALL_NO_READ_GLOBALS) {
            /* Nothing to do */
        } else if (call_flags & TCG_CALL_NO_WRITE_GLOBALS) {
            for (i = 0; i < nb_globals; ++i) {
                /* Liveness should see that globals are synced back,
                   that is, either TS_DEAD or TS_MEM.  */
                tcg_debug_assert(dir_temps[i] == 0
                                 || temp_state[i] != 0);
            }
        } else {
            for (i = 0; i < nb_globals; ++i) {
                /* Liveness should see that globals are saved back,
                   that is, TS_DEAD, waiting to be reloaded.  */
                tcg_debug_assert(dir_temps[i] == 0
                                 || temp_state[i] == TS_DEAD);
            }
        }

        /* Outputs become available.  */
        for (i = 0; i < nb_oargs; i++) {
            arg = args[i];
            if (arg >= nb_globals) {
                continue;
            }
            dir = dir_temps[arg];
            if (dir == 0) {
                continue;
            }
            args[i] = dir;
            changes = true;

            /* The output is now live and modified.  */
            temp_state[arg] = 0;

            /* Sync outputs upon their last write.  */
            if (NEED_SYNC_ARG(i)) {
                TCGTemp *its = &s->temps[arg];
                TCGOpcode sopc = (its->type == TCG_TYPE_I32
                                  ? INDEX_op_st_i32
                                  : INDEX_op_st_i64);
                TCGOp *sop = tcg_op_insert_after(s, op, sopc, 3);
                TCGArg *sargs = &s->gen_opparam_buf[sop->args];

                sargs[0] = dir;
                sargs[1] = temp_idx(s, its->mem_base);
                sargs[2] = its->mem_offset;

                temp_state[arg] = TS_MEM;
            }
            /* Drop outputs that are dead.  */
            if (IS_DEAD_ARG(i)) {
                temp_state[arg] = TS_DEAD;
            }
        }
    }

    return changes;
}
2037 
2038 #ifdef CONFIG_DEBUG_TCG
2039 static void dump_regs(TCGContext *s)
2040 {
2041     TCGTemp *ts;
2042     int i;
2043     char buf[64];
2044 
2045     for(i = 0; i < s->nb_temps; i++) {
2046         ts = &s->temps[i];
2047         printf("  %10s: ", tcg_get_arg_str_idx(s, buf, sizeof(buf), i));
2048         switch(ts->val_type) {
2049         case TEMP_VAL_REG:
2050             printf("%s", tcg_target_reg_names[ts->reg]);
2051             break;
2052         case TEMP_VAL_MEM:
2053             printf("%d(%s)", (int)ts->mem_offset,
2054                    tcg_target_reg_names[ts->mem_base->reg]);
2055             break;
2056         case TEMP_VAL_CONST:
2057             printf("$0x%" TCG_PRIlx, ts->val);
2058             break;
2059         case TEMP_VAL_DEAD:
2060             printf("D");
2061             break;
2062         default:
2063             printf("???");
2064             break;
2065         }
2066         printf("\n");
2067     }
2068 
2069     for(i = 0; i < TCG_TARGET_NB_REGS; i++) {
2070         if (s->reg_to_temp[i] != NULL) {
2071             printf("%s: %s\n",
2072                    tcg_target_reg_names[i],
2073                    tcg_get_arg_str_ptr(s, buf, sizeof(buf), s->reg_to_temp[i]));
2074         }
2075     }
2076 }
2077 
/* Consistency check on the register allocator state: every
   reg_to_temp[] entry must point at a temp that claims that register,
   and every (non-fixed) register-resident temp must be recorded in
   reg_to_temp[].  On any mismatch, dump the state and abort.  */
static void check_regs(TCGContext *s)
{
    int reg;
    int k;
    TCGTemp *ts;
    char buf[64];

    for (reg = 0; reg < TCG_TARGET_NB_REGS; reg++) {
        ts = s->reg_to_temp[reg];
        if (ts != NULL) {
            if (ts->val_type != TEMP_VAL_REG || ts->reg != reg) {
                printf("Inconsistency for register %s:\n",
                       tcg_target_reg_names[reg]);
                /* Jump into the loop body below to share the
                   dump-and-abort tail.  */
                goto fail;
            }
        }
    }
    for (k = 0; k < s->nb_temps; k++) {
        ts = &s->temps[k];
        if (ts->val_type == TEMP_VAL_REG && !ts->fixed_reg
            && s->reg_to_temp[ts->reg] != ts) {
            printf("Inconsistency for temp %s:\n",
                   tcg_get_arg_str_ptr(s, buf, sizeof(buf), ts));
        fail:
            printf("reg state:\n");
            dump_regs(s);
            tcg_abort();
        }
    }
}
2108 #endif
2109 
/* Reserve a slot in the TCG stack frame for temporary 'temp' and
   record it as the temp's canonical memory location.  Aborts if the
   frame is exhausted.  */
static void temp_allocate_frame(TCGContext *s, int temp)
{
    TCGTemp *ts;
    ts = &s->temps[temp];
#if !(defined(__sparc__) && TCG_TARGET_REG_BITS == 64)
    /* Sparc64 stack is accessed with offset of 2047 */
    /* Round the current offset up to natural slot alignment.  */
    s->current_frame_offset = (s->current_frame_offset +
                               (tcg_target_long)sizeof(tcg_target_long) - 1) &
        ~(sizeof(tcg_target_long) - 1);
#endif
    /* No room left in the preallocated frame.  */
    if (s->current_frame_offset + (tcg_target_long)sizeof(tcg_target_long) >
        s->frame_end) {
        tcg_abort();
    }
    ts->mem_offset = s->current_frame_offset;
    ts->mem_base = s->frame_temp;
    ts->mem_allocated = 1;
    s->current_frame_offset += sizeof(tcg_target_long);
}
2129 
2130 static void temp_load(TCGContext *, TCGTemp *, TCGRegSet, TCGRegSet);
2131 
2132 /* Mark a temporary as free or dead.  If 'free_or_dead' is negative,
2133    mark it free; otherwise mark it dead.  */
2134 static void temp_free_or_dead(TCGContext *s, TCGTemp *ts, int free_or_dead)
2135 {
2136     if (ts->fixed_reg) {
2137         return;
2138     }
2139     if (ts->val_type == TEMP_VAL_REG) {
2140         s->reg_to_temp[ts->reg] = NULL;
2141     }
2142     ts->val_type = (free_or_dead < 0
2143                     || ts->temp_local
2144                     || temp_idx(s, ts) < s->nb_globals
2145                     ? TEMP_VAL_MEM : TEMP_VAL_DEAD);
2146 }
2147 
/* Mark a temporary as dead.  */
static inline void temp_dead(TCGContext *s, TCGTemp *ts)
{
    /* A positive argument selects the "dead" case of temp_free_or_dead.  */
    temp_free_or_dead(s, ts, 1);
}
2153 
/* Sync a temporary to memory. 'allocated_regs' is used in case a temporary
   registers needs to be allocated to store a constant.  If 'free_or_dead'
   is non-zero, subsequently release the temporary; if it is positive, the
   temp is dead; if it is negative, the temp is free.  */
static void temp_sync(TCGContext *s, TCGTemp *ts,
                      TCGRegSet allocated_regs, int free_or_dead)
{
    /* Fixed-register temps are never spilled to memory.  */
    if (ts->fixed_reg) {
        return;
    }
    if (!ts->mem_coherent) {
        /* Lazily allocate a frame slot on first spill.  */
        if (!ts->mem_allocated) {
            temp_allocate_frame(s, temp_idx(s, ts));
        }
        switch (ts->val_type) {
        case TEMP_VAL_CONST:
            /* If we're going to free the temp immediately, then we won't
               require it later in a register, so attempt to store the
               constant to memory directly.  */
            if (free_or_dead
                && tcg_out_sti(s, ts->type, ts->val,
                               ts->mem_base->reg, ts->mem_offset)) {
                break;
            }
            /* Direct store not possible: materialize the constant in a
               register, then fall through to the register store.  */
            temp_load(s, ts, tcg_target_available_regs[ts->type],
                      allocated_regs);
            /* fallthrough */

        case TEMP_VAL_REG:
            tcg_out_st(s, ts->type, ts->reg,
                       ts->mem_base->reg, ts->mem_offset);
            break;

        case TEMP_VAL_MEM:
            break;

        case TEMP_VAL_DEAD:
        default:
            /* Syncing a dead temp is a logic error.  */
            tcg_abort();
        }
        ts->mem_coherent = 1;
    }
    if (free_or_dead) {
        temp_free_or_dead(s, ts, free_or_dead);
    }
}
2200 
2201 /* free register 'reg' by spilling the corresponding temporary if necessary */
2202 static void tcg_reg_free(TCGContext *s, TCGReg reg, TCGRegSet allocated_regs)
2203 {
2204     TCGTemp *ts = s->reg_to_temp[reg];
2205     if (ts != NULL) {
2206         temp_sync(s, ts, allocated_regs, -1);
2207     }
2208 }
2209 
2210 /* Allocate a register belonging to reg1 & ~reg2 */
2211 static TCGReg tcg_reg_alloc(TCGContext *s, TCGRegSet desired_regs,
2212                             TCGRegSet allocated_regs, bool rev)
2213 {
2214     int i, n = ARRAY_SIZE(tcg_target_reg_alloc_order);
2215     const int *order;
2216     TCGReg reg;
2217     TCGRegSet reg_ct;
2218 
2219     reg_ct = desired_regs & ~allocated_regs;
2220     order = rev ? indirect_reg_alloc_order : tcg_target_reg_alloc_order;
2221 
2222     /* first try free registers */
2223     for(i = 0; i < n; i++) {
2224         reg = order[i];
2225         if (tcg_regset_test_reg(reg_ct, reg) && s->reg_to_temp[reg] == NULL)
2226             return reg;
2227     }
2228 
2229     /* XXX: do better spill choice */
2230     for(i = 0; i < n; i++) {
2231         reg = order[i];
2232         if (tcg_regset_test_reg(reg_ct, reg)) {
2233             tcg_reg_free(s, reg, allocated_regs);
2234             return reg;
2235         }
2236     }
2237 
2238     tcg_abort();
2239 }
2240 
2241 /* Make sure the temporary is in a register.  If needed, allocate the register
2242    from DESIRED while avoiding ALLOCATED.  */
2243 static void temp_load(TCGContext *s, TCGTemp *ts, TCGRegSet desired_regs,
2244                       TCGRegSet allocated_regs)
2245 {
2246     TCGReg reg;
2247 
2248     switch (ts->val_type) {
2249     case TEMP_VAL_REG:
2250         return;
2251     case TEMP_VAL_CONST:
2252         reg = tcg_reg_alloc(s, desired_regs, allocated_regs, ts->indirect_base);
2253         tcg_out_movi(s, ts->type, reg, ts->val);
2254         ts->mem_coherent = 0;
2255         break;
2256     case TEMP_VAL_MEM:
2257         reg = tcg_reg_alloc(s, desired_regs, allocated_regs, ts->indirect_base);
2258         tcg_out_ld(s, ts->type, reg, ts->mem_base->reg, ts->mem_offset);
2259         ts->mem_coherent = 1;
2260         break;
2261     case TEMP_VAL_DEAD:
2262     default:
2263         tcg_abort();
2264     }
2265     ts->reg = reg;
2266     ts->val_type = TEMP_VAL_REG;
2267     s->reg_to_temp[reg] = ts;
2268 }
2269 
/* Save a temporary to memory. 'allocated_regs' is used in case a
   temporary registers needs to be allocated to store a constant.  */
static void temp_save(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs)
{
    /* NOTE(review): 'allocated_regs' is unused in this assert-only
       body; the header comment above predates it.  */
    /* The liveness analysis already ensures that globals are back
       in memory. Keep an tcg_debug_assert for safety. */
    tcg_debug_assert(ts->val_type == TEMP_VAL_MEM || ts->fixed_reg);
}
2278 
2279 /* save globals to their canonical location and assume they can be
2280    modified be the following code. 'allocated_regs' is used in case a
2281    temporary registers needs to be allocated to store a constant. */
2282 static void save_globals(TCGContext *s, TCGRegSet allocated_regs)
2283 {
2284     int i;
2285 
2286     for (i = 0; i < s->nb_globals; i++) {
2287         temp_save(s, &s->temps[i], allocated_regs);
2288     }
2289 }
2290 
2291 /* sync globals to their canonical location and assume they can be
2292    read by the following code. 'allocated_regs' is used in case a
2293    temporary registers needs to be allocated to store a constant. */
2294 static void sync_globals(TCGContext *s, TCGRegSet allocated_regs)
2295 {
2296     int i;
2297 
2298     for (i = 0; i < s->nb_globals; i++) {
2299         TCGTemp *ts = &s->temps[i];
2300         tcg_debug_assert(ts->val_type != TEMP_VAL_REG
2301                          || ts->fixed_reg
2302                          || ts->mem_coherent);
2303     }
2304 }
2305 
2306 /* at the end of a basic block, we assume all temporaries are dead and
2307    all globals are stored at their canonical location. */
2308 static void tcg_reg_alloc_bb_end(TCGContext *s, TCGRegSet allocated_regs)
2309 {
2310     int i;
2311 
2312     for (i = s->nb_globals; i < s->nb_temps; i++) {
2313         TCGTemp *ts = &s->temps[i];
2314         if (ts->temp_local) {
2315             temp_save(s, ts, allocated_regs);
2316         } else {
2317             /* The liveness analysis already ensures that temps are dead.
2318                Keep an tcg_debug_assert for safety. */
2319             tcg_debug_assert(ts->val_type == TEMP_VAL_DEAD);
2320         }
2321     }
2322 
2323     save_globals(s, allocated_regs);
2324 }
2325 
/* Assign constant VAL to output temp OTS.  The constant is recorded in
   the temp and only materialized in code when OTS lives in a fixed
   register or must be synced to memory.  */
static void tcg_reg_alloc_do_movi(TCGContext *s, TCGTemp *ots,
                                  tcg_target_ulong val, TCGLifeData arg_life)
{
    if (ots->fixed_reg) {
        /* For fixed registers, we do not do any constant propagation.  */
        tcg_out_movi(s, ots->type, ots->reg, val);
        return;
    }

    /* The movi is not explicitly generated here.  */
    if (ots->val_type == TEMP_VAL_REG) {
        s->reg_to_temp[ots->reg] = NULL;
    }
    ots->val_type = TEMP_VAL_CONST;
    ots->val = val;
    /* The recorded constant is not (yet) in the backing slot.  */
    ots->mem_coherent = 0;
    if (NEED_SYNC_ARG(0)) {
        temp_sync(s, ots, s->reserved_regs, IS_DEAD_ARG(0));
    } else if (IS_DEAD_ARG(0)) {
        temp_dead(s, ots);
    }
}
2348 
2349 static void tcg_reg_alloc_movi(TCGContext *s, const TCGArg *args,
2350                                TCGLifeData arg_life)
2351 {
2352     TCGTemp *ots = &s->temps[args[0]];
2353     tcg_target_ulong val = args[1];
2354 
2355     tcg_reg_alloc_do_movi(s, ots, val, arg_life);
2356 }
2357 
/* Register allocation for a mov opcode: propagates constants, stores
   straight to memory when the output is dead-but-synced, steals the
   source register when the source dies, or emits a reg-to-reg move.  */
static void tcg_reg_alloc_mov(TCGContext *s, const TCGOpDef *def,
                              const TCGArg *args, TCGLifeData arg_life)
{
    TCGRegSet allocated_regs;
    TCGTemp *ts, *ots;
    TCGType otype, itype;

    allocated_regs = s->reserved_regs;
    ots = &s->temps[args[0]];   /* output temp */
    ts = &s->temps[args[1]];    /* input temp */

    /* Note that otype != itype for no-op truncation.  */
    otype = ots->type;
    itype = ts->type;

    if (ts->val_type == TEMP_VAL_CONST) {
        /* propagate constant or generate sti */
        tcg_target_ulong val = ts->val;
        if (IS_DEAD_ARG(1)) {
            temp_dead(s, ts);
        }
        tcg_reg_alloc_do_movi(s, ots, val, arg_life);
        return;
    }

    /* If the source value is in memory we're going to be forced
       to have it in a register in order to perform the copy.  Copy
       the SOURCE value into its own register first, that way we
       don't have to reload SOURCE the next time it is used. */
    if (ts->val_type == TEMP_VAL_MEM) {
        temp_load(s, ts, tcg_target_available_regs[itype], allocated_regs);
    }

    tcg_debug_assert(ts->val_type == TEMP_VAL_REG);
    if (IS_DEAD_ARG(0) && !ots->fixed_reg) {
        /* mov to a non-saved dead register makes no sense (even with
           liveness analysis disabled). */
        tcg_debug_assert(NEED_SYNC_ARG(0));
        /* Store the source straight into the output's memory slot,
           bypassing any register copy.  */
        if (!ots->mem_allocated) {
            temp_allocate_frame(s, args[0]);
        }
        tcg_out_st(s, otype, ts->reg, ots->mem_base->reg, ots->mem_offset);
        if (IS_DEAD_ARG(1)) {
            temp_dead(s, ts);
        }
        temp_dead(s, ots);
    } else {
        if (IS_DEAD_ARG(1) && !ts->fixed_reg && !ots->fixed_reg) {
            /* the mov can be suppressed */
            if (ots->val_type == TEMP_VAL_REG) {
                s->reg_to_temp[ots->reg] = NULL;
            }
            /* Take over the dying source's register for the output.  */
            ots->reg = ts->reg;
            temp_dead(s, ts);
        } else {
            if (ots->val_type != TEMP_VAL_REG) {
                /* When allocating a new register, make sure to not spill the
                   input one. */
                tcg_regset_set_reg(allocated_regs, ts->reg);
                ots->reg = tcg_reg_alloc(s, tcg_target_available_regs[otype],
                                         allocated_regs, ots->indirect_base);
            }
            tcg_out_mov(s, otype, ots->reg, ts->reg);
        }
        ots->val_type = TEMP_VAL_REG;
        ots->mem_coherent = 0;
        s->reg_to_temp[ots->reg] = ots;
        if (NEED_SYNC_ARG(0)) {
            temp_sync(s, ots, allocated_regs, 0);
        }
    }
}
2430 
/* Register allocation for a generic opcode: place inputs in registers
   (or accept matching constants), honor aliasing and clobber
   constraints, allocate outputs, emit the target instruction, and
   finally sync or kill outputs per the liveness data.  */
static void tcg_reg_alloc_op(TCGContext *s,
                             const TCGOpDef *def, TCGOpcode opc,
                             const TCGArg *args, TCGLifeData arg_life)
{
    TCGRegSet i_allocated_regs;
    TCGRegSet o_allocated_regs;
    int i, k, nb_iargs, nb_oargs;
    TCGReg reg;
    TCGArg arg;
    const TCGArgConstraint *arg_ct;
    TCGTemp *ts;
    TCGArg new_args[TCG_MAX_OP_ARGS];
    int const_args[TCG_MAX_OP_ARGS];

    nb_oargs = def->nb_oargs;
    nb_iargs = def->nb_iargs;

    /* copy constants */
    memcpy(new_args + nb_oargs + nb_iargs,
           args + nb_oargs + nb_iargs,
           sizeof(TCGArg) * def->nb_cargs);

    i_allocated_regs = s->reserved_regs;
    o_allocated_regs = s->reserved_regs;

    /* satisfy input constraints */
    for(k = 0; k < nb_iargs; k++) {
        /* Process inputs in constraint-sorted order.  */
        i = def->sorted_args[nb_oargs + k];
        arg = args[i];
        arg_ct = &def->args_ct[i];
        ts = &s->temps[arg];

        if (ts->val_type == TEMP_VAL_CONST
            && tcg_target_const_match(ts->val, ts->type, arg_ct)) {
            /* constant is OK for instruction */
            const_args[i] = 1;
            new_args[i] = ts->val;
            goto iarg_end;
        }

        temp_load(s, ts, arg_ct->u.regs, i_allocated_regs);

        if (arg_ct->ct & TCG_CT_IALIAS) {
            if (ts->fixed_reg) {
                /* if fixed register, we must allocate a new register
                   if the alias is not the same register */
                if (arg != args[arg_ct->alias_index])
                    goto allocate_in_reg;
            } else {
                /* if the input is aliased to an output and if it is
                   not dead after the instruction, we must allocate
                   a new register and move it */
                if (!IS_DEAD_ARG(i)) {
                    goto allocate_in_reg;
                }
                /* check if the current register has already been allocated
                   for another input aliased to an output */
                int k2, i2;
                for (k2 = 0 ; k2 < k ; k2++) {
                    i2 = def->sorted_args[nb_oargs + k2];
                    if ((def->args_ct[i2].ct & TCG_CT_IALIAS) &&
                        (new_args[i2] == ts->reg)) {
                        goto allocate_in_reg;
                    }
                }
            }
        }
        reg = ts->reg;
        if (tcg_regset_test_reg(arg_ct->u.regs, reg)) {
            /* nothing to do : the constraint is satisfied */
        } else {
        allocate_in_reg:
            /* allocate a new register matching the constraint
               and move the temporary register into it */
            reg = tcg_reg_alloc(s, arg_ct->u.regs, i_allocated_regs,
                                ts->indirect_base);
            tcg_out_mov(s, ts->type, reg, ts->reg);
        }
        new_args[i] = reg;
        const_args[i] = 0;
        tcg_regset_set_reg(i_allocated_regs, reg);
    iarg_end: ;
    }

    /* mark dead temporaries and free the associated registers */
    for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
        if (IS_DEAD_ARG(i)) {
            temp_dead(s, &s->temps[args[i]]);
        }
    }

    if (def->flags & TCG_OPF_BB_END) {
        /* Block-ending ops save everything; no outputs to allocate.  */
        tcg_reg_alloc_bb_end(s, i_allocated_regs);
    } else {
        if (def->flags & TCG_OPF_CALL_CLOBBER) {
            /* XXX: permit generic clobber register list ? */
            for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
                if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) {
                    tcg_reg_free(s, i, i_allocated_regs);
                }
            }
        }
        if (def->flags & TCG_OPF_SIDE_EFFECTS) {
            /* sync globals if the op has side effects and might trigger
               an exception. */
            sync_globals(s, i_allocated_regs);
        }

        /* satisfy the output constraints */
        for(k = 0; k < nb_oargs; k++) {
            i = def->sorted_args[k];
            arg = args[i];
            arg_ct = &def->args_ct[i];
            ts = &s->temps[arg];
            if ((arg_ct->ct & TCG_CT_ALIAS)
                && !const_args[arg_ct->alias_index]) {
                /* Output shares the register of its aliased input.  */
                reg = new_args[arg_ct->alias_index];
            } else if (arg_ct->ct & TCG_CT_NEWREG) {
                /* Output must not overlap any input register.  */
                reg = tcg_reg_alloc(s, arg_ct->u.regs,
                                    i_allocated_regs | o_allocated_regs,
                                    ts->indirect_base);
            } else {
                /* if fixed register, we try to use it */
                reg = ts->reg;
                if (ts->fixed_reg &&
                    tcg_regset_test_reg(arg_ct->u.regs, reg)) {
                    goto oarg_end;
                }
                reg = tcg_reg_alloc(s, arg_ct->u.regs, o_allocated_regs,
                                    ts->indirect_base);
            }
            tcg_regset_set_reg(o_allocated_regs, reg);
            /* if a fixed register is used, then a move will be done afterwards */
            if (!ts->fixed_reg) {
                if (ts->val_type == TEMP_VAL_REG) {
                    s->reg_to_temp[ts->reg] = NULL;
                }
                ts->val_type = TEMP_VAL_REG;
                ts->reg = reg;
                /* temp value is modified, so the value kept in memory is
                   potentially not the same */
                ts->mem_coherent = 0;
                s->reg_to_temp[reg] = ts;
            }
        oarg_end:
            new_args[i] = reg;
        }
    }

    /* emit instruction */
    tcg_out_op(s, opc, new_args, const_args);

    /* move the outputs in the correct register if needed */
    for(i = 0; i < nb_oargs; i++) {
        ts = &s->temps[args[i]];
        reg = new_args[i];
        if (ts->fixed_reg && ts->reg != reg) {
            tcg_out_mov(s, ts->type, ts->reg, reg);
        }
        if (NEED_SYNC_ARG(i)) {
            temp_sync(s, ts, o_allocated_regs, IS_DEAD_ARG(i));
        } else if (IS_DEAD_ARG(i)) {
            temp_dead(s, ts);
        }
    }
}
2597 
2598 #ifdef TCG_TARGET_STACK_GROWSUP
2599 #define STACK_DIR(x) (-(x))
2600 #else
2601 #define STACK_DIR(x) (x)
2602 #endif
2603 
/*
 * Allocate registers and emit host code for an INDEX_op_call.
 *
 * args[] holds the nb_oargs output temps first, then the nb_iargs input
 * temps, then the callee address, then the call flags word (see the
 * indexing of func_addr/flags below).  arg_life carries the per-argument
 * dead/sync bits tested via IS_DEAD_ARG()/NEED_SYNC_ARG().
 */
static void tcg_reg_alloc_call(TCGContext *s, int nb_oargs, int nb_iargs,
                               const TCGArg * const args, TCGLifeData arg_life)
{
    int flags, nb_regs, i;
    TCGReg reg;
    TCGArg arg;
    TCGTemp *ts;
    intptr_t stack_offset;
    size_t call_stack_size;
    tcg_insn_unit *func_addr;
    int allocate_args;
    TCGRegSet allocated_regs;

    /* Callee address and flags are appended after the in/out arguments.  */
    func_addr = (tcg_insn_unit *)(intptr_t)args[nb_oargs + nb_iargs];
    flags = args[nb_oargs + nb_iargs + 1];

    /* The first nb_regs inputs go in ABI registers, the rest on the stack.  */
    nb_regs = ARRAY_SIZE(tcg_target_call_iarg_regs);
    if (nb_regs > nb_iargs) {
        nb_regs = nb_iargs;
    }

    /* assign stack slots first */
    call_stack_size = (nb_iargs - nb_regs) * sizeof(tcg_target_long);
    /* Round the outgoing-argument area up to the target stack alignment.  */
    call_stack_size = (call_stack_size + TCG_TARGET_STACK_ALIGN - 1) &
        ~(TCG_TARGET_STACK_ALIGN - 1);
    allocate_args = (call_stack_size > TCG_STATIC_CALL_ARGS_SIZE);
    if (allocate_args) {
        /* XXX: if more than TCG_STATIC_CALL_ARGS_SIZE is needed,
           preallocate call stack */
        tcg_abort();
    }

    stack_offset = TCG_TARGET_CALL_STACK_OFFSET;
    for(i = nb_regs; i < nb_iargs; i++) {
        arg = args[nb_oargs + i];
#ifdef TCG_TARGET_STACK_GROWSUP
        /* Upward-growing stack: step the offset before the store.  */
        stack_offset -= sizeof(tcg_target_long);
#endif
        if (arg != TCG_CALL_DUMMY_ARG) {
            ts = &s->temps[arg];
            /* Ensure the value is in some register, then spill it into
               its outgoing stack slot.  */
            temp_load(s, ts, tcg_target_available_regs[ts->type],
                      s->reserved_regs);
            tcg_out_st(s, ts->type, ts->reg, TCG_REG_CALL_STACK, stack_offset);
        }
#ifndef TCG_TARGET_STACK_GROWSUP
        /* Downward-growing stack: step the offset after the store.  */
        stack_offset += sizeof(tcg_target_long);
#endif
    }

    /* assign input registers */
    allocated_regs = s->reserved_regs;
    for(i = 0; i < nb_regs; i++) {
        arg = args[nb_oargs + i];
        if (arg != TCG_CALL_DUMMY_ARG) {
            ts = &s->temps[arg];
            reg = tcg_target_call_iarg_regs[i];
            /* Evict whatever currently lives in the ABI argument register;
               registers already claimed for earlier arguments are protected
               by allocated_regs.  */
            tcg_reg_free(s, reg, allocated_regs);

            if (ts->val_type == TEMP_VAL_REG) {
                if (ts->reg != reg) {
                    tcg_out_mov(s, ts->type, reg, ts->reg);
                }
            } else {
                TCGRegSet arg_set = 0;

                /* Constrain the load to exactly the required register.  */
                tcg_regset_set_reg(arg_set, reg);
                temp_load(s, ts, arg_set, allocated_regs);
            }

            tcg_regset_set_reg(allocated_regs, reg);
        }
    }

    /* mark dead temporaries and free the associated registers */
    for(i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
        if (IS_DEAD_ARG(i)) {
            temp_dead(s, &s->temps[args[i]]);
        }
    }

    /* clobber call registers */
    for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
        if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) {
            tcg_reg_free(s, i, allocated_regs);
        }
    }

    /* Save globals if they might be written by the helper, sync them if
       they might be read. */
    if (flags & TCG_CALL_NO_READ_GLOBALS) {
        /* Nothing to do */
    } else if (flags & TCG_CALL_NO_WRITE_GLOBALS) {
        sync_globals(s, allocated_regs);
    } else {
        save_globals(s, allocated_regs);
    }

    tcg_out_call(s, func_addr);

    /* assign output registers and emit moves if needed */
    for(i = 0; i < nb_oargs; i++) {
        arg = args[i];
        ts = &s->temps[arg];
        reg = tcg_target_call_oarg_regs[i];
        tcg_debug_assert(s->reg_to_temp[reg] == NULL);

        if (ts->fixed_reg) {
            /* A fixed-register temp keeps its home register: copy the
               return value over to it.  */
            if (ts->reg != reg) {
                tcg_out_mov(s, ts->type, ts->reg, reg);
            }
        } else {
            /* Repoint the temp at the ABI return register.  */
            if (ts->val_type == TEMP_VAL_REG) {
                s->reg_to_temp[ts->reg] = NULL;
            }
            ts->val_type = TEMP_VAL_REG;
            ts->reg = reg;
            ts->mem_coherent = 0;   /* value is not (yet) backed by memory */
            s->reg_to_temp[reg] = ts;
            if (NEED_SYNC_ARG(i)) {
                temp_sync(s, ts, allocated_regs, IS_DEAD_ARG(i));
            } else if (IS_DEAD_ARG(i)) {
                temp_dead(s, ts);
            }
        }
    }
}
2730 
2731 #ifdef CONFIG_PROFILER
2732 
/* Profiler: per-opcode counters, bumped in tcg_gen_code for each op.  */
static int64_t tcg_table_op_count[NB_OPS];
2734 
2735 void tcg_dump_op_count(FILE *f, fprintf_function cpu_fprintf)
2736 {
2737     int i;
2738 
2739     for (i = 0; i < NB_OPS; i++) {
2740         cpu_fprintf(f, "%s %" PRId64 "\n", tcg_op_defs[i].name,
2741                     tcg_table_op_count[i]);
2742     }
2743 }
2744 #else
/* Stub used when CONFIG_PROFILER is not defined.  */
void tcg_dump_op_count(FILE *f, fprintf_function cpu_fprintf)
{
    cpu_fprintf(f, "[TCG profiler not compiled]\n");
}
2749 #endif
2750 
2751 
/*
 * Translate the accumulated TCG ops into host code at tb->tc_ptr.
 *
 * Returns the size in bytes of the generated code, or -1 when the code
 * buffer high-water mark was crossed or backend finalization failed;
 * the caller is then expected to retry with a fresh buffer.
 */
int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
{
    int i, oi, oi_next, num_insns;

#ifdef CONFIG_PROFILER
    {
        int n;

        /* gen_op_buf[0].prev is the index of the last op in the list.  */
        n = s->gen_op_buf[0].prev + 1;
        s->op_count += n;
        if (n > s->op_count_max) {
            s->op_count_max = n;
        }

        n = s->nb_temps;
        s->temp_count += n;
        if (n > s->temp_count_max) {
            s->temp_count_max = n;
        }
    }
#endif

#ifdef DEBUG_DISAS
    /* Dump the op stream before any optimization.  */
    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)
                 && qemu_log_in_addr_range(tb->pc))) {
        qemu_log_lock();
        qemu_log("OP:\n");
        tcg_dump_ops(s);
        qemu_log("\n");
        qemu_log_unlock();
    }
#endif

#ifdef CONFIG_PROFILER
    s->opt_time -= profile_getclock();
#endif

#ifdef USE_TCG_OPTIMIZATIONS
    tcg_optimize(s);
#endif

#ifdef CONFIG_PROFILER
    s->opt_time += profile_getclock();
    s->la_time -= profile_getclock();
#endif

    {
        /* Scratch per-temp state for the liveness passes.  */
        uint8_t *temp_state = tcg_malloc(s->nb_temps + s->nb_indirects);

        liveness_pass_1(s, temp_state);

        if (s->nb_indirects > 0) {
#ifdef DEBUG_DISAS
            if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_IND)
                         && qemu_log_in_addr_range(tb->pc))) {
                qemu_log_lock();
                qemu_log("OP before indirect lowering:\n");
                tcg_dump_ops(s);
                qemu_log("\n");
                qemu_log_unlock();
            }
#endif
            /* Replace indirect temps with direct temps.  */
            if (liveness_pass_2(s, temp_state)) {
                /* If changes were made, re-run liveness.  */
                liveness_pass_1(s, temp_state);
            }
        }
    }

#ifdef CONFIG_PROFILER
    s->la_time += profile_getclock();
#endif

#ifdef DEBUG_DISAS
    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_OPT)
                 && qemu_log_in_addr_range(tb->pc))) {
        qemu_log_lock();
        qemu_log("OP after optimization and liveness analysis:\n");
        tcg_dump_ops(s);
        qemu_log("\n");
        qemu_log_unlock();
    }
#endif

    tcg_reg_alloc_start(s);

    s->code_buf = tb->tc_ptr;
    s->code_ptr = tb->tc_ptr;

#ifdef TCG_TARGET_NEED_LDST_LABELS
    s->ldst_labels = NULL;
#endif
#ifdef TCG_TARGET_NEED_POOL_LABELS
    s->pool_labels = NULL;
#endif

    /* num_insns counts guest instructions, delimited by insn_start ops;
       -1 means we have not yet seen the first insn_start.  */
    num_insns = -1;
    for (oi = s->gen_op_buf[0].next; oi != 0; oi = oi_next) {
        TCGOp * const op = &s->gen_op_buf[oi];
        TCGArg * const args = &s->gen_opparam_buf[op->args];
        TCGOpcode opc = op->opc;
        const TCGOpDef *def = &tcg_op_defs[opc];
        TCGLifeData arg_life = op->life;

        /* Save the link now; the register allocators do not modify it.  */
        oi_next = op->next;
#ifdef CONFIG_PROFILER
        tcg_table_op_count[opc]++;
#endif

        switch (opc) {
        case INDEX_op_mov_i32:
        case INDEX_op_mov_i64:
            tcg_reg_alloc_mov(s, def, args, arg_life);
            break;
        case INDEX_op_movi_i32:
        case INDEX_op_movi_i64:
            tcg_reg_alloc_movi(s, args, arg_life);
            break;
        case INDEX_op_insn_start:
            /* Record where the previous guest insn's code ended, then
               stash the start words (e.g. guest PC) for this one.  */
            if (num_insns >= 0) {
                s->gen_insn_end_off[num_insns] = tcg_current_code_size(s);
            }
            num_insns++;
            for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
                target_ulong a;
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
                /* 64-bit target value split across two 32-bit host args.  */
                a = ((target_ulong)args[i * 2 + 1] << 32) | args[i * 2];
#else
                a = args[i];
#endif
                s->gen_insn_data[num_insns][i] = a;
            }
            break;
        case INDEX_op_discard:
            temp_dead(s, &s->temps[args[0]]);
            break;
        case INDEX_op_set_label:
            tcg_reg_alloc_bb_end(s, s->reserved_regs);
            tcg_out_label(s, arg_label(args[0]), s->code_ptr);
            break;
        case INDEX_op_call:
            tcg_reg_alloc_call(s, op->callo, op->calli, args, arg_life);
            break;
        default:
            /* Sanity check that we've not introduced any unhandled opcodes. */
            tcg_debug_assert(tcg_op_supported(opc));
            /* Note: in order to speed up the code, it would be much
               faster to have specialized register allocator functions for
               some common argument patterns */
            tcg_reg_alloc_op(s, def, opc, args, arg_life);
            break;
        }
#ifdef CONFIG_DEBUG_TCG
        check_regs(s);
#endif
        /* Test for (pending) buffer overflow.  The assumption is that any
           one operation beginning below the high water mark cannot overrun
           the buffer completely.  Thus we can test for overflow after
           generating code without having to check during generation.  */
        if (unlikely((void *)s->code_ptr > s->code_gen_highwater)) {
            return -1;
        }
    }
    /* Close out the last guest instruction.  */
    tcg_debug_assert(num_insns >= 0);
    s->gen_insn_end_off[num_insns] = tcg_current_code_size(s);

    /* Generate TB finalization at the end of block */
#ifdef TCG_TARGET_NEED_LDST_LABELS
    if (!tcg_out_ldst_finalize(s)) {
        return -1;
    }
#endif
#ifdef TCG_TARGET_NEED_POOL_LABELS
    if (!tcg_out_pool_finalize(s)) {
        return -1;
    }
#endif

    /* flush instruction cache */
    flush_icache_range((uintptr_t)s->code_buf, (uintptr_t)s->code_ptr);

    return tcg_current_code_size(s);
}
2936 
2937 #ifdef CONFIG_PROFILER
/* Dump accumulated JIT profiling statistics (CONFIG_PROFILER build).  */
void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf)
{
    TCGContext *s = &tcg_ctx;
    int64_t tb_count = s->tb_count;
    /* Guard against division by zero when no TBs were translated.  */
    int64_t tb_div_count = tb_count ? tb_count : 1;
    int64_t tot = s->interm_time + s->code_time;

    /* The 2.4 GHz figure is only a reference frequency for converting
       cycle counts into an approximate wall-clock time.  */
    cpu_fprintf(f, "JIT cycles          %" PRId64 " (%0.3f s at 2.4 GHz)\n",
                tot, tot / 2.4e9);
    cpu_fprintf(f, "translated TBs      %" PRId64 " (aborted=%" PRId64 " %0.1f%%)\n",
                tb_count, s->tb_count1 - tb_count,
                (double)(s->tb_count1 - s->tb_count)
                / (s->tb_count1 ? s->tb_count1 : 1) * 100.0);
    cpu_fprintf(f, "avg ops/TB          %0.1f max=%d\n",
                (double)s->op_count / tb_div_count, s->op_count_max);
    cpu_fprintf(f, "deleted ops/TB      %0.2f\n",
                (double)s->del_op_count / tb_div_count);
    cpu_fprintf(f, "avg temps/TB        %0.2f max=%d\n",
                (double)s->temp_count / tb_div_count, s->temp_count_max);
    cpu_fprintf(f, "avg host code/TB    %0.1f\n",
                (double)s->code_out_len / tb_div_count);
    cpu_fprintf(f, "avg search data/TB  %0.1f\n",
                (double)s->search_out_len / tb_div_count);

    cpu_fprintf(f, "cycles/op           %0.1f\n",
                s->op_count ? (double)tot / s->op_count : 0);
    cpu_fprintf(f, "cycles/in byte      %0.1f\n",
                s->code_in_len ? (double)tot / s->code_in_len : 0);
    cpu_fprintf(f, "cycles/out byte     %0.1f\n",
                s->code_out_len ? (double)tot / s->code_out_len : 0);
    cpu_fprintf(f, "cycles/search byte     %0.1f\n",
                s->search_out_len ? (double)tot / s->search_out_len : 0);
    /* Avoid dividing by zero in the percentage breakdowns below.  */
    if (tot == 0) {
        tot = 1;
    }
    cpu_fprintf(f, "  gen_interm time   %0.1f%%\n",
                (double)s->interm_time / tot * 100.0);
    cpu_fprintf(f, "  gen_code time     %0.1f%%\n",
                (double)s->code_time / tot * 100.0);
    cpu_fprintf(f, "optim./code time    %0.1f%%\n",
                (double)s->opt_time / (s->code_time ? s->code_time : 1)
                * 100.0);
    cpu_fprintf(f, "liveness/code time  %0.1f%%\n",
                (double)s->la_time / (s->code_time ? s->code_time : 1) * 100.0);
    cpu_fprintf(f, "cpu_restore count   %" PRId64 "\n",
                s->restore_count);
    cpu_fprintf(f, "  avg cycles        %0.1f\n",
                s->restore_count ? (double)s->restore_time / s->restore_count : 0);
}
2987 #else
/* Stub used when CONFIG_PROFILER is not defined.  */
void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf)
{
    cpu_fprintf(f, "[TCG profiler not compiled]\n");
}
2992 #endif
2993 
2994 #ifdef ELF_HOST_MACHINE
2995 /* In order to use this feature, the backend needs to do three things:
2996 
2997    (1) Define ELF_HOST_MACHINE to indicate both what value to
2998        put into the ELF image and to indicate support for the feature.
2999 
3000    (2) Define tcg_register_jit.  This should create a buffer containing
3001        the contents of a .debug_frame section that describes the post-
3002        prologue unwind info for the tcg machine.
3003 
3004    (3) Call tcg_register_jit_int, with the constructed .debug_frame.
3005 */
3006 
3007 /* Begin GDB interface.  THE FOLLOWING MUST MATCH GDB DOCS.  */
/* Values for jit_descriptor.action_flag; part of the fixed GDB JIT
   protocol, so the numeric values must not change.  */
typedef enum {
    JIT_NOACTION = 0,
    JIT_REGISTER_FN,
    JIT_UNREGISTER_FN
} jit_actions_t;
3013 
/* One registered JIT object: a doubly-linked list node whose
   symfile_addr/symfile_size describe an in-memory ELF image.
   Layout is dictated by the GDB JIT interface.  */
struct jit_code_entry {
    struct jit_code_entry *next_entry;
    struct jit_code_entry *prev_entry;
    const void *symfile_addr;
    uint64_t symfile_size;
};
3020 
/* Global descriptor GDB inspects when __jit_debug_register_code is hit:
   version, the pending action, the entry it applies to, and the list
   head.  Layout is dictated by the GDB JIT interface.  */
struct jit_descriptor {
    uint32_t version;
    uint32_t action_flag;
    struct jit_code_entry *relevant_entry;
    struct jit_code_entry *first_entry;
};
3027 
/* GDB places a breakpoint on this well-known symbol; calling it after
   updating __jit_debug_descriptor notifies the debugger.  The noinline
   attribute and the empty asm keep the call from being optimized away.  */
void __jit_debug_register_code(void) __attribute__((noinline));
void __jit_debug_register_code(void)
{
    asm("");
}
3033 
/* Must statically initialize the version, because GDB may check
   the version before we can set it.  Version 1 is the only one
   this interface speaks.  */
struct jit_descriptor __jit_debug_descriptor = { 1, 0, 0, 0 };
3037 
3038 /* End GDB interface.  */
3039 
/* Return the offset of STR within the ELF string table STRTAB.
   STR must be present; entry 0 is the mandatory empty string, so
   the scan starts just past it.  */
static int find_string(const char *strtab, const char *str)
{
    const char *p;

    for (p = strtab + 1; strcmp(p, str) != 0; p += strlen(p) + 1) {
        /* keep scanning */
    }
    return p - strtab;
}
3051 
3052 static void tcg_register_jit_int(void *buf_ptr, size_t buf_size,
3053                                  const void *debug_frame,
3054                                  size_t debug_frame_size)
3055 {
3056     struct __attribute__((packed)) DebugInfo {
3057         uint32_t  len;
3058         uint16_t  version;
3059         uint32_t  abbrev;
3060         uint8_t   ptr_size;
3061         uint8_t   cu_die;
3062         uint16_t  cu_lang;
3063         uintptr_t cu_low_pc;
3064         uintptr_t cu_high_pc;
3065         uint8_t   fn_die;
3066         char      fn_name[16];
3067         uintptr_t fn_low_pc;
3068         uintptr_t fn_high_pc;
3069         uint8_t   cu_eoc;
3070     };
3071 
3072     struct ElfImage {
3073         ElfW(Ehdr) ehdr;
3074         ElfW(Phdr) phdr;
3075         ElfW(Shdr) shdr[7];
3076         ElfW(Sym)  sym[2];
3077         struct DebugInfo di;
3078         uint8_t    da[24];
3079         char       str[80];
3080     };
3081 
3082     struct ElfImage *img;
3083 
3084     static const struct ElfImage img_template = {
3085         .ehdr = {
3086             .e_ident[EI_MAG0] = ELFMAG0,
3087             .e_ident[EI_MAG1] = ELFMAG1,
3088             .e_ident[EI_MAG2] = ELFMAG2,
3089             .e_ident[EI_MAG3] = ELFMAG3,
3090             .e_ident[EI_CLASS] = ELF_CLASS,
3091             .e_ident[EI_DATA] = ELF_DATA,
3092             .e_ident[EI_VERSION] = EV_CURRENT,
3093             .e_type = ET_EXEC,
3094             .e_machine = ELF_HOST_MACHINE,
3095             .e_version = EV_CURRENT,
3096             .e_phoff = offsetof(struct ElfImage, phdr),
3097             .e_shoff = offsetof(struct ElfImage, shdr),
3098             .e_ehsize = sizeof(ElfW(Shdr)),
3099             .e_phentsize = sizeof(ElfW(Phdr)),
3100             .e_phnum = 1,
3101             .e_shentsize = sizeof(ElfW(Shdr)),
3102             .e_shnum = ARRAY_SIZE(img->shdr),
3103             .e_shstrndx = ARRAY_SIZE(img->shdr) - 1,
3104 #ifdef ELF_HOST_FLAGS
3105             .e_flags = ELF_HOST_FLAGS,
3106 #endif
3107 #ifdef ELF_OSABI
3108             .e_ident[EI_OSABI] = ELF_OSABI,
3109 #endif
3110         },
3111         .phdr = {
3112             .p_type = PT_LOAD,
3113             .p_flags = PF_X,
3114         },
3115         .shdr = {
3116             [0] = { .sh_type = SHT_NULL },
3117             /* Trick: The contents of code_gen_buffer are not present in
3118                this fake ELF file; that got allocated elsewhere.  Therefore
3119                we mark .text as SHT_NOBITS (similar to .bss) so that readers
3120                will not look for contents.  We can record any address.  */
3121             [1] = { /* .text */
3122                 .sh_type = SHT_NOBITS,
3123                 .sh_flags = SHF_EXECINSTR | SHF_ALLOC,
3124             },
3125             [2] = { /* .debug_info */
3126                 .sh_type = SHT_PROGBITS,
3127                 .sh_offset = offsetof(struct ElfImage, di),
3128                 .sh_size = sizeof(struct DebugInfo),
3129             },
3130             [3] = { /* .debug_abbrev */
3131                 .sh_type = SHT_PROGBITS,
3132                 .sh_offset = offsetof(struct ElfImage, da),
3133                 .sh_size = sizeof(img->da),
3134             },
3135             [4] = { /* .debug_frame */
3136                 .sh_type = SHT_PROGBITS,
3137                 .sh_offset = sizeof(struct ElfImage),
3138             },
3139             [5] = { /* .symtab */
3140                 .sh_type = SHT_SYMTAB,
3141                 .sh_offset = offsetof(struct ElfImage, sym),
3142                 .sh_size = sizeof(img->sym),
3143                 .sh_info = 1,
3144                 .sh_link = ARRAY_SIZE(img->shdr) - 1,
3145                 .sh_entsize = sizeof(ElfW(Sym)),
3146             },
3147             [6] = { /* .strtab */
3148                 .sh_type = SHT_STRTAB,
3149                 .sh_offset = offsetof(struct ElfImage, str),
3150                 .sh_size = sizeof(img->str),
3151             }
3152         },
3153         .sym = {
3154             [1] = { /* code_gen_buffer */
3155                 .st_info = ELF_ST_INFO(STB_GLOBAL, STT_FUNC),
3156                 .st_shndx = 1,
3157             }
3158         },
3159         .di = {
3160             .len = sizeof(struct DebugInfo) - 4,
3161             .version = 2,
3162             .ptr_size = sizeof(void *),
3163             .cu_die = 1,
3164             .cu_lang = 0x8001,  /* DW_LANG_Mips_Assembler */
3165             .fn_die = 2,
3166             .fn_name = "code_gen_buffer"
3167         },
3168         .da = {
3169             1,          /* abbrev number (the cu) */
3170             0x11, 1,    /* DW_TAG_compile_unit, has children */
3171             0x13, 0x5,  /* DW_AT_language, DW_FORM_data2 */
3172             0x11, 0x1,  /* DW_AT_low_pc, DW_FORM_addr */
3173             0x12, 0x1,  /* DW_AT_high_pc, DW_FORM_addr */
3174             0, 0,       /* end of abbrev */
3175             2,          /* abbrev number (the fn) */
3176             0x2e, 0,    /* DW_TAG_subprogram, no children */
3177             0x3, 0x8,   /* DW_AT_name, DW_FORM_string */
3178             0x11, 0x1,  /* DW_AT_low_pc, DW_FORM_addr */
3179             0x12, 0x1,  /* DW_AT_high_pc, DW_FORM_addr */
3180             0, 0,       /* end of abbrev */
3181             0           /* no more abbrev */
3182         },
3183         .str = "\0" ".text\0" ".debug_info\0" ".debug_abbrev\0"
3184                ".debug_frame\0" ".symtab\0" ".strtab\0" "code_gen_buffer",
3185     };
3186 
3187     /* We only need a single jit entry; statically allocate it.  */
3188     static struct jit_code_entry one_entry;
3189 
3190     uintptr_t buf = (uintptr_t)buf_ptr;
3191     size_t img_size = sizeof(struct ElfImage) + debug_frame_size;
3192     DebugFrameHeader *dfh;
3193 
3194     img = g_malloc(img_size);
3195     *img = img_template;
3196 
3197     img->phdr.p_vaddr = buf;
3198     img->phdr.p_paddr = buf;
3199     img->phdr.p_memsz = buf_size;
3200 
3201     img->shdr[1].sh_name = find_string(img->str, ".text");
3202     img->shdr[1].sh_addr = buf;
3203     img->shdr[1].sh_size = buf_size;
3204 
3205     img->shdr[2].sh_name = find_string(img->str, ".debug_info");
3206     img->shdr[3].sh_name = find_string(img->str, ".debug_abbrev");
3207 
3208     img->shdr[4].sh_name = find_string(img->str, ".debug_frame");
3209     img->shdr[4].sh_size = debug_frame_size;
3210 
3211     img->shdr[5].sh_name = find_string(img->str, ".symtab");
3212     img->shdr[6].sh_name = find_string(img->str, ".strtab");
3213 
3214     img->sym[1].st_name = find_string(img->str, "code_gen_buffer");
3215     img->sym[1].st_value = buf;
3216     img->sym[1].st_size = buf_size;
3217 
3218     img->di.cu_low_pc = buf;
3219     img->di.cu_high_pc = buf + buf_size;
3220     img->di.fn_low_pc = buf;
3221     img->di.fn_high_pc = buf + buf_size;
3222 
3223     dfh = (DebugFrameHeader *)(img + 1);
3224     memcpy(dfh, debug_frame, debug_frame_size);
3225     dfh->fde.func_start = buf;
3226     dfh->fde.func_len = buf_size;
3227 
3228 #ifdef DEBUG_JIT
3229     /* Enable this block to be able to debug the ELF image file creation.
3230        One can use readelf, objdump, or other inspection utilities.  */
3231     {
3232         FILE *f = fopen("/tmp/qemu.jit", "w+b");
3233         if (f) {
3234             if (fwrite(img, img_size, 1, f) != img_size) {
3235                 /* Avoid stupid unused return value warning for fwrite.  */
3236             }
3237             fclose(f);
3238         }
3239     }
3240 #endif
3241 
3242     one_entry.symfile_addr = img;
3243     one_entry.symfile_size = img_size;
3244 
3245     __jit_debug_descriptor.action_flag = JIT_REGISTER_FN;
3246     __jit_debug_descriptor.relevant_entry = &one_entry;
3247     __jit_debug_descriptor.first_entry = &one_entry;
3248     __jit_debug_register_code();
3249 }
3250 #else
3251 /* No support for the feature.  Provide the entry point expected by exec.c,
3252    and implement the internal function we declared earlier.  */
3253 
static void tcg_register_jit_int(void *buf, size_t size,
                                 const void *debug_frame,
                                 size_t debug_frame_size)
{
    /* Nothing to do without ELF_HOST_MACHINE support.  */
}
3259 
void tcg_register_jit(void *buf, size_t buf_size)
{
    /* Nothing to do without ELF_HOST_MACHINE support.  */
}
3263 #endif /* ELF_HOST_MACHINE */
3264