xref: /openbmc/qemu/tcg/tcg.c (revision 9884abee)
1 /*
2  * Tiny Code Generator for QEMU
3  *
4  * Copyright (c) 2008 Fabrice Bellard
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to deal
8  * in the Software without restriction, including without limitation the rights
9  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10  * copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22  * THE SOFTWARE.
23  */
24 
25 /* define it to use liveness analysis (better code) */
26 #define USE_LIVENESS_ANALYSIS
27 #define USE_TCG_OPTIMIZATIONS
28 
29 #include "qemu/osdep.h"
30 
31 /* Define to dump the ELF file used to communicate with GDB.  */
32 #undef DEBUG_JIT
33 
34 #if !defined(CONFIG_DEBUG_TCG) && !defined(NDEBUG)
35 /* define it to suppress various consistency checks (faster) */
36 #define NDEBUG
37 #endif
38 
39 #include "qemu-common.h"
40 #include "qemu/host-utils.h"
41 #include "qemu/timer.h"
42 
43 /* Note: the long-term plan is to reduce the dependencies on the QEMU
44    CPU definitions.  Currently they are used for the qemu_ld/st
45    instructions.  */
46 #define NO_CPU_IO_DEFS
47 #include "cpu.h"
48 
49 #include "tcg-op.h"
50 
51 #if UINTPTR_MAX == UINT32_MAX
52 # define ELF_CLASS  ELFCLASS32
53 #else
54 # define ELF_CLASS  ELFCLASS64
55 #endif
56 #ifdef HOST_WORDS_BIGENDIAN
57 # define ELF_DATA   ELFDATA2MSB
58 #else
59 # define ELF_DATA   ELFDATA2LSB
60 #endif
61 
62 #include "elf.h"
63 
64 /* Forward declarations for functions declared in tcg-target.c and used here. */
65 static void tcg_target_init(TCGContext *s);
66 static void tcg_target_qemu_prologue(TCGContext *s);
67 static void patch_reloc(tcg_insn_unit *code_ptr, int type,
68                         intptr_t value, intptr_t addend);
69 
70 /* The CIE and FDE header definitions will be common to all hosts.  */
71 typedef struct {
72     uint32_t len __attribute__((aligned((sizeof(void *)))));
73     uint32_t id;
74     uint8_t version;
75     char augmentation[1];
76     uint8_t code_align;
77     uint8_t data_align;
78     uint8_t return_column;
79 } DebugFrameCIE;
80 
81 typedef struct QEMU_PACKED {
82     uint32_t len __attribute__((aligned((sizeof(void *)))));
83     uint32_t cie_offset;
84     uintptr_t func_start;
85     uintptr_t func_len;
86 } DebugFrameFDEHeader;
87 
88 typedef struct QEMU_PACKED {
89     DebugFrameCIE cie;
90     DebugFrameFDEHeader fde;
91 } DebugFrameHeader;
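/* Illustrative sketch: each tcg-target.c typically wraps DebugFrameHeader in
   its own DebugFrame struct that appends the CFA and register-save
   instructions, statically initialized along these lines (the field sizes,
   values and register number below are invented for the example):

       typedef struct {
           DebugFrameHeader h;
           uint8_t fde_def_cfa[4];
           uint8_t fde_reg_ofs[8];
       } DebugFrame;

       static const DebugFrame debug_frame = {
           .h.cie.len = sizeof(DebugFrameCIE) - 4,  (length after .len)
           .h.cie.id = -1,
           .h.cie.version = 1,
           .h.cie.code_align = 1,
           .h.cie.data_align = 0x78,                (sleb128 -8)
           .h.cie.return_column = 30,               (hypothetical link reg)
       };

   tcg_register_jit() then hands the finished frame, together with the
   generated code buffer, to the GDB JIT interface.  */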
92 
93 static void tcg_register_jit_int(void *buf, size_t size,
94                                  const void *debug_frame,
95                                  size_t debug_frame_size)
96     __attribute__((unused));
97 
98 /* Forward declarations for functions declared and used in tcg-target.c. */
99 static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str);
100 static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1,
101                        intptr_t arg2);
102 static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
103 static void tcg_out_movi(TCGContext *s, TCGType type,
104                          TCGReg ret, tcg_target_long arg);
105 static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
106                        const int *const_args);
107 static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1,
108                        intptr_t arg2);
109 static void tcg_out_call(TCGContext *s, tcg_insn_unit *target);
110 static int tcg_target_const_match(tcg_target_long val, TCGType type,
111                                   const TCGArgConstraint *arg_ct);
112 static void tcg_out_tb_init(TCGContext *s);
113 static void tcg_out_tb_finalize(TCGContext *s);
114 
115 
116 
117 static TCGRegSet tcg_target_available_regs[2];
118 static TCGRegSet tcg_target_call_clobber_regs;
119 
120 #if TCG_TARGET_INSN_UNIT_SIZE == 1
121 static __attribute__((unused)) inline void tcg_out8(TCGContext *s, uint8_t v)
122 {
123     *s->code_ptr++ = v;
124 }
125 
126 static __attribute__((unused)) inline void tcg_patch8(tcg_insn_unit *p,
127                                                       uint8_t v)
128 {
129     *p = v;
130 }
131 #endif
132 
133 #if TCG_TARGET_INSN_UNIT_SIZE <= 2
134 static __attribute__((unused)) inline void tcg_out16(TCGContext *s, uint16_t v)
135 {
136     if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
137         *s->code_ptr++ = v;
138     } else {
139         tcg_insn_unit *p = s->code_ptr;
140         memcpy(p, &v, sizeof(v));
141         s->code_ptr = p + (2 / TCG_TARGET_INSN_UNIT_SIZE);
142     }
143 }
144 
145 static __attribute__((unused)) inline void tcg_patch16(tcg_insn_unit *p,
146                                                        uint16_t v)
147 {
148     if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
149         *p = v;
150     } else {
151         memcpy(p, &v, sizeof(v));
152     }
153 }
154 #endif
155 
156 #if TCG_TARGET_INSN_UNIT_SIZE <= 4
157 static __attribute__((unused)) inline void tcg_out32(TCGContext *s, uint32_t v)
158 {
159     if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
160         *s->code_ptr++ = v;
161     } else {
162         tcg_insn_unit *p = s->code_ptr;
163         memcpy(p, &v, sizeof(v));
164         s->code_ptr = p + (4 / TCG_TARGET_INSN_UNIT_SIZE);
165     }
166 }
167 
168 static __attribute__((unused)) inline void tcg_patch32(tcg_insn_unit *p,
169                                                        uint32_t v)
170 {
171     if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
172         *p = v;
173     } else {
174         memcpy(p, &v, sizeof(v));
175     }
176 }
177 #endif
178 
179 #if TCG_TARGET_INSN_UNIT_SIZE <= 8
180 static __attribute__((unused)) inline void tcg_out64(TCGContext *s, uint64_t v)
181 {
182     if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
183         *s->code_ptr++ = v;
184     } else {
185         tcg_insn_unit *p = s->code_ptr;
186         memcpy(p, &v, sizeof(v));
187         s->code_ptr = p + (8 / TCG_TARGET_INSN_UNIT_SIZE);
188     }
189 }
190 
191 static __attribute__((unused)) inline void tcg_patch64(tcg_insn_unit *p,
192                                                        uint64_t v)
193 {
194     if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
195         *p = v;
196     } else {
197         memcpy(p, &v, sizeof(v));
198     }
199 }
200 #endif
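/* For example, a variable-width host such as i386 defines
   TCG_TARGET_INSN_UNIT_SIZE as 1, so tcg_out32() takes the memcpy branch and
   advances code_ptr by four byte-sized units, while a fixed-width 32-bit RISC
   host defines it as 4 and tcg_out32() is a single store of one insn unit.  */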
201 
202 /* label relocation processing */
203 
204 static void tcg_out_reloc(TCGContext *s, tcg_insn_unit *code_ptr, int type,
205                           TCGLabel *l, intptr_t addend)
206 {
207     TCGRelocation *r;
208 
209     if (l->has_value) {
210         /* FIXME: This may break relocations on RISC targets that
211            modify instruction fields in place.  The caller may not have
212            written the initial value.  */
213         patch_reloc(code_ptr, type, l->u.value, addend);
214     } else {
215         /* add a new relocation entry */
216         r = tcg_malloc(sizeof(TCGRelocation));
217         r->type = type;
218         r->ptr = code_ptr;
219         r->addend = addend;
220         r->next = l->u.first_reloc;
221         l->u.first_reloc = r;
222     }
223 }
224 
225 static void tcg_out_label(TCGContext *s, TCGLabel *l, tcg_insn_unit *ptr)
226 {
227     intptr_t value = (intptr_t)ptr;
228     TCGRelocation *r;
229 
230     assert(!l->has_value);
231 
232     for (r = l->u.first_reloc; r != NULL; r = r->next) {
233         patch_reloc(r->ptr, r->type, value, r->addend);
234     }
235 
236     l->has_value = 1;
237     l->u.value_ptr = ptr;
238 }
239 
240 TCGLabel *gen_new_label(void)
241 {
242     TCGContext *s = &tcg_ctx;
243     TCGLabel *l = tcg_malloc(sizeof(TCGLabel));
244 
245     *l = (TCGLabel){
246         .id = s->nb_labels++
247     };
248 
249     return l;
250 }
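/* Sketch of the other half of this mechanism: patch_reloc() is supplied by
   tcg-target.c and rewrites the already emitted instruction at code_ptr once
   the label value is known.  For a hypothetical host with a single 32-bit
   pc-relative relocation type it could look roughly like this; the relocation
   name and encoding are invented for the example:

       static void patch_reloc(tcg_insn_unit *code_ptr, int type,
                               intptr_t value, intptr_t addend)
       {
           switch (type) {
           case R_EXAMPLE_PCREL32:
               *code_ptr = (uint32_t)(value + addend - (intptr_t)code_ptr);
               break;
           default:
               tcg_abort();
           }
       }
 */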
251 
252 #include "tcg-target.c"
253 
254 /* pool based memory allocation */
255 void *tcg_malloc_internal(TCGContext *s, int size)
256 {
257     TCGPool *p;
258     int pool_size;
259 
260     if (size > TCG_POOL_CHUNK_SIZE) {
261         /* big malloc: insert a new pool (XXX: could optimize) */
262         p = g_malloc(sizeof(TCGPool) + size);
263         p->size = size;
264         p->next = s->pool_first_large;
265         s->pool_first_large = p;
266         return p->data;
267     } else {
268         p = s->pool_current;
269         if (!p) {
270             p = s->pool_first;
271             if (!p)
272                 goto new_pool;
273         } else {
274             if (!p->next) {
275             new_pool:
276                 pool_size = TCG_POOL_CHUNK_SIZE;
277                 p = g_malloc(sizeof(TCGPool) + pool_size);
278                 p->size = pool_size;
279                 p->next = NULL;
280                 if (s->pool_current)
281                     s->pool_current->next = p;
282                 else
283                     s->pool_first = p;
284             } else {
285                 p = p->next;
286             }
287         }
288     }
289     s->pool_current = p;
290     s->pool_cur = p->data + size;
291     s->pool_end = p->data + p->size;
292     return p->data;
293 }
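/* The fast path lives in tcg.h as an inline wrapper; roughly, it bumps
   pool_cur and only falls back to tcg_malloc_internal() when the current
   chunk is exhausted (sketch, not a verbatim copy):

       static inline void *tcg_malloc(int size)
       {
           TCGContext *s = &tcg_ctx;
           uint8_t *ptr, *ptr_end;

           size = (size + sizeof(long) - 1) & ~(sizeof(long) - 1);
           ptr = s->pool_cur;
           ptr_end = ptr + size;
           if (unlikely(ptr_end > s->pool_end)) {
               return tcg_malloc_internal(&tcg_ctx, size);
           }
           s->pool_cur = ptr_end;
           return ptr;
       }
 */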
294 
295 void tcg_pool_reset(TCGContext *s)
296 {
297     TCGPool *p, *t;
298     for (p = s->pool_first_large; p; p = t) {
299         t = p->next;
300         g_free(p);
301     }
302     s->pool_first_large = NULL;
303     s->pool_cur = s->pool_end = NULL;
304     s->pool_current = NULL;
305 }
306 
307 typedef struct TCGHelperInfo {
308     void *func;
309     const char *name;
310     unsigned flags;
311     unsigned sizemask;
312 } TCGHelperInfo;
313 
314 #include "exec/helper-proto.h"
315 
316 static const TCGHelperInfo all_helpers[] = {
317 #include "exec/helper-tcg.h"
318 };
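/* helper-tcg.h expands each DEF_HELPER_FLAGS_n() declaration into one
   initializer of this table, along the lines of (shape shown for
   illustration, not copied verbatim):

       { .func = HELPER(name), .name = "name", .flags = FLAGS,
         .sizemask = dh_sizemask(ret, 0) | dh_sizemask(t1, 1) | ... },

   so that tcg_context_init() below can look helpers up by function pointer.  */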
319 
320 void tcg_context_init(TCGContext *s)
321 {
322     int op, total_args, n, i;
323     TCGOpDef *def;
324     TCGArgConstraint *args_ct;
325     int *sorted_args;
326     GHashTable *helper_table;
327 
328     memset(s, 0, sizeof(*s));
329     s->nb_globals = 0;
330 
331     /* Count total number of arguments and allocate the corresponding
332        space */
333     total_args = 0;
334     for(op = 0; op < NB_OPS; op++) {
335         def = &tcg_op_defs[op];
336         n = def->nb_iargs + def->nb_oargs;
337         total_args += n;
338     }
339 
340     args_ct = g_malloc(sizeof(TCGArgConstraint) * total_args);
341     sorted_args = g_malloc(sizeof(int) * total_args);
342 
343     for(op = 0; op < NB_OPS; op++) {
344         def = &tcg_op_defs[op];
345         def->args_ct = args_ct;
346         def->sorted_args = sorted_args;
347         n = def->nb_iargs + def->nb_oargs;
348         sorted_args += n;
349         args_ct += n;
350     }
351 
352     /* Register helpers.  */
353     /* Use g_direct_hash/equal for direct pointer comparisons on func.  */
354     s->helpers = helper_table = g_hash_table_new(NULL, NULL);
355 
356     for (i = 0; i < ARRAY_SIZE(all_helpers); ++i) {
357         g_hash_table_insert(helper_table, (gpointer)all_helpers[i].func,
358                             (gpointer)&all_helpers[i]);
359     }
360 
361     tcg_target_init(s);
362 }
363 
364 void tcg_prologue_init(TCGContext *s)
365 {
366     size_t prologue_size, total_size;
367     void *buf0, *buf1;
368 
369     /* Put the prologue at the beginning of code_gen_buffer.  */
370     buf0 = s->code_gen_buffer;
371     s->code_ptr = buf0;
372     s->code_buf = buf0;
373     s->code_gen_prologue = buf0;
374 
375     /* Generate the prologue.  */
376     tcg_target_qemu_prologue(s);
377     buf1 = s->code_ptr;
378     flush_icache_range((uintptr_t)buf0, (uintptr_t)buf1);
379 
380     /* Deduct the prologue from the buffer.  */
381     prologue_size = tcg_current_code_size(s);
382     s->code_gen_ptr = buf1;
383     s->code_gen_buffer = buf1;
384     s->code_buf = buf1;
385     total_size = s->code_gen_buffer_size - prologue_size;
386     s->code_gen_buffer_size = total_size;
387 
388     /* Compute a high-water mark, at which we voluntarily flush the buffer
389        and start over.  The size here is arbitrary, significantly larger
390        than we expect the code generation for any one opcode to require.  */
391     /* ??? We currently have no good estimate for, or checks in,
392        tcg_out_tb_finalize.  If there are quite a lot of guest memory ops,
393        the number of out-of-line fragments could be quite high.  In the
394        short-term, increase the highwater buffer.  */
395     s->code_gen_highwater = s->code_gen_buffer + (total_size - 64*1024);
396 
397     tcg_register_jit(s->code_gen_buffer, total_size);
398 
399 #ifdef DEBUG_DISAS
400     if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
401         qemu_log("PROLOGUE: [size=%zu]\n", prologue_size);
402         log_disas(buf0, prologue_size);
403         qemu_log("\n");
404         qemu_log_flush();
405     }
406 #endif
407 }
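/* Sketch of how the high-water mark is consumed: while emitting each opcode,
   tcg_gen_code() performs roughly the following check, and the caller reacts
   to the failure by flushing code_gen_buffer and retranslating:

       if (unlikely((void *)s->code_ptr > s->code_gen_highwater)) {
           return -1;   // out of space, ask the caller to start over
       }
 */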
408 
409 void tcg_set_frame(TCGContext *s, int reg, intptr_t start, intptr_t size)
410 {
411     s->frame_start = start;
412     s->frame_end = start + size;
413     s->frame_reg = reg;
414 }
415 
416 void tcg_func_start(TCGContext *s)
417 {
418     tcg_pool_reset(s);
419     s->nb_temps = s->nb_globals;
420 
421     /* No temps have been previously allocated for size or locality.  */
422     memset(s->free_temps, 0, sizeof(s->free_temps));
423 
424     s->nb_labels = 0;
425     s->current_frame_offset = s->frame_start;
426 
427 #ifdef CONFIG_DEBUG_TCG
428     s->goto_tb_issue_mask = 0;
429 #endif
430 
431     s->gen_first_op_idx = 0;
432     s->gen_last_op_idx = -1;
433     s->gen_next_op_idx = 0;
434     s->gen_next_parm_idx = 0;
435 
436     s->be = tcg_malloc(sizeof(TCGBackendData));
437 }
438 
439 static inline void tcg_temp_alloc(TCGContext *s, int n)
440 {
441     if (n > TCG_MAX_TEMPS)
442         tcg_abort();
443 }
444 
445 static inline int tcg_global_reg_new_internal(TCGType type, int reg,
446                                               const char *name)
447 {
448     TCGContext *s = &tcg_ctx;
449     TCGTemp *ts;
450     int idx;
451 
452 #if TCG_TARGET_REG_BITS == 32
453     if (type != TCG_TYPE_I32)
454         tcg_abort();
455 #endif
456     if (tcg_regset_test_reg(s->reserved_regs, reg))
457         tcg_abort();
458     idx = s->nb_globals;
459     tcg_temp_alloc(s, s->nb_globals + 1);
460     ts = &s->temps[s->nb_globals];
461     ts->base_type = type;
462     ts->type = type;
463     ts->fixed_reg = 1;
464     ts->reg = reg;
465     ts->name = name;
466     s->nb_globals++;
467     tcg_regset_set_reg(s->reserved_regs, reg);
468     return idx;
469 }
470 
471 TCGv_i32 tcg_global_reg_new_i32(int reg, const char *name)
472 {
473     int idx;
474 
475     idx = tcg_global_reg_new_internal(TCG_TYPE_I32, reg, name);
476     return MAKE_TCGV_I32(idx);
477 }
478 
479 TCGv_i64 tcg_global_reg_new_i64(int reg, const char *name)
480 {
481     int idx;
482 
483     idx = tcg_global_reg_new_internal(TCG_TYPE_I64, reg, name);
484     return MAKE_TCGV_I64(idx);
485 }
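/* Typical front-end usage (illustrative; the actual call site lives in the
   target's translate.c and goes through the pointer-sized wrapper in tcg.h):

       cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

   This pins register TCG_AREG0 to the "env" global for the whole
   translation, so the register allocator never reuses it.  */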
486 
487 static inline int tcg_global_mem_new_internal(TCGType type, int reg,
488                                               intptr_t offset,
489                                               const char *name)
490 {
491     TCGContext *s = &tcg_ctx;
492     TCGTemp *ts;
493     int idx;
494 
495     idx = s->nb_globals;
496 #if TCG_TARGET_REG_BITS == 32
497     if (type == TCG_TYPE_I64) {
498         char buf[64];
499         tcg_temp_alloc(s, s->nb_globals + 2);
500         ts = &s->temps[s->nb_globals];
501         ts->base_type = type;
502         ts->type = TCG_TYPE_I32;
503         ts->fixed_reg = 0;
504         ts->mem_allocated = 1;
505         ts->mem_reg = reg;
506 #ifdef HOST_WORDS_BIGENDIAN
507         ts->mem_offset = offset + 4;
508 #else
509         ts->mem_offset = offset;
510 #endif
511         pstrcpy(buf, sizeof(buf), name);
512         pstrcat(buf, sizeof(buf), "_0");
513         ts->name = strdup(buf);
514         ts++;
515 
516         ts->base_type = type;
517         ts->type = TCG_TYPE_I32;
518         ts->fixed_reg = 0;
519         ts->mem_allocated = 1;
520         ts->mem_reg = reg;
521 #ifdef HOST_WORDS_BIGENDIAN
522         ts->mem_offset = offset;
523 #else
524         ts->mem_offset = offset + 4;
525 #endif
526         pstrcpy(buf, sizeof(buf), name);
527         pstrcat(buf, sizeof(buf), "_1");
528         ts->name = strdup(buf);
529 
530         s->nb_globals += 2;
531     } else
532 #endif
533     {
534         tcg_temp_alloc(s, s->nb_globals + 1);
535         ts = &s->temps[s->nb_globals];
536         ts->base_type = type;
537         ts->type = type;
538         ts->fixed_reg = 0;
539         ts->mem_allocated = 1;
540         ts->mem_reg = reg;
541         ts->mem_offset = offset;
542         ts->name = name;
543         s->nb_globals++;
544     }
545     return idx;
546 }
547 
548 TCGv_i32 tcg_global_mem_new_i32(int reg, intptr_t offset, const char *name)
549 {
550     int idx = tcg_global_mem_new_internal(TCG_TYPE_I32, reg, offset, name);
551     return MAKE_TCGV_I32(idx);
552 }
553 
554 TCGv_i64 tcg_global_mem_new_i64(int reg, intptr_t offset, const char *name)
555 {
556     int idx = tcg_global_mem_new_internal(TCG_TYPE_I64, reg, offset, name);
557     return MAKE_TCGV_I64(idx);
558 }
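/* Typical front-end usage (illustrative; CPUFooState and the field name are
   invented for the example):

       TCGv_i32 cpu_pc = tcg_global_mem_new_i32(TCG_AREG0,
                                                offsetof(CPUFooState, pc),
                                                "pc");

   On a 32-bit host a tcg_global_mem_new_i64() call instead produces two I32
   halves named "name_0"/"name_1", as handled above.  */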
559 
560 static inline int tcg_temp_new_internal(TCGType type, int temp_local)
561 {
562     TCGContext *s = &tcg_ctx;
563     TCGTemp *ts;
564     int idx, k;
565 
566     k = type + (temp_local ? TCG_TYPE_COUNT : 0);
567     idx = find_first_bit(s->free_temps[k].l, TCG_MAX_TEMPS);
568     if (idx < TCG_MAX_TEMPS) {
569         /* There is already an available temp with the right type.  */
570         clear_bit(idx, s->free_temps[k].l);
571 
572         ts = &s->temps[idx];
573         ts->temp_allocated = 1;
574         assert(ts->base_type == type);
575         assert(ts->temp_local == temp_local);
576     } else {
577         idx = s->nb_temps;
578 #if TCG_TARGET_REG_BITS == 32
579         if (type == TCG_TYPE_I64) {
580             tcg_temp_alloc(s, s->nb_temps + 2);
581             ts = &s->temps[s->nb_temps];
582             ts->base_type = type;
583             ts->type = TCG_TYPE_I32;
584             ts->temp_allocated = 1;
585             ts->temp_local = temp_local;
586             ts->name = NULL;
587             ts++;
588             ts->base_type = type;
589             ts->type = TCG_TYPE_I32;
590             ts->temp_allocated = 1;
591             ts->temp_local = temp_local;
592             ts->name = NULL;
593             s->nb_temps += 2;
594         } else
595 #endif
596         {
597             tcg_temp_alloc(s, s->nb_temps + 1);
598             ts = &s->temps[s->nb_temps];
599             ts->base_type = type;
600             ts->type = type;
601             ts->temp_allocated = 1;
602             ts->temp_local = temp_local;
603             ts->name = NULL;
604             s->nb_temps++;
605         }
606     }
607 
608 #if defined(CONFIG_DEBUG_TCG)
609     s->temps_in_use++;
610 #endif
611     return idx;
612 }
613 
614 TCGv_i32 tcg_temp_new_internal_i32(int temp_local)
615 {
616     int idx;
617 
618     idx = tcg_temp_new_internal(TCG_TYPE_I32, temp_local);
619     return MAKE_TCGV_I32(idx);
620 }
621 
622 TCGv_i64 tcg_temp_new_internal_i64(int temp_local)
623 {
624     int idx;
625 
626     idx = tcg_temp_new_internal(TCG_TYPE_I64, temp_local);
627     return MAKE_TCGV_I64(idx);
628 }
629 
630 static void tcg_temp_free_internal(int idx)
631 {
632     TCGContext *s = &tcg_ctx;
633     TCGTemp *ts;
634     int k;
635 
636 #if defined(CONFIG_DEBUG_TCG)
637     s->temps_in_use--;
638     if (s->temps_in_use < 0) {
639         fprintf(stderr, "More temporaries freed than allocated!\n");
640     }
641 #endif
642 
643     assert(idx >= s->nb_globals && idx < s->nb_temps);
644     ts = &s->temps[idx];
645     assert(ts->temp_allocated != 0);
646     ts->temp_allocated = 0;
647 
648     k = ts->base_type + (ts->temp_local ? TCG_TYPE_COUNT : 0);
649     set_bit(idx, s->free_temps[k].l);
650 }
651 
652 void tcg_temp_free_i32(TCGv_i32 arg)
653 {
654     tcg_temp_free_internal(GET_TCGV_I32(arg));
655 }
656 
657 void tcg_temp_free_i64(TCGv_i64 arg)
658 {
659     tcg_temp_free_internal(GET_TCGV_I64(arg));
660 }
661 
662 TCGv_i32 tcg_const_i32(int32_t val)
663 {
664     TCGv_i32 t0;
665     t0 = tcg_temp_new_i32();
666     tcg_gen_movi_i32(t0, val);
667     return t0;
668 }
669 
670 TCGv_i64 tcg_const_i64(int64_t val)
671 {
672     TCGv_i64 t0;
673     t0 = tcg_temp_new_i64();
674     tcg_gen_movi_i64(t0, val);
675     return t0;
676 }
677 
678 TCGv_i32 tcg_const_local_i32(int32_t val)
679 {
680     TCGv_i32 t0;
681     t0 = tcg_temp_local_new_i32();
682     tcg_gen_movi_i32(t0, val);
683     return t0;
684 }
685 
686 TCGv_i64 tcg_const_local_i64(int64_t val)
687 {
688     TCGv_i64 t0;
689     t0 = tcg_temp_local_new_i64();
690     tcg_gen_movi_i64(t0, val);
691     return t0;
692 }
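/* Temp lifecycle sketch (illustrative): a front end allocates a temp, uses
   it, and frees it so the index can be recycled through free_temps:

       TCGv_i32 t = tcg_temp_new_i32();
       tcg_gen_movi_i32(t, 0x1234);
       ...use t as an operand...
       tcg_temp_free_i32(t);

   tcg_const_*() above returns a fresh temp already holding the constant; the
   caller is still expected to release it with tcg_temp_free_*().  */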
693 
694 #if defined(CONFIG_DEBUG_TCG)
695 void tcg_clear_temp_count(void)
696 {
697     TCGContext *s = &tcg_ctx;
698     s->temps_in_use = 0;
699 }
700 
701 int tcg_check_temp_count(void)
702 {
703     TCGContext *s = &tcg_ctx;
704     if (s->temps_in_use) {
705         /* Clear the count so that we don't give another
706          * warning immediately next time around.
707          */
708         s->temps_in_use = 0;
709         return 1;
710     }
711     return 0;
712 }
713 #endif
714 
715 /* Note: we convert the 64-bit args to 32-bit and do some alignment
716    and endian swapping.  Maybe it would be better to do the alignment
717    and endian swapping in tcg_reg_alloc_call().  */
718 void tcg_gen_callN(TCGContext *s, void *func, TCGArg ret,
719                    int nargs, TCGArg *args)
720 {
721     int i, real_args, nb_rets, pi, pi_first;
722     unsigned sizemask, flags;
723     TCGHelperInfo *info;
724 
725     info = g_hash_table_lookup(s->helpers, (gpointer)func);
726     flags = info->flags;
727     sizemask = info->sizemask;
728 
729 #if defined(__sparc__) && !defined(__arch64__) \
730     && !defined(CONFIG_TCG_INTERPRETER)
731     /* We have 64-bit values in one register, but need to pass as two
732        separate parameters.  Split them.  */
733     int orig_sizemask = sizemask;
734     int orig_nargs = nargs;
735     TCGv_i64 retl, reth;
736 
737     TCGV_UNUSED_I64(retl);
738     TCGV_UNUSED_I64(reth);
739     if (sizemask != 0) {
740         TCGArg *split_args = __builtin_alloca(sizeof(TCGArg) * nargs * 2);
741         for (i = real_args = 0; i < nargs; ++i) {
742             int is_64bit = sizemask & (1 << (i+1)*2);
743             if (is_64bit) {
744                 TCGv_i64 orig = MAKE_TCGV_I64(args[i]);
745                 TCGv_i32 h = tcg_temp_new_i32();
746                 TCGv_i32 l = tcg_temp_new_i32();
747                 tcg_gen_extr_i64_i32(l, h, orig);
748                 split_args[real_args++] = GET_TCGV_I32(h);
749                 split_args[real_args++] = GET_TCGV_I32(l);
750             } else {
751                 split_args[real_args++] = args[i];
752             }
753         }
754         nargs = real_args;
755         args = split_args;
756         sizemask = 0;
757     }
758 #elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
759     for (i = 0; i < nargs; ++i) {
760         int is_64bit = sizemask & (1 << (i+1)*2);
761         int is_signed = sizemask & (2 << (i+1)*2);
762         if (!is_64bit) {
763             TCGv_i64 temp = tcg_temp_new_i64();
764             TCGv_i64 orig = MAKE_TCGV_I64(args[i]);
765             if (is_signed) {
766                 tcg_gen_ext32s_i64(temp, orig);
767             } else {
768                 tcg_gen_ext32u_i64(temp, orig);
769             }
770             args[i] = GET_TCGV_I64(temp);
771         }
772     }
773 #endif /* TCG_TARGET_EXTEND_ARGS */
774 
775     pi_first = pi = s->gen_next_parm_idx;
776     if (ret != TCG_CALL_DUMMY_ARG) {
777 #if defined(__sparc__) && !defined(__arch64__) \
778     && !defined(CONFIG_TCG_INTERPRETER)
779         if (orig_sizemask & 1) {
780             /* The 32-bit ABI is going to return the 64-bit value in
781                the %o0/%o1 register pair.  Prepare for this by using
782                two return temporaries, and reassemble below.  */
783             retl = tcg_temp_new_i64();
784             reth = tcg_temp_new_i64();
785             s->gen_opparam_buf[pi++] = GET_TCGV_I64(reth);
786             s->gen_opparam_buf[pi++] = GET_TCGV_I64(retl);
787             nb_rets = 2;
788         } else {
789             s->gen_opparam_buf[pi++] = ret;
790             nb_rets = 1;
791         }
792 #else
793         if (TCG_TARGET_REG_BITS < 64 && (sizemask & 1)) {
794 #ifdef HOST_WORDS_BIGENDIAN
795             s->gen_opparam_buf[pi++] = ret + 1;
796             s->gen_opparam_buf[pi++] = ret;
797 #else
798             s->gen_opparam_buf[pi++] = ret;
799             s->gen_opparam_buf[pi++] = ret + 1;
800 #endif
801             nb_rets = 2;
802         } else {
803             s->gen_opparam_buf[pi++] = ret;
804             nb_rets = 1;
805         }
806 #endif
807     } else {
808         nb_rets = 0;
809     }
810     real_args = 0;
811     for (i = 0; i < nargs; i++) {
812         int is_64bit = sizemask & (1 << (i+1)*2);
813         if (TCG_TARGET_REG_BITS < 64 && is_64bit) {
814 #ifdef TCG_TARGET_CALL_ALIGN_ARGS
815             /* some targets want aligned 64 bit args */
816             if (real_args & 1) {
817                 s->gen_opparam_buf[pi++] = TCG_CALL_DUMMY_ARG;
818                 real_args++;
819             }
820 #endif
821             /* If stack grows up, then we will be placing successive
822                arguments at lower addresses, which means we need to
823                reverse the order compared to how we would normally
824                treat either big or little-endian.  For those arguments
825                that will wind up in registers, this still works for
826                HPPA (the only current STACK_GROWSUP target) since the
827                argument registers are *also* allocated in decreasing
828                order.  If another such target is added, this logic may
829                have to get more complicated to differentiate between
830                stack arguments and register arguments.  */
831 #if defined(HOST_WORDS_BIGENDIAN) != defined(TCG_TARGET_STACK_GROWSUP)
832             s->gen_opparam_buf[pi++] = args[i] + 1;
833             s->gen_opparam_buf[pi++] = args[i];
834 #else
835             s->gen_opparam_buf[pi++] = args[i];
836             s->gen_opparam_buf[pi++] = args[i] + 1;
837 #endif
838             real_args += 2;
839             continue;
840         }
841 
842         s->gen_opparam_buf[pi++] = args[i];
843         real_args++;
844     }
845     s->gen_opparam_buf[pi++] = (uintptr_t)func;
846     s->gen_opparam_buf[pi++] = flags;
847 
848     i = s->gen_next_op_idx;
849     tcg_debug_assert(i < OPC_BUF_SIZE);
850     tcg_debug_assert(pi <= OPPARAM_BUF_SIZE);
851 
852     /* Set links for sequential allocation during translation.  */
853     s->gen_op_buf[i] = (TCGOp){
854         .opc = INDEX_op_call,
855         .callo = nb_rets,
856         .calli = real_args,
857         .args = pi_first,
858         .prev = i - 1,
859         .next = i + 1
860     };
861 
862     /* Make sure the calli field didn't overflow.  */
863     tcg_debug_assert(s->gen_op_buf[i].calli == real_args);
864 
865     s->gen_last_op_idx = i;
866     s->gen_next_op_idx = i + 1;
867     s->gen_next_parm_idx = pi;
868 
869 #if defined(__sparc__) && !defined(__arch64__) \
870     && !defined(CONFIG_TCG_INTERPRETER)
871     /* Free all of the parts we allocated above.  */
872     for (i = real_args = 0; i < orig_nargs; ++i) {
873         int is_64bit = orig_sizemask & (1 << (i+1)*2);
874         if (is_64bit) {
875             TCGv_i32 h = MAKE_TCGV_I32(args[real_args++]);
876             TCGv_i32 l = MAKE_TCGV_I32(args[real_args++]);
877             tcg_temp_free_i32(h);
878             tcg_temp_free_i32(l);
879         } else {
880             real_args++;
881         }
882     }
883     if (orig_sizemask & 1) {
884         /* The 32-bit ABI returned two 32-bit pieces.  Re-assemble them.
885            Note that describing these as TCGv_i64 eliminates an unnecessary
886            zero-extension that tcg_gen_concat_i32_i64 would create.  */
887         tcg_gen_concat32_i64(MAKE_TCGV_I64(ret), retl, reth);
888         tcg_temp_free_i64(retl);
889         tcg_temp_free_i64(reth);
890     }
891 #elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
892     for (i = 0; i < nargs; ++i) {
893         int is_64bit = sizemask & (1 << (i+1)*2);
894         if (!is_64bit) {
895             TCGv_i64 temp = MAKE_TCGV_I64(args[i]);
896             tcg_temp_free_i64(temp);
897         }
898     }
899 #endif /* TCG_TARGET_EXTEND_ARGS */
900 }
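/* sizemask layout, as decoded above (worked example, for reference): bit 0
   set means the return value is 64-bit and, by the same pattern, bit 1 that
   it is signed; for input argument i (0-based), bit (i+1)*2 marks it as
   64-bit and bit (i+1)*2 + 1 as signed.  So a hypothetical helper

       uint64_t helper_foo(uint32_t a, uint64_t b);

   has bit 0 (64-bit return) and bit 4 (arg 1 is 64-bit) set,
   i.e. sizemask == 0x11.  */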
901 
902 static void tcg_reg_alloc_start(TCGContext *s)
903 {
904     int i;
905     TCGTemp *ts;
906     for(i = 0; i < s->nb_globals; i++) {
907         ts = &s->temps[i];
908         if (ts->fixed_reg) {
909             ts->val_type = TEMP_VAL_REG;
910         } else {
911             ts->val_type = TEMP_VAL_MEM;
912         }
913     }
914     for(i = s->nb_globals; i < s->nb_temps; i++) {
915         ts = &s->temps[i];
916         if (ts->temp_local) {
917             ts->val_type = TEMP_VAL_MEM;
918         } else {
919             ts->val_type = TEMP_VAL_DEAD;
920         }
921         ts->mem_allocated = 0;
922         ts->fixed_reg = 0;
923     }
924     for(i = 0; i < TCG_TARGET_NB_REGS; i++) {
925         s->reg_to_temp[i] = -1;
926     }
927 }
928 
929 static char *tcg_get_arg_str_idx(TCGContext *s, char *buf, int buf_size,
930                                  int idx)
931 {
932     TCGTemp *ts;
933 
934     assert(idx >= 0 && idx < s->nb_temps);
935     ts = &s->temps[idx];
936     if (idx < s->nb_globals) {
937         pstrcpy(buf, buf_size, ts->name);
938     } else {
939         if (ts->temp_local)
940             snprintf(buf, buf_size, "loc%d", idx - s->nb_globals);
941         else
942             snprintf(buf, buf_size, "tmp%d", idx - s->nb_globals);
943     }
944     return buf;
945 }
946 
947 char *tcg_get_arg_str_i32(TCGContext *s, char *buf, int buf_size, TCGv_i32 arg)
948 {
949     return tcg_get_arg_str_idx(s, buf, buf_size, GET_TCGV_I32(arg));
950 }
951 
952 char *tcg_get_arg_str_i64(TCGContext *s, char *buf, int buf_size, TCGv_i64 arg)
953 {
954     return tcg_get_arg_str_idx(s, buf, buf_size, GET_TCGV_I64(arg));
955 }
956 
957 /* Find helper name.  */
958 static inline const char *tcg_find_helper(TCGContext *s, uintptr_t val)
959 {
960     const char *ret = NULL;
961     if (s->helpers) {
962         TCGHelperInfo *info = g_hash_table_lookup(s->helpers, (gpointer)val);
963         if (info) {
964             ret = info->name;
965         }
966     }
967     return ret;
968 }
969 
970 static const char * const cond_name[] =
971 {
972     [TCG_COND_NEVER] = "never",
973     [TCG_COND_ALWAYS] = "always",
974     [TCG_COND_EQ] = "eq",
975     [TCG_COND_NE] = "ne",
976     [TCG_COND_LT] = "lt",
977     [TCG_COND_GE] = "ge",
978     [TCG_COND_LE] = "le",
979     [TCG_COND_GT] = "gt",
980     [TCG_COND_LTU] = "ltu",
981     [TCG_COND_GEU] = "geu",
982     [TCG_COND_LEU] = "leu",
983     [TCG_COND_GTU] = "gtu"
984 };
985 
986 static const char * const ldst_name[] =
987 {
988     [MO_UB]   = "ub",
989     [MO_SB]   = "sb",
990     [MO_LEUW] = "leuw",
991     [MO_LESW] = "lesw",
992     [MO_LEUL] = "leul",
993     [MO_LESL] = "lesl",
994     [MO_LEQ]  = "leq",
995     [MO_BEUW] = "beuw",
996     [MO_BESW] = "besw",
997     [MO_BEUL] = "beul",
998     [MO_BESL] = "besl",
999     [MO_BEQ]  = "beq",
1000 };
1001 
1002 void tcg_dump_ops(TCGContext *s)
1003 {
1004     char buf[128];
1005     TCGOp *op;
1006     int oi;
1007 
1008     for (oi = s->gen_first_op_idx; oi >= 0; oi = op->next) {
1009         int i, k, nb_oargs, nb_iargs, nb_cargs;
1010         const TCGOpDef *def;
1011         const TCGArg *args;
1012         TCGOpcode c;
1013 
1014         op = &s->gen_op_buf[oi];
1015         c = op->opc;
1016         def = &tcg_op_defs[c];
1017         args = &s->gen_opparam_buf[op->args];
1018 
1019         if (c == INDEX_op_insn_start) {
1020             qemu_log("%s ----", oi != s->gen_first_op_idx ? "\n" : "");
1021 
1022             for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
1023                 target_ulong a;
1024 #if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
1025                 a = ((target_ulong)args[i * 2 + 1] << 32) | args[i * 2];
1026 #else
1027                 a = args[i];
1028 #endif
1029                 qemu_log(" " TARGET_FMT_lx, a);
1030             }
1031         } else if (c == INDEX_op_call) {
1032             /* variable number of arguments */
1033             nb_oargs = op->callo;
1034             nb_iargs = op->calli;
1035             nb_cargs = def->nb_cargs;
1036 
1037             /* function name, flags, out args */
1038             qemu_log(" %s %s,$0x%" TCG_PRIlx ",$%d", def->name,
1039                      tcg_find_helper(s, args[nb_oargs + nb_iargs]),
1040                      args[nb_oargs + nb_iargs + 1], nb_oargs);
1041             for (i = 0; i < nb_oargs; i++) {
1042                 qemu_log(",%s", tcg_get_arg_str_idx(s, buf, sizeof(buf),
1043                                                    args[i]));
1044             }
1045             for (i = 0; i < nb_iargs; i++) {
1046                 TCGArg arg = args[nb_oargs + i];
1047                 const char *t = "<dummy>";
1048                 if (arg != TCG_CALL_DUMMY_ARG) {
1049                     t = tcg_get_arg_str_idx(s, buf, sizeof(buf), arg);
1050                 }
1051                 qemu_log(",%s", t);
1052             }
1053         } else {
1054             qemu_log(" %s ", def->name);
1055 
1056             nb_oargs = def->nb_oargs;
1057             nb_iargs = def->nb_iargs;
1058             nb_cargs = def->nb_cargs;
1059 
1060             k = 0;
1061             for (i = 0; i < nb_oargs; i++) {
1062                 if (k != 0) {
1063                     qemu_log(",");
1064                 }
1065                 qemu_log("%s", tcg_get_arg_str_idx(s, buf, sizeof(buf),
1066                                                    args[k++]));
1067             }
1068             for (i = 0; i < nb_iargs; i++) {
1069                 if (k != 0) {
1070                     qemu_log(",");
1071                 }
1072                 qemu_log("%s", tcg_get_arg_str_idx(s, buf, sizeof(buf),
1073                                                    args[k++]));
1074             }
1075             switch (c) {
1076             case INDEX_op_brcond_i32:
1077             case INDEX_op_setcond_i32:
1078             case INDEX_op_movcond_i32:
1079             case INDEX_op_brcond2_i32:
1080             case INDEX_op_setcond2_i32:
1081             case INDEX_op_brcond_i64:
1082             case INDEX_op_setcond_i64:
1083             case INDEX_op_movcond_i64:
1084                 if (args[k] < ARRAY_SIZE(cond_name) && cond_name[args[k]]) {
1085                     qemu_log(",%s", cond_name[args[k++]]);
1086                 } else {
1087                     qemu_log(",$0x%" TCG_PRIlx, args[k++]);
1088                 }
1089                 i = 1;
1090                 break;
1091             case INDEX_op_qemu_ld_i32:
1092             case INDEX_op_qemu_st_i32:
1093             case INDEX_op_qemu_ld_i64:
1094             case INDEX_op_qemu_st_i64:
1095                 {
1096                     TCGMemOpIdx oi = args[k++];
1097                     TCGMemOp op = get_memop(oi);
1098                     unsigned ix = get_mmuidx(oi);
1099 
1100                     if (op & ~(MO_AMASK | MO_BSWAP | MO_SSIZE)) {
1101                         qemu_log(",$0x%x,%u", op, ix);
1102                     } else {
1103                         const char *s_al = "", *s_op;
1104                         if (op & MO_AMASK) {
1105                             if ((op & MO_AMASK) == MO_ALIGN) {
1106                                 s_al = "al+";
1107                             } else {
1108                                 s_al = "un+";
1109                             }
1110                         }
1111                         s_op = ldst_name[op & (MO_BSWAP | MO_SSIZE)];
1112                         qemu_log(",%s%s,%u", s_al, s_op, ix);
1113                     }
1114                     i = 1;
1115                 }
1116                 break;
1117             default:
1118                 i = 0;
1119                 break;
1120             }
1121             switch (c) {
1122             case INDEX_op_set_label:
1123             case INDEX_op_br:
1124             case INDEX_op_brcond_i32:
1125             case INDEX_op_brcond_i64:
1126             case INDEX_op_brcond2_i32:
1127                 qemu_log("%s$L%d", k ? "," : "", arg_label(args[k])->id);
1128                 i++, k++;
1129                 break;
1130             default:
1131                 break;
1132             }
1133             for (; i < nb_cargs; i++, k++) {
1134                 qemu_log("%s$0x%" TCG_PRIlx, k ? "," : "", args[k]);
1135             }
1136         }
1137         qemu_log("\n");
1138     }
1139 }
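/* Example of the resulting log format (hand-written to match the printf
   formats above, not captured from a real run; temp and helper names are
   illustrative):

        ---- 0000000000400123
        mov_i32 tmp0,eax
        qemu_ld_i32 tmp1,tmp2,leul,1
        call foo,$0x0,$1,tmp0,tmp1
        brcond_i32 tmp0,tmp1,eq,$L1
 */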
1140 
1141 /* we give more priority to constraints with fewer registers */
1142 static int get_constraint_priority(const TCGOpDef *def, int k)
1143 {
1144     const TCGArgConstraint *arg_ct;
1145 
1146     int i, n;
1147     arg_ct = &def->args_ct[k];
1148     if (arg_ct->ct & TCG_CT_ALIAS) {
1149         /* an alias is equivalent to a single register */
1150         n = 1;
1151     } else {
1152         if (!(arg_ct->ct & TCG_CT_REG))
1153             return 0;
1154         n = 0;
1155         for(i = 0; i < TCG_TARGET_NB_REGS; i++) {
1156             if (tcg_regset_test_reg(arg_ct->u.regs, i))
1157                 n++;
1158         }
1159     }
1160     return TCG_TARGET_NB_REGS - n + 1;
1161 }
1162 
1163 /* sort from highest priority to lowest */
1164 static void sort_constraints(TCGOpDef *def, int start, int n)
1165 {
1166     int i, j, p1, p2, tmp;
1167 
1168     for(i = 0; i < n; i++)
1169         def->sorted_args[start + i] = start + i;
1170     if (n <= 1)
1171         return;
1172     for(i = 0; i < n - 1; i++) {
1173         for(j = i + 1; j < n; j++) {
1174             p1 = get_constraint_priority(def, def->sorted_args[start + i]);
1175             p2 = get_constraint_priority(def, def->sorted_args[start + j]);
1176             if (p1 < p2) {
1177                 tmp = def->sorted_args[start + i];
1178                 def->sorted_args[start + i] = def->sorted_args[start + j];
1179                 def->sorted_args[start + j] = tmp;
1180             }
1181         }
1182     }
1183 }
1184 
1185 void tcg_add_target_add_op_defs(const TCGTargetOpDef *tdefs)
1186 {
1187     TCGOpcode op;
1188     TCGOpDef *def;
1189     const char *ct_str;
1190     int i, nb_args;
1191 
1192     for(;;) {
1193         if (tdefs->op == (TCGOpcode)-1)
1194             break;
1195         op = tdefs->op;
1196         assert((unsigned)op < NB_OPS);
1197         def = &tcg_op_defs[op];
1198 #if defined(CONFIG_DEBUG_TCG)
1199         /* Duplicate entry in op definitions? */
1200         assert(!def->used);
1201         def->used = 1;
1202 #endif
1203         nb_args = def->nb_iargs + def->nb_oargs;
1204         for(i = 0; i < nb_args; i++) {
1205             ct_str = tdefs->args_ct_str[i];
1206             /* Incomplete TCGTargetOpDef entry? */
1207             assert(ct_str != NULL);
1208             tcg_regset_clear(def->args_ct[i].u.regs);
1209             def->args_ct[i].ct = 0;
1210             if (ct_str[0] >= '0' && ct_str[0] <= '9') {
1211                 int oarg;
1212                 oarg = ct_str[0] - '0';
1213                 assert(oarg < def->nb_oargs);
1214                 assert(def->args_ct[oarg].ct & TCG_CT_REG);
1215                 /* TCG_CT_ALIAS is for the output arguments. The input
1216                    argument is tagged with TCG_CT_IALIAS. */
1217                 def->args_ct[i] = def->args_ct[oarg];
1218                 def->args_ct[oarg].ct = TCG_CT_ALIAS;
1219                 def->args_ct[oarg].alias_index = i;
1220                 def->args_ct[i].ct |= TCG_CT_IALIAS;
1221                 def->args_ct[i].alias_index = oarg;
1222             } else {
1223                 for(;;) {
1224                     if (*ct_str == '\0')
1225                         break;
1226                     switch(*ct_str) {
1227                     case 'i':
1228                         def->args_ct[i].ct |= TCG_CT_CONST;
1229                         ct_str++;
1230                         break;
1231                     default:
1232                         if (target_parse_constraint(&def->args_ct[i], &ct_str) < 0) {
1233                             fprintf(stderr, "Invalid constraint '%s' for arg %d of operation '%s'\n",
1234                                     ct_str, i, def->name);
1235                             exit(1);
1236                         }
1237                     }
1238                 }
1239             }
1240         }
1241 
1242         /* TCGTargetOpDef entry with too much information? */
1243         assert(i == TCG_MAX_OP_ARGS || tdefs->args_ct_str[i] == NULL);
1244 
1245         /* sort the constraints (XXX: this is just a heuristic) */
1246         sort_constraints(def, 0, def->nb_oargs);
1247         sort_constraints(def, def->nb_oargs, def->nb_iargs);
1248 
1249 #if 0
1250         {
1251             int i;
1252 
1253             printf("%s: sorted=", def->name);
1254             for(i = 0; i < def->nb_oargs + def->nb_iargs; i++)
1255                 printf(" %d", def->sorted_args[i]);
1256             printf("\n");
1257         }
1258 #endif
1259         tdefs++;
1260     }
1261 
1262 #if defined(CONFIG_DEBUG_TCG)
1263     i = 0;
1264     for (op = 0; op < tcg_op_defs_max; op++) {
1265         const TCGOpDef *def = &tcg_op_defs[op];
1266         if (def->flags & TCG_OPF_NOT_PRESENT) {
1267             /* Wrong entry in op definitions? */
1268             if (def->used) {
1269                 fprintf(stderr, "Invalid op definition for %s\n", def->name);
1270                 i = 1;
1271             }
1272         } else {
1273             /* Missing entry in op definitions? */
1274             if (!def->used) {
1275                 fprintf(stderr, "Missing op definition for %s\n", def->name);
1276                 i = 1;
1277             }
1278         }
1279     }
1280     if (i == 1) {
1281         tcg_abort();
1282     }
1283 #endif
1284 }
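/* The tdefs argument comes from a table in tcg-target.c, terminated by a -1
   opcode; a representative (illustrative) fragment:

       static const TCGTargetOpDef example_op_defs[] = {
           { INDEX_op_ld_i32, { "r", "r" } },
           { INDEX_op_add_i32, { "r", "r", "ri" } },
           { INDEX_op_brcond_i32, { "r", "ri" } },
           { -1 },
       };

   Each string is parsed above: digits alias an output operand, 'i' allows a
   constant, and everything else is handed to target_parse_constraint().  */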
1285 
1286 void tcg_op_remove(TCGContext *s, TCGOp *op)
1287 {
1288     int next = op->next;
1289     int prev = op->prev;
1290 
1291     if (next >= 0) {
1292         s->gen_op_buf[next].prev = prev;
1293     } else {
1294         s->gen_last_op_idx = prev;
1295     }
1296     if (prev >= 0) {
1297         s->gen_op_buf[prev].next = next;
1298     } else {
1299         s->gen_first_op_idx = next;
1300     }
1301 
1302     memset(op, -1, sizeof(*op));
1303 
1304 #ifdef CONFIG_PROFILER
1305     s->del_op_count++;
1306 #endif
1307 }
1308 
1309 #ifdef USE_LIVENESS_ANALYSIS
1310 /* liveness analysis: end of function: all temps are dead, and globals
1311    should be in memory. */
1312 static inline void tcg_la_func_end(TCGContext *s, uint8_t *dead_temps,
1313                                    uint8_t *mem_temps)
1314 {
1315     memset(dead_temps, 1, s->nb_temps);
1316     memset(mem_temps, 1, s->nb_globals);
1317     memset(mem_temps + s->nb_globals, 0, s->nb_temps - s->nb_globals);
1318 }
1319 
1320 /* liveness analysis: end of basic block: all temps are dead, globals
1321    and local temps should be in memory. */
1322 static inline void tcg_la_bb_end(TCGContext *s, uint8_t *dead_temps,
1323                                  uint8_t *mem_temps)
1324 {
1325     int i;
1326 
1327     memset(dead_temps, 1, s->nb_temps);
1328     memset(mem_temps, 1, s->nb_globals);
1329     for(i = s->nb_globals; i < s->nb_temps; i++) {
1330         mem_temps[i] = s->temps[i].temp_local;
1331     }
1332 }
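/* Worked example for the backward scan below: for an op such as

       add_i32 t2, t0, t1

   reached when nothing later reads t2, dead_temps[t2] is still 1 and
   mem_temps[t2] is 0, so the whole op is removed.  If t2 is live, bit i of
   op_dead_args records whether argument i dies at this op, and op_sync_args
   records which outputs must additionally be written back to memory; both
   are consumed later by the register allocator.  */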
1333 
1334 /* Liveness analysis: update the op_dead_args array to tell whether a
1335    given input argument is dead.  Instructions updating dead
1336    temporaries are removed. */
1337 static void tcg_liveness_analysis(TCGContext *s)
1338 {
1339     uint8_t *dead_temps, *mem_temps;
1340     int oi, oi_prev, nb_ops;
1341 
1342     nb_ops = s->gen_next_op_idx;
1343     s->op_dead_args = tcg_malloc(nb_ops * sizeof(uint16_t));
1344     s->op_sync_args = tcg_malloc(nb_ops * sizeof(uint8_t));
1345 
1346     dead_temps = tcg_malloc(s->nb_temps);
1347     mem_temps = tcg_malloc(s->nb_temps);
1348     tcg_la_func_end(s, dead_temps, mem_temps);
1349 
1350     for (oi = s->gen_last_op_idx; oi >= 0; oi = oi_prev) {
1351         int i, nb_iargs, nb_oargs;
1352         TCGOpcode opc_new, opc_new2;
1353         bool have_opc_new2;
1354         uint16_t dead_args;
1355         uint8_t sync_args;
1356         TCGArg arg;
1357 
1358         TCGOp * const op = &s->gen_op_buf[oi];
1359         TCGArg * const args = &s->gen_opparam_buf[op->args];
1360         TCGOpcode opc = op->opc;
1361         const TCGOpDef *def = &tcg_op_defs[opc];
1362 
1363         oi_prev = op->prev;
1364 
1365         switch (opc) {
1366         case INDEX_op_call:
1367             {
1368                 int call_flags;
1369 
1370                 nb_oargs = op->callo;
1371                 nb_iargs = op->calli;
1372                 call_flags = args[nb_oargs + nb_iargs + 1];
1373 
1374                 /* pure functions can be removed if their result is unused */
1375                 if (call_flags & TCG_CALL_NO_SIDE_EFFECTS) {
1376                     for (i = 0; i < nb_oargs; i++) {
1377                         arg = args[i];
1378                         if (!dead_temps[arg] || mem_temps[arg]) {
1379                             goto do_not_remove_call;
1380                         }
1381                     }
1382                     goto do_remove;
1383                 } else {
1384                 do_not_remove_call:
1385 
1386                     /* output args are dead */
1387                     dead_args = 0;
1388                     sync_args = 0;
1389                     for (i = 0; i < nb_oargs; i++) {
1390                         arg = args[i];
1391                         if (dead_temps[arg]) {
1392                             dead_args |= (1 << i);
1393                         }
1394                         if (mem_temps[arg]) {
1395                             sync_args |= (1 << i);
1396                         }
1397                         dead_temps[arg] = 1;
1398                         mem_temps[arg] = 0;
1399                     }
1400 
1401                     if (!(call_flags & TCG_CALL_NO_READ_GLOBALS)) {
1402                         /* globals should be synced to memory */
1403                         memset(mem_temps, 1, s->nb_globals);
1404                     }
1405                     if (!(call_flags & (TCG_CALL_NO_WRITE_GLOBALS |
1406                                         TCG_CALL_NO_READ_GLOBALS))) {
1407                         /* globals should go back to memory */
1408                         memset(dead_temps, 1, s->nb_globals);
1409                     }
1410 
1411                     /* record arguments that die in this helper */
1412                     for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
1413                         arg = args[i];
1414                         if (arg != TCG_CALL_DUMMY_ARG) {
1415                             if (dead_temps[arg]) {
1416                                 dead_args |= (1 << i);
1417                             }
1418                         }
1419                     }
1420                     /* input arguments are live for preceding opcodes */
1421                     for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
1422                         arg = args[i];
1423                         dead_temps[arg] = 0;
1424                     }
1425                     s->op_dead_args[oi] = dead_args;
1426                     s->op_sync_args[oi] = sync_args;
1427                 }
1428             }
1429             break;
1430         case INDEX_op_insn_start:
1431             break;
1432         case INDEX_op_discard:
1433             /* mark the temporary as dead */
1434             dead_temps[args[0]] = 1;
1435             mem_temps[args[0]] = 0;
1436             break;
1437 
1438         case INDEX_op_add2_i32:
1439             opc_new = INDEX_op_add_i32;
1440             goto do_addsub2;
1441         case INDEX_op_sub2_i32:
1442             opc_new = INDEX_op_sub_i32;
1443             goto do_addsub2;
1444         case INDEX_op_add2_i64:
1445             opc_new = INDEX_op_add_i64;
1446             goto do_addsub2;
1447         case INDEX_op_sub2_i64:
1448             opc_new = INDEX_op_sub_i64;
1449         do_addsub2:
1450             nb_iargs = 4;
1451             nb_oargs = 2;
1452             /* Test if the high part of the operation is dead, but not
1453                the low part.  The result can be optimized to a simple
1454                add or sub.  This happens often for an x86_64 guest when
1455                the CPU mode is set to 32 bit.  */
1456             if (dead_temps[args[1]] && !mem_temps[args[1]]) {
1457                 if (dead_temps[args[0]] && !mem_temps[args[0]]) {
1458                     goto do_remove;
1459                 }
1460                 /* Replace the opcode and adjust the args in place,
1461                    leaving 3 unused args at the end.  */
1462                 op->opc = opc = opc_new;
1463                 args[1] = args[2];
1464                 args[2] = args[4];
1465                 /* Fall through and mark the single-word operation live.  */
1466                 nb_iargs = 2;
1467                 nb_oargs = 1;
1468             }
1469             goto do_not_remove;
1470 
1471         case INDEX_op_mulu2_i32:
1472             opc_new = INDEX_op_mul_i32;
1473             opc_new2 = INDEX_op_muluh_i32;
1474             have_opc_new2 = TCG_TARGET_HAS_muluh_i32;
1475             goto do_mul2;
1476         case INDEX_op_muls2_i32:
1477             opc_new = INDEX_op_mul_i32;
1478             opc_new2 = INDEX_op_mulsh_i32;
1479             have_opc_new2 = TCG_TARGET_HAS_mulsh_i32;
1480             goto do_mul2;
1481         case INDEX_op_mulu2_i64:
1482             opc_new = INDEX_op_mul_i64;
1483             opc_new2 = INDEX_op_muluh_i64;
1484             have_opc_new2 = TCG_TARGET_HAS_muluh_i64;
1485             goto do_mul2;
1486         case INDEX_op_muls2_i64:
1487             opc_new = INDEX_op_mul_i64;
1488             opc_new2 = INDEX_op_mulsh_i64;
1489             have_opc_new2 = TCG_TARGET_HAS_mulsh_i64;
1490             goto do_mul2;
1491         do_mul2:
1492             nb_iargs = 2;
1493             nb_oargs = 2;
1494             if (dead_temps[args[1]] && !mem_temps[args[1]]) {
1495                 if (dead_temps[args[0]] && !mem_temps[args[0]]) {
1496                     /* Both parts of the operation are dead.  */
1497                     goto do_remove;
1498                 }
1499                 /* The high part of the operation is dead; generate the low. */
1500                 op->opc = opc = opc_new;
1501                 args[1] = args[2];
1502                 args[2] = args[3];
1503             } else if (have_opc_new2 && dead_temps[args[0]]
1504                        && !mem_temps[args[0]]) {
1505                 /* The low part of the operation is dead; generate the high. */
1506                 op->opc = opc = opc_new2;
1507                 args[0] = args[1];
1508                 args[1] = args[2];
1509                 args[2] = args[3];
1510             } else {
1511                 goto do_not_remove;
1512             }
1513             /* Mark the single-word operation live.  */
1514             nb_oargs = 1;
1515             goto do_not_remove;
1516 
1517         default:
1518             /* XXX: optimize by hardcoding common cases (e.g. triadic ops) */
1519             nb_iargs = def->nb_iargs;
1520             nb_oargs = def->nb_oargs;
1521 
1522             /* Test if the operation can be removed because all
1523                its outputs are dead. We assume that nb_oargs == 0
1524                implies side effects.  */
1525             if (!(def->flags & TCG_OPF_SIDE_EFFECTS) && nb_oargs != 0) {
1526                 for (i = 0; i < nb_oargs; i++) {
1527                     arg = args[i];
1528                     if (!dead_temps[arg] || mem_temps[arg]) {
1529                         goto do_not_remove;
1530                     }
1531                 }
1532             do_remove:
1533                 tcg_op_remove(s, op);
1534             } else {
1535             do_not_remove:
1536                 /* output args are dead */
1537                 dead_args = 0;
1538                 sync_args = 0;
1539                 for (i = 0; i < nb_oargs; i++) {
1540                     arg = args[i];
1541                     if (dead_temps[arg]) {
1542                         dead_args |= (1 << i);
1543                     }
1544                     if (mem_temps[arg]) {
1545                         sync_args |= (1 << i);
1546                     }
1547                     dead_temps[arg] = 1;
1548                     mem_temps[arg] = 0;
1549                 }
1550 
1551                 /* if end of basic block, update */
1552                 if (def->flags & TCG_OPF_BB_END) {
1553                     tcg_la_bb_end(s, dead_temps, mem_temps);
1554                 } else if (def->flags & TCG_OPF_SIDE_EFFECTS) {
1555                     /* globals should be synced to memory */
1556                     memset(mem_temps, 1, s->nb_globals);
1557                 }
1558 
1559                 /* record arguments that die in this opcode */
1560                 for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
1561                     arg = args[i];
1562                     if (dead_temps[arg]) {
1563                         dead_args |= (1 << i);
1564                     }
1565                 }
1566                 /* input arguments are live for preceding opcodes */
1567                 for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
1568                     arg = args[i];
1569                     dead_temps[arg] = 0;
1570                 }
1571                 s->op_dead_args[oi] = dead_args;
1572                 s->op_sync_args[oi] = sync_args;
1573             }
1574             break;
1575         }
1576     }
1577 }
1578 #else
1579 /* dummy liveness analysis */
1580 static void tcg_liveness_analysis(TCGContext *s)
1581 {
1582     int nb_ops;
1583     nb_ops = s->gen_opc_ptr - s->gen_opc_buf;
1584 
1585     s->op_dead_args = tcg_malloc(nb_ops * sizeof(uint16_t));
1586     memset(s->op_dead_args, 0, nb_ops * sizeof(uint16_t));
1587     s->op_sync_args = tcg_malloc(nb_ops * sizeof(uint8_t));
1588     memset(s->op_sync_args, 0, nb_ops * sizeof(uint8_t));
1589 }
1590 #endif
1591 
1592 #ifndef NDEBUG
1593 static void dump_regs(TCGContext *s)
1594 {
1595     TCGTemp *ts;
1596     int i;
1597     char buf[64];
1598 
1599     for(i = 0; i < s->nb_temps; i++) {
1600         ts = &s->temps[i];
1601         printf("  %10s: ", tcg_get_arg_str_idx(s, buf, sizeof(buf), i));
1602         switch(ts->val_type) {
1603         case TEMP_VAL_REG:
1604             printf("%s", tcg_target_reg_names[ts->reg]);
1605             break;
1606         case TEMP_VAL_MEM:
1607             printf("%d(%s)", (int)ts->mem_offset, tcg_target_reg_names[ts->mem_reg]);
1608             break;
1609         case TEMP_VAL_CONST:
1610             printf("$0x%" TCG_PRIlx, ts->val);
1611             break;
1612         case TEMP_VAL_DEAD:
1613             printf("D");
1614             break;
1615         default:
1616             printf("???");
1617             break;
1618         }
1619         printf("\n");
1620     }
1621 
1622     for(i = 0; i < TCG_TARGET_NB_REGS; i++) {
1623         if (s->reg_to_temp[i] >= 0) {
1624             printf("%s: %s\n",
1625                    tcg_target_reg_names[i],
1626                    tcg_get_arg_str_idx(s, buf, sizeof(buf), s->reg_to_temp[i]));
1627         }
1628     }
1629 }
1630 
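     /* Debug helper: check that s->reg_to_temp[] and the per-temporary state
        agree; on any mismatch, dump the register state and abort.  */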
1631 static void check_regs(TCGContext *s)
1632 {
1633     int reg, k;
1634     TCGTemp *ts;
1635     char buf[64];
1636 
1637     for(reg = 0; reg < TCG_TARGET_NB_REGS; reg++) {
1638         k = s->reg_to_temp[reg];
1639         if (k >= 0) {
1640             ts = &s->temps[k];
1641             if (ts->val_type != TEMP_VAL_REG ||
1642                 ts->reg != reg) {
1643                 printf("Inconsistency for register %s:\n",
1644                        tcg_target_reg_names[reg]);
1645                 goto fail;
1646             }
1647         }
1648     }
1649     for(k = 0; k < s->nb_temps; k++) {
1650         ts = &s->temps[k];
1651         if (ts->val_type == TEMP_VAL_REG &&
1652             !ts->fixed_reg &&
1653             s->reg_to_temp[ts->reg] != k) {
1654                 printf("Inconsistency for temp %s:\n",
1655                        tcg_get_arg_str_idx(s, buf, sizeof(buf), k));
1656         fail:
1657                 printf("reg state:\n");
1658                 dump_regs(s);
1659                 tcg_abort();
1660         }
1661     }
1662 }
1663 #endif
1664 
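     /* Reserve a stack slot in the TB frame for temporary 'temp' and record
        it in the TCGTemp, aborting if the frame is exhausted.  */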
1665 static void temp_allocate_frame(TCGContext *s, int temp)
1666 {
1667     TCGTemp *ts;
1668     ts = &s->temps[temp];
1669 #if !(defined(__sparc__) && TCG_TARGET_REG_BITS == 64)
1670     /* Sparc64 stack is accessed with offset of 2047 */
1671     s->current_frame_offset = (s->current_frame_offset +
1672                                (tcg_target_long)sizeof(tcg_target_long) - 1) &
1673         ~(sizeof(tcg_target_long) - 1);
1674 #endif
1675     if (s->current_frame_offset + (tcg_target_long)sizeof(tcg_target_long) >
1676         s->frame_end) {
1677         tcg_abort();
1678     }
1679     ts->mem_offset = s->current_frame_offset;
1680     ts->mem_reg = s->frame_reg;
1681     ts->mem_allocated = 1;
1682     s->current_frame_offset += sizeof(tcg_target_long);
1683 }
1684 
1685 /* sync register 'reg' by saving it to the corresponding temporary */
1686 static inline void tcg_reg_sync(TCGContext *s, int reg)
1687 {
1688     TCGTemp *ts;
1689     int temp;
1690 
1691     temp = s->reg_to_temp[reg];
1692     ts = &s->temps[temp];
1693     assert(ts->val_type == TEMP_VAL_REG);
1694     if (!ts->mem_coherent && !ts->fixed_reg) {
1695         if (!ts->mem_allocated) {
1696             temp_allocate_frame(s, temp);
1697         }
1698         tcg_out_st(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
1699     }
1700     ts->mem_coherent = 1;
1701 }
1702 
1703 /* free register 'reg' by spilling the corresponding temporary if necessary */
1704 static void tcg_reg_free(TCGContext *s, int reg)
1705 {
1706     int temp;
1707 
1708     temp = s->reg_to_temp[reg];
1709     if (temp != -1) {
1710         tcg_reg_sync(s, reg);
1711         s->temps[temp].val_type = TEMP_VAL_MEM;
1712         s->reg_to_temp[reg] = -1;
1713     }
1714 }
1715 
1716 /* Allocate a register belonging to reg1 & ~reg2 */
1717 static int tcg_reg_alloc(TCGContext *s, TCGRegSet reg1, TCGRegSet reg2)
1718 {
1719     int i, reg;
1720     TCGRegSet reg_ct;
1721 
1722     tcg_regset_andnot(reg_ct, reg1, reg2);
1723 
1724     /* first try free registers */
1725     for(i = 0; i < ARRAY_SIZE(tcg_target_reg_alloc_order); i++) {
1726         reg = tcg_target_reg_alloc_order[i];
1727         if (tcg_regset_test_reg(reg_ct, reg) && s->reg_to_temp[reg] == -1)
1728             return reg;
1729     }
1730 
1731     /* XXX: do better spill choice */
1732     for(i = 0; i < ARRAY_SIZE(tcg_target_reg_alloc_order); i++) {
1733         reg = tcg_target_reg_alloc_order[i];
1734         if (tcg_regset_test_reg(reg_ct, reg)) {
1735             tcg_reg_free(s, reg);
1736             return reg;
1737         }
1738     }
1739 
1740     tcg_abort();
1741 }
1742 
1743 /* mark a temporary as dead. */
1744 static inline void temp_dead(TCGContext *s, int temp)
1745 {
1746     TCGTemp *ts;
1747 
1748     ts = &s->temps[temp];
1749     if (!ts->fixed_reg) {
1750         if (ts->val_type == TEMP_VAL_REG) {
1751             s->reg_to_temp[ts->reg] = -1;
1752         }
1753         if (temp < s->nb_globals || ts->temp_local) {
1754             ts->val_type = TEMP_VAL_MEM;
1755         } else {
1756             ts->val_type = TEMP_VAL_DEAD;
1757         }
1758     }
1759 }
1760 
1761 /* sync a temporary to memory. 'allocated_regs' is used in case a
1762    temporary register needs to be allocated to store a constant. */
1763 static inline void temp_sync(TCGContext *s, int temp, TCGRegSet allocated_regs)
1764 {
1765     TCGTemp *ts;
1766 
1767     ts = &s->temps[temp];
1768     if (!ts->fixed_reg) {
1769         switch(ts->val_type) {
1770         case TEMP_VAL_CONST:
1771             ts->reg = tcg_reg_alloc(s, tcg_target_available_regs[ts->type],
1772                                     allocated_regs);
1773             ts->val_type = TEMP_VAL_REG;
1774             s->reg_to_temp[ts->reg] = temp;
1775             ts->mem_coherent = 0;
1776             tcg_out_movi(s, ts->type, ts->reg, ts->val);
1777             /* fallthrough */
1778         case TEMP_VAL_REG:
1779             tcg_reg_sync(s, ts->reg);
1780             break;
1781         case TEMP_VAL_DEAD:
1782         case TEMP_VAL_MEM:
1783             break;
1784         default:
1785             tcg_abort();
1786         }
1787     }
1788 }
1789 
1790 /* save a temporary to memory. 'allocated_regs' is used in case a
1791    temporary register needs to be allocated to store a constant. */
1792 static inline void temp_save(TCGContext *s, int temp, TCGRegSet allocated_regs)
1793 {
1794 #ifdef USE_LIVENESS_ANALYSIS
1795     /* The liveness analysis already ensures that globals are back
1796        in memory. Keep an assert for safety. */
1797     assert(s->temps[temp].val_type == TEMP_VAL_MEM || s->temps[temp].fixed_reg);
1798 #else
1799     temp_sync(s, temp, allocated_regs);
1800     temp_dead(s, temp);
1801 #endif
1802 }
1803 
1804 /* save globals to their canonical location and assume they can be
1805    modified by the following code. 'allocated_regs' is used in case a
1806    temporary register needs to be allocated to store a constant. */
1807 static void save_globals(TCGContext *s, TCGRegSet allocated_regs)
1808 {
1809     int i;
1810 
1811     for(i = 0; i < s->nb_globals; i++) {
1812         temp_save(s, i, allocated_regs);
1813     }
1814 }
1815 
1816 /* sync globals to their canonical location and assume they can be
1817    read by the following code. 'allocated_regs' is used in case a
1818    temporary register needs to be allocated to store a constant. */
1819 static void sync_globals(TCGContext *s, TCGRegSet allocated_regs)
1820 {
1821     int i;
1822 
1823     for (i = 0; i < s->nb_globals; i++) {
1824 #ifdef USE_LIVENESS_ANALYSIS
1825         assert(s->temps[i].val_type != TEMP_VAL_REG || s->temps[i].fixed_reg ||
1826                s->temps[i].mem_coherent);
1827 #else
1828         temp_sync(s, i, allocated_regs);
1829 #endif
1830     }
1831 }
1832 
1833 /* at the end of a basic block, we assume all temporaries are dead and
1834    all globals are stored at their canonical location. */
1835 static void tcg_reg_alloc_bb_end(TCGContext *s, TCGRegSet allocated_regs)
1836 {
1837     TCGTemp *ts;
1838     int i;
1839 
1840     for(i = s->nb_globals; i < s->nb_temps; i++) {
1841         ts = &s->temps[i];
1842         if (ts->temp_local) {
1843             temp_save(s, i, allocated_regs);
1844         } else {
1845 #ifdef USE_LIVENESS_ANALYSIS
1846             /* The liveness analysis already ensures that temps are dead.
1847                Keep an assert for safety. */
1848             assert(ts->val_type == TEMP_VAL_DEAD);
1849 #else
1850             temp_dead(s, i);
1851 #endif
1852         }
1853     }
1854 
1855     save_globals(s, allocated_regs);
1856 }
1857 
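     /* Bit n of dead_args is set by the liveness analysis when argument n of
        the op is dead after it; bit n of sync_args is set when output n must
        additionally be synced back to memory.  Arguments are numbered with
        the outputs first, then the inputs, matching the TCGArg array.  */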
1858 #define IS_DEAD_ARG(n) ((dead_args >> (n)) & 1)
1859 #define NEED_SYNC_ARG(n) ((sync_args >> (n)) & 1)
1860 
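     /* Register allocation for movi: for a fixed register emit the constant
        load directly; otherwise just record the constant in the temporary
        (constant propagation).  The output is then synced or killed as the
        liveness analysis requires.  */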
1861 static void tcg_reg_alloc_movi(TCGContext *s, const TCGArg *args,
1862                                uint16_t dead_args, uint8_t sync_args)
1863 {
1864     TCGTemp *ots;
1865     tcg_target_ulong val;
1866 
1867     ots = &s->temps[args[0]];
1868     val = args[1];
1869 
1870     if (ots->fixed_reg) {
1871         /* for fixed registers, we do not do any constant
1872            propagation */
1873         tcg_out_movi(s, ots->type, ots->reg, val);
1874     } else {
1875         /* The movi is not explicitly generated here */
1876         if (ots->val_type == TEMP_VAL_REG)
1877             s->reg_to_temp[ots->reg] = -1;
1878         ots->val_type = TEMP_VAL_CONST;
1879         ots->val = val;
1880     }
1881     if (NEED_SYNC_ARG(0)) {
1882         temp_sync(s, args[0], s->reserved_regs);
1883     }
1884     if (IS_DEAD_ARG(0)) {
1885         temp_dead(s, args[0]);
1886     }
1887 }
1888 
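     /* Register allocation for mov: propagate constants, reuse the source
        register when the (dead) input allows the move to be suppressed, and
        otherwise emit a real move into a register for the output, syncing it
        to memory when the liveness analysis requires it.  */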
1889 static void tcg_reg_alloc_mov(TCGContext *s, const TCGOpDef *def,
1890                               const TCGArg *args, uint16_t dead_args,
1891                               uint8_t sync_args)
1892 {
1893     TCGRegSet allocated_regs;
1894     TCGTemp *ts, *ots;
1895     TCGType otype, itype;
1896 
1897     tcg_regset_set(allocated_regs, s->reserved_regs);
1898     ots = &s->temps[args[0]];
1899     ts = &s->temps[args[1]];
1900 
1901     /* Note that otype != itype for no-op truncation.  */
1902     otype = ots->type;
1903     itype = ts->type;
1904 
1905     /* If the source value is not in a register, and we're going to be
1906        forced to have it in a register in order to perform the copy,
1907        then copy the SOURCE value into its own register first.  That way
1908        we don't have to reload SOURCE the next time it is used. */
1909     if (((NEED_SYNC_ARG(0) || ots->fixed_reg) && ts->val_type != TEMP_VAL_REG)
1910         || ts->val_type == TEMP_VAL_MEM) {
1911         ts->reg = tcg_reg_alloc(s, tcg_target_available_regs[itype],
1912                                 allocated_regs);
1913         if (ts->val_type == TEMP_VAL_MEM) {
1914             tcg_out_ld(s, itype, ts->reg, ts->mem_reg, ts->mem_offset);
1915             ts->mem_coherent = 1;
1916         } else if (ts->val_type == TEMP_VAL_CONST) {
1917             tcg_out_movi(s, itype, ts->reg, ts->val);
1918             ts->mem_coherent = 0;
1919         }
1920         s->reg_to_temp[ts->reg] = args[1];
1921         ts->val_type = TEMP_VAL_REG;
1922     }
1923 
1924     if (IS_DEAD_ARG(0) && !ots->fixed_reg) {
1925         /* mov to a non-saved dead register makes no sense (even with
1926            liveness analysis disabled). */
1927         assert(NEED_SYNC_ARG(0));
1928         /* The code above should have moved the temp to a register. */
1929         assert(ts->val_type == TEMP_VAL_REG);
1930         if (!ots->mem_allocated) {
1931             temp_allocate_frame(s, args[0]);
1932         }
1933         tcg_out_st(s, otype, ts->reg, ots->mem_reg, ots->mem_offset);
1934         if (IS_DEAD_ARG(1)) {
1935             temp_dead(s, args[1]);
1936         }
1937         temp_dead(s, args[0]);
1938     } else if (ts->val_type == TEMP_VAL_CONST) {
1939         /* propagate constant */
1940         if (ots->val_type == TEMP_VAL_REG) {
1941             s->reg_to_temp[ots->reg] = -1;
1942         }
1943         ots->val_type = TEMP_VAL_CONST;
1944         ots->val = ts->val;
1945         if (IS_DEAD_ARG(1)) {
1946             temp_dead(s, args[1]);
1947         }
1948     } else {
1949         /* The code in the first if block should have moved the
1950            temp to a register. */
1951         assert(ts->val_type == TEMP_VAL_REG);
1952         if (IS_DEAD_ARG(1) && !ts->fixed_reg && !ots->fixed_reg) {
1953             /* the mov can be suppressed */
1954             if (ots->val_type == TEMP_VAL_REG) {
1955                 s->reg_to_temp[ots->reg] = -1;
1956             }
1957             ots->reg = ts->reg;
1958             temp_dead(s, args[1]);
1959         } else {
1960             if (ots->val_type != TEMP_VAL_REG) {
1961                 /* When allocating a new register, make sure to not spill the
1962                    input one. */
1963                 tcg_regset_set_reg(allocated_regs, ts->reg);
1964                 ots->reg = tcg_reg_alloc(s, tcg_target_available_regs[otype],
1965                                          allocated_regs);
1966             }
1967             tcg_out_mov(s, otype, ots->reg, ts->reg);
1968         }
1969         ots->val_type = TEMP_VAL_REG;
1970         ots->mem_coherent = 0;
1971         s->reg_to_temp[ots->reg] = args[0];
1972         if (NEED_SYNC_ARG(0)) {
1973             tcg_reg_sync(s, ots->reg);
1974         }
1975     }
1976 }
1977 
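     /* Generic register allocation for an op: load the inputs into registers
        that satisfy their constraints, free inputs that die here, handle
        basic-block ends, call clobbers and side effects, allocate the output
        registers, emit the op, and finally move, sync or free the outputs as
        the liveness analysis dictates.  */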
1978 static void tcg_reg_alloc_op(TCGContext *s,
1979                              const TCGOpDef *def, TCGOpcode opc,
1980                              const TCGArg *args, uint16_t dead_args,
1981                              uint8_t sync_args)
1982 {
1983     TCGRegSet allocated_regs;
1984     int i, k, nb_iargs, nb_oargs, reg;
1985     TCGArg arg;
1986     const TCGArgConstraint *arg_ct;
1987     TCGTemp *ts;
1988     TCGArg new_args[TCG_MAX_OP_ARGS];
1989     int const_args[TCG_MAX_OP_ARGS];
1990 
1991     nb_oargs = def->nb_oargs;
1992     nb_iargs = def->nb_iargs;
1993 
1994     /* copy constants */
1995     memcpy(new_args + nb_oargs + nb_iargs,
1996            args + nb_oargs + nb_iargs,
1997            sizeof(TCGArg) * def->nb_cargs);
1998 
1999     /* satisfy input constraints */
2000     tcg_regset_set(allocated_regs, s->reserved_regs);
2001     for(k = 0; k < nb_iargs; k++) {
2002         i = def->sorted_args[nb_oargs + k];
2003         arg = args[i];
2004         arg_ct = &def->args_ct[i];
2005         ts = &s->temps[arg];
2006         if (ts->val_type == TEMP_VAL_MEM) {
2007             reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
2008             tcg_out_ld(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
2009             ts->val_type = TEMP_VAL_REG;
2010             ts->reg = reg;
2011             ts->mem_coherent = 1;
2012             s->reg_to_temp[reg] = arg;
2013         } else if (ts->val_type == TEMP_VAL_CONST) {
2014             if (tcg_target_const_match(ts->val, ts->type, arg_ct)) {
2015                 /* constant is OK for instruction */
2016                 const_args[i] = 1;
2017                 new_args[i] = ts->val;
2018                 goto iarg_end;
2019             } else {
2020                 /* need to move to a register */
2021                 reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
2022                 tcg_out_movi(s, ts->type, reg, ts->val);
2023                 ts->val_type = TEMP_VAL_REG;
2024                 ts->reg = reg;
2025                 ts->mem_coherent = 0;
2026                 s->reg_to_temp[reg] = arg;
2027             }
2028         }
2029         assert(ts->val_type == TEMP_VAL_REG);
2030         if (arg_ct->ct & TCG_CT_IALIAS) {
2031             if (ts->fixed_reg) {
2032                 /* if fixed register, we must allocate a new register
2033                    if the alias is not the same register */
2034                 if (arg != args[arg_ct->alias_index])
2035                     goto allocate_in_reg;
2036             } else {
2037                 /* if the input is aliased to an output and if it is
2038                    not dead after the instruction, we must allocate
2039                    a new register and move it */
2040                 if (!IS_DEAD_ARG(i)) {
2041                     goto allocate_in_reg;
2042                 }
2043                 /* check if the current register has already been allocated
2044                    for another input aliased to an output */
2045                 int k2, i2;
2046                 for (k2 = 0 ; k2 < k ; k2++) {
2047                     i2 = def->sorted_args[nb_oargs + k2];
2048                     if ((def->args_ct[i2].ct & TCG_CT_IALIAS) &&
2049                         (new_args[i2] == ts->reg)) {
2050                         goto allocate_in_reg;
2051                     }
2052                 }
2053             }
2054         }
2055         reg = ts->reg;
2056         if (tcg_regset_test_reg(arg_ct->u.regs, reg)) {
2057             /* nothing to do: the constraint is satisfied */
2058         } else {
2059         allocate_in_reg:
2060             /* allocate a new register matching the constraint
2061                and move the temporary register into it */
2062             reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
2063             tcg_out_mov(s, ts->type, reg, ts->reg);
2064         }
2065         new_args[i] = reg;
2066         const_args[i] = 0;
2067         tcg_regset_set_reg(allocated_regs, reg);
2068     iarg_end: ;
2069     }
2070 
2071     /* mark dead temporaries and free the associated registers */
2072     for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
2073         if (IS_DEAD_ARG(i)) {
2074             temp_dead(s, args[i]);
2075         }
2076     }
2077 
2078     if (def->flags & TCG_OPF_BB_END) {
2079         tcg_reg_alloc_bb_end(s, allocated_regs);
2080     } else {
2081         if (def->flags & TCG_OPF_CALL_CLOBBER) {
2082             /* XXX: permit generic clobber register list ? */
2083             for(reg = 0; reg < TCG_TARGET_NB_REGS; reg++) {
2084                 if (tcg_regset_test_reg(tcg_target_call_clobber_regs, reg)) {
2085                     tcg_reg_free(s, reg);
2086                 }
2087             }
2088         }
2089         if (def->flags & TCG_OPF_SIDE_EFFECTS) {
2090             /* sync globals if the op has side effects and might trigger
2091                an exception. */
2092             sync_globals(s, allocated_regs);
2093         }
2094 
2095         /* satisfy the output constraints */
2096         tcg_regset_set(allocated_regs, s->reserved_regs);
2097         for(k = 0; k < nb_oargs; k++) {
2098             i = def->sorted_args[k];
2099             arg = args[i];
2100             arg_ct = &def->args_ct[i];
2101             ts = &s->temps[arg];
2102             if (arg_ct->ct & TCG_CT_ALIAS) {
2103                 reg = new_args[arg_ct->alias_index];
2104             } else {
2105                 /* if fixed register, we try to use it */
2106                 reg = ts->reg;
2107                 if (ts->fixed_reg &&
2108                     tcg_regset_test_reg(arg_ct->u.regs, reg)) {
2109                     goto oarg_end;
2110                 }
2111                 reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
2112             }
2113             tcg_regset_set_reg(allocated_regs, reg);
2114             /* if a fixed register is used, then a move will be done afterwards */
2115             if (!ts->fixed_reg) {
2116                 if (ts->val_type == TEMP_VAL_REG) {
2117                     s->reg_to_temp[ts->reg] = -1;
2118                 }
2119                 ts->val_type = TEMP_VAL_REG;
2120                 ts->reg = reg;
2121                 /* temp value is modified, so the value kept in memory is
2122                    potentially not the same */
2123                 ts->mem_coherent = 0;
2124                 s->reg_to_temp[reg] = arg;
2125             }
2126         oarg_end:
2127             new_args[i] = reg;
2128         }
2129     }
2130 
2131     /* emit instruction */
2132     tcg_out_op(s, opc, new_args, const_args);
2133 
2134     /* move the outputs in the correct register if needed */
2135     for(i = 0; i < nb_oargs; i++) {
2136         ts = &s->temps[args[i]];
2137         reg = new_args[i];
2138         if (ts->fixed_reg && ts->reg != reg) {
2139             tcg_out_mov(s, ts->type, ts->reg, reg);
2140         }
2141         if (NEED_SYNC_ARG(i)) {
2142             tcg_reg_sync(s, reg);
2143         }
2144         if (IS_DEAD_ARG(i)) {
2145             temp_dead(s, args[i]);
2146         }
2147     }
2148 }
2149 
2150 #ifdef TCG_TARGET_STACK_GROWSUP
2151 #define STACK_DIR(x) (-(x))
2152 #else
2153 #define STACK_DIR(x) (x)
2154 #endif
2155 
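     /* Register allocation for a call: pass the first arguments in the host
        call registers and the remainder on the stack, free the call-clobbered
        registers, save or sync the globals according to the call flags, emit
        the call, and assign the outputs from the host return registers.  */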
2156 static void tcg_reg_alloc_call(TCGContext *s, int nb_oargs, int nb_iargs,
2157                                const TCGArg * const args, uint16_t dead_args,
2158                                uint8_t sync_args)
2159 {
2160     int flags, nb_regs, i, reg;
2161     TCGArg arg;
2162     TCGTemp *ts;
2163     intptr_t stack_offset;
2164     size_t call_stack_size;
2165     tcg_insn_unit *func_addr;
2166     int allocate_args;
2167     TCGRegSet allocated_regs;
2168 
2169     func_addr = (tcg_insn_unit *)(intptr_t)args[nb_oargs + nb_iargs];
2170     flags = args[nb_oargs + nb_iargs + 1];
2171 
2172     nb_regs = ARRAY_SIZE(tcg_target_call_iarg_regs);
2173     if (nb_regs > nb_iargs) {
2174         nb_regs = nb_iargs;
2175     }
2176 
2177     /* assign stack slots first */
2178     call_stack_size = (nb_iargs - nb_regs) * sizeof(tcg_target_long);
2179     call_stack_size = (call_stack_size + TCG_TARGET_STACK_ALIGN - 1) &
2180         ~(TCG_TARGET_STACK_ALIGN - 1);
2181     allocate_args = (call_stack_size > TCG_STATIC_CALL_ARGS_SIZE);
2182     if (allocate_args) {
2183         /* XXX: if more than TCG_STATIC_CALL_ARGS_SIZE is needed,
2184            preallocate call stack */
2185         tcg_abort();
2186     }
2187 
2188     stack_offset = TCG_TARGET_CALL_STACK_OFFSET;
2189     for(i = nb_regs; i < nb_iargs; i++) {
2190         arg = args[nb_oargs + i];
2191 #ifdef TCG_TARGET_STACK_GROWSUP
2192         stack_offset -= sizeof(tcg_target_long);
2193 #endif
2194         if (arg != TCG_CALL_DUMMY_ARG) {
2195             ts = &s->temps[arg];
2196             if (ts->val_type == TEMP_VAL_REG) {
2197                 tcg_out_st(s, ts->type, ts->reg, TCG_REG_CALL_STACK, stack_offset);
2198             } else if (ts->val_type == TEMP_VAL_MEM) {
2199                 reg = tcg_reg_alloc(s, tcg_target_available_regs[ts->type],
2200                                     s->reserved_regs);
2201                 /* XXX: not correct if reading values from the stack */
2202                 tcg_out_ld(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
2203                 tcg_out_st(s, ts->type, reg, TCG_REG_CALL_STACK, stack_offset);
2204             } else if (ts->val_type == TEMP_VAL_CONST) {
2205                 reg = tcg_reg_alloc(s, tcg_target_available_regs[ts->type],
2206                                     s->reserved_regs);
2207                 /* XXX: sign extend may be needed on some targets */
2208                 tcg_out_movi(s, ts->type, reg, ts->val);
2209                 tcg_out_st(s, ts->type, reg, TCG_REG_CALL_STACK, stack_offset);
2210             } else {
2211                 tcg_abort();
2212             }
2213         }
2214 #ifndef TCG_TARGET_STACK_GROWSUP
2215         stack_offset += sizeof(tcg_target_long);
2216 #endif
2217     }
2218 
2219     /* assign input registers */
2220     tcg_regset_set(allocated_regs, s->reserved_regs);
2221     for(i = 0; i < nb_regs; i++) {
2222         arg = args[nb_oargs + i];
2223         if (arg != TCG_CALL_DUMMY_ARG) {
2224             ts = &s->temps[arg];
2225             reg = tcg_target_call_iarg_regs[i];
2226             tcg_reg_free(s, reg);
2227             if (ts->val_type == TEMP_VAL_REG) {
2228                 if (ts->reg != reg) {
2229                     tcg_out_mov(s, ts->type, reg, ts->reg);
2230                 }
2231             } else if (ts->val_type == TEMP_VAL_MEM) {
2232                 tcg_out_ld(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
2233             } else if (ts->val_type == TEMP_VAL_CONST) {
2234                 /* XXX: sign extend ? */
2235                 tcg_out_movi(s, ts->type, reg, ts->val);
2236             } else {
2237                 tcg_abort();
2238             }
2239             tcg_regset_set_reg(allocated_regs, reg);
2240         }
2241     }
2242 
2243     /* mark dead temporaries and free the associated registers */
2244     for(i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
2245         if (IS_DEAD_ARG(i)) {
2246             temp_dead(s, args[i]);
2247         }
2248     }
2249 
2250     /* clobber call registers */
2251     for(reg = 0; reg < TCG_TARGET_NB_REGS; reg++) {
2252         if (tcg_regset_test_reg(tcg_target_call_clobber_regs, reg)) {
2253             tcg_reg_free(s, reg);
2254         }
2255     }
2256 
2257     /* Save globals if they might be written by the helper, sync them if
2258        they might be read. */
2259     if (flags & TCG_CALL_NO_READ_GLOBALS) {
2260         /* Nothing to do */
2261     } else if (flags & TCG_CALL_NO_WRITE_GLOBALS) {
2262         sync_globals(s, allocated_regs);
2263     } else {
2264         save_globals(s, allocated_regs);
2265     }
2266 
2267     tcg_out_call(s, func_addr);
2268 
2269     /* assign output registers and emit moves if needed */
2270     for(i = 0; i < nb_oargs; i++) {
2271         arg = args[i];
2272         ts = &s->temps[arg];
2273         reg = tcg_target_call_oarg_regs[i];
2274         assert(s->reg_to_temp[reg] == -1);
2275 
2276         if (ts->fixed_reg) {
2277             if (ts->reg != reg) {
2278                 tcg_out_mov(s, ts->type, ts->reg, reg);
2279             }
2280         } else {
2281             if (ts->val_type == TEMP_VAL_REG) {
2282                 s->reg_to_temp[ts->reg] = -1;
2283             }
2284             ts->val_type = TEMP_VAL_REG;
2285             ts->reg = reg;
2286             ts->mem_coherent = 0;
2287             s->reg_to_temp[reg] = arg;
2288             if (NEED_SYNC_ARG(i)) {
2289                 tcg_reg_sync(s, reg);
2290             }
2291             if (IS_DEAD_ARG(i)) {
2292                 temp_dead(s, args[i]);
2293             }
2294         }
2295     }
2296 }
2297 
2298 #ifdef CONFIG_PROFILER
2299 
2300 static int64_t tcg_table_op_count[NB_OPS];
2301 
2302 void tcg_dump_op_count(FILE *f, fprintf_function cpu_fprintf)
2303 {
2304     int i;
2305 
2306     for (i = 0; i < NB_OPS; i++) {
2307         cpu_fprintf(f, "%s %" PRId64 "\n", tcg_op_defs[i].name,
2308                     tcg_table_op_count[i]);
2309     }
2310 }
2311 #else
2312 void tcg_dump_op_count(FILE *f, fprintf_function cpu_fprintf)
2313 {
2314     cpu_fprintf(f, "[TCG profiler not compiled]\n");
2315 }
2316 #endif
2317 
2318 
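     /* Translate the ops of 's' into host code at 'gen_code_buf'.  Returns
        the size in bytes of the generated code, or -1 if the code buffer
        high-water mark was crossed, so that the caller can retry with a
        fresh buffer.  */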
2319 int tcg_gen_code(TCGContext *s, tcg_insn_unit *gen_code_buf)
2320 {
2321     int i, oi, oi_next, num_insns;
2322 
2323 #ifdef CONFIG_PROFILER
2324     {
2325         int n;
2326 
2327         n = s->gen_last_op_idx + 1;
2328         s->op_count += n;
2329         if (n > s->op_count_max) {
2330             s->op_count_max = n;
2331         }
2332 
2333         n = s->nb_temps;
2334         s->temp_count += n;
2335         if (n > s->temp_count_max) {
2336             s->temp_count_max = n;
2337         }
2338     }
2339 #endif
2340 
2341 #ifdef DEBUG_DISAS
2342     if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
2343         qemu_log("OP:\n");
2344         tcg_dump_ops(s);
2345         qemu_log("\n");
2346     }
2347 #endif
2348 
2349 #ifdef CONFIG_PROFILER
2350     s->opt_time -= profile_getclock();
2351 #endif
2352 
2353 #ifdef USE_TCG_OPTIMIZATIONS
2354     tcg_optimize(s);
2355 #endif
2356 
2357 #ifdef CONFIG_PROFILER
2358     s->opt_time += profile_getclock();
2359     s->la_time -= profile_getclock();
2360 #endif
2361 
2362     tcg_liveness_analysis(s);
2363 
2364 #ifdef CONFIG_PROFILER
2365     s->la_time += profile_getclock();
2366 #endif
2367 
2368 #ifdef DEBUG_DISAS
2369     if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_OPT))) {
2370         qemu_log("OP after optimization and liveness analysis:\n");
2371         tcg_dump_ops(s);
2372         qemu_log("\n");
2373     }
2374 #endif
2375 
2376     tcg_reg_alloc_start(s);
2377 
2378     s->code_buf = gen_code_buf;
2379     s->code_ptr = gen_code_buf;
2380 
2381     tcg_out_tb_init(s);
2382 
2383     num_insns = -1;
2384     for (oi = s->gen_first_op_idx; oi >= 0; oi = oi_next) {
2385         TCGOp * const op = &s->gen_op_buf[oi];
2386         TCGArg * const args = &s->gen_opparam_buf[op->args];
2387         TCGOpcode opc = op->opc;
2388         const TCGOpDef *def = &tcg_op_defs[opc];
2389         uint16_t dead_args = s->op_dead_args[oi];
2390         uint8_t sync_args = s->op_sync_args[oi];
2391 
2392         oi_next = op->next;
2393 #ifdef CONFIG_PROFILER
2394         tcg_table_op_count[opc]++;
2395 #endif
2396 
2397         switch (opc) {
2398         case INDEX_op_mov_i32:
2399         case INDEX_op_mov_i64:
2400             tcg_reg_alloc_mov(s, def, args, dead_args, sync_args);
2401             break;
2402         case INDEX_op_movi_i32:
2403         case INDEX_op_movi_i64:
2404             tcg_reg_alloc_movi(s, args, dead_args, sync_args);
2405             break;
2406         case INDEX_op_insn_start:
2407             if (num_insns >= 0) {
2408                 s->gen_insn_end_off[num_insns] = tcg_current_code_size(s);
2409             }
2410             num_insns++;
2411             for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
2412                 target_ulong a;
2413 #if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
2414                 a = ((target_ulong)args[i * 2 + 1] << 32) | args[i * 2];
2415 #else
2416                 a = args[i];
2417 #endif
2418                 s->gen_insn_data[num_insns][i] = a;
2419             }
2420             break;
2421         case INDEX_op_discard:
2422             temp_dead(s, args[0]);
2423             break;
2424         case INDEX_op_set_label:
2425             tcg_reg_alloc_bb_end(s, s->reserved_regs);
2426             tcg_out_label(s, arg_label(args[0]), s->code_ptr);
2427             break;
2428         case INDEX_op_call:
2429             tcg_reg_alloc_call(s, op->callo, op->calli, args,
2430                                dead_args, sync_args);
2431             break;
2432         default:
2433             /* Sanity check that we've not introduced any unhandled opcodes. */
2434             if (def->flags & TCG_OPF_NOT_PRESENT) {
2435                 tcg_abort();
2436             }
2437             /* Note: it would be much faster to have specialized
2438                register allocator functions for some common argument
2439                patterns. */
2440             tcg_reg_alloc_op(s, def, opc, args, dead_args, sync_args);
2441             break;
2442         }
2443 #ifndef NDEBUG
2444         check_regs(s);
2445 #endif
2446         /* Test for (pending) buffer overflow.  The assumption is that any
2447            one operation beginning below the high water mark cannot overrun
2448            the buffer completely.  Thus we can test for overflow after
2449            generating code without having to check during generation.  */
2450         if (unlikely((void *)s->code_ptr > s->code_gen_highwater)) {
2451             return -1;
2452         }
2453     }
2454     tcg_debug_assert(num_insns >= 0);
2455     s->gen_insn_end_off[num_insns] = tcg_current_code_size(s);
2456 
2457     /* Generate TB finalization at the end of block */
2458     tcg_out_tb_finalize(s);
2459 
2460     /* flush instruction cache */
2461     flush_icache_range((uintptr_t)s->code_buf, (uintptr_t)s->code_ptr);
2462 
2463     return tcg_current_code_size(s);
2464 }
2465 
2466 #ifdef CONFIG_PROFILER
2467 void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf)
2468 {
2469     TCGContext *s = &tcg_ctx;
2470     int64_t tb_count = s->tb_count;
2471     int64_t tb_div_count = tb_count ? tb_count : 1;
2472     int64_t tot = s->interm_time + s->code_time;
2473 
2474     cpu_fprintf(f, "JIT cycles          %" PRId64 " (%0.3f s at 2.4 GHz)\n",
2475                 tot, tot / 2.4e9);
2476     cpu_fprintf(f, "translated TBs      %" PRId64 " (aborted=%" PRId64 " %0.1f%%)\n",
2477                 tb_count, s->tb_count1 - tb_count,
2478                 (double)(s->tb_count1 - s->tb_count)
2479                 / (s->tb_count1 ? s->tb_count1 : 1) * 100.0);
2480     cpu_fprintf(f, "avg ops/TB          %0.1f max=%d\n",
2481                 (double)s->op_count / tb_div_count, s->op_count_max);
2482     cpu_fprintf(f, "deleted ops/TB      %0.2f\n",
2483                 (double)s->del_op_count / tb_div_count);
2484     cpu_fprintf(f, "avg temps/TB        %0.2f max=%d\n",
2485                 (double)s->temp_count / tb_div_count, s->temp_count_max);
2486     cpu_fprintf(f, "avg host code/TB    %0.1f\n",
2487                 (double)s->code_out_len / tb_div_count);
2488     cpu_fprintf(f, "avg search data/TB  %0.1f\n",
2489                 (double)s->search_out_len / tb_div_count);
2490 
2491     cpu_fprintf(f, "cycles/op           %0.1f\n",
2492                 s->op_count ? (double)tot / s->op_count : 0);
2493     cpu_fprintf(f, "cycles/in byte      %0.1f\n",
2494                 s->code_in_len ? (double)tot / s->code_in_len : 0);
2495     cpu_fprintf(f, "cycles/out byte     %0.1f\n",
2496                 s->code_out_len ? (double)tot / s->code_out_len : 0);
2497     cpu_fprintf(f, "cycles/search byte     %0.1f\n",
2498                 s->search_out_len ? (double)tot / s->search_out_len : 0);
2499     if (tot == 0) {
2500         tot = 1;
2501     }
2502     cpu_fprintf(f, "  gen_interm time   %0.1f%%\n",
2503                 (double)s->interm_time / tot * 100.0);
2504     cpu_fprintf(f, "  gen_code time     %0.1f%%\n",
2505                 (double)s->code_time / tot * 100.0);
2506     cpu_fprintf(f, "optim./code time    %0.1f%%\n",
2507                 (double)s->opt_time / (s->code_time ? s->code_time : 1)
2508                 * 100.0);
2509     cpu_fprintf(f, "liveness/code time  %0.1f%%\n",
2510                 (double)s->la_time / (s->code_time ? s->code_time : 1) * 100.0);
2511     cpu_fprintf(f, "cpu_restore count   %" PRId64 "\n",
2512                 s->restore_count);
2513     cpu_fprintf(f, "  avg cycles        %0.1f\n",
2514                 s->restore_count ? (double)s->restore_time / s->restore_count : 0);
2515 }
2516 #else
2517 void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf)
2518 {
2519     cpu_fprintf(f, "[TCG profiler not compiled]\n");
2520 }
2521 #endif
2522 
2523 #ifdef ELF_HOST_MACHINE
2524 /* In order to use this feature, the backend needs to do three things:
2525 
2526    (1) Define ELF_HOST_MACHINE to indicate both what value to
2527        put into the ELF image and to indicate support for the feature.
2528 
2529    (2) Define tcg_register_jit.  This should create a buffer containing
2530        the contents of a .debug_frame section that describes the post-
2531        prologue unwind info for the tcg machine.
2532 
2533    (3) Call tcg_register_jit_int, with the constructed .debug_frame.
2534 */
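     /* Purely as an illustration (this is not code from any particular
        backend), a tcg-target.c that has assembled its .debug_frame bytes
        into a static object named, say, 'debug_frame' might implement
        steps (2) and (3) as:
     
            void tcg_register_jit(void *buf, size_t buf_size)
            {
                tcg_register_jit_int(buf, buf_size,
                                     &debug_frame, sizeof(debug_frame));
            }
     */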
2535 
2536 /* Begin GDB interface.  THE FOLLOWING MUST MATCH GDB DOCS.  */
2537 typedef enum {
2538     JIT_NOACTION = 0,
2539     JIT_REGISTER_FN,
2540     JIT_UNREGISTER_FN
2541 } jit_actions_t;
2542 
2543 struct jit_code_entry {
2544     struct jit_code_entry *next_entry;
2545     struct jit_code_entry *prev_entry;
2546     const void *symfile_addr;
2547     uint64_t symfile_size;
2548 };
2549 
2550 struct jit_descriptor {
2551     uint32_t version;
2552     uint32_t action_flag;
2553     struct jit_code_entry *relevant_entry;
2554     struct jit_code_entry *first_entry;
2555 };
2556 
2557 void __jit_debug_register_code(void) __attribute__((noinline));
2558 void __jit_debug_register_code(void)
2559 {
2560     asm("");
2561 }
2562 
2563 /* Must statically initialize the version, because GDB may check
2564    the version before we can set it.  */
2565 struct jit_descriptor __jit_debug_descriptor = { 1, 0, 0, 0 };
2566 
2567 /* End GDB interface.  */
2568 
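     /* Return the offset of 'str' within the NUL-separated string table
        'strtab'.  There is no end-of-table check, so the caller must only
        ask for strings that are known to be present.  */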
2569 static int find_string(const char *strtab, const char *str)
2570 {
2571     const char *p = strtab + 1;
2572 
2573     while (1) {
2574         if (strcmp(p, str) == 0) {
2575             return p - strtab;
2576         }
2577         p += strlen(p) + 1;
2578     }
2579 }
2580 
2581 static void tcg_register_jit_int(void *buf_ptr, size_t buf_size,
2582                                  const void *debug_frame,
2583                                  size_t debug_frame_size)
2584 {
2585     struct __attribute__((packed)) DebugInfo {
2586         uint32_t  len;
2587         uint16_t  version;
2588         uint32_t  abbrev;
2589         uint8_t   ptr_size;
2590         uint8_t   cu_die;
2591         uint16_t  cu_lang;
2592         uintptr_t cu_low_pc;
2593         uintptr_t cu_high_pc;
2594         uint8_t   fn_die;
2595         char      fn_name[16];
2596         uintptr_t fn_low_pc;
2597         uintptr_t fn_high_pc;
2598         uint8_t   cu_eoc;
2599     };
2600 
2601     struct ElfImage {
2602         ElfW(Ehdr) ehdr;
2603         ElfW(Phdr) phdr;
2604         ElfW(Shdr) shdr[7];
2605         ElfW(Sym)  sym[2];
2606         struct DebugInfo di;
2607         uint8_t    da[24];
2608         char       str[80];
2609     };
2610 
2611     struct ElfImage *img;
2612 
2613     static const struct ElfImage img_template = {
2614         .ehdr = {
2615             .e_ident[EI_MAG0] = ELFMAG0,
2616             .e_ident[EI_MAG1] = ELFMAG1,
2617             .e_ident[EI_MAG2] = ELFMAG2,
2618             .e_ident[EI_MAG3] = ELFMAG3,
2619             .e_ident[EI_CLASS] = ELF_CLASS,
2620             .e_ident[EI_DATA] = ELF_DATA,
2621             .e_ident[EI_VERSION] = EV_CURRENT,
2622             .e_type = ET_EXEC,
2623             .e_machine = ELF_HOST_MACHINE,
2624             .e_version = EV_CURRENT,
2625             .e_phoff = offsetof(struct ElfImage, phdr),
2626             .e_shoff = offsetof(struct ElfImage, shdr),
2627             .e_ehsize = sizeof(ElfW(Ehdr)),
2628             .e_phentsize = sizeof(ElfW(Phdr)),
2629             .e_phnum = 1,
2630             .e_shentsize = sizeof(ElfW(Shdr)),
2631             .e_shnum = ARRAY_SIZE(img->shdr),
2632             .e_shstrndx = ARRAY_SIZE(img->shdr) - 1,
2633 #ifdef ELF_HOST_FLAGS
2634             .e_flags = ELF_HOST_FLAGS,
2635 #endif
2636 #ifdef ELF_OSABI
2637             .e_ident[EI_OSABI] = ELF_OSABI,
2638 #endif
2639         },
2640         .phdr = {
2641             .p_type = PT_LOAD,
2642             .p_flags = PF_X,
2643         },
2644         .shdr = {
2645             [0] = { .sh_type = SHT_NULL },
2646             /* Trick: The contents of code_gen_buffer are not present in
2647                this fake ELF file; that got allocated elsewhere.  Therefore
2648                we mark .text as SHT_NOBITS (similar to .bss) so that readers
2649                will not look for contents.  We can record any address.  */
2650             [1] = { /* .text */
2651                 .sh_type = SHT_NOBITS,
2652                 .sh_flags = SHF_EXECINSTR | SHF_ALLOC,
2653             },
2654             [2] = { /* .debug_info */
2655                 .sh_type = SHT_PROGBITS,
2656                 .sh_offset = offsetof(struct ElfImage, di),
2657                 .sh_size = sizeof(struct DebugInfo),
2658             },
2659             [3] = { /* .debug_abbrev */
2660                 .sh_type = SHT_PROGBITS,
2661                 .sh_offset = offsetof(struct ElfImage, da),
2662                 .sh_size = sizeof(img->da),
2663             },
2664             [4] = { /* .debug_frame */
2665                 .sh_type = SHT_PROGBITS,
2666                 .sh_offset = sizeof(struct ElfImage),
2667             },
2668             [5] = { /* .symtab */
2669                 .sh_type = SHT_SYMTAB,
2670                 .sh_offset = offsetof(struct ElfImage, sym),
2671                 .sh_size = sizeof(img->sym),
2672                 .sh_info = 1,
2673                 .sh_link = ARRAY_SIZE(img->shdr) - 1,
2674                 .sh_entsize = sizeof(ElfW(Sym)),
2675             },
2676             [6] = { /* .strtab */
2677                 .sh_type = SHT_STRTAB,
2678                 .sh_offset = offsetof(struct ElfImage, str),
2679                 .sh_size = sizeof(img->str),
2680             }
2681         },
2682         .sym = {
2683             [1] = { /* code_gen_buffer */
2684                 .st_info = ELF_ST_INFO(STB_GLOBAL, STT_FUNC),
2685                 .st_shndx = 1,
2686             }
2687         },
2688         .di = {
2689             .len = sizeof(struct DebugInfo) - 4,
2690             .version = 2,
2691             .ptr_size = sizeof(void *),
2692             .cu_die = 1,
2693             .cu_lang = 0x8001,  /* DW_LANG_Mips_Assembler */
2694             .fn_die = 2,
2695             .fn_name = "code_gen_buffer"
2696         },
2697         .da = {
2698             1,          /* abbrev number (the cu) */
2699             0x11, 1,    /* DW_TAG_compile_unit, has children */
2700             0x13, 0x5,  /* DW_AT_language, DW_FORM_data2 */
2701             0x11, 0x1,  /* DW_AT_low_pc, DW_FORM_addr */
2702             0x12, 0x1,  /* DW_AT_high_pc, DW_FORM_addr */
2703             0, 0,       /* end of abbrev */
2704             2,          /* abbrev number (the fn) */
2705             0x2e, 0,    /* DW_TAG_subprogram, no children */
2706             0x3, 0x8,   /* DW_AT_name, DW_FORM_string */
2707             0x11, 0x1,  /* DW_AT_low_pc, DW_FORM_addr */
2708             0x12, 0x1,  /* DW_AT_high_pc, DW_FORM_addr */
2709             0, 0,       /* end of abbrev */
2710             0           /* no more abbrev */
2711         },
2712         .str = "\0" ".text\0" ".debug_info\0" ".debug_abbrev\0"
2713                ".debug_frame\0" ".symtab\0" ".strtab\0" "code_gen_buffer",
2714     };
2715 
2716     /* We only need a single jit entry; statically allocate it.  */
2717     static struct jit_code_entry one_entry;
2718 
2719     uintptr_t buf = (uintptr_t)buf_ptr;
2720     size_t img_size = sizeof(struct ElfImage) + debug_frame_size;
2721     DebugFrameHeader *dfh;
2722 
2723     img = g_malloc(img_size);
2724     *img = img_template;
2725 
2726     img->phdr.p_vaddr = buf;
2727     img->phdr.p_paddr = buf;
2728     img->phdr.p_memsz = buf_size;
2729 
2730     img->shdr[1].sh_name = find_string(img->str, ".text");
2731     img->shdr[1].sh_addr = buf;
2732     img->shdr[1].sh_size = buf_size;
2733 
2734     img->shdr[2].sh_name = find_string(img->str, ".debug_info");
2735     img->shdr[3].sh_name = find_string(img->str, ".debug_abbrev");
2736 
2737     img->shdr[4].sh_name = find_string(img->str, ".debug_frame");
2738     img->shdr[4].sh_size = debug_frame_size;
2739 
2740     img->shdr[5].sh_name = find_string(img->str, ".symtab");
2741     img->shdr[6].sh_name = find_string(img->str, ".strtab");
2742 
2743     img->sym[1].st_name = find_string(img->str, "code_gen_buffer");
2744     img->sym[1].st_value = buf;
2745     img->sym[1].st_size = buf_size;
2746 
2747     img->di.cu_low_pc = buf;
2748     img->di.cu_high_pc = buf + buf_size;
2749     img->di.fn_low_pc = buf;
2750     img->di.fn_high_pc = buf + buf_size;
2751 
2752     dfh = (DebugFrameHeader *)(img + 1);
2753     memcpy(dfh, debug_frame, debug_frame_size);
2754     dfh->fde.func_start = buf;
2755     dfh->fde.func_len = buf_size;
2756 
2757 #ifdef DEBUG_JIT
2758     /* Enable this block to be able to debug the ELF image file creation.
2759        One can use readelf, objdump, or other inspection utilities.  */
2760     {
2761         FILE *f = fopen("/tmp/qemu.jit", "w+b");
2762         if (f) {
2763             if (fwrite(img, img_size, 1, f) != 1) {
2764                 /* Avoid stupid unused return value warning for fwrite.  */
2765             }
2766             fclose(f);
2767         }
2768     }
2769 #endif
2770 
2771     one_entry.symfile_addr = img;
2772     one_entry.symfile_size = img_size;
2773 
2774     __jit_debug_descriptor.action_flag = JIT_REGISTER_FN;
2775     __jit_debug_descriptor.relevant_entry = &one_entry;
2776     __jit_debug_descriptor.first_entry = &one_entry;
2777     __jit_debug_register_code();
2778 }
2779 #else
2780 /* No support for the feature.  Provide the entry point expected by exec.c,
2781    and implement the internal function we declared earlier.  */
2782 
2783 static void tcg_register_jit_int(void *buf, size_t size,
2784                                  const void *debug_frame,
2785                                  size_t debug_frame_size)
2786 {
2787 }
2788 
2789 void tcg_register_jit(void *buf, size_t buf_size)
2790 {
2791 }
2792 #endif /* ELF_HOST_MACHINE */
2793