xref: /openbmc/qemu/tcg/tcg.c (revision 6e99f574)
1 /*
2  * Tiny Code Generator for QEMU
3  *
4  * Copyright (c) 2008 Fabrice Bellard
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to deal
8  * in the Software without restriction, including without limitation the rights
9  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10  * copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22  * THE SOFTWARE.
23  */
24 
25 /* Define this to use liveness analysis (better code).  */
26 #define USE_TCG_OPTIMIZATIONS
27 
28 #include "qemu/osdep.h"
29 
30 /* Define to dump the ELF file used to communicate with GDB.  */
31 #undef DEBUG_JIT
32 
33 #include "qemu/cutils.h"
34 #include "qemu/host-utils.h"
35 #include "qemu/timer.h"
36 
37 /* Note: the long-term plan is to reduce the dependencies on the QEMU
38    CPU definitions.  Currently they are used for qemu_ld/st
39    instructions.  */
40 #define NO_CPU_IO_DEFS
41 #include "cpu.h"
42 
43 #include "exec/cpu-common.h"
44 #include "exec/exec-all.h"
45 
46 #include "tcg-op.h"
47 
48 #if UINTPTR_MAX == UINT32_MAX
49 # define ELF_CLASS  ELFCLASS32
50 #else
51 # define ELF_CLASS  ELFCLASS64
52 #endif
53 #ifdef HOST_WORDS_BIGENDIAN
54 # define ELF_DATA   ELFDATA2MSB
55 #else
56 # define ELF_DATA   ELFDATA2LSB
57 #endif
58 
59 #include "elf.h"
60 #include "exec/log.h"
61 
62 /* Forward declarations for functions declared in tcg-target.inc.c and
63    used here. */
64 static void tcg_target_init(TCGContext *s);
65 static void tcg_target_qemu_prologue(TCGContext *s);
66 static void patch_reloc(tcg_insn_unit *code_ptr, int type,
67                         intptr_t value, intptr_t addend);
68 
69 /* The CIE and FDE header definitions will be common to all hosts.  */
70 typedef struct {
71     uint32_t len __attribute__((aligned((sizeof(void *)))));
72     uint32_t id;
73     uint8_t version;
74     char augmentation[1];
75     uint8_t code_align;
76     uint8_t data_align;
77     uint8_t return_column;
78 } DebugFrameCIE;
79 
80 typedef struct QEMU_PACKED {
81     uint32_t len __attribute__((aligned((sizeof(void *)))));
82     uint32_t cie_offset;
83     uintptr_t func_start;
84     uintptr_t func_len;
85 } DebugFrameFDEHeader;
86 
87 typedef struct QEMU_PACKED {
88     DebugFrameCIE cie;
89     DebugFrameFDEHeader fde;
90 } DebugFrameHeader;
91 
92 static void tcg_register_jit_int(void *buf, size_t size,
93                                  const void *debug_frame,
94                                  size_t debug_frame_size)
95     __attribute__((unused));
96 
97 /* Forward declarations for functions declared and used in tcg-target.inc.c. */
98 static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str);
99 static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1,
100                        intptr_t arg2);
101 static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
102 static void tcg_out_movi(TCGContext *s, TCGType type,
103                          TCGReg ret, tcg_target_long arg);
104 static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
105                        const int *const_args);
106 static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1,
107                        intptr_t arg2);
108 static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
109                         TCGReg base, intptr_t ofs);
110 static void tcg_out_call(TCGContext *s, tcg_insn_unit *target);
111 static int tcg_target_const_match(tcg_target_long val, TCGType type,
112                                   const TCGArgConstraint *arg_ct);
113 static void tcg_out_tb_init(TCGContext *s);
114 static bool tcg_out_tb_finalize(TCGContext *s);
115 
116 
117 
118 static TCGRegSet tcg_target_available_regs[2];
119 static TCGRegSet tcg_target_call_clobber_regs;
120 
121 #if TCG_TARGET_INSN_UNIT_SIZE == 1
122 static __attribute__((unused)) inline void tcg_out8(TCGContext *s, uint8_t v)
123 {
124     *s->code_ptr++ = v;
125 }
126 
127 static __attribute__((unused)) inline void tcg_patch8(tcg_insn_unit *p,
128                                                       uint8_t v)
129 {
130     *p = v;
131 }
132 #endif
133 
134 #if TCG_TARGET_INSN_UNIT_SIZE <= 2
135 static __attribute__((unused)) inline void tcg_out16(TCGContext *s, uint16_t v)
136 {
137     if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
138         *s->code_ptr++ = v;
139     } else {
140         tcg_insn_unit *p = s->code_ptr;
141         memcpy(p, &v, sizeof(v));
142         s->code_ptr = p + (2 / TCG_TARGET_INSN_UNIT_SIZE);
143     }
144 }
145 
146 static __attribute__((unused)) inline void tcg_patch16(tcg_insn_unit *p,
147                                                        uint16_t v)
148 {
149     if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
150         *p = v;
151     } else {
152         memcpy(p, &v, sizeof(v));
153     }
154 }
155 #endif
156 
157 #if TCG_TARGET_INSN_UNIT_SIZE <= 4
158 static __attribute__((unused)) inline void tcg_out32(TCGContext *s, uint32_t v)
159 {
160     if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
161         *s->code_ptr++ = v;
162     } else {
163         tcg_insn_unit *p = s->code_ptr;
164         memcpy(p, &v, sizeof(v));
165         s->code_ptr = p + (4 / TCG_TARGET_INSN_UNIT_SIZE);
166     }
167 }
168 
169 static __attribute__((unused)) inline void tcg_patch32(tcg_insn_unit *p,
170                                                        uint32_t v)
171 {
172     if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
173         *p = v;
174     } else {
175         memcpy(p, &v, sizeof(v));
176     }
177 }
178 #endif
179 
180 #if TCG_TARGET_INSN_UNIT_SIZE <= 8
181 static __attribute__((unused)) inline void tcg_out64(TCGContext *s, uint64_t v)
182 {
183     if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
184         *s->code_ptr++ = v;
185     } else {
186         tcg_insn_unit *p = s->code_ptr;
187         memcpy(p, &v, sizeof(v));
188         s->code_ptr = p + (8 / TCG_TARGET_INSN_UNIT_SIZE);
189     }
190 }
191 
192 static __attribute__((unused)) inline void tcg_patch64(tcg_insn_unit *p,
193                                                        uint64_t v)
194 {
195     if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
196         *p = v;
197     } else {
198         memcpy(p, &v, sizeof(v));
199     }
200 }
201 #endif
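
/*
 * Illustrative sketch (not part of the original file): the emit/patch
 * helpers above are normally used in pairs to backpatch an instruction
 * field once its final value is known, e.g.
 *
 *     tcg_insn_unit *slot = s->code_ptr;
 *     tcg_out32(s, 0);               // emit a placeholder now
 *     // ... compute "value" later ...
 *     tcg_patch32(slot, value);      // rewrite the reserved slot in place
 *
 * "slot" and "value" are hypothetical names; the real callers are the
 * per-host backends in tcg-target.inc.c.
 */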
202 
203 /* label relocation processing */
204 
205 static void tcg_out_reloc(TCGContext *s, tcg_insn_unit *code_ptr, int type,
206                           TCGLabel *l, intptr_t addend)
207 {
208     TCGRelocation *r;
209 
210     if (l->has_value) {
211         /* FIXME: This may break relocations on RISC targets that
212            modify instruction fields in place.  The caller may not have
213            written the initial value.  */
214         patch_reloc(code_ptr, type, l->u.value, addend);
215     } else {
216         /* add a new relocation entry */
217         r = tcg_malloc(sizeof(TCGRelocation));
218         r->type = type;
219         r->ptr = code_ptr;
220         r->addend = addend;
221         r->next = l->u.first_reloc;
222         l->u.first_reloc = r;
223     }
224 }
225 
226 static void tcg_out_label(TCGContext *s, TCGLabel *l, tcg_insn_unit *ptr)
227 {
228     intptr_t value = (intptr_t)ptr;
229     TCGRelocation *r;
230 
231     tcg_debug_assert(!l->has_value);
232 
233     for (r = l->u.first_reloc; r != NULL; r = r->next) {
234         patch_reloc(r->ptr, r->type, value, r->addend);
235     }
236 
237     l->has_value = 1;
238     l->u.value_ptr = ptr;
239 }
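
/*
 * Illustrative sketch (not part of the original file): the usual flow is
 * that a backend emits a branch to a label that is not yet resolved and
 * records a relocation for it; the relocation is applied when the label
 * is finally bound, roughly:
 *
 *     TCGLabel *l = gen_new_label();
 *     tcg_out_reloc(s, s->code_ptr, R_SOME_RELOC, l, 0);  // branch site
 *     // ... emit more code ...
 *     tcg_out_label(s, l, s->code_ptr);                   // resolve here
 *
 * R_SOME_RELOC stands in for a host-specific relocation type defined by
 * the backend.
 */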
240 
241 TCGLabel *gen_new_label(void)
242 {
243     TCGContext *s = &tcg_ctx;
244     TCGLabel *l = tcg_malloc(sizeof(TCGLabel));
245 
246     *l = (TCGLabel){
247         .id = s->nb_labels++
248     };
249 
250     return l;
251 }
252 
253 #include "tcg-target.inc.c"
254 
255 /* pool-based memory allocation */
256 void *tcg_malloc_internal(TCGContext *s, int size)
257 {
258     TCGPool *p;
259     int pool_size;
260 
261     if (size > TCG_POOL_CHUNK_SIZE) {
262         /* big malloc: insert a new pool (XXX: could optimize) */
263         p = g_malloc(sizeof(TCGPool) + size);
264         p->size = size;
265         p->next = s->pool_first_large;
266         s->pool_first_large = p;
267         return p->data;
268     } else {
269         p = s->pool_current;
270         if (!p) {
271             p = s->pool_first;
272             if (!p)
273                 goto new_pool;
274         } else {
275             if (!p->next) {
276             new_pool:
277                 pool_size = TCG_POOL_CHUNK_SIZE;
278                 p = g_malloc(sizeof(TCGPool) + pool_size);
279                 p->size = pool_size;
280                 p->next = NULL;
281                 if (s->pool_current)
282                     s->pool_current->next = p;
283                 else
284                     s->pool_first = p;
285             } else {
286                 p = p->next;
287             }
288         }
289     }
290     s->pool_current = p;
291     s->pool_cur = p->data + size;
292     s->pool_end = p->data + p->size;
293     return p->data;
294 }
295 
296 void tcg_pool_reset(TCGContext *s)
297 {
298     TCGPool *p, *t;
299     for (p = s->pool_first_large; p; p = t) {
300         t = p->next;
301         g_free(p);
302     }
303     s->pool_first_large = NULL;
304     s->pool_cur = s->pool_end = NULL;
305     s->pool_current = NULL;
306 }
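
/*
 * Illustrative sketch (not part of the original file): translation-time
 * data is carved out of these pools via tcg_malloc() (the inline wrapper
 * in tcg.h, which falls back to tcg_malloc_internal() when the current
 * chunk is exhausted), and everything is released in one go per
 * translation block:
 *
 *     TCGLabel *l = tcg_malloc(sizeof(TCGLabel));  // fast pool allocation
 *     // ... translate one TB ...
 *     tcg_pool_reset(s);   // rewind the small chunks, free the large ones
 */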
307 
308 typedef struct TCGHelperInfo {
309     void *func;
310     const char *name;
311     unsigned flags;
312     unsigned sizemask;
313 } TCGHelperInfo;
314 
315 #include "exec/helper-proto.h"
316 
317 static const TCGHelperInfo all_helpers[] = {
318 #include "exec/helper-tcg.h"
319 };
320 
321 static int indirect_reg_alloc_order[ARRAY_SIZE(tcg_target_reg_alloc_order)];
322 
323 void tcg_context_init(TCGContext *s)
324 {
325     int op, total_args, n, i;
326     TCGOpDef *def;
327     TCGArgConstraint *args_ct;
328     int *sorted_args;
329     GHashTable *helper_table;
330 
331     memset(s, 0, sizeof(*s));
332     s->nb_globals = 0;
333 
334     /* Count total number of arguments and allocate the corresponding
335        space.  */
336     total_args = 0;
337     for(op = 0; op < NB_OPS; op++) {
338         def = &tcg_op_defs[op];
339         n = def->nb_iargs + def->nb_oargs;
340         total_args += n;
341     }
342 
343     args_ct = g_malloc(sizeof(TCGArgConstraint) * total_args);
344     sorted_args = g_malloc(sizeof(int) * total_args);
345 
346     for(op = 0; op < NB_OPS; op++) {
347         def = &tcg_op_defs[op];
348         def->args_ct = args_ct;
349         def->sorted_args = sorted_args;
350         n = def->nb_iargs + def->nb_oargs;
351         sorted_args += n;
352         args_ct += n;
353     }
354 
355     /* Register helpers.  */
356     /* Use g_direct_hash/equal for direct pointer comparisons on func.  */
357     s->helpers = helper_table = g_hash_table_new(NULL, NULL);
358 
359     for (i = 0; i < ARRAY_SIZE(all_helpers); ++i) {
360         g_hash_table_insert(helper_table, (gpointer)all_helpers[i].func,
361                             (gpointer)&all_helpers[i]);
362     }
363 
364     tcg_target_init(s);
365 
366     /* Reverse the order of the saved registers, assuming they're all at
367        the start of tcg_target_reg_alloc_order.  */
368     for (n = 0; n < ARRAY_SIZE(tcg_target_reg_alloc_order); ++n) {
369         int r = tcg_target_reg_alloc_order[n];
370         if (tcg_regset_test_reg(tcg_target_call_clobber_regs, r)) {
371             break;
372         }
373     }
374     for (i = 0; i < n; ++i) {
375         indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[n - 1 - i];
376     }
377     for (; i < ARRAY_SIZE(tcg_target_reg_alloc_order); ++i) {
378         indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[i];
379     }
380 }
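
/*
 * Worked example (illustrative, with a made-up register file): if
 * tcg_target_reg_alloc_order is { R8, R9, R10, R0, R1 } and only R0/R1
 * are call-clobbered, the loop above stops at n = 3 and produces
 * indirect_reg_alloc_order = { R10, R9, R8, R0, R1 }, i.e. the call-saved
 * prefix reversed and the remaining entries copied unchanged.
 */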
381 
382 void tcg_prologue_init(TCGContext *s)
383 {
384     size_t prologue_size, total_size;
385     void *buf0, *buf1;
386 
387     /* Put the prologue at the beginning of code_gen_buffer.  */
388     buf0 = s->code_gen_buffer;
389     s->code_ptr = buf0;
390     s->code_buf = buf0;
391     s->code_gen_prologue = buf0;
392 
393     /* Generate the prologue.  */
394     tcg_target_qemu_prologue(s);
395     buf1 = s->code_ptr;
396     flush_icache_range((uintptr_t)buf0, (uintptr_t)buf1);
397 
398     /* Deduct the prologue from the buffer.  */
399     prologue_size = tcg_current_code_size(s);
400     s->code_gen_ptr = buf1;
401     s->code_gen_buffer = buf1;
402     s->code_buf = buf1;
403     total_size = s->code_gen_buffer_size - prologue_size;
404     s->code_gen_buffer_size = total_size;
405 
406     /* Compute a high-water mark, at which we voluntarily flush the buffer
407        and start over.  The size here is arbitrary, significantly larger
408        than we expect the code generation for any one opcode to require.  */
409     s->code_gen_highwater = s->code_gen_buffer + (total_size - 1024);
410 
411     tcg_register_jit(s->code_gen_buffer, total_size);
412 
413 #ifdef DEBUG_DISAS
414     if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
415         qemu_log("PROLOGUE: [size=%zu]\n", prologue_size);
416         log_disas(buf0, prologue_size);
417         qemu_log("\n");
418         qemu_log_flush();
419     }
420 #endif
421 }
422 
423 void tcg_func_start(TCGContext *s)
424 {
425     tcg_pool_reset(s);
426     s->nb_temps = s->nb_globals;
427 
428     /* No temps have been previously allocated for size or locality.  */
429     memset(s->free_temps, 0, sizeof(s->free_temps));
430 
431     s->nb_labels = 0;
432     s->current_frame_offset = s->frame_start;
433 
434 #ifdef CONFIG_DEBUG_TCG
435     s->goto_tb_issue_mask = 0;
436 #endif
437 
438     s->gen_op_buf[0].next = 1;
439     s->gen_op_buf[0].prev = 0;
440     s->gen_next_op_idx = 1;
441     s->gen_next_parm_idx = 0;
442 
443     s->be = tcg_malloc(sizeof(TCGBackendData));
444 }
445 
446 static inline int temp_idx(TCGContext *s, TCGTemp *ts)
447 {
448     ptrdiff_t n = ts - s->temps;
449     tcg_debug_assert(n >= 0 && n < s->nb_temps);
450     return n;
451 }
452 
453 static inline TCGTemp *tcg_temp_alloc(TCGContext *s)
454 {
455     int n = s->nb_temps++;
456     tcg_debug_assert(n < TCG_MAX_TEMPS);
457     return memset(&s->temps[n], 0, sizeof(TCGTemp));
458 }
459 
460 static inline TCGTemp *tcg_global_alloc(TCGContext *s)
461 {
462     tcg_debug_assert(s->nb_globals == s->nb_temps);
463     s->nb_globals++;
464     return tcg_temp_alloc(s);
465 }
466 
467 static int tcg_global_reg_new_internal(TCGContext *s, TCGType type,
468                                        TCGReg reg, const char *name)
469 {
470     TCGTemp *ts;
471 
472     if (TCG_TARGET_REG_BITS == 32 && type != TCG_TYPE_I32) {
473         tcg_abort();
474     }
475 
476     ts = tcg_global_alloc(s);
477     ts->base_type = type;
478     ts->type = type;
479     ts->fixed_reg = 1;
480     ts->reg = reg;
481     ts->name = name;
482     tcg_regset_set_reg(s->reserved_regs, reg);
483 
484     return temp_idx(s, ts);
485 }
486 
487 void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size)
488 {
489     int idx;
490     s->frame_start = start;
491     s->frame_end = start + size;
492     idx = tcg_global_reg_new_internal(s, TCG_TYPE_PTR, reg, "_frame");
493     s->frame_temp = &s->temps[idx];
494 }
495 
496 TCGv_i32 tcg_global_reg_new_i32(TCGReg reg, const char *name)
497 {
498     TCGContext *s = &tcg_ctx;
499     int idx;
500 
501     if (tcg_regset_test_reg(s->reserved_regs, reg)) {
502         tcg_abort();
503     }
504     idx = tcg_global_reg_new_internal(s, TCG_TYPE_I32, reg, name);
505     return MAKE_TCGV_I32(idx);
506 }
507 
508 TCGv_i64 tcg_global_reg_new_i64(TCGReg reg, const char *name)
509 {
510     TCGContext *s = &tcg_ctx;
511     int idx;
512 
513     if (tcg_regset_test_reg(s->reserved_regs, reg)) {
514         tcg_abort();
515     }
516     idx = tcg_global_reg_new_internal(s, TCG_TYPE_I64, reg, name);
517     return MAKE_TCGV_I64(idx);
518 }
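
/*
 * Illustrative sketch (not part of the original file): front ends use
 * these to pin a guest value to a fixed host register, most commonly the
 * register reserved for the CPU state pointer, e.g.
 *
 *     TCGv_i32 foo = tcg_global_reg_new_i32(TCG_REG_X, "foo");
 *
 * TCG_REG_X and "foo" are placeholders; real callers pass a register the
 * backend has reserved and a name used only for diagnostics.
 */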
519 
520 int tcg_global_mem_new_internal(TCGType type, TCGv_ptr base,
521                                 intptr_t offset, const char *name)
522 {
523     TCGContext *s = &tcg_ctx;
524     TCGTemp *base_ts = &s->temps[GET_TCGV_PTR(base)];
525     TCGTemp *ts = tcg_global_alloc(s);
526     int indirect_reg = 0, bigendian = 0;
527 #ifdef HOST_WORDS_BIGENDIAN
528     bigendian = 1;
529 #endif
530 
531     if (!base_ts->fixed_reg) {
532         /* We do not support double-indirect registers.  */
533         tcg_debug_assert(!base_ts->indirect_reg);
534         base_ts->indirect_base = 1;
535         s->nb_indirects += (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64
536                             ? 2 : 1);
537         indirect_reg = 1;
538     }
539 
540     if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
541         TCGTemp *ts2 = tcg_global_alloc(s);
542         char buf[64];
543 
544         ts->base_type = TCG_TYPE_I64;
545         ts->type = TCG_TYPE_I32;
546         ts->indirect_reg = indirect_reg;
547         ts->mem_allocated = 1;
548         ts->mem_base = base_ts;
549         ts->mem_offset = offset + bigendian * 4;
550         pstrcpy(buf, sizeof(buf), name);
551         pstrcat(buf, sizeof(buf), "_0");
552         ts->name = strdup(buf);
553 
554         tcg_debug_assert(ts2 == ts + 1);
555         ts2->base_type = TCG_TYPE_I64;
556         ts2->type = TCG_TYPE_I32;
557         ts2->indirect_reg = indirect_reg;
558         ts2->mem_allocated = 1;
559         ts2->mem_base = base_ts;
560         ts2->mem_offset = offset + (1 - bigendian) * 4;
561         pstrcpy(buf, sizeof(buf), name);
562         pstrcat(buf, sizeof(buf), "_1");
563         ts2->name = strdup(buf);
564     } else {
565         ts->base_type = type;
566         ts->type = type;
567         ts->indirect_reg = indirect_reg;
568         ts->mem_allocated = 1;
569         ts->mem_base = base_ts;
570         ts->mem_offset = offset;
571         ts->name = name;
572     }
573     return temp_idx(s, ts);
574 }
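
/*
 * Illustrative sketch (not part of the original file): most guest
 * registers are declared as memory-backed globals relative to the env
 * pointer rather than pinned to a host register.  Front ends normally go
 * through the tcg_global_mem_new_* wrappers in tcg.h, roughly:
 *
 *     reg = tcg_global_mem_new_i32(cpu_env,
 *                                  offsetof(CPUArchState, some_field),
 *                                  "some_field");
 *
 * "some_field" is a placeholder; on a 32-bit host a TCG_TYPE_I64 global
 * is split into two I32 halves ("_0"/"_1") as the code above shows.
 */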
575 
576 static int tcg_temp_new_internal(TCGType type, int temp_local)
577 {
578     TCGContext *s = &tcg_ctx;
579     TCGTemp *ts;
580     int idx, k;
581 
582     k = type + (temp_local ? TCG_TYPE_COUNT : 0);
583     idx = find_first_bit(s->free_temps[k].l, TCG_MAX_TEMPS);
584     if (idx < TCG_MAX_TEMPS) {
585         /* There is already an available temp with the right type.  */
586         clear_bit(idx, s->free_temps[k].l);
587 
588         ts = &s->temps[idx];
589         ts->temp_allocated = 1;
590         tcg_debug_assert(ts->base_type == type);
591         tcg_debug_assert(ts->temp_local == temp_local);
592     } else {
593         ts = tcg_temp_alloc(s);
594         if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
595             TCGTemp *ts2 = tcg_temp_alloc(s);
596 
597             ts->base_type = type;
598             ts->type = TCG_TYPE_I32;
599             ts->temp_allocated = 1;
600             ts->temp_local = temp_local;
601 
602             tcg_debug_assert(ts2 == ts + 1);
603             ts2->base_type = TCG_TYPE_I64;
604             ts2->type = TCG_TYPE_I32;
605             ts2->temp_allocated = 1;
606             ts2->temp_local = temp_local;
607         } else {
608             ts->base_type = type;
609             ts->type = type;
610             ts->temp_allocated = 1;
611             ts->temp_local = temp_local;
612         }
613         idx = temp_idx(s, ts);
614     }
615 
616 #if defined(CONFIG_DEBUG_TCG)
617     s->temps_in_use++;
618 #endif
619     return idx;
620 }
621 
622 TCGv_i32 tcg_temp_new_internal_i32(int temp_local)
623 {
624     int idx;
625 
626     idx = tcg_temp_new_internal(TCG_TYPE_I32, temp_local);
627     return MAKE_TCGV_I32(idx);
628 }
629 
630 TCGv_i64 tcg_temp_new_internal_i64(int temp_local)
631 {
632     int idx;
633 
634     idx = tcg_temp_new_internal(TCG_TYPE_I64, temp_local);
635     return MAKE_TCGV_I64(idx);
636 }
637 
638 static void tcg_temp_free_internal(int idx)
639 {
640     TCGContext *s = &tcg_ctx;
641     TCGTemp *ts;
642     int k;
643 
644 #if defined(CONFIG_DEBUG_TCG)
645     s->temps_in_use--;
646     if (s->temps_in_use < 0) {
647         fprintf(stderr, "More temporaries freed than allocated!\n");
648     }
649 #endif
650 
651     tcg_debug_assert(idx >= s->nb_globals && idx < s->nb_temps);
652     ts = &s->temps[idx];
653     tcg_debug_assert(ts->temp_allocated != 0);
654     ts->temp_allocated = 0;
655 
656     k = ts->base_type + (ts->temp_local ? TCG_TYPE_COUNT : 0);
657     set_bit(idx, s->free_temps[k].l);
658 }
659 
660 void tcg_temp_free_i32(TCGv_i32 arg)
661 {
662     tcg_temp_free_internal(GET_TCGV_I32(arg));
663 }
664 
665 void tcg_temp_free_i64(TCGv_i64 arg)
666 {
667     tcg_temp_free_internal(GET_TCGV_I64(arg));
668 }
669 
670 TCGv_i32 tcg_const_i32(int32_t val)
671 {
672     TCGv_i32 t0;
673     t0 = tcg_temp_new_i32();
674     tcg_gen_movi_i32(t0, val);
675     return t0;
676 }
677 
678 TCGv_i64 tcg_const_i64(int64_t val)
679 {
680     TCGv_i64 t0;
681     t0 = tcg_temp_new_i64();
682     tcg_gen_movi_i64(t0, val);
683     return t0;
684 }
685 
686 TCGv_i32 tcg_const_local_i32(int32_t val)
687 {
688     TCGv_i32 t0;
689     t0 = tcg_temp_local_new_i32();
690     tcg_gen_movi_i32(t0, val);
691     return t0;
692 }
693 
694 TCGv_i64 tcg_const_local_i64(int64_t val)
695 {
696     TCGv_i64 t0;
697     t0 = tcg_temp_local_new_i64();
698     tcg_gen_movi_i64(t0, val);
699     return t0;
700 }
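
/*
 * Illustrative sketch (not part of the original file): a typical
 * front-end pattern pairs the constructors above with the matching free,
 * so the slot returns to s->free_temps for reuse:
 *
 *     TCGv_i32 t = tcg_const_i32(0x1234);   // temp + movi
 *     // ... use t as an operand ...
 *     tcg_temp_free_i32(t);                 // recycle the temporary
 */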
701 
702 #if defined(CONFIG_DEBUG_TCG)
703 void tcg_clear_temp_count(void)
704 {
705     TCGContext *s = &tcg_ctx;
706     s->temps_in_use = 0;
707 }
708 
709 int tcg_check_temp_count(void)
710 {
711     TCGContext *s = &tcg_ctx;
712     if (s->temps_in_use) {
713         /* Clear the count so that we don't give another
714          * warning immediately next time around.
715          */
716         s->temps_in_use = 0;
717         return 1;
718     }
719     return 0;
720 }
721 #endif
722 
723 /* Note: we convert the 64-bit args to 32-bit and do some alignment
724    and endian swapping.  Maybe it would be better to do the alignment
725    and endian swapping in tcg_reg_alloc_call().  */
726 void tcg_gen_callN(TCGContext *s, void *func, TCGArg ret,
727                    int nargs, TCGArg *args)
728 {
729     int i, real_args, nb_rets, pi, pi_first;
730     unsigned sizemask, flags;
731     TCGHelperInfo *info;
732 
733     info = g_hash_table_lookup(s->helpers, (gpointer)func);
734     flags = info->flags;
735     sizemask = info->sizemask;
736 
737 #if defined(__sparc__) && !defined(__arch64__) \
738     && !defined(CONFIG_TCG_INTERPRETER)
739     /* We have 64-bit values in one register, but need to pass as two
740        separate parameters.  Split them.  */
741     int orig_sizemask = sizemask;
742     int orig_nargs = nargs;
743     TCGv_i64 retl, reth;
744 
745     TCGV_UNUSED_I64(retl);
746     TCGV_UNUSED_I64(reth);
747     if (sizemask != 0) {
748         TCGArg *split_args = __builtin_alloca(sizeof(TCGArg) * nargs * 2);
749         for (i = real_args = 0; i < nargs; ++i) {
750             int is_64bit = sizemask & (1 << (i+1)*2);
751             if (is_64bit) {
752                 TCGv_i64 orig = MAKE_TCGV_I64(args[i]);
753                 TCGv_i32 h = tcg_temp_new_i32();
754                 TCGv_i32 l = tcg_temp_new_i32();
755                 tcg_gen_extr_i64_i32(l, h, orig);
756                 split_args[real_args++] = GET_TCGV_I32(h);
757                 split_args[real_args++] = GET_TCGV_I32(l);
758             } else {
759                 split_args[real_args++] = args[i];
760             }
761         }
762         nargs = real_args;
763         args = split_args;
764         sizemask = 0;
765     }
766 #elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
767     for (i = 0; i < nargs; ++i) {
768         int is_64bit = sizemask & (1 << (i+1)*2);
769         int is_signed = sizemask & (2 << (i+1)*2);
770         if (!is_64bit) {
771             TCGv_i64 temp = tcg_temp_new_i64();
772             TCGv_i64 orig = MAKE_TCGV_I64(args[i]);
773             if (is_signed) {
774                 tcg_gen_ext32s_i64(temp, orig);
775             } else {
776                 tcg_gen_ext32u_i64(temp, orig);
777             }
778             args[i] = GET_TCGV_I64(temp);
779         }
780     }
781 #endif /* TCG_TARGET_EXTEND_ARGS */
782 
783     pi_first = pi = s->gen_next_parm_idx;
784     if (ret != TCG_CALL_DUMMY_ARG) {
785 #if defined(__sparc__) && !defined(__arch64__) \
786     && !defined(CONFIG_TCG_INTERPRETER)
787         if (orig_sizemask & 1) {
788             /* The 32-bit ABI is going to return the 64-bit value in
789                the %o0/%o1 register pair.  Prepare for this by using
790                two return temporaries, and reassemble below.  */
791             retl = tcg_temp_new_i64();
792             reth = tcg_temp_new_i64();
793             s->gen_opparam_buf[pi++] = GET_TCGV_I64(reth);
794             s->gen_opparam_buf[pi++] = GET_TCGV_I64(retl);
795             nb_rets = 2;
796         } else {
797             s->gen_opparam_buf[pi++] = ret;
798             nb_rets = 1;
799         }
800 #else
801         if (TCG_TARGET_REG_BITS < 64 && (sizemask & 1)) {
802 #ifdef HOST_WORDS_BIGENDIAN
803             s->gen_opparam_buf[pi++] = ret + 1;
804             s->gen_opparam_buf[pi++] = ret;
805 #else
806             s->gen_opparam_buf[pi++] = ret;
807             s->gen_opparam_buf[pi++] = ret + 1;
808 #endif
809             nb_rets = 2;
810         } else {
811             s->gen_opparam_buf[pi++] = ret;
812             nb_rets = 1;
813         }
814 #endif
815     } else {
816         nb_rets = 0;
817     }
818     real_args = 0;
819     for (i = 0; i < nargs; i++) {
820         int is_64bit = sizemask & (1 << (i+1)*2);
821         if (TCG_TARGET_REG_BITS < 64 && is_64bit) {
822 #ifdef TCG_TARGET_CALL_ALIGN_ARGS
823             /* some targets want aligned 64-bit args */
824             if (real_args & 1) {
825                 s->gen_opparam_buf[pi++] = TCG_CALL_DUMMY_ARG;
826                 real_args++;
827             }
828 #endif
829            /* If stack grows up, then we will be placing successive
830               arguments at lower addresses, which means we need to
831               reverse the order compared to how we would normally
832               treat either big or little-endian.  For those arguments
833               that will wind up in registers, this still works for
834               HPPA (the only current STACK_GROWSUP target) since the
835               argument registers are *also* allocated in decreasing
836               order.  If another such target is added, this logic may
837               have to get more complicated to differentiate between
838               stack arguments and register arguments.  */
839 #if defined(HOST_WORDS_BIGENDIAN) != defined(TCG_TARGET_STACK_GROWSUP)
840             s->gen_opparam_buf[pi++] = args[i] + 1;
841             s->gen_opparam_buf[pi++] = args[i];
842 #else
843             s->gen_opparam_buf[pi++] = args[i];
844             s->gen_opparam_buf[pi++] = args[i] + 1;
845 #endif
846             real_args += 2;
847             continue;
848         }
849 
850         s->gen_opparam_buf[pi++] = args[i];
851         real_args++;
852     }
853     s->gen_opparam_buf[pi++] = (uintptr_t)func;
854     s->gen_opparam_buf[pi++] = flags;
855 
856     i = s->gen_next_op_idx;
857     tcg_debug_assert(i < OPC_BUF_SIZE);
858     tcg_debug_assert(pi <= OPPARAM_BUF_SIZE);
859 
860     /* Set links for sequential allocation during translation.  */
861     s->gen_op_buf[i] = (TCGOp){
862         .opc = INDEX_op_call,
863         .callo = nb_rets,
864         .calli = real_args,
865         .args = pi_first,
866         .prev = i - 1,
867         .next = i + 1
868     };
869 
870     /* Make sure the calli field didn't overflow.  */
871     tcg_debug_assert(s->gen_op_buf[i].calli == real_args);
872 
873     s->gen_op_buf[0].prev = i;
874     s->gen_next_op_idx = i + 1;
875     s->gen_next_parm_idx = pi;
876 
877 #if defined(__sparc__) && !defined(__arch64__) \
878     && !defined(CONFIG_TCG_INTERPRETER)
879     /* Free all of the parts we allocated above.  */
880     for (i = real_args = 0; i < orig_nargs; ++i) {
881         int is_64bit = orig_sizemask & (1 << (i+1)*2);
882         if (is_64bit) {
883             TCGv_i32 h = MAKE_TCGV_I32(args[real_args++]);
884             TCGv_i32 l = MAKE_TCGV_I32(args[real_args++]);
885             tcg_temp_free_i32(h);
886             tcg_temp_free_i32(l);
887         } else {
888             real_args++;
889         }
890     }
891     if (orig_sizemask & 1) {
892         /* The 32-bit ABI returned two 32-bit pieces.  Re-assemble them.
893            Note that describing these as TCGv_i64 eliminates an unnecessary
894            zero-extension that tcg_gen_concat_i32_i64 would create.  */
895         tcg_gen_concat32_i64(MAKE_TCGV_I64(ret), retl, reth);
896         tcg_temp_free_i64(retl);
897         tcg_temp_free_i64(reth);
898     }
899 #elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
900     for (i = 0; i < nargs; ++i) {
901         int is_64bit = sizemask & (1 << (i+1)*2);
902         if (!is_64bit) {
903             TCGv_i64 temp = MAKE_TCGV_I64(args[i]);
904             tcg_temp_free_i64(temp);
905         }
906     }
907 #endif /* TCG_TARGET_EXTEND_ARGS */
908 }
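
/*
 * Layout note (illustrative, derived from the tests above): sizemask bit 0
 * marks a 64-bit return value; for argument i, bit (i+1)*2 marks a 64-bit
 * argument and bit (i+1)*2 + 1 marks a signed one.  For example, a helper
 * returning i64 and taking (i32, i64), with the i32 unsigned, would have
 *
 *     sizemask = 1 | (0 << 2) | (1 << 4) = 0x11
 */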
909 
910 static void tcg_reg_alloc_start(TCGContext *s)
911 {
912     int i;
913     TCGTemp *ts;
914     for(i = 0; i < s->nb_globals; i++) {
915         ts = &s->temps[i];
916         if (ts->fixed_reg) {
917             ts->val_type = TEMP_VAL_REG;
918         } else {
919             ts->val_type = TEMP_VAL_MEM;
920         }
921     }
922     for(i = s->nb_globals; i < s->nb_temps; i++) {
923         ts = &s->temps[i];
924         if (ts->temp_local) {
925             ts->val_type = TEMP_VAL_MEM;
926         } else {
927             ts->val_type = TEMP_VAL_DEAD;
928         }
929         ts->mem_allocated = 0;
930         ts->fixed_reg = 0;
931     }
932 
933     memset(s->reg_to_temp, 0, sizeof(s->reg_to_temp));
934 }
935 
936 static char *tcg_get_arg_str_ptr(TCGContext *s, char *buf, int buf_size,
937                                  TCGTemp *ts)
938 {
939     int idx = temp_idx(s, ts);
940 
941     if (idx < s->nb_globals) {
942         pstrcpy(buf, buf_size, ts->name);
943     } else if (ts->temp_local) {
944         snprintf(buf, buf_size, "loc%d", idx - s->nb_globals);
945     } else {
946         snprintf(buf, buf_size, "tmp%d", idx - s->nb_globals);
947     }
948     return buf;
949 }
950 
951 static char *tcg_get_arg_str_idx(TCGContext *s, char *buf,
952                                  int buf_size, int idx)
953 {
954     tcg_debug_assert(idx >= 0 && idx < s->nb_temps);
955     return tcg_get_arg_str_ptr(s, buf, buf_size, &s->temps[idx]);
956 }
957 
958 /* Find helper name.  */
959 static inline const char *tcg_find_helper(TCGContext *s, uintptr_t val)
960 {
961     const char *ret = NULL;
962     if (s->helpers) {
963         TCGHelperInfo *info = g_hash_table_lookup(s->helpers, (gpointer)val);
964         if (info) {
965             ret = info->name;
966         }
967     }
968     return ret;
969 }
970 
971 static const char * const cond_name[] =
972 {
973     [TCG_COND_NEVER] = "never",
974     [TCG_COND_ALWAYS] = "always",
975     [TCG_COND_EQ] = "eq",
976     [TCG_COND_NE] = "ne",
977     [TCG_COND_LT] = "lt",
978     [TCG_COND_GE] = "ge",
979     [TCG_COND_LE] = "le",
980     [TCG_COND_GT] = "gt",
981     [TCG_COND_LTU] = "ltu",
982     [TCG_COND_GEU] = "geu",
983     [TCG_COND_LEU] = "leu",
984     [TCG_COND_GTU] = "gtu"
985 };
986 
987 static const char * const ldst_name[] =
988 {
989     [MO_UB]   = "ub",
990     [MO_SB]   = "sb",
991     [MO_LEUW] = "leuw",
992     [MO_LESW] = "lesw",
993     [MO_LEUL] = "leul",
994     [MO_LESL] = "lesl",
995     [MO_LEQ]  = "leq",
996     [MO_BEUW] = "beuw",
997     [MO_BESW] = "besw",
998     [MO_BEUL] = "beul",
999     [MO_BESL] = "besl",
1000     [MO_BEQ]  = "beq",
1001 };
1002 
1003 static const char * const alignment_name[(MO_AMASK >> MO_ASHIFT) + 1] = {
1004 #ifdef ALIGNED_ONLY
1005     [MO_UNALN >> MO_ASHIFT]    = "un+",
1006     [MO_ALIGN >> MO_ASHIFT]    = "",
1007 #else
1008     [MO_UNALN >> MO_ASHIFT]    = "",
1009     [MO_ALIGN >> MO_ASHIFT]    = "al+",
1010 #endif
1011     [MO_ALIGN_2 >> MO_ASHIFT]  = "al2+",
1012     [MO_ALIGN_4 >> MO_ASHIFT]  = "al4+",
1013     [MO_ALIGN_8 >> MO_ASHIFT]  = "al8+",
1014     [MO_ALIGN_16 >> MO_ASHIFT] = "al16+",
1015     [MO_ALIGN_32 >> MO_ASHIFT] = "al32+",
1016     [MO_ALIGN_64 >> MO_ASHIFT] = "al64+",
1017 };
1018 
1019 void tcg_dump_ops(TCGContext *s)
1020 {
1021     char buf[128];
1022     TCGOp *op;
1023     int oi;
1024 
1025     for (oi = s->gen_op_buf[0].next; oi != 0; oi = op->next) {
1026         int i, k, nb_oargs, nb_iargs, nb_cargs;
1027         const TCGOpDef *def;
1028         const TCGArg *args;
1029         TCGOpcode c;
1030         int col = 0;
1031 
1032         op = &s->gen_op_buf[oi];
1033         c = op->opc;
1034         def = &tcg_op_defs[c];
1035         args = &s->gen_opparam_buf[op->args];
1036 
1037         if (c == INDEX_op_insn_start) {
1038             col += qemu_log("%s ----", oi != s->gen_op_buf[0].next ? "\n" : "");
1039 
1040             for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
1041                 target_ulong a;
1042 #if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
1043                 a = ((target_ulong)args[i * 2 + 1] << 32) | args[i * 2];
1044 #else
1045                 a = args[i];
1046 #endif
1047                 col += qemu_log(" " TARGET_FMT_lx, a);
1048             }
1049         } else if (c == INDEX_op_call) {
1050             /* variable number of arguments */
1051             nb_oargs = op->callo;
1052             nb_iargs = op->calli;
1053             nb_cargs = def->nb_cargs;
1054 
1055             /* function name, flags, out args */
1056             col += qemu_log(" %s %s,$0x%" TCG_PRIlx ",$%d", def->name,
1057                             tcg_find_helper(s, args[nb_oargs + nb_iargs]),
1058                             args[nb_oargs + nb_iargs + 1], nb_oargs);
1059             for (i = 0; i < nb_oargs; i++) {
1060                 col += qemu_log(",%s", tcg_get_arg_str_idx(s, buf, sizeof(buf),
1061                                                            args[i]));
1062             }
1063             for (i = 0; i < nb_iargs; i++) {
1064                 TCGArg arg = args[nb_oargs + i];
1065                 const char *t = "<dummy>";
1066                 if (arg != TCG_CALL_DUMMY_ARG) {
1067                     t = tcg_get_arg_str_idx(s, buf, sizeof(buf), arg);
1068                 }
1069                 col += qemu_log(",%s", t);
1070             }
1071         } else {
1072             col += qemu_log(" %s ", def->name);
1073 
1074             nb_oargs = def->nb_oargs;
1075             nb_iargs = def->nb_iargs;
1076             nb_cargs = def->nb_cargs;
1077 
1078             k = 0;
1079             for (i = 0; i < nb_oargs; i++) {
1080                 if (k != 0) {
1081                     col += qemu_log(",");
1082                 }
1083                 col += qemu_log("%s", tcg_get_arg_str_idx(s, buf, sizeof(buf),
1084                                                           args[k++]));
1085             }
1086             for (i = 0; i < nb_iargs; i++) {
1087                 if (k != 0) {
1088                     col += qemu_log(",");
1089                 }
1090                 col += qemu_log("%s", tcg_get_arg_str_idx(s, buf, sizeof(buf),
1091                                                           args[k++]));
1092             }
1093             switch (c) {
1094             case INDEX_op_brcond_i32:
1095             case INDEX_op_setcond_i32:
1096             case INDEX_op_movcond_i32:
1097             case INDEX_op_brcond2_i32:
1098             case INDEX_op_setcond2_i32:
1099             case INDEX_op_brcond_i64:
1100             case INDEX_op_setcond_i64:
1101             case INDEX_op_movcond_i64:
1102                 if (args[k] < ARRAY_SIZE(cond_name) && cond_name[args[k]]) {
1103                     col += qemu_log(",%s", cond_name[args[k++]]);
1104                 } else {
1105                     col += qemu_log(",$0x%" TCG_PRIlx, args[k++]);
1106                 }
1107                 i = 1;
1108                 break;
1109             case INDEX_op_qemu_ld_i32:
1110             case INDEX_op_qemu_st_i32:
1111             case INDEX_op_qemu_ld_i64:
1112             case INDEX_op_qemu_st_i64:
1113                 {
1114                     TCGMemOpIdx oi = args[k++];
1115                     TCGMemOp op = get_memop(oi);
1116                     unsigned ix = get_mmuidx(oi);
1117 
1118                     if (op & ~(MO_AMASK | MO_BSWAP | MO_SSIZE)) {
1119                         col += qemu_log(",$0x%x,%u", op, ix);
1120                     } else {
1121                         const char *s_al, *s_op;
1122                         s_al = alignment_name[(op & MO_AMASK) >> MO_ASHIFT];
1123                         s_op = ldst_name[op & (MO_BSWAP | MO_SSIZE)];
1124                         col += qemu_log(",%s%s,%u", s_al, s_op, ix);
1125                     }
1126                     i = 1;
1127                 }
1128                 break;
1129             default:
1130                 i = 0;
1131                 break;
1132             }
1133             switch (c) {
1134             case INDEX_op_set_label:
1135             case INDEX_op_br:
1136             case INDEX_op_brcond_i32:
1137             case INDEX_op_brcond_i64:
1138             case INDEX_op_brcond2_i32:
1139                 col += qemu_log("%s$L%d", k ? "," : "", arg_label(args[k])->id);
1140                 i++, k++;
1141                 break;
1142             default:
1143                 break;
1144             }
1145             for (; i < nb_cargs; i++, k++) {
1146                 col += qemu_log("%s$0x%" TCG_PRIlx, k ? "," : "", args[k]);
1147             }
1148         }
1149         if (op->life) {
1150             unsigned life = op->life;
1151 
1152             for (; col < 48; ++col) {
1153                 putc(' ', qemu_logfile);
1154             }
1155 
1156             if (life & (SYNC_ARG * 3)) {
1157                 qemu_log("  sync:");
1158                 for (i = 0; i < 2; ++i) {
1159                     if (life & (SYNC_ARG << i)) {
1160                         qemu_log(" %d", i);
1161                     }
1162                 }
1163             }
1164             life /= DEAD_ARG;
1165             if (life) {
1166                 qemu_log("  dead:");
1167                 for (i = 0; life; ++i, life >>= 1) {
1168                     if (life & 1) {
1169                         qemu_log(" %d", i);
1170                     }
1171                 }
1172             }
1173         }
1174         qemu_log("\n");
1175     }
1176 }
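
/*
 * Illustrative note (not part of the original file): with "-d op" each op
 * is printed on one line as "name out,in,in[,const...]", so an addition
 * shows up roughly as
 *
 *     add_i32 tmp2,tmp0,tmp1                          dead: 1 2
 *
 * where the trailing "sync:"/"dead:" columns come from op->life once the
 * liveness pass has annotated the op.
 */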
1177 
1178 /* We give more priority to constraints with fewer registers.  */
1179 static int get_constraint_priority(const TCGOpDef *def, int k)
1180 {
1181     const TCGArgConstraint *arg_ct;
1182 
1183     int i, n;
1184     arg_ct = &def->args_ct[k];
1185     if (arg_ct->ct & TCG_CT_ALIAS) {
1186         /* an alias is equivalent to a single register */
1187         n = 1;
1188     } else {
1189         if (!(arg_ct->ct & TCG_CT_REG))
1190             return 0;
1191         n = 0;
1192         for(i = 0; i < TCG_TARGET_NB_REGS; i++) {
1193             if (tcg_regset_test_reg(arg_ct->u.regs, i))
1194                 n++;
1195         }
1196     }
1197     return TCG_TARGET_NB_REGS - n + 1;
1198 }
1199 
1200 /* sort from highest priority to lowest */
1201 static void sort_constraints(TCGOpDef *def, int start, int n)
1202 {
1203     int i, j, p1, p2, tmp;
1204 
1205     for(i = 0; i < n; i++)
1206         def->sorted_args[start + i] = start + i;
1207     if (n <= 1)
1208         return;
1209     for(i = 0; i < n - 1; i++) {
1210         for(j = i + 1; j < n; j++) {
1211             p1 = get_constraint_priority(def, def->sorted_args[start + i]);
1212             p2 = get_constraint_priority(def, def->sorted_args[start + j]);
1213             if (p1 < p2) {
1214                 tmp = def->sorted_args[start + i];
1215                 def->sorted_args[start + i] = def->sorted_args[start + j];
1216                 def->sorted_args[start + j] = tmp;
1217             }
1218         }
1219     }
1220 }
1221 
1222 void tcg_add_target_add_op_defs(const TCGTargetOpDef *tdefs)
1223 {
1224     TCGOpcode op;
1225     TCGOpDef *def;
1226     const char *ct_str;
1227     int i, nb_args;
1228 
1229     for(;;) {
1230         if (tdefs->op == (TCGOpcode)-1)
1231             break;
1232         op = tdefs->op;
1233         tcg_debug_assert((unsigned)op < NB_OPS);
1234         def = &tcg_op_defs[op];
1235 #if defined(CONFIG_DEBUG_TCG)
1236         /* Duplicate entry in op definitions? */
1237         tcg_debug_assert(!def->used);
1238         def->used = 1;
1239 #endif
1240         nb_args = def->nb_iargs + def->nb_oargs;
1241         for(i = 0; i < nb_args; i++) {
1242             ct_str = tdefs->args_ct_str[i];
1243             /* Incomplete TCGTargetOpDef entry? */
1244             tcg_debug_assert(ct_str != NULL);
1245             tcg_regset_clear(def->args_ct[i].u.regs);
1246             def->args_ct[i].ct = 0;
1247             if (ct_str[0] >= '0' && ct_str[0] <= '9') {
1248                 int oarg;
1249                 oarg = ct_str[0] - '0';
1250                 tcg_debug_assert(oarg < def->nb_oargs);
1251                 tcg_debug_assert(def->args_ct[oarg].ct & TCG_CT_REG);
1252                 /* TCG_CT_ALIAS is for the output arguments. The input
1253                    argument is tagged with TCG_CT_IALIAS. */
1254                 def->args_ct[i] = def->args_ct[oarg];
1255                 def->args_ct[oarg].ct = TCG_CT_ALIAS;
1256                 def->args_ct[oarg].alias_index = i;
1257                 def->args_ct[i].ct |= TCG_CT_IALIAS;
1258                 def->args_ct[i].alias_index = oarg;
1259             } else {
1260                 for(;;) {
1261                     if (*ct_str == '\0')
1262                         break;
1263                     switch(*ct_str) {
1264                     case 'i':
1265                         def->args_ct[i].ct |= TCG_CT_CONST;
1266                         ct_str++;
1267                         break;
1268                     default:
1269                         if (target_parse_constraint(&def->args_ct[i], &ct_str) < 0) {
1270                             fprintf(stderr, "Invalid constraint '%s' for arg %d of operation '%s'\n",
1271                                     ct_str, i, def->name);
1272                             exit(1);
1273                         }
1274                     }
1275                 }
1276             }
1277         }
1278 
1279         /* TCGTargetOpDef entry with too much information? */
1280         tcg_debug_assert(i == TCG_MAX_OP_ARGS || tdefs->args_ct_str[i] == NULL);
1281 
1282         /* sort the constraints (XXX: this is just a heuristic) */
1283         sort_constraints(def, 0, def->nb_oargs);
1284         sort_constraints(def, def->nb_oargs, def->nb_iargs);
1285 
1286 #if 0
1287         {
1288             int i;
1289 
1290             printf("%s: sorted=", def->name);
1291             for(i = 0; i < def->nb_oargs + def->nb_iargs; i++)
1292                 printf(" %d", def->sorted_args[i]);
1293             printf("\n");
1294         }
1295 #endif
1296         tdefs++;
1297     }
1298 
1299 #if defined(CONFIG_DEBUG_TCG)
1300     i = 0;
1301     for (op = 0; op < tcg_op_defs_max; op++) {
1302         const TCGOpDef *def = &tcg_op_defs[op];
1303         if (def->flags & TCG_OPF_NOT_PRESENT) {
1304             /* Wrong entry in op definitions? */
1305             if (def->used) {
1306                 fprintf(stderr, "Invalid op definition for %s\n", def->name);
1307                 i = 1;
1308             }
1309         } else {
1310             /* Missing entry in op definitions? */
1311             if (!def->used) {
1312                 fprintf(stderr, "Missing op definition for %s\n", def->name);
1313                 i = 1;
1314             }
1315         }
1316     }
1317     if (i == 1) {
1318         tcg_abort();
1319     }
1320 #endif
1321 }
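
/*
 * Illustrative sketch (not part of the original file): each backend feeds
 * this function a table mapping opcodes to constraint strings, roughly of
 * the form
 *
 *     static const TCGTargetOpDef my_op_defs[] = {
 *         { INDEX_op_add_i32, { "r", "r", "ri" } },  // out, in, in
 *         ...
 *         { -1 },                                    // terminator
 *     };
 *     tcg_add_target_add_op_defs(my_op_defs);
 *
 * "my_op_defs" and the constraint letters are examples; the real tables
 * live in each tcg-target.inc.c.
 */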
1322 
1323 void tcg_op_remove(TCGContext *s, TCGOp *op)
1324 {
1325     int next = op->next;
1326     int prev = op->prev;
1327 
1328     /* We should never attempt to remove the list terminator.  */
1329     tcg_debug_assert(op != &s->gen_op_buf[0]);
1330 
1331     s->gen_op_buf[next].prev = prev;
1332     s->gen_op_buf[prev].next = next;
1333 
1334     memset(op, 0, sizeof(*op));
1335 
1336 #ifdef CONFIG_PROFILER
1337     s->del_op_count++;
1338 #endif
1339 }
1340 
1341 TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *old_op,
1342                             TCGOpcode opc, int nargs)
1343 {
1344     int oi = s->gen_next_op_idx;
1345     int pi = s->gen_next_parm_idx;
1346     int prev = old_op->prev;
1347     int next = old_op - s->gen_op_buf;
1348     TCGOp *new_op;
1349 
1350     tcg_debug_assert(oi < OPC_BUF_SIZE);
1351     tcg_debug_assert(pi + nargs <= OPPARAM_BUF_SIZE);
1352     s->gen_next_op_idx = oi + 1;
1353     s->gen_next_parm_idx = pi + nargs;
1354 
1355     new_op = &s->gen_op_buf[oi];
1356     *new_op = (TCGOp){
1357         .opc = opc,
1358         .args = pi,
1359         .prev = prev,
1360         .next = next
1361     };
1362     s->gen_op_buf[prev].next = oi;
1363     old_op->prev = oi;
1364 
1365     return new_op;
1366 }
1367 
1368 TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *old_op,
1369                            TCGOpcode opc, int nargs)
1370 {
1371     int oi = s->gen_next_op_idx;
1372     int pi = s->gen_next_parm_idx;
1373     int prev = old_op - s->gen_op_buf;
1374     int next = old_op->next;
1375     TCGOp *new_op;
1376 
1377     tcg_debug_assert(oi < OPC_BUF_SIZE);
1378     tcg_debug_assert(pi + nargs <= OPPARAM_BUF_SIZE);
1379     s->gen_next_op_idx = oi + 1;
1380     s->gen_next_parm_idx = pi + nargs;
1381 
1382     new_op = &s->gen_op_buf[oi];
1383     *new_op = (TCGOp){
1384         .opc = opc,
1385         .args = pi,
1386         .prev = prev,
1387         .next = next
1388     };
1389     s->gen_op_buf[next].prev = oi;
1390     old_op->next = oi;
1391 
1392     return new_op;
1393 }
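
/*
 * Illustrative sketch (not part of the original file): optimization passes
 * splice new ops into the doubly linked list with these helpers and then
 * fill in the freshly reserved argument slots, e.g.
 *
 *     TCGOp *ld = tcg_op_insert_before(s, op, INDEX_op_ld_i32, 3);
 *     s->gen_opparam_buf[ld->args + 0] = dst;
 *     s->gen_opparam_buf[ld->args + 1] = base;
 *     s->gen_opparam_buf[ld->args + 2] = offset;
 *
 * dst/base/offset are placeholder TCGArg values; this is the pattern the
 * indirect-global rewriting in liveness_pass_2 relies on.
 */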
1394 
1395 #define TS_DEAD  1
1396 #define TS_MEM   2
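
/* As used by the liveness passes below: TS_DEAD marks a temp whose value
   is not needed by any later op, TS_MEM marks one whose canonical copy
   must be in its memory slot; globals get both bits at function end.  */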
1397 
1398 #define IS_DEAD_ARG(n)   (arg_life & (DEAD_ARG << (n)))
1399 #define NEED_SYNC_ARG(n) (arg_life & (SYNC_ARG << (n)))
1400 
1401 /* liveness analysis: end of function: all temps are dead, and globals
1402    should be in memory. */
1403 static inline void tcg_la_func_end(TCGContext *s, uint8_t *temp_state)
1404 {
1405     memset(temp_state, TS_DEAD | TS_MEM, s->nb_globals);
1406     memset(temp_state + s->nb_globals, TS_DEAD, s->nb_temps - s->nb_globals);
1407 }
1408 
1409 /* liveness analysis: end of basic block: all temps are dead, globals
1410    and local temps should be in memory. */
1411 static inline void tcg_la_bb_end(TCGContext *s, uint8_t *temp_state)
1412 {
1413     int i, n;
1414 
1415     tcg_la_func_end(s, temp_state);
1416     for (i = s->nb_globals, n = s->nb_temps; i < n; i++) {
1417         if (s->temps[i].temp_local) {
1418             temp_state[i] |= TS_MEM;
1419         }
1420     }
1421 }
1422 
1423 /* Liveness analysis: update the opc_arg_life array to tell if a
1424    given input argument is dead.  Instructions updating dead
1425    temporaries are removed.  */
1426 static void liveness_pass_1(TCGContext *s, uint8_t *temp_state)
1427 {
1428     int nb_globals = s->nb_globals;
1429     int oi, oi_prev;
1430 
1431     tcg_la_func_end(s, temp_state);
1432 
1433     for (oi = s->gen_op_buf[0].prev; oi != 0; oi = oi_prev) {
1434         int i, nb_iargs, nb_oargs;
1435         TCGOpcode opc_new, opc_new2;
1436         bool have_opc_new2;
1437         TCGLifeData arg_life = 0;
1438         TCGArg arg;
1439 
1440         TCGOp * const op = &s->gen_op_buf[oi];
1441         TCGArg * const args = &s->gen_opparam_buf[op->args];
1442         TCGOpcode opc = op->opc;
1443         const TCGOpDef *def = &tcg_op_defs[opc];
1444 
1445         oi_prev = op->prev;
1446 
1447         switch (opc) {
1448         case INDEX_op_call:
1449             {
1450                 int call_flags;
1451 
1452                 nb_oargs = op->callo;
1453                 nb_iargs = op->calli;
1454                 call_flags = args[nb_oargs + nb_iargs + 1];
1455 
1456                 /* pure functions can be removed if their result is unused */
1457                 if (call_flags & TCG_CALL_NO_SIDE_EFFECTS) {
1458                     for (i = 0; i < nb_oargs; i++) {
1459                         arg = args[i];
1460                         if (temp_state[arg] != TS_DEAD) {
1461                             goto do_not_remove_call;
1462                         }
1463                     }
1464                     goto do_remove;
1465                 } else {
1466                 do_not_remove_call:
1467 
1468                     /* output args are dead */
1469                     for (i = 0; i < nb_oargs; i++) {
1470                         arg = args[i];
1471                         if (temp_state[arg] & TS_DEAD) {
1472                             arg_life |= DEAD_ARG << i;
1473                         }
1474                         if (temp_state[arg] & TS_MEM) {
1475                             arg_life |= SYNC_ARG << i;
1476                         }
1477                         temp_state[arg] = TS_DEAD;
1478                     }
1479 
1480                     if (!(call_flags & (TCG_CALL_NO_WRITE_GLOBALS |
1481                                         TCG_CALL_NO_READ_GLOBALS))) {
1482                         /* globals should go back to memory */
1483                         memset(temp_state, TS_DEAD | TS_MEM, nb_globals);
1484                     } else if (!(call_flags & TCG_CALL_NO_READ_GLOBALS)) {
1485                         /* globals should be synced to memory */
1486                         for (i = 0; i < nb_globals; i++) {
1487                             temp_state[i] |= TS_MEM;
1488                         }
1489                     }
1490 
1491                     /* record arguments that die in this helper */
1492                     for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
1493                         arg = args[i];
1494                         if (arg != TCG_CALL_DUMMY_ARG) {
1495                             if (temp_state[arg] & TS_DEAD) {
1496                                 arg_life |= DEAD_ARG << i;
1497                             }
1498                         }
1499                     }
1500                     /* input arguments are live for preceding opcodes */
1501                     for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
1502                         arg = args[i];
1503                         if (arg != TCG_CALL_DUMMY_ARG) {
1504                             temp_state[arg] &= ~TS_DEAD;
1505                         }
1506                     }
1507                 }
1508             }
1509             break;
1510         case INDEX_op_insn_start:
1511             break;
1512         case INDEX_op_discard:
1513             /* mark the temporary as dead */
1514             temp_state[args[0]] = TS_DEAD;
1515             break;
1516 
1517         case INDEX_op_add2_i32:
1518             opc_new = INDEX_op_add_i32;
1519             goto do_addsub2;
1520         case INDEX_op_sub2_i32:
1521             opc_new = INDEX_op_sub_i32;
1522             goto do_addsub2;
1523         case INDEX_op_add2_i64:
1524             opc_new = INDEX_op_add_i64;
1525             goto do_addsub2;
1526         case INDEX_op_sub2_i64:
1527             opc_new = INDEX_op_sub_i64;
1528         do_addsub2:
1529             nb_iargs = 4;
1530             nb_oargs = 2;
1531             /* Test if the high part of the operation is dead, but not
1532                the low part.  The result can be optimized to a simple
1533                add or sub.  This happens often for an x86_64 guest when
1534                the CPU mode is set to 32 bit.  */
1535             if (temp_state[args[1]] == TS_DEAD) {
1536                 if (temp_state[args[0]] == TS_DEAD) {
1537                     goto do_remove;
1538                 }
1539                 /* Replace the opcode and adjust the args in place,
1540                    leaving 3 unused args at the end.  */
1541                 op->opc = opc = opc_new;
1542                 args[1] = args[2];
1543                 args[2] = args[4];
1544                 /* Fall through and mark the single-word operation live.  */
1545                 nb_iargs = 2;
1546                 nb_oargs = 1;
1547             }
1548             goto do_not_remove;
1549 
1550         case INDEX_op_mulu2_i32:
1551             opc_new = INDEX_op_mul_i32;
1552             opc_new2 = INDEX_op_muluh_i32;
1553             have_opc_new2 = TCG_TARGET_HAS_muluh_i32;
1554             goto do_mul2;
1555         case INDEX_op_muls2_i32:
1556             opc_new = INDEX_op_mul_i32;
1557             opc_new2 = INDEX_op_mulsh_i32;
1558             have_opc_new2 = TCG_TARGET_HAS_mulsh_i32;
1559             goto do_mul2;
1560         case INDEX_op_mulu2_i64:
1561             opc_new = INDEX_op_mul_i64;
1562             opc_new2 = INDEX_op_muluh_i64;
1563             have_opc_new2 = TCG_TARGET_HAS_muluh_i64;
1564             goto do_mul2;
1565         case INDEX_op_muls2_i64:
1566             opc_new = INDEX_op_mul_i64;
1567             opc_new2 = INDEX_op_mulsh_i64;
1568             have_opc_new2 = TCG_TARGET_HAS_mulsh_i64;
1569             goto do_mul2;
1570         do_mul2:
1571             nb_iargs = 2;
1572             nb_oargs = 2;
1573             if (temp_state[args[1]] == TS_DEAD) {
1574                 if (temp_state[args[0]] == TS_DEAD) {
1575                     /* Both parts of the operation are dead.  */
1576                     goto do_remove;
1577                 }
1578                 /* The high part of the operation is dead; generate the low. */
1579                 op->opc = opc = opc_new;
1580                 args[1] = args[2];
1581                 args[2] = args[3];
1582             } else if (temp_state[args[0]] == TS_DEAD && have_opc_new2) {
1583                 /* The low part of the operation is dead; generate the high. */
1584                 op->opc = opc = opc_new2;
1585                 args[0] = args[1];
1586                 args[1] = args[2];
1587                 args[2] = args[3];
1588             } else {
1589                 goto do_not_remove;
1590             }
1591             /* Mark the single-word operation live.  */
1592             nb_oargs = 1;
1593             goto do_not_remove;
1594 
1595         default:
1596             /* XXX: optimize by hardcoding common cases (e.g. triadic ops) */
1597             nb_iargs = def->nb_iargs;
1598             nb_oargs = def->nb_oargs;
1599 
1600             /* Test if the operation can be removed because all
1601                its outputs are dead. We assume that nb_oargs == 0
1602                implies side effects */
1603             if (!(def->flags & TCG_OPF_SIDE_EFFECTS) && nb_oargs != 0) {
1604                 for (i = 0; i < nb_oargs; i++) {
1605                     if (temp_state[args[i]] != TS_DEAD) {
1606                         goto do_not_remove;
1607                     }
1608                 }
1609             do_remove:
1610                 tcg_op_remove(s, op);
1611             } else {
1612             do_not_remove:
1613                 /* output args are dead */
1614                 for (i = 0; i < nb_oargs; i++) {
1615                     arg = args[i];
1616                     if (temp_state[arg] & TS_DEAD) {
1617                         arg_life |= DEAD_ARG << i;
1618                     }
1619                     if (temp_state[arg] & TS_MEM) {
1620                         arg_life |= SYNC_ARG << i;
1621                     }
1622                     temp_state[arg] = TS_DEAD;
1623                 }
1624 
1625                 /* if end of basic block, update */
1626                 if (def->flags & TCG_OPF_BB_END) {
1627                     tcg_la_bb_end(s, temp_state);
1628                 } else if (def->flags & TCG_OPF_SIDE_EFFECTS) {
1629                     /* globals should be synced to memory */
1630                     for (i = 0; i < nb_globals; i++) {
1631                         temp_state[i] |= TS_MEM;
1632                     }
1633                 }
1634 
1635                 /* record arguments that die in this opcode */
1636                 for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
1637                     arg = args[i];
1638                     if (temp_state[arg] & TS_DEAD) {
1639                         arg_life |= DEAD_ARG << i;
1640                     }
1641                 }
1642                 /* input arguments are live for preceding opcodes */
1643                 for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
1644                     temp_state[args[i]] &= ~TS_DEAD;
1645                 }
1646             }
1647             break;
1648         }
1649         op->life = arg_life;
1650     }
1651 }
1652 
1653 /* Liveness analysis: Convert indirect regs to direct temporaries.  */
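/* For example, given an indirect global G whose backing store is env + off,
   an op such as

       add_i32 G, G, t0

   is rewritten below, using a new direct temporary D, as

       ld_i32  D, env, off
       add_i32 D, D, t0
       st_i32  D, env, off

   where the ld is emitted only if D does not already hold the value, and the
   st only where liveness requires the global to be synced back to memory
   (env and off stand for G's mem_base and mem_offset).  */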
1654 static bool liveness_pass_2(TCGContext *s, uint8_t *temp_state)
1655 {
1656     int nb_globals = s->nb_globals;
1657     int16_t *dir_temps;
1658     int i, oi, oi_next;
1659     bool changes = false;
1660 
1661     dir_temps = tcg_malloc(nb_globals * sizeof(int16_t));
1662     memset(dir_temps, 0, nb_globals * sizeof(int16_t));
1663 
1664     /* Create a temporary for each indirect global.  */
1665     for (i = 0; i < nb_globals; ++i) {
1666         TCGTemp *its = &s->temps[i];
1667         if (its->indirect_reg) {
1668             TCGTemp *dts = tcg_temp_alloc(s);
1669             dts->type = its->type;
1670             dts->base_type = its->base_type;
1671             dir_temps[i] = temp_idx(s, dts);
1672         }
1673     }
1674 
1675     memset(temp_state, TS_DEAD, nb_globals);
1676 
1677     for (oi = s->gen_op_buf[0].next; oi != 0; oi = oi_next) {
1678         TCGOp *op = &s->gen_op_buf[oi];
1679         TCGArg *args = &s->gen_opparam_buf[op->args];
1680         TCGOpcode opc = op->opc;
1681         const TCGOpDef *def = &tcg_op_defs[opc];
1682         TCGLifeData arg_life = op->life;
1683         int nb_iargs, nb_oargs, call_flags;
1684         TCGArg arg, dir;
1685 
1686         oi_next = op->next;
1687 
1688         if (opc == INDEX_op_call) {
1689             nb_oargs = op->callo;
1690             nb_iargs = op->calli;
1691             call_flags = args[nb_oargs + nb_iargs + 1];
1692         } else {
1693             nb_iargs = def->nb_iargs;
1694             nb_oargs = def->nb_oargs;
1695 
1696             /* Set flags similar to those a call would require.  */
1697             if (def->flags & TCG_OPF_BB_END) {
1698                 /* Like writing globals: save_globals */
1699                 call_flags = 0;
1700             } else if (def->flags & TCG_OPF_SIDE_EFFECTS) {
1701                 /* Like reading globals: sync_globals */
1702                 call_flags = TCG_CALL_NO_WRITE_GLOBALS;
1703             } else {
1704                 /* No effect on globals.  */
1705                 call_flags = (TCG_CALL_NO_READ_GLOBALS |
1706                               TCG_CALL_NO_WRITE_GLOBALS);
1707             }
1708         }
1709 
1710         /* Make sure that input arguments are available.  */
1711         for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
1712             arg = args[i];
1713             /* Note this unsigned test catches TCG_CALL_DUMMY_ARG too.  */
1714             if (arg < nb_globals) {
1715                 dir = dir_temps[arg];
1716                 if (dir != 0 && temp_state[arg] == TS_DEAD) {
1717                     TCGTemp *its = &s->temps[arg];
1718                     TCGOpcode lopc = (its->type == TCG_TYPE_I32
1719                                       ? INDEX_op_ld_i32
1720                                       : INDEX_op_ld_i64);
1721                     TCGOp *lop = tcg_op_insert_before(s, op, lopc, 3);
1722                     TCGArg *largs = &s->gen_opparam_buf[lop->args];
1723 
1724                     largs[0] = dir;
1725                     largs[1] = temp_idx(s, its->mem_base);
1726                     largs[2] = its->mem_offset;
1727 
1728                     /* Loaded, but synced with memory.  */
1729                     temp_state[arg] = TS_MEM;
1730                 }
1731             }
1732         }
1733 
1734         /* Perform input replacement, and mark inputs that became dead.
1735            No action is required except keeping temp_state up to date
1736            so that we reload when needed.  */
1737         for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
1738             arg = args[i];
1739             if (arg < nb_globals) {
1740                 dir = dir_temps[arg];
1741                 if (dir != 0) {
1742                     args[i] = dir;
1743                     changes = true;
1744                     if (IS_DEAD_ARG(i)) {
1745                         temp_state[arg] = TS_DEAD;
1746                     }
1747                 }
1748             }
1749         }
1750 
1751         /* Liveness analysis should ensure that the following are
1752            all correct, for call sites and basic block end points.  */
1753         if (call_flags & TCG_CALL_NO_READ_GLOBALS) {
1754             /* Nothing to do */
1755         } else if (call_flags & TCG_CALL_NO_WRITE_GLOBALS) {
1756             for (i = 0; i < nb_globals; ++i) {
1757                 /* Liveness should see that globals are synced back,
1758                    that is, either TS_DEAD or TS_MEM.  */
1759                 tcg_debug_assert(dir_temps[i] == 0
1760                                  || temp_state[i] != 0);
1761             }
1762         } else {
1763             for (i = 0; i < nb_globals; ++i) {
1764                 /* Liveness should see that globals are saved back,
1765                    that is, TS_DEAD, waiting to be reloaded.  */
1766                 tcg_debug_assert(dir_temps[i] == 0
1767                                  || temp_state[i] == TS_DEAD);
1768             }
1769         }
1770 
1771         /* Outputs become available.  */
1772         for (i = 0; i < nb_oargs; i++) {
1773             arg = args[i];
1774             if (arg >= nb_globals) {
1775                 continue;
1776             }
1777             dir = dir_temps[arg];
1778             if (dir == 0) {
1779                 continue;
1780             }
1781             args[i] = dir;
1782             changes = true;
1783 
1784             /* The output is now live and modified.  */
1785             temp_state[arg] = 0;
1786 
1787             /* Sync outputs upon their last write.  */
1788             if (NEED_SYNC_ARG(i)) {
1789                 TCGTemp *its = &s->temps[arg];
1790                 TCGOpcode sopc = (its->type == TCG_TYPE_I32
1791                                   ? INDEX_op_st_i32
1792                                   : INDEX_op_st_i64);
1793                 TCGOp *sop = tcg_op_insert_after(s, op, sopc, 3);
1794                 TCGArg *sargs = &s->gen_opparam_buf[sop->args];
1795 
1796                 sargs[0] = dir;
1797                 sargs[1] = temp_idx(s, its->mem_base);
1798                 sargs[2] = its->mem_offset;
1799 
1800                 temp_state[arg] = TS_MEM;
1801             }
1802             /* Drop outputs that are dead.  */
1803             if (IS_DEAD_ARG(i)) {
1804                 temp_state[arg] = TS_DEAD;
1805             }
1806         }
1807     }
1808 
1809     return changes;
1810 }
1811 
1812 #ifdef CONFIG_DEBUG_TCG
1813 static void dump_regs(TCGContext *s)
1814 {
1815     TCGTemp *ts;
1816     int i;
1817     char buf[64];
1818 
1819     for(i = 0; i < s->nb_temps; i++) {
1820         ts = &s->temps[i];
1821         printf("  %10s: ", tcg_get_arg_str_idx(s, buf, sizeof(buf), i));
1822         switch(ts->val_type) {
1823         case TEMP_VAL_REG:
1824             printf("%s", tcg_target_reg_names[ts->reg]);
1825             break;
1826         case TEMP_VAL_MEM:
1827             printf("%d(%s)", (int)ts->mem_offset,
1828                    tcg_target_reg_names[ts->mem_base->reg]);
1829             break;
1830         case TEMP_VAL_CONST:
1831             printf("$0x%" TCG_PRIlx, ts->val);
1832             break;
1833         case TEMP_VAL_DEAD:
1834             printf("D");
1835             break;
1836         default:
1837             printf("???");
1838             break;
1839         }
1840         printf("\n");
1841     }
1842 
1843     for(i = 0; i < TCG_TARGET_NB_REGS; i++) {
1844         if (s->reg_to_temp[i] != NULL) {
1845             printf("%s: %s\n",
1846                    tcg_target_reg_names[i],
1847                    tcg_get_arg_str_ptr(s, buf, sizeof(buf), s->reg_to_temp[i]));
1848         }
1849     }
1850 }
1851 
1852 static void check_regs(TCGContext *s)
1853 {
1854     int reg;
1855     int k;
1856     TCGTemp *ts;
1857     char buf[64];
1858 
1859     for (reg = 0; reg < TCG_TARGET_NB_REGS; reg++) {
1860         ts = s->reg_to_temp[reg];
1861         if (ts != NULL) {
1862             if (ts->val_type != TEMP_VAL_REG || ts->reg != reg) {
1863                 printf("Inconsistency for register %s:\n",
1864                        tcg_target_reg_names[reg]);
1865                 goto fail;
1866             }
1867         }
1868     }
1869     for (k = 0; k < s->nb_temps; k++) {
1870         ts = &s->temps[k];
1871         if (ts->val_type == TEMP_VAL_REG && !ts->fixed_reg
1872             && s->reg_to_temp[ts->reg] != ts) {
1873             printf("Inconsistency for temp %s:\n",
1874                    tcg_get_arg_str_ptr(s, buf, sizeof(buf), ts));
1875         fail:
1876             printf("reg state:\n");
1877             dump_regs(s);
1878             tcg_abort();
1879         }
1880     }
1881 }
1882 #endif
1883 
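/* Reserve a slot in the TB's stack frame for temporary TEMP.  The current
   frame offset is first rounded up to sizeof(tcg_target_long); e.g. with
   8-byte slots an offset of 12 becomes (12 + 7) & ~7 = 16.  The rounding is
   skipped on sparc64, per the comment below.  */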
1884 static void temp_allocate_frame(TCGContext *s, int temp)
1885 {
1886     TCGTemp *ts;
1887     ts = &s->temps[temp];
1888 #if !(defined(__sparc__) && TCG_TARGET_REG_BITS == 64)
1889     /* The sparc64 stack is accessed with an offset of 2047 */
1890     s->current_frame_offset = (s->current_frame_offset +
1891                                (tcg_target_long)sizeof(tcg_target_long) - 1) &
1892         ~(sizeof(tcg_target_long) - 1);
1893 #endif
1894     if (s->current_frame_offset + (tcg_target_long)sizeof(tcg_target_long) >
1895         s->frame_end) {
1896         tcg_abort();
1897     }
1898     ts->mem_offset = s->current_frame_offset;
1899     ts->mem_base = s->frame_temp;
1900     ts->mem_allocated = 1;
1901     s->current_frame_offset += sizeof(tcg_target_long);
1902 }
1903 
1904 static void temp_load(TCGContext *, TCGTemp *, TCGRegSet, TCGRegSet);
1905 
1906 /* Mark a temporary as free or dead.  If 'free_or_dead' is negative,
1907    mark it free; otherwise mark it dead.  */
1908 static void temp_free_or_dead(TCGContext *s, TCGTemp *ts, int free_or_dead)
1909 {
1910     if (ts->fixed_reg) {
1911         return;
1912     }
1913     if (ts->val_type == TEMP_VAL_REG) {
1914         s->reg_to_temp[ts->reg] = NULL;
1915     }
1916     ts->val_type = (free_or_dead < 0
1917                     || ts->temp_local
1918                     || temp_idx(s, ts) < s->nb_globals
1919                     ? TEMP_VAL_MEM : TEMP_VAL_DEAD);
1920 }
1921 
1922 /* Mark a temporary as dead.  */
1923 static inline void temp_dead(TCGContext *s, TCGTemp *ts)
1924 {
1925     temp_free_or_dead(s, ts, 1);
1926 }
1927 
1928 /* Sync a temporary to memory. 'allocated_regs' is used in case a temporary
1929    register needs to be allocated to store a constant.  If 'free_or_dead'
1930    is non-zero, subsequently release the temporary; if it is positive, the
1931    temp is dead; if it is negative, the temp is free.  */
1932 static void temp_sync(TCGContext *s, TCGTemp *ts,
1933                       TCGRegSet allocated_regs, int free_or_dead)
1934 {
1935     if (ts->fixed_reg) {
1936         return;
1937     }
1938     if (!ts->mem_coherent) {
1939         if (!ts->mem_allocated) {
1940             temp_allocate_frame(s, temp_idx(s, ts));
1941         }
1942         switch (ts->val_type) {
1943         case TEMP_VAL_CONST:
1944             /* If we're going to free the temp immediately, then we won't
1945                require it later in a register, so attempt to store the
1946                constant to memory directly.  */
1947             if (free_or_dead
1948                 && tcg_out_sti(s, ts->type, ts->val,
1949                                ts->mem_base->reg, ts->mem_offset)) {
1950                 break;
1951             }
1952             temp_load(s, ts, tcg_target_available_regs[ts->type],
1953                       allocated_regs);
1954             /* fallthrough */
1955 
1956         case TEMP_VAL_REG:
1957             tcg_out_st(s, ts->type, ts->reg,
1958                        ts->mem_base->reg, ts->mem_offset);
1959             break;
1960 
1961         case TEMP_VAL_MEM:
1962             break;
1963 
1964         case TEMP_VAL_DEAD:
1965         default:
1966             tcg_abort();
1967         }
1968         ts->mem_coherent = 1;
1969     }
1970     if (free_or_dead) {
1971         temp_free_or_dead(s, ts, free_or_dead);
1972     }
1973 }
1974 
1975 /* free register 'reg' by spilling the corresponding temporary if necessary */
1976 static void tcg_reg_free(TCGContext *s, TCGReg reg, TCGRegSet allocated_regs)
1977 {
1978     TCGTemp *ts = s->reg_to_temp[reg];
1979     if (ts != NULL) {
1980         temp_sync(s, ts, allocated_regs, -1);
1981     }
1982 }
1983 
1984 /* Allocate a register from desired_regs that is not in allocated_regs.  */
1985 static TCGReg tcg_reg_alloc(TCGContext *s, TCGRegSet desired_regs,
1986                             TCGRegSet allocated_regs, bool rev)
1987 {
1988     int i, n = ARRAY_SIZE(tcg_target_reg_alloc_order);
1989     const int *order;
1990     TCGReg reg;
1991     TCGRegSet reg_ct;
1992 
1993     tcg_regset_andnot(reg_ct, desired_regs, allocated_regs);
1994     order = rev ? indirect_reg_alloc_order : tcg_target_reg_alloc_order;
1995 
1996     /* first try free registers */
1997     for(i = 0; i < n; i++) {
1998         reg = order[i];
1999         if (tcg_regset_test_reg(reg_ct, reg) && s->reg_to_temp[reg] == NULL)
2000             return reg;
2001     }
2002 
2003     /* XXX: do better spill choice */
2004     for(i = 0; i < n; i++) {
2005         reg = order[i];
2006         if (tcg_regset_test_reg(reg_ct, reg)) {
2007             tcg_reg_free(s, reg, allocated_regs);
2008             return reg;
2009         }
2010     }
2011 
2012     tcg_abort();
2013 }
2014 
2015 /* Make sure the temporary is in a register.  If needed, allocate the register
2016    from DESIRED while avoiding ALLOCATED.  */
2017 static void temp_load(TCGContext *s, TCGTemp *ts, TCGRegSet desired_regs,
2018                       TCGRegSet allocated_regs)
2019 {
2020     TCGReg reg;
2021 
2022     switch (ts->val_type) {
2023     case TEMP_VAL_REG:
2024         return;
2025     case TEMP_VAL_CONST:
2026         reg = tcg_reg_alloc(s, desired_regs, allocated_regs, ts->indirect_base);
2027         tcg_out_movi(s, ts->type, reg, ts->val);
2028         ts->mem_coherent = 0;
2029         break;
2030     case TEMP_VAL_MEM:
2031         reg = tcg_reg_alloc(s, desired_regs, allocated_regs, ts->indirect_base);
2032         tcg_out_ld(s, ts->type, reg, ts->mem_base->reg, ts->mem_offset);
2033         ts->mem_coherent = 1;
2034         break;
2035     case TEMP_VAL_DEAD:
2036     default:
2037         tcg_abort();
2038     }
2039     ts->reg = reg;
2040     ts->val_type = TEMP_VAL_REG;
2041     s->reg_to_temp[reg] = ts;
2042 }
2043 
2044 /* Save a temporary to memory. 'allocated_regs' is used in case a
2045    temporary register needs to be allocated to store a constant.  */
2046 static void temp_save(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs)
2047 {
2048     /* The liveness analysis already ensures that globals are back
2049        in memory. Keep a tcg_debug_assert for safety. */
2050     tcg_debug_assert(ts->val_type == TEMP_VAL_MEM || ts->fixed_reg);
2051 }
2052 
2053 /* save globals to their canonical location and assume they can be
2054    modified by the following code. 'allocated_regs' is used in case a
2055    temporary register needs to be allocated to store a constant. */
2056 static void save_globals(TCGContext *s, TCGRegSet allocated_regs)
2057 {
2058     int i;
2059 
2060     for (i = 0; i < s->nb_globals; i++) {
2061         temp_save(s, &s->temps[i], allocated_regs);
2062     }
2063 }
2064 
2065 /* sync globals to their canonical location and assume they can be
2066    read by the following code. 'allocated_regs' is used in case a
2067    temporary register needs to be allocated to store a constant. */
2068 static void sync_globals(TCGContext *s, TCGRegSet allocated_regs)
2069 {
2070     int i;
2071 
2072     for (i = 0; i < s->nb_globals; i++) {
2073         TCGTemp *ts = &s->temps[i];
2074         tcg_debug_assert(ts->val_type != TEMP_VAL_REG
2075                          || ts->fixed_reg
2076                          || ts->mem_coherent);
2077     }
2078 }
2079 
2080 /* at the end of a basic block, we assume all temporaries are dead and
2081    all globals are stored at their canonical location. */
2082 static void tcg_reg_alloc_bb_end(TCGContext *s, TCGRegSet allocated_regs)
2083 {
2084     int i;
2085 
2086     for (i = s->nb_globals; i < s->nb_temps; i++) {
2087         TCGTemp *ts = &s->temps[i];
2088         if (ts->temp_local) {
2089             temp_save(s, ts, allocated_regs);
2090         } else {
2091             /* The liveness analysis already ensures that temps are dead.
2092                Keep a tcg_debug_assert for safety. */
2093             tcg_debug_assert(ts->val_type == TEMP_VAL_DEAD);
2094         }
2095     }
2096 
2097     save_globals(s, allocated_regs);
2098 }
2099 
2100 static void tcg_reg_alloc_do_movi(TCGContext *s, TCGTemp *ots,
2101                                   tcg_target_ulong val, TCGLifeData arg_life)
2102 {
2103     if (ots->fixed_reg) {
2104         /* For fixed registers, we do not do any constant propagation.  */
2105         tcg_out_movi(s, ots->type, ots->reg, val);
2106         return;
2107     }
2108 
2109     /* The movi is not explicitly generated here.  */
2110     if (ots->val_type == TEMP_VAL_REG) {
2111         s->reg_to_temp[ots->reg] = NULL;
2112     }
2113     ots->val_type = TEMP_VAL_CONST;
2114     ots->val = val;
2115     ots->mem_coherent = 0;
2116     if (NEED_SYNC_ARG(0)) {
2117         temp_sync(s, ots, s->reserved_regs, IS_DEAD_ARG(0));
2118     } else if (IS_DEAD_ARG(0)) {
2119         temp_dead(s, ots);
2120     }
2121 }
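/* For a non-fixed destination the constant is only recorded above as
   TEMP_VAL_CONST; the host movi (or direct store via tcg_out_sti) is
   produced by temp_load()/temp_sync() only if and when the value actually
   has to materialize in a register or in memory.  */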
2122 
2123 static void tcg_reg_alloc_movi(TCGContext *s, const TCGArg *args,
2124                                TCGLifeData arg_life)
2125 {
2126     TCGTemp *ots = &s->temps[args[0]];
2127     tcg_target_ulong val = args[1];
2128 
2129     tcg_reg_alloc_do_movi(s, ots, val, arg_life);
2130 }
2131 
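/* Register allocation for mov_i32/mov_i64.  A constant source is propagated
   through tcg_reg_alloc_do_movi without emitting a host mov, and when the
   source dies and neither temp sits in a fixed register, the destination
   simply takes over the source's register, again without emitting a mov.  */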
2132 static void tcg_reg_alloc_mov(TCGContext *s, const TCGOpDef *def,
2133                               const TCGArg *args, TCGLifeData arg_life)
2134 {
2135     TCGRegSet allocated_regs;
2136     TCGTemp *ts, *ots;
2137     TCGType otype, itype;
2138 
2139     tcg_regset_set(allocated_regs, s->reserved_regs);
2140     ots = &s->temps[args[0]];
2141     ts = &s->temps[args[1]];
2142 
2143     /* Note that otype != itype for no-op truncation.  */
2144     otype = ots->type;
2145     itype = ts->type;
2146 
2147     if (ts->val_type == TEMP_VAL_CONST) {
2148         /* propagate constant or generate sti */
2149         tcg_target_ulong val = ts->val;
2150         if (IS_DEAD_ARG(1)) {
2151             temp_dead(s, ts);
2152         }
2153         tcg_reg_alloc_do_movi(s, ots, val, arg_life);
2154         return;
2155     }
2156 
2157     /* If the source value is in memory we're going to be forced
2158        to have it in a register in order to perform the copy.  Copy
2159        the SOURCE value into its own register first, so that we
2160        don't have to reload SOURCE the next time it is used. */
2161     if (ts->val_type == TEMP_VAL_MEM) {
2162         temp_load(s, ts, tcg_target_available_regs[itype], allocated_regs);
2163     }
2164 
2165     tcg_debug_assert(ts->val_type == TEMP_VAL_REG);
2166     if (IS_DEAD_ARG(0) && !ots->fixed_reg) {
2167         /* mov to a non-saved dead register makes no sense (even with
2168            liveness analysis disabled). */
2169         tcg_debug_assert(NEED_SYNC_ARG(0));
2170         if (!ots->mem_allocated) {
2171             temp_allocate_frame(s, args[0]);
2172         }
2173         tcg_out_st(s, otype, ts->reg, ots->mem_base->reg, ots->mem_offset);
2174         if (IS_DEAD_ARG(1)) {
2175             temp_dead(s, ts);
2176         }
2177         temp_dead(s, ots);
2178     } else {
2179         if (IS_DEAD_ARG(1) && !ts->fixed_reg && !ots->fixed_reg) {
2180             /* the mov can be suppressed */
2181             if (ots->val_type == TEMP_VAL_REG) {
2182                 s->reg_to_temp[ots->reg] = NULL;
2183             }
2184             ots->reg = ts->reg;
2185             temp_dead(s, ts);
2186         } else {
2187             if (ots->val_type != TEMP_VAL_REG) {
2188                 /* When allocating a new register, make sure to not spill the
2189                    input one. */
2190                 tcg_regset_set_reg(allocated_regs, ts->reg);
2191                 ots->reg = tcg_reg_alloc(s, tcg_target_available_regs[otype],
2192                                          allocated_regs, ots->indirect_base);
2193             }
2194             tcg_out_mov(s, otype, ots->reg, ts->reg);
2195         }
2196         ots->val_type = TEMP_VAL_REG;
2197         ots->mem_coherent = 0;
2198         s->reg_to_temp[ots->reg] = ots;
2199         if (NEED_SYNC_ARG(0)) {
2200             temp_sync(s, ots, allocated_regs, 0);
2201         }
2202     }
2203 }
2204 
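/* Register allocation for an ordinary opcode: satisfy the input constraints
   (possibly accepting a constant directly, possibly copying an input into a
   fresh register when it is aliased to an output via TCG_CT_IALIAS and is
   still live afterwards), then the output constraints, emit the op, and
   finally sync or free the outputs as directed by the liveness data.  */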
2205 static void tcg_reg_alloc_op(TCGContext *s,
2206                              const TCGOpDef *def, TCGOpcode opc,
2207                              const TCGArg *args, TCGLifeData arg_life)
2208 {
2209     TCGRegSet allocated_regs;
2210     int i, k, nb_iargs, nb_oargs;
2211     TCGReg reg;
2212     TCGArg arg;
2213     const TCGArgConstraint *arg_ct;
2214     TCGTemp *ts;
2215     TCGArg new_args[TCG_MAX_OP_ARGS];
2216     int const_args[TCG_MAX_OP_ARGS];
2217 
2218     nb_oargs = def->nb_oargs;
2219     nb_iargs = def->nb_iargs;
2220 
2221     /* copy constants */
2222     memcpy(new_args + nb_oargs + nb_iargs,
2223            args + nb_oargs + nb_iargs,
2224            sizeof(TCGArg) * def->nb_cargs);
2225 
2226     /* satisfy input constraints */
2227     tcg_regset_set(allocated_regs, s->reserved_regs);
2228     for(k = 0; k < nb_iargs; k++) {
2229         i = def->sorted_args[nb_oargs + k];
2230         arg = args[i];
2231         arg_ct = &def->args_ct[i];
2232         ts = &s->temps[arg];
2233 
2234         if (ts->val_type == TEMP_VAL_CONST
2235             && tcg_target_const_match(ts->val, ts->type, arg_ct)) {
2236             /* constant is OK for instruction */
2237             const_args[i] = 1;
2238             new_args[i] = ts->val;
2239             goto iarg_end;
2240         }
2241 
2242         temp_load(s, ts, arg_ct->u.regs, allocated_regs);
2243 
2244         if (arg_ct->ct & TCG_CT_IALIAS) {
2245             if (ts->fixed_reg) {
2246                 /* if fixed register, we must allocate a new register
2247                    if the alias is not the same register */
2248                 if (arg != args[arg_ct->alias_index])
2249                     goto allocate_in_reg;
2250             } else {
2251                 /* if the input is aliased to an output and if it is
2252                    not dead after the instruction, we must allocate
2253                    a new register and move it */
2254                 if (!IS_DEAD_ARG(i)) {
2255                     goto allocate_in_reg;
2256                 }
2257                 /* check if the current register has already been allocated
2258                    for another input aliased to an output */
2259                 int k2, i2;
2260                 for (k2 = 0 ; k2 < k ; k2++) {
2261                     i2 = def->sorted_args[nb_oargs + k2];
2262                     if ((def->args_ct[i2].ct & TCG_CT_IALIAS) &&
2263                         (new_args[i2] == ts->reg)) {
2264                         goto allocate_in_reg;
2265                     }
2266                 }
2267             }
2268         }
2269         reg = ts->reg;
2270         if (tcg_regset_test_reg(arg_ct->u.regs, reg)) {
2271             /* nothing to do : the constraint is satisfied */
2272         } else {
2273         allocate_in_reg:
2274             /* allocate a new register matching the constraint
2275                and move the temporary register into it */
2276             reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs,
2277                                 ts->indirect_base);
2278             tcg_out_mov(s, ts->type, reg, ts->reg);
2279         }
2280         new_args[i] = reg;
2281         const_args[i] = 0;
2282         tcg_regset_set_reg(allocated_regs, reg);
2283     iarg_end: ;
2284     }
2285 
2286     /* mark dead temporaries and free the associated registers */
2287     for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
2288         if (IS_DEAD_ARG(i)) {
2289             temp_dead(s, &s->temps[args[i]]);
2290         }
2291     }
2292 
2293     if (def->flags & TCG_OPF_BB_END) {
2294         tcg_reg_alloc_bb_end(s, allocated_regs);
2295     } else {
2296         if (def->flags & TCG_OPF_CALL_CLOBBER) {
2297             /* XXX: permit generic clobber register list ? */
2298             for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
2299                 if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) {
2300                     tcg_reg_free(s, i, allocated_regs);
2301                 }
2302             }
2303         }
2304         if (def->flags & TCG_OPF_SIDE_EFFECTS) {
2305             /* sync globals if the op has side effects and might trigger
2306                an exception. */
2307             sync_globals(s, allocated_regs);
2308         }
2309 
2310         /* satisfy the output constraints */
2311         tcg_regset_set(allocated_regs, s->reserved_regs);
2312         for(k = 0; k < nb_oargs; k++) {
2313             i = def->sorted_args[k];
2314             arg = args[i];
2315             arg_ct = &def->args_ct[i];
2316             ts = &s->temps[arg];
2317             if (arg_ct->ct & TCG_CT_ALIAS) {
2318                 reg = new_args[arg_ct->alias_index];
2319             } else {
2320                 /* if fixed register, we try to use it */
2321                 reg = ts->reg;
2322                 if (ts->fixed_reg &&
2323                     tcg_regset_test_reg(arg_ct->u.regs, reg)) {
2324                     goto oarg_end;
2325                 }
2326                 reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs,
2327                                     ts->indirect_base);
2328             }
2329             tcg_regset_set_reg(allocated_regs, reg);
2330             /* if a fixed register is used, then a move will be done afterwards */
2331             if (!ts->fixed_reg) {
2332                 if (ts->val_type == TEMP_VAL_REG) {
2333                     s->reg_to_temp[ts->reg] = NULL;
2334                 }
2335                 ts->val_type = TEMP_VAL_REG;
2336                 ts->reg = reg;
2337                 /* temp value is modified, so the value kept in memory is
2338                    potentially not the same */
2339                 ts->mem_coherent = 0;
2340                 s->reg_to_temp[reg] = ts;
2341             }
2342         oarg_end:
2343             new_args[i] = reg;
2344         }
2345     }
2346 
2347     /* emit instruction */
2348     tcg_out_op(s, opc, new_args, const_args);
2349 
2350     /* move the outputs in the correct register if needed */
2351     for(i = 0; i < nb_oargs; i++) {
2352         ts = &s->temps[args[i]];
2353         reg = new_args[i];
2354         if (ts->fixed_reg && ts->reg != reg) {
2355             tcg_out_mov(s, ts->type, ts->reg, reg);
2356         }
2357         if (NEED_SYNC_ARG(i)) {
2358             temp_sync(s, ts, allocated_regs, IS_DEAD_ARG(i));
2359         } else if (IS_DEAD_ARG(i)) {
2360             temp_dead(s, ts);
2361         }
2362     }
2363 }
2364 
2365 #ifdef TCG_TARGET_STACK_GROWSUP
2366 #define STACK_DIR(x) (-(x))
2367 #else
2368 #define STACK_DIR(x) (x)
2369 #endif
2370 
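/* Register allocation for a call: the first ARRAY_SIZE(tcg_target_call_iarg_regs)
   arguments are placed in the target's argument registers and any remaining
   ones are stored to the stack starting at TCG_TARGET_CALL_STACK_OFFSET
   (growing in the direction selected by TCG_TARGET_STACK_GROWSUP).  Call
   clobbered registers are then spilled and globals saved or synced according
   to the call flags before the call itself is emitted.  */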
2371 static void tcg_reg_alloc_call(TCGContext *s, int nb_oargs, int nb_iargs,
2372                                const TCGArg * const args, TCGLifeData arg_life)
2373 {
2374     int flags, nb_regs, i;
2375     TCGReg reg;
2376     TCGArg arg;
2377     TCGTemp *ts;
2378     intptr_t stack_offset;
2379     size_t call_stack_size;
2380     tcg_insn_unit *func_addr;
2381     int allocate_args;
2382     TCGRegSet allocated_regs;
2383 
2384     func_addr = (tcg_insn_unit *)(intptr_t)args[nb_oargs + nb_iargs];
2385     flags = args[nb_oargs + nb_iargs + 1];
2386 
2387     nb_regs = ARRAY_SIZE(tcg_target_call_iarg_regs);
2388     if (nb_regs > nb_iargs) {
2389         nb_regs = nb_iargs;
2390     }
2391 
2392     /* assign stack slots first */
2393     call_stack_size = (nb_iargs - nb_regs) * sizeof(tcg_target_long);
2394     call_stack_size = (call_stack_size + TCG_TARGET_STACK_ALIGN - 1) &
2395         ~(TCG_TARGET_STACK_ALIGN - 1);
2396     allocate_args = (call_stack_size > TCG_STATIC_CALL_ARGS_SIZE);
2397     if (allocate_args) {
2398         /* XXX: if more than TCG_STATIC_CALL_ARGS_SIZE is needed,
2399            preallocate call stack */
2400         tcg_abort();
2401     }
2402 
2403     stack_offset = TCG_TARGET_CALL_STACK_OFFSET;
2404     for(i = nb_regs; i < nb_iargs; i++) {
2405         arg = args[nb_oargs + i];
2406 #ifdef TCG_TARGET_STACK_GROWSUP
2407         stack_offset -= sizeof(tcg_target_long);
2408 #endif
2409         if (arg != TCG_CALL_DUMMY_ARG) {
2410             ts = &s->temps[arg];
2411             temp_load(s, ts, tcg_target_available_regs[ts->type],
2412                       s->reserved_regs);
2413             tcg_out_st(s, ts->type, ts->reg, TCG_REG_CALL_STACK, stack_offset);
2414         }
2415 #ifndef TCG_TARGET_STACK_GROWSUP
2416         stack_offset += sizeof(tcg_target_long);
2417 #endif
2418     }
2419 
2420     /* assign input registers */
2421     tcg_regset_set(allocated_regs, s->reserved_regs);
2422     for(i = 0; i < nb_regs; i++) {
2423         arg = args[nb_oargs + i];
2424         if (arg != TCG_CALL_DUMMY_ARG) {
2425             ts = &s->temps[arg];
2426             reg = tcg_target_call_iarg_regs[i];
2427             tcg_reg_free(s, reg, allocated_regs);
2428 
2429             if (ts->val_type == TEMP_VAL_REG) {
2430                 if (ts->reg != reg) {
2431                     tcg_out_mov(s, ts->type, reg, ts->reg);
2432                 }
2433             } else {
2434                 TCGRegSet arg_set;
2435 
2436                 tcg_regset_clear(arg_set);
2437                 tcg_regset_set_reg(arg_set, reg);
2438                 temp_load(s, ts, arg_set, allocated_regs);
2439             }
2440 
2441             tcg_regset_set_reg(allocated_regs, reg);
2442         }
2443     }
2444 
2445     /* mark dead temporaries and free the associated registers */
2446     for(i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
2447         if (IS_DEAD_ARG(i)) {
2448             temp_dead(s, &s->temps[args[i]]);
2449         }
2450     }
2451 
2452     /* clobber call registers */
2453     for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
2454         if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) {
2455             tcg_reg_free(s, i, allocated_regs);
2456         }
2457     }
2458 
2459     /* Save globals if they might be written by the helper, sync them if
2460        they might be read. */
2461     if (flags & TCG_CALL_NO_READ_GLOBALS) {
2462         /* Nothing to do */
2463     } else if (flags & TCG_CALL_NO_WRITE_GLOBALS) {
2464         sync_globals(s, allocated_regs);
2465     } else {
2466         save_globals(s, allocated_regs);
2467     }
2468 
2469     tcg_out_call(s, func_addr);
2470 
2471     /* assign output registers and emit moves if needed */
2472     for(i = 0; i < nb_oargs; i++) {
2473         arg = args[i];
2474         ts = &s->temps[arg];
2475         reg = tcg_target_call_oarg_regs[i];
2476         tcg_debug_assert(s->reg_to_temp[reg] == NULL);
2477 
2478         if (ts->fixed_reg) {
2479             if (ts->reg != reg) {
2480                 tcg_out_mov(s, ts->type, ts->reg, reg);
2481             }
2482         } else {
2483             if (ts->val_type == TEMP_VAL_REG) {
2484                 s->reg_to_temp[ts->reg] = NULL;
2485             }
2486             ts->val_type = TEMP_VAL_REG;
2487             ts->reg = reg;
2488             ts->mem_coherent = 0;
2489             s->reg_to_temp[reg] = ts;
2490             if (NEED_SYNC_ARG(i)) {
2491                 temp_sync(s, ts, allocated_regs, IS_DEAD_ARG(i));
2492             } else if (IS_DEAD_ARG(i)) {
2493                 temp_dead(s, ts);
2494             }
2495         }
2496     }
2497 }
2498 
2499 #ifdef CONFIG_PROFILER
2500 
2501 static int64_t tcg_table_op_count[NB_OPS];
2502 
2503 void tcg_dump_op_count(FILE *f, fprintf_function cpu_fprintf)
2504 {
2505     int i;
2506 
2507     for (i = 0; i < NB_OPS; i++) {
2508         cpu_fprintf(f, "%s %" PRId64 "\n", tcg_op_defs[i].name,
2509                     tcg_table_op_count[i]);
2510     }
2511 }
2512 #else
2513 void tcg_dump_op_count(FILE *f, fprintf_function cpu_fprintf)
2514 {
2515     cpu_fprintf(f, "[TCG profiler not compiled]\n");
2516 }
2517 #endif
2518 
2519 
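/* Translate the ops accumulated in gen_op_buf into host code at tb->tc_ptr.
   The pipeline below is: optional tcg_optimize(), liveness_pass_1(), and,
   if indirect globals exist, liveness_pass_2() followed by a second
   liveness_pass_1(); then one register-allocation/emission step per op.
   Returns the generated code size, or -1 if the code buffer overflowed or
   the TB could not be finalized.  */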
2520 int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
2521 {
2522     int i, oi, oi_next, num_insns;
2523 
2524 #ifdef CONFIG_PROFILER
2525     {
2526         int n;
2527 
2528         n = s->gen_op_buf[0].prev + 1;
2529         s->op_count += n;
2530         if (n > s->op_count_max) {
2531             s->op_count_max = n;
2532         }
2533 
2534         n = s->nb_temps;
2535         s->temp_count += n;
2536         if (n > s->temp_count_max) {
2537             s->temp_count_max = n;
2538         }
2539     }
2540 #endif
2541 
2542 #ifdef DEBUG_DISAS
2543     if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)
2544                  && qemu_log_in_addr_range(tb->pc))) {
2545         qemu_log("OP:\n");
2546         tcg_dump_ops(s);
2547         qemu_log("\n");
2548     }
2549 #endif
2550 
2551 #ifdef CONFIG_PROFILER
2552     s->opt_time -= profile_getclock();
2553 #endif
2554 
2555 #ifdef USE_TCG_OPTIMIZATIONS
2556     tcg_optimize(s);
2557 #endif
2558 
2559 #ifdef CONFIG_PROFILER
2560     s->opt_time += profile_getclock();
2561     s->la_time -= profile_getclock();
2562 #endif
2563 
2564     {
2565         uint8_t *temp_state = tcg_malloc(s->nb_temps + s->nb_indirects);
2566 
2567         liveness_pass_1(s, temp_state);
2568 
2569         if (s->nb_indirects > 0) {
2570 #ifdef DEBUG_DISAS
2571             if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_IND)
2572                          && qemu_log_in_addr_range(tb->pc))) {
2573                 qemu_log("OP before indirect lowering:\n");
2574                 tcg_dump_ops(s);
2575                 qemu_log("\n");
2576             }
2577 #endif
2578             /* Replace indirect temps with direct temps.  */
2579             if (liveness_pass_2(s, temp_state)) {
2580                 /* If changes were made, re-run liveness.  */
2581                 liveness_pass_1(s, temp_state);
2582             }
2583         }
2584     }
2585 
2586 #ifdef CONFIG_PROFILER
2587     s->la_time += profile_getclock();
2588 #endif
2589 
2590 #ifdef DEBUG_DISAS
2591     if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_OPT)
2592                  && qemu_log_in_addr_range(tb->pc))) {
2593         qemu_log("OP after optimization and liveness analysis:\n");
2594         tcg_dump_ops(s);
2595         qemu_log("\n");
2596     }
2597 #endif
2598 
2599     tcg_reg_alloc_start(s);
2600 
2601     s->code_buf = tb->tc_ptr;
2602     s->code_ptr = tb->tc_ptr;
2603 
2604     tcg_out_tb_init(s);
2605 
2606     num_insns = -1;
2607     for (oi = s->gen_op_buf[0].next; oi != 0; oi = oi_next) {
2608         TCGOp * const op = &s->gen_op_buf[oi];
2609         TCGArg * const args = &s->gen_opparam_buf[op->args];
2610         TCGOpcode opc = op->opc;
2611         const TCGOpDef *def = &tcg_op_defs[opc];
2612         TCGLifeData arg_life = op->life;
2613 
2614         oi_next = op->next;
2615 #ifdef CONFIG_PROFILER
2616         tcg_table_op_count[opc]++;
2617 #endif
2618 
2619         switch (opc) {
2620         case INDEX_op_mov_i32:
2621         case INDEX_op_mov_i64:
2622             tcg_reg_alloc_mov(s, def, args, arg_life);
2623             break;
2624         case INDEX_op_movi_i32:
2625         case INDEX_op_movi_i64:
2626             tcg_reg_alloc_movi(s, args, arg_life);
2627             break;
2628         case INDEX_op_insn_start:
2629             if (num_insns >= 0) {
2630                 s->gen_insn_end_off[num_insns] = tcg_current_code_size(s);
2631             }
2632             num_insns++;
2633             for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
2634                 target_ulong a;
2635 #if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
2636                 a = ((target_ulong)args[i * 2 + 1] << 32) | args[i * 2];
2637 #else
2638                 a = args[i];
2639 #endif
2640                 s->gen_insn_data[num_insns][i] = a;
2641             }
2642             break;
2643         case INDEX_op_discard:
2644             temp_dead(s, &s->temps[args[0]]);
2645             break;
2646         case INDEX_op_set_label:
2647             tcg_reg_alloc_bb_end(s, s->reserved_regs);
2648             tcg_out_label(s, arg_label(args[0]), s->code_ptr);
2649             break;
2650         case INDEX_op_call:
2651             tcg_reg_alloc_call(s, op->callo, op->calli, args, arg_life);
2652             break;
2653         default:
2654             /* Sanity check that we've not introduced any unhandled opcodes. */
2655             if (def->flags & TCG_OPF_NOT_PRESENT) {
2656                 tcg_abort();
2657             }
2658             /* Note: it would be much faster to have specialized
2659                register allocator functions for some common argument
2660                patterns.  */
2661             tcg_reg_alloc_op(s, def, opc, args, arg_life);
2662             break;
2663         }
2664 #ifdef CONFIG_DEBUG_TCG
2665         check_regs(s);
2666 #endif
2667         /* Test for (pending) buffer overflow.  The assumption is that any
2668            one operation beginning below the high water mark cannot overrun
2669            the buffer completely.  Thus we can test for overflow after
2670            generating code without having to check during generation.  */
2671         if (unlikely((void *)s->code_ptr > s->code_gen_highwater)) {
2672             return -1;
2673         }
2674     }
2675     tcg_debug_assert(num_insns >= 0);
2676     s->gen_insn_end_off[num_insns] = tcg_current_code_size(s);
2677 
2678     /* Generate TB finalization at the end of block */
2679     if (!tcg_out_tb_finalize(s)) {
2680         return -1;
2681     }
2682 
2683     /* flush instruction cache */
2684     flush_icache_range((uintptr_t)s->code_buf, (uintptr_t)s->code_ptr);
2685 
2686     return tcg_current_code_size(s);
2687 }
2688 
2689 #ifdef CONFIG_PROFILER
2690 void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf)
2691 {
2692     TCGContext *s = &tcg_ctx;
2693     int64_t tb_count = s->tb_count;
2694     int64_t tb_div_count = tb_count ? tb_count : 1;
2695     int64_t tot = s->interm_time + s->code_time;
2696 
2697     cpu_fprintf(f, "JIT cycles          %" PRId64 " (%0.3f s at 2.4 GHz)\n",
2698                 tot, tot / 2.4e9);
2699     cpu_fprintf(f, "translated TBs      %" PRId64 " (aborted=%" PRId64 " %0.1f%%)\n",
2700                 tb_count, s->tb_count1 - tb_count,
2701                 (double)(s->tb_count1 - s->tb_count)
2702                 / (s->tb_count1 ? s->tb_count1 : 1) * 100.0);
2703     cpu_fprintf(f, "avg ops/TB          %0.1f max=%d\n",
2704                 (double)s->op_count / tb_div_count, s->op_count_max);
2705     cpu_fprintf(f, "deleted ops/TB      %0.2f\n",
2706                 (double)s->del_op_count / tb_div_count);
2707     cpu_fprintf(f, "avg temps/TB        %0.2f max=%d\n",
2708                 (double)s->temp_count / tb_div_count, s->temp_count_max);
2709     cpu_fprintf(f, "avg host code/TB    %0.1f\n",
2710                 (double)s->code_out_len / tb_div_count);
2711     cpu_fprintf(f, "avg search data/TB  %0.1f\n",
2712                 (double)s->search_out_len / tb_div_count);
2713 
2714     cpu_fprintf(f, "cycles/op           %0.1f\n",
2715                 s->op_count ? (double)tot / s->op_count : 0);
2716     cpu_fprintf(f, "cycles/in byte      %0.1f\n",
2717                 s->code_in_len ? (double)tot / s->code_in_len : 0);
2718     cpu_fprintf(f, "cycles/out byte     %0.1f\n",
2719                 s->code_out_len ? (double)tot / s->code_out_len : 0);
2720     cpu_fprintf(f, "cycles/search byte  %0.1f\n",
2721                 s->search_out_len ? (double)tot / s->search_out_len : 0);
2722     if (tot == 0) {
2723         tot = 1;
2724     }
2725     cpu_fprintf(f, "  gen_interm time   %0.1f%%\n",
2726                 (double)s->interm_time / tot * 100.0);
2727     cpu_fprintf(f, "  gen_code time     %0.1f%%\n",
2728                 (double)s->code_time / tot * 100.0);
2729     cpu_fprintf(f, "optim./code time    %0.1f%%\n",
2730                 (double)s->opt_time / (s->code_time ? s->code_time : 1)
2731                 * 100.0);
2732     cpu_fprintf(f, "liveness/code time  %0.1f%%\n",
2733                 (double)s->la_time / (s->code_time ? s->code_time : 1) * 100.0);
2734     cpu_fprintf(f, "cpu_restore count   %" PRId64 "\n",
2735                 s->restore_count);
2736     cpu_fprintf(f, "  avg cycles        %0.1f\n",
2737                 s->restore_count ? (double)s->restore_time / s->restore_count : 0);
2738 }
2739 #else
2740 void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf)
2741 {
2742     cpu_fprintf(f, "[TCG profiler not compiled]\n");
2743 }
2744 #endif
2745 
2746 #ifdef ELF_HOST_MACHINE
2747 /* In order to use this feature, the backend needs to do three things:
2748 
2749    (1) Define ELF_HOST_MACHINE to indicate both what value to
2750        put into the ELF image and to indicate support for the feature.
2751 
2752    (2) Define tcg_register_jit.  This should create a buffer containing
2753        the contents of a .debug_frame section that describes the post-
2754        prologue unwind info for the tcg machine.
2755 
2756    (3) Call tcg_register_jit_int, with the constructed .debug_frame.
2757 */
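/* A minimal sketch of what a backend provides (the DebugFrame type and its
   contents are backend specific and only illustrative here):

       #define ELF_HOST_MACHINE  EM_X86_64     (in tcg-target.h, e.g. x86-64)

       static const DebugFrame debug_frame = { ... };

       void tcg_register_jit(void *buf, size_t buf_size)
       {
           tcg_register_jit_int(buf, buf_size,
                                &debug_frame, sizeof(debug_frame));
       }
*/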
2758 
2759 /* Begin GDB interface.  THE FOLLOWING MUST MATCH GDB DOCS.  */
2760 typedef enum {
2761     JIT_NOACTION = 0,
2762     JIT_REGISTER_FN,
2763     JIT_UNREGISTER_FN
2764 } jit_actions_t;
2765 
2766 struct jit_code_entry {
2767     struct jit_code_entry *next_entry;
2768     struct jit_code_entry *prev_entry;
2769     const void *symfile_addr;
2770     uint64_t symfile_size;
2771 };
2772 
2773 struct jit_descriptor {
2774     uint32_t version;
2775     uint32_t action_flag;
2776     struct jit_code_entry *relevant_entry;
2777     struct jit_code_entry *first_entry;
2778 };
2779 
2780 void __jit_debug_register_code(void) __attribute__((noinline));
2781 void __jit_debug_register_code(void)
2782 {
2783     asm("");
2784 }
2785 
2786 /* Must statically initialize the version, because GDB may check
2787    the version before we can set it.  */
2788 struct jit_descriptor __jit_debug_descriptor = { 1, 0, 0, 0 };
2789 
2790 /* End GDB interface.  */
2791 
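/* Return the offset of STR within STRTAB.  STR must be present: the callers
   below only pass strings known to be in img_template.str, so no terminating
   bounds check is needed.  */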
2792 static int find_string(const char *strtab, const char *str)
2793 {
2794     const char *p = strtab + 1;
2795 
2796     while (1) {
2797         if (strcmp(p, str) == 0) {
2798             return p - strtab;
2799         }
2800         p += strlen(p) + 1;
2801     }
2802 }
2803 
2804 static void tcg_register_jit_int(void *buf_ptr, size_t buf_size,
2805                                  const void *debug_frame,
2806                                  size_t debug_frame_size)
2807 {
2808     struct __attribute__((packed)) DebugInfo {
2809         uint32_t  len;
2810         uint16_t  version;
2811         uint32_t  abbrev;
2812         uint8_t   ptr_size;
2813         uint8_t   cu_die;
2814         uint16_t  cu_lang;
2815         uintptr_t cu_low_pc;
2816         uintptr_t cu_high_pc;
2817         uint8_t   fn_die;
2818         char      fn_name[16];
2819         uintptr_t fn_low_pc;
2820         uintptr_t fn_high_pc;
2821         uint8_t   cu_eoc;
2822     };
2823 
2824     struct ElfImage {
2825         ElfW(Ehdr) ehdr;
2826         ElfW(Phdr) phdr;
2827         ElfW(Shdr) shdr[7];
2828         ElfW(Sym)  sym[2];
2829         struct DebugInfo di;
2830         uint8_t    da[24];
2831         char       str[80];
2832     };
2833 
2834     struct ElfImage *img;
2835 
2836     static const struct ElfImage img_template = {
2837         .ehdr = {
2838             .e_ident[EI_MAG0] = ELFMAG0,
2839             .e_ident[EI_MAG1] = ELFMAG1,
2840             .e_ident[EI_MAG2] = ELFMAG2,
2841             .e_ident[EI_MAG3] = ELFMAG3,
2842             .e_ident[EI_CLASS] = ELF_CLASS,
2843             .e_ident[EI_DATA] = ELF_DATA,
2844             .e_ident[EI_VERSION] = EV_CURRENT,
2845             .e_type = ET_EXEC,
2846             .e_machine = ELF_HOST_MACHINE,
2847             .e_version = EV_CURRENT,
2848             .e_phoff = offsetof(struct ElfImage, phdr),
2849             .e_shoff = offsetof(struct ElfImage, shdr),
2850             .e_ehsize = sizeof(ElfW(Ehdr)),
2851             .e_phentsize = sizeof(ElfW(Phdr)),
2852             .e_phnum = 1,
2853             .e_shentsize = sizeof(ElfW(Shdr)),
2854             .e_shnum = ARRAY_SIZE(img->shdr),
2855             .e_shstrndx = ARRAY_SIZE(img->shdr) - 1,
2856 #ifdef ELF_HOST_FLAGS
2857             .e_flags = ELF_HOST_FLAGS,
2858 #endif
2859 #ifdef ELF_OSABI
2860             .e_ident[EI_OSABI] = ELF_OSABI,
2861 #endif
2862         },
2863         .phdr = {
2864             .p_type = PT_LOAD,
2865             .p_flags = PF_X,
2866         },
2867         .shdr = {
2868             [0] = { .sh_type = SHT_NULL },
2869             /* Trick: The contents of code_gen_buffer are not present in
2870                this fake ELF file; that got allocated elsewhere.  Therefore
2871                we mark .text as SHT_NOBITS (similar to .bss) so that readers
2872                will not look for contents.  We can record any address.  */
2873             [1] = { /* .text */
2874                 .sh_type = SHT_NOBITS,
2875                 .sh_flags = SHF_EXECINSTR | SHF_ALLOC,
2876             },
2877             [2] = { /* .debug_info */
2878                 .sh_type = SHT_PROGBITS,
2879                 .sh_offset = offsetof(struct ElfImage, di),
2880                 .sh_size = sizeof(struct DebugInfo),
2881             },
2882             [3] = { /* .debug_abbrev */
2883                 .sh_type = SHT_PROGBITS,
2884                 .sh_offset = offsetof(struct ElfImage, da),
2885                 .sh_size = sizeof(img->da),
2886             },
2887             [4] = { /* .debug_frame */
2888                 .sh_type = SHT_PROGBITS,
2889                 .sh_offset = sizeof(struct ElfImage),
2890             },
2891             [5] = { /* .symtab */
2892                 .sh_type = SHT_SYMTAB,
2893                 .sh_offset = offsetof(struct ElfImage, sym),
2894                 .sh_size = sizeof(img->sym),
2895                 .sh_info = 1,
2896                 .sh_link = ARRAY_SIZE(img->shdr) - 1,
2897                 .sh_entsize = sizeof(ElfW(Sym)),
2898             },
2899             [6] = { /* .strtab */
2900                 .sh_type = SHT_STRTAB,
2901                 .sh_offset = offsetof(struct ElfImage, str),
2902                 .sh_size = sizeof(img->str),
2903             }
2904         },
2905         .sym = {
2906             [1] = { /* code_gen_buffer */
2907                 .st_info = ELF_ST_INFO(STB_GLOBAL, STT_FUNC),
2908                 .st_shndx = 1,
2909             }
2910         },
2911         .di = {
2912             .len = sizeof(struct DebugInfo) - 4,
2913             .version = 2,
2914             .ptr_size = sizeof(void *),
2915             .cu_die = 1,
2916             .cu_lang = 0x8001,  /* DW_LANG_Mips_Assembler */
2917             .fn_die = 2,
2918             .fn_name = "code_gen_buffer"
2919         },
2920         .da = {
2921             1,          /* abbrev number (the cu) */
2922             0x11, 1,    /* DW_TAG_compile_unit, has children */
2923             0x13, 0x5,  /* DW_AT_language, DW_FORM_data2 */
2924             0x11, 0x1,  /* DW_AT_low_pc, DW_FORM_addr */
2925             0x12, 0x1,  /* DW_AT_high_pc, DW_FORM_addr */
2926             0, 0,       /* end of abbrev */
2927             2,          /* abbrev number (the fn) */
2928             0x2e, 0,    /* DW_TAG_subprogram, no children */
2929             0x3, 0x8,   /* DW_AT_name, DW_FORM_string */
2930             0x11, 0x1,  /* DW_AT_low_pc, DW_FORM_addr */
2931             0x12, 0x1,  /* DW_AT_high_pc, DW_FORM_addr */
2932             0, 0,       /* end of abbrev */
2933             0           /* no more abbrev */
2934         },
2935         .str = "\0" ".text\0" ".debug_info\0" ".debug_abbrev\0"
2936                ".debug_frame\0" ".symtab\0" ".strtab\0" "code_gen_buffer",
2937     };
2938 
2939     /* We only need a single jit entry; statically allocate it.  */
2940     static struct jit_code_entry one_entry;
2941 
2942     uintptr_t buf = (uintptr_t)buf_ptr;
2943     size_t img_size = sizeof(struct ElfImage) + debug_frame_size;
2944     DebugFrameHeader *dfh;
2945 
2946     img = g_malloc(img_size);
2947     *img = img_template;
2948 
2949     img->phdr.p_vaddr = buf;
2950     img->phdr.p_paddr = buf;
2951     img->phdr.p_memsz = buf_size;
2952 
2953     img->shdr[1].sh_name = find_string(img->str, ".text");
2954     img->shdr[1].sh_addr = buf;
2955     img->shdr[1].sh_size = buf_size;
2956 
2957     img->shdr[2].sh_name = find_string(img->str, ".debug_info");
2958     img->shdr[3].sh_name = find_string(img->str, ".debug_abbrev");
2959 
2960     img->shdr[4].sh_name = find_string(img->str, ".debug_frame");
2961     img->shdr[4].sh_size = debug_frame_size;
2962 
2963     img->shdr[5].sh_name = find_string(img->str, ".symtab");
2964     img->shdr[6].sh_name = find_string(img->str, ".strtab");
2965 
2966     img->sym[1].st_name = find_string(img->str, "code_gen_buffer");
2967     img->sym[1].st_value = buf;
2968     img->sym[1].st_size = buf_size;
2969 
2970     img->di.cu_low_pc = buf;
2971     img->di.cu_high_pc = buf + buf_size;
2972     img->di.fn_low_pc = buf;
2973     img->di.fn_high_pc = buf + buf_size;
2974 
2975     dfh = (DebugFrameHeader *)(img + 1);
2976     memcpy(dfh, debug_frame, debug_frame_size);
2977     dfh->fde.func_start = buf;
2978     dfh->fde.func_len = buf_size;
2979 
2980 #ifdef DEBUG_JIT
2981     /* Enable this block to be able to debug the ELF image file creation.
2982        One can use readelf, objdump, or other inspection utilities.  */
2983     {
2984         FILE *f = fopen("/tmp/qemu.jit", "w+b");
2985         if (f) {
2986             if (fwrite(img, img_size, 1, f) != 1) {
2987                 /* Avoid stupid unused return value warning for fwrite.  */
2988             }
2989             fclose(f);
2990         }
2991     }
2992 #endif
2993 
2994     one_entry.symfile_addr = img;
2995     one_entry.symfile_size = img_size;
2996 
2997     __jit_debug_descriptor.action_flag = JIT_REGISTER_FN;
2998     __jit_debug_descriptor.relevant_entry = &one_entry;
2999     __jit_debug_descriptor.first_entry = &one_entry;
3000     __jit_debug_register_code();
3001 }
3002 #else
3003 /* No support for the feature.  Provide the entry point expected by exec.c,
3004    and implement the internal function we declared earlier.  */
3005 
3006 static void tcg_register_jit_int(void *buf, size_t size,
3007                                  const void *debug_frame,
3008                                  size_t debug_frame_size)
3009 {
3010 }
3011 
3012 void tcg_register_jit(void *buf, size_t buf_size)
3013 {
3014 }
3015 #endif /* ELF_HOST_MACHINE */
3016