1 /*
2  * Tiny Code Generator for QEMU
3  *
4  * Copyright (c) 2008 Fabrice Bellard
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to deal
8  * in the Software without restriction, including without limitation the rights
9  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10  * copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22  * THE SOFTWARE.
23  */
24 
25 #ifndef TCG_H
26 #define TCG_H
27 
28 #include "cpu.h"
29 #include "exec/memop.h"
30 #include "qemu/bitops.h"
31 #include "qemu/plugin.h"
32 #include "qemu/queue.h"
33 #include "tcg/tcg-mo.h"
34 #include "tcg-target.h"
35 #include "qemu/int128.h"
36 
37 /* XXX: make safe guess about sizes */
38 #define MAX_OP_PER_INSTR 266
39 
40 #if HOST_LONG_BITS == 32
41 #define MAX_OPC_PARAM_PER_ARG 2
42 #else
43 #define MAX_OPC_PARAM_PER_ARG 1
44 #endif
45 #define MAX_OPC_PARAM_IARGS 6
46 #define MAX_OPC_PARAM_OARGS 1
47 #define MAX_OPC_PARAM_ARGS (MAX_OPC_PARAM_IARGS + MAX_OPC_PARAM_OARGS)
48 
49 /* A Call op needs up to 4 + 2N parameters on 32-bit archs,
50  * and up to 4 + N parameters on 64-bit archs
51  * (N = number of input arguments + output arguments).  */
52 #define MAX_OPC_PARAM (4 + (MAX_OPC_PARAM_PER_ARG * MAX_OPC_PARAM_ARGS))
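/*
 * Worked example (editorial note, not part of the original header): with
 * MAX_OPC_PARAM_IARGS = 6 and MAX_OPC_PARAM_OARGS = 1, N = 7, so a call op
 * needs at most 4 + 2 * 7 = 18 TCGArg slots on a 32-bit host
 * (MAX_OPC_PARAM_PER_ARG = 2) and 4 + 1 * 7 = 11 slots on a 64-bit host.
 */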
53 
54 #define CPU_TEMP_BUF_NLONGS 128
55 
56 /* Default target word size to pointer size.  */
57 #ifndef TCG_TARGET_REG_BITS
58 # if UINTPTR_MAX == UINT32_MAX
59 #  define TCG_TARGET_REG_BITS 32
60 # elif UINTPTR_MAX == UINT64_MAX
61 #  define TCG_TARGET_REG_BITS 64
62 # else
63 #  error Unknown pointer size for tcg target
64 # endif
65 #endif
66 
67 #if TCG_TARGET_REG_BITS == 32
68 typedef int32_t tcg_target_long;
69 typedef uint32_t tcg_target_ulong;
70 #define TCG_PRIlx PRIx32
71 #define TCG_PRIld PRId32
72 #elif TCG_TARGET_REG_BITS == 64
73 typedef int64_t tcg_target_long;
74 typedef uint64_t tcg_target_ulong;
75 #define TCG_PRIlx PRIx64
76 #define TCG_PRIld PRId64
77 #else
78 #error unsupported
79 #endif
80 
81 /* Oversized TCG guests make things like MTTCG hard
82  * as we can't use atomics for cputlb updates.
83  */
84 #if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
85 #define TCG_OVERSIZED_GUEST 1
86 #else
87 #define TCG_OVERSIZED_GUEST 0
88 #endif
89 
90 #if TCG_TARGET_NB_REGS <= 32
91 typedef uint32_t TCGRegSet;
92 #elif TCG_TARGET_NB_REGS <= 64
93 typedef uint64_t TCGRegSet;
94 #else
95 #error unsupported
96 #endif
97 
98 #if TCG_TARGET_REG_BITS == 32
99 /* Turn some undef macros into false macros.  */
100 #define TCG_TARGET_HAS_extrl_i64_i32    0
101 #define TCG_TARGET_HAS_extrh_i64_i32    0
102 #define TCG_TARGET_HAS_div_i64          0
103 #define TCG_TARGET_HAS_rem_i64          0
104 #define TCG_TARGET_HAS_div2_i64         0
105 #define TCG_TARGET_HAS_rot_i64          0
106 #define TCG_TARGET_HAS_ext8s_i64        0
107 #define TCG_TARGET_HAS_ext16s_i64       0
108 #define TCG_TARGET_HAS_ext32s_i64       0
109 #define TCG_TARGET_HAS_ext8u_i64        0
110 #define TCG_TARGET_HAS_ext16u_i64       0
111 #define TCG_TARGET_HAS_ext32u_i64       0
112 #define TCG_TARGET_HAS_bswap16_i64      0
113 #define TCG_TARGET_HAS_bswap32_i64      0
114 #define TCG_TARGET_HAS_bswap64_i64      0
115 #define TCG_TARGET_HAS_neg_i64          0
116 #define TCG_TARGET_HAS_not_i64          0
117 #define TCG_TARGET_HAS_andc_i64         0
118 #define TCG_TARGET_HAS_orc_i64          0
119 #define TCG_TARGET_HAS_eqv_i64          0
120 #define TCG_TARGET_HAS_nand_i64         0
121 #define TCG_TARGET_HAS_nor_i64          0
122 #define TCG_TARGET_HAS_clz_i64          0
123 #define TCG_TARGET_HAS_ctz_i64          0
124 #define TCG_TARGET_HAS_ctpop_i64        0
125 #define TCG_TARGET_HAS_deposit_i64      0
126 #define TCG_TARGET_HAS_extract_i64      0
127 #define TCG_TARGET_HAS_sextract_i64     0
128 #define TCG_TARGET_HAS_extract2_i64     0
129 #define TCG_TARGET_HAS_movcond_i64      0
130 #define TCG_TARGET_HAS_add2_i64         0
131 #define TCG_TARGET_HAS_sub2_i64         0
132 #define TCG_TARGET_HAS_mulu2_i64        0
133 #define TCG_TARGET_HAS_muls2_i64        0
134 #define TCG_TARGET_HAS_muluh_i64        0
135 #define TCG_TARGET_HAS_mulsh_i64        0
136 /* Turn some undef macros into true macros.  */
137 #define TCG_TARGET_HAS_add2_i32         1
138 #define TCG_TARGET_HAS_sub2_i32         1
139 #endif
140 
141 #ifndef TCG_TARGET_deposit_i32_valid
142 #define TCG_TARGET_deposit_i32_valid(ofs, len) 1
143 #endif
144 #ifndef TCG_TARGET_deposit_i64_valid
145 #define TCG_TARGET_deposit_i64_valid(ofs, len) 1
146 #endif
147 #ifndef TCG_TARGET_extract_i32_valid
148 #define TCG_TARGET_extract_i32_valid(ofs, len) 1
149 #endif
150 #ifndef TCG_TARGET_extract_i64_valid
151 #define TCG_TARGET_extract_i64_valid(ofs, len) 1
152 #endif
153 
154 /* Only one of DIV or DIV2 should be defined.  */
155 #if defined(TCG_TARGET_HAS_div_i32)
156 #define TCG_TARGET_HAS_div2_i32         0
157 #elif defined(TCG_TARGET_HAS_div2_i32)
158 #define TCG_TARGET_HAS_div_i32          0
159 #define TCG_TARGET_HAS_rem_i32          0
160 #endif
161 #if defined(TCG_TARGET_HAS_div_i64)
162 #define TCG_TARGET_HAS_div2_i64         0
163 #elif defined(TCG_TARGET_HAS_div2_i64)
164 #define TCG_TARGET_HAS_div_i64          0
165 #define TCG_TARGET_HAS_rem_i64          0
166 #endif
167 
168 /* For 32-bit targets, some sort of unsigned widening multiply is required.  */
169 #if TCG_TARGET_REG_BITS == 32 \
170     && !(defined(TCG_TARGET_HAS_mulu2_i32) \
171          || defined(TCG_TARGET_HAS_muluh_i32))
172 # error "Missing unsigned widening multiply"
173 #endif
174 
175 #if !defined(TCG_TARGET_HAS_v64) \
176     && !defined(TCG_TARGET_HAS_v128) \
177     && !defined(TCG_TARGET_HAS_v256)
178 #define TCG_TARGET_MAYBE_vec            0
179 #define TCG_TARGET_HAS_abs_vec          0
180 #define TCG_TARGET_HAS_neg_vec          0
181 #define TCG_TARGET_HAS_not_vec          0
182 #define TCG_TARGET_HAS_andc_vec         0
183 #define TCG_TARGET_HAS_orc_vec          0
184 #define TCG_TARGET_HAS_roti_vec         0
185 #define TCG_TARGET_HAS_rots_vec         0
186 #define TCG_TARGET_HAS_rotv_vec         0
187 #define TCG_TARGET_HAS_shi_vec          0
188 #define TCG_TARGET_HAS_shs_vec          0
189 #define TCG_TARGET_HAS_shv_vec          0
190 #define TCG_TARGET_HAS_mul_vec          0
191 #define TCG_TARGET_HAS_sat_vec          0
192 #define TCG_TARGET_HAS_minmax_vec       0
193 #define TCG_TARGET_HAS_bitsel_vec       0
194 #define TCG_TARGET_HAS_cmpsel_vec       0
195 #else
196 #define TCG_TARGET_MAYBE_vec            1
197 #endif
198 #ifndef TCG_TARGET_HAS_v64
199 #define TCG_TARGET_HAS_v64              0
200 #endif
201 #ifndef TCG_TARGET_HAS_v128
202 #define TCG_TARGET_HAS_v128             0
203 #endif
204 #ifndef TCG_TARGET_HAS_v256
205 #define TCG_TARGET_HAS_v256             0
206 #endif
207 
208 #ifndef TARGET_INSN_START_EXTRA_WORDS
209 # define TARGET_INSN_START_WORDS 1
210 #else
211 # define TARGET_INSN_START_WORDS (1 + TARGET_INSN_START_EXTRA_WORDS)
212 #endif
213 
214 typedef enum TCGOpcode {
215 #define DEF(name, oargs, iargs, cargs, flags) INDEX_op_ ## name,
216 #include "tcg/tcg-opc.h"
217 #undef DEF
218     NB_OPS,
219 } TCGOpcode;
220 
221 #define tcg_regset_set_reg(d, r)   ((d) |= (TCGRegSet)1 << (r))
222 #define tcg_regset_reset_reg(d, r) ((d) &= ~((TCGRegSet)1 << (r)))
223 #define tcg_regset_test_reg(d, r)  (((d) >> (r)) & 1)
224 
225 #ifndef TCG_TARGET_INSN_UNIT_SIZE
226 # error "Missing TCG_TARGET_INSN_UNIT_SIZE"
227 #elif TCG_TARGET_INSN_UNIT_SIZE == 1
228 typedef uint8_t tcg_insn_unit;
229 #elif TCG_TARGET_INSN_UNIT_SIZE == 2
230 typedef uint16_t tcg_insn_unit;
231 #elif TCG_TARGET_INSN_UNIT_SIZE == 4
232 typedef uint32_t tcg_insn_unit;
233 #elif TCG_TARGET_INSN_UNIT_SIZE == 8
234 typedef uint64_t tcg_insn_unit;
235 #else
236 /* The port better have done this.  */
237 #endif
238 
239 
240 #if defined CONFIG_DEBUG_TCG || defined QEMU_STATIC_ANALYSIS
241 # define tcg_debug_assert(X) do { assert(X); } while (0)
242 #else
243 # define tcg_debug_assert(X) \
244     do { if (!(X)) { __builtin_unreachable(); } } while (0)
245 #endif
246 
247 typedef struct TCGRelocation TCGRelocation;
248 struct TCGRelocation {
249     QSIMPLEQ_ENTRY(TCGRelocation) next;
250     tcg_insn_unit *ptr;
251     intptr_t addend;
252     int type;
253 };
254 
255 typedef struct TCGLabel TCGLabel;
256 struct TCGLabel {
257     unsigned present : 1;
258     unsigned has_value : 1;
259     unsigned id : 14;
260     unsigned refs : 16;
261     union {
262         uintptr_t value;
263         const tcg_insn_unit *value_ptr;
264     } u;
265     QSIMPLEQ_HEAD(, TCGRelocation) relocs;
266     QSIMPLEQ_ENTRY(TCGLabel) next;
267 };
268 
269 typedef struct TCGPool {
270     struct TCGPool *next;
271     int size;
272     uint8_t data[] __attribute__ ((aligned));
273 } TCGPool;
274 
275 #define TCG_POOL_CHUNK_SIZE 32768
276 
277 #define TCG_MAX_TEMPS 512
278 #define TCG_MAX_INSNS 512
279 
280 /* when the size of the arguments of a called function is smaller than
281    this value, they are statically allocated in the TB stack frame */
282 #define TCG_STATIC_CALL_ARGS_SIZE 128
283 
284 typedef enum TCGType {
285     TCG_TYPE_I32,
286     TCG_TYPE_I64,
287 
288     TCG_TYPE_V64,
289     TCG_TYPE_V128,
290     TCG_TYPE_V256,
291 
292     TCG_TYPE_COUNT, /* number of different types */
293 
294     /* An alias for the size of the host register.  */
295 #if TCG_TARGET_REG_BITS == 32
296     TCG_TYPE_REG = TCG_TYPE_I32,
297 #else
298     TCG_TYPE_REG = TCG_TYPE_I64,
299 #endif
300 
301     /* An alias for the size of the native pointer.  */
302 #if UINTPTR_MAX == UINT32_MAX
303     TCG_TYPE_PTR = TCG_TYPE_I32,
304 #else
305     TCG_TYPE_PTR = TCG_TYPE_I64,
306 #endif
307 
308     /* An alias for the size of the target "long", aka register.  */
309 #if TARGET_LONG_BITS == 64
310     TCG_TYPE_TL = TCG_TYPE_I64,
311 #else
312     TCG_TYPE_TL = TCG_TYPE_I32,
313 #endif
314 } TCGType;
315 
316 /**
317  * get_alignment_bits
318  * @memop: MemOp value
319  *
320  * Extract the alignment size from the memop.
321  */
322 static inline unsigned get_alignment_bits(MemOp memop)
323 {
324     unsigned a = memop & MO_AMASK;
325 
326     if (a == MO_UNALN) {
327         /* No alignment required.  */
328         a = 0;
329     } else if (a == MO_ALIGN) {
330         /* A natural alignment requirement.  */
331         a = memop & MO_SIZE;
332     } else {
333         /* A specific alignment requirement.  */
334         a = a >> MO_ASHIFT;
335     }
336 #if defined(CONFIG_SOFTMMU)
337     /* The requested alignment cannot overlap the TLB flags.  */
338     tcg_debug_assert((TLB_FLAGS_MASK & ((1 << a) - 1)) == 0);
339 #endif
340     return a;
341 }
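/*
 * Illustrative examples (editorial note), one per branch above; the result
 * is the log2 of the required alignment in bytes:
 *
 *     get_alignment_bits(MO_32 | MO_UNALN)    == 0   -- no alignment
 *     get_alignment_bits(MO_32 | MO_ALIGN)    == 2   -- natural, 4 bytes
 *     get_alignment_bits(MO_64 | MO_ALIGN_16) == 4   -- explicit 16 bytes
 */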
342 
343 typedef tcg_target_ulong TCGArg;
344 
345 /* Define type and accessor macros for TCG variables.
346 
347    TCG variables are the inputs and outputs of TCG ops, as described
348    in tcg/README. Target CPU front-end code uses these types to deal
349    with TCG variables as it emits TCG code via the tcg_gen_* functions.
350    They come in several flavours:
351     * TCGv_i32 : 32 bit integer type
352     * TCGv_i64 : 64 bit integer type
353     * TCGv_ptr : a host pointer type
354     * TCGv_vec : a host vector type; the exact size is not exposed
355                  to the CPU front-end code.
356     * TCGv : an integer type the same size as target_ulong
357              (an alias for either TCGv_i32 or TCGv_i64)
358    The compiler's type checking will complain if you mix them
359    up and pass the wrong sized TCGv to a function.
360 
361    Users of tcg_gen_* don't need to know about any of the internal
362    details of these, and should treat them as opaque types.
363    You won't be able to look inside them in a debugger either.
364 
365    Internal implementation details follow:
366 
367    Note that there is no definition of the structs TCGv_i32_d etc anywhere.
368    This is deliberate, because the values we store in variables of type
369    TCGv_i32 are not really pointers-to-structures. They're just small
370    integers, but keeping them in pointer types like this means that the
371    compiler will complain if you accidentally pass a TCGv_i32 to a
372    function which takes a TCGv_i64, and so on. Only the internals of
373    TCG need to care about the actual contents of the types.  */
374 
375 typedef struct TCGv_i32_d *TCGv_i32;
376 typedef struct TCGv_i64_d *TCGv_i64;
377 typedef struct TCGv_ptr_d *TCGv_ptr;
378 typedef struct TCGv_vec_d *TCGv_vec;
379 typedef TCGv_ptr TCGv_env;
380 #if TARGET_LONG_BITS == 32
381 #define TCGv TCGv_i32
382 #elif TARGET_LONG_BITS == 64
383 #define TCGv TCGv_i64
384 #else
385 #error Unhandled TARGET_LONG_BITS value
386 #endif
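/*
 * Minimal usage sketch (editorial illustration; tcg_gen_movi_i32() and
 * tcg_gen_add_i32() are declared in tcg/tcg-op.h, not in this header):
 *
 *     TCGv_i32 tmp = tcg_temp_new_i32();
 *     tcg_gen_movi_i32(tmp, 42);
 *     tcg_gen_add_i32(dst, src, tmp);    // dst, src: existing TCGv_i32
 *     tcg_temp_free_i32(tmp);
 *
 * Passing, say, a TCGv_i64 where a TCGv_i32 is expected fails to compile,
 * which is exactly the type checking described above.
 */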
387 
388 /* call flags */
389 /* Helper does not read globals (either directly or through an exception). It
390    implies TCG_CALL_NO_WRITE_GLOBALS. */
391 #define TCG_CALL_NO_READ_GLOBALS    0x0001
392 /* Helper does not write globals */
393 #define TCG_CALL_NO_WRITE_GLOBALS   0x0002
394 /* Helper can be safely suppressed if the return value is not used. */
395 #define TCG_CALL_NO_SIDE_EFFECTS    0x0004
396 /* Helper is QEMU_NORETURN.  */
397 #define TCG_CALL_NO_RETURN          0x0008
398 
399 /* convenience version of most used call flags */
400 #define TCG_CALL_NO_RWG         TCG_CALL_NO_READ_GLOBALS
401 #define TCG_CALL_NO_WG          TCG_CALL_NO_WRITE_GLOBALS
402 #define TCG_CALL_NO_SE          TCG_CALL_NO_SIDE_EFFECTS
403 #define TCG_CALL_NO_RWG_SE      (TCG_CALL_NO_RWG | TCG_CALL_NO_SE)
404 #define TCG_CALL_NO_WG_SE       (TCG_CALL_NO_WG | TCG_CALL_NO_SE)
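/*
 * Example (editorial illustration of how helpers attach these flags via the
 * DEF_HELPER_FLAGS_* macros from exec/helper-head.h; the helper name below
 * is hypothetical):
 *
 *     DEF_HELPER_FLAGS_2(my_pure_op, TCG_CALL_NO_RWG_SE, i32, i32, i32)
 *
 * marks a helper that neither reads nor writes globals and has no side
 * effects, so the call may be dropped if its result is unused.
 */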
405 
406 /* Used to align parameters.  See the comment before tcgv_i32_temp.  */
407 #define TCG_CALL_DUMMY_ARG      ((TCGArg)0)
408 
409 /* Conditions.  Note that these are laid out for easy manipulation by
410    the functions below:
411      bit 0 is used for inverting;
412      bit 1 is signed,
413      bit 2 is unsigned,
414      bit 3 is used with bit 0 for swapping signed/unsigned.  */
415 typedef enum {
416     /* non-signed */
417     TCG_COND_NEVER  = 0 | 0 | 0 | 0,
418     TCG_COND_ALWAYS = 0 | 0 | 0 | 1,
419     TCG_COND_EQ     = 8 | 0 | 0 | 0,
420     TCG_COND_NE     = 8 | 0 | 0 | 1,
421     /* signed */
422     TCG_COND_LT     = 0 | 0 | 2 | 0,
423     TCG_COND_GE     = 0 | 0 | 2 | 1,
424     TCG_COND_LE     = 8 | 0 | 2 | 0,
425     TCG_COND_GT     = 8 | 0 | 2 | 1,
426     /* unsigned */
427     TCG_COND_LTU    = 0 | 4 | 0 | 0,
428     TCG_COND_GEU    = 0 | 4 | 0 | 1,
429     TCG_COND_LEU    = 8 | 4 | 0 | 0,
430     TCG_COND_GTU    = 8 | 4 | 0 | 1,
431 } TCGCond;
432 
433 /* Invert the sense of the comparison.  */
434 static inline TCGCond tcg_invert_cond(TCGCond c)
435 {
436     return (TCGCond)(c ^ 1);
437 }
438 
439 /* Swap the operands in a comparison.  */
440 static inline TCGCond tcg_swap_cond(TCGCond c)
441 {
442     return c & 6 ? (TCGCond)(c ^ 9) : c;
443 }
444 
445 /* Create an "unsigned" version of a "signed" comparison.  */
446 static inline TCGCond tcg_unsigned_cond(TCGCond c)
447 {
448     return c & 2 ? (TCGCond)(c ^ 6) : c;
449 }
450 
451 /* Create a "signed" version of an "unsigned" comparison.  */
452 static inline TCGCond tcg_signed_cond(TCGCond c)
453 {
454     return c & 4 ? (TCGCond)(c ^ 6) : c;
455 }
456 
457 /* Must a comparison be considered unsigned?  */
458 static inline bool is_unsigned_cond(TCGCond c)
459 {
460     return (c & 4) != 0;
461 }
462 
463 /* Create a "high" version of a double-word comparison.
464    This removes equality from a LTE or GTE comparison.  */
465 static inline TCGCond tcg_high_cond(TCGCond c)
466 {
467     switch (c) {
468     case TCG_COND_GE:
469     case TCG_COND_LE:
470     case TCG_COND_GEU:
471     case TCG_COND_LEU:
472         return (TCGCond)(c ^ 8);
473     default:
474         return c;
475     }
476 }
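/*
 * Worked example of the encoding above (editorial note): TCG_COND_LT is
 * 0|0|2|0 = 2, so
 *
 *     tcg_invert_cond(TCG_COND_LT)   == TCG_COND_GE    (2 ^ 1  == 3)
 *     tcg_swap_cond(TCG_COND_LT)     == TCG_COND_GT    (2 ^ 9  == 11)
 *     tcg_unsigned_cond(TCG_COND_LT) == TCG_COND_LTU   (2 ^ 6  == 4)
 *     tcg_high_cond(TCG_COND_LE)     == TCG_COND_LT    (10 ^ 8 == 2)
 */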
477 
478 typedef enum TCGTempVal {
479     TEMP_VAL_DEAD,
480     TEMP_VAL_REG,
481     TEMP_VAL_MEM,
482     TEMP_VAL_CONST,
483 } TCGTempVal;
484 
485 typedef enum TCGTempKind {
486     /* Temp is dead at the end of all basic blocks. */
487     TEMP_NORMAL,
488     /* Temp is saved across basic blocks but dead at the end of TBs. */
489     TEMP_LOCAL,
490     /* Temp is saved across both basic blocks and translation blocks. */
491     TEMP_GLOBAL,
492     /* Temp is in a fixed register. */
493     TEMP_FIXED,
494     /* Temp is a fixed constant. */
495     TEMP_CONST,
496 } TCGTempKind;
497 
498 typedef struct TCGTemp {
499     TCGReg reg:8;
500     TCGTempVal val_type:8;
501     TCGType base_type:8;
502     TCGType type:8;
503     TCGTempKind kind:3;
504     unsigned int indirect_reg:1;
505     unsigned int indirect_base:1;
506     unsigned int mem_coherent:1;
507     unsigned int mem_allocated:1;
508     unsigned int temp_allocated:1;
509 
510     int64_t val;
511     struct TCGTemp *mem_base;
512     intptr_t mem_offset;
513     const char *name;
514 
515     /* Pass-specific information that can be stored for a temporary.
516        One word worth of integer data, and one pointer to data
517        allocated separately.  */
518     uintptr_t state;
519     void *state_ptr;
520 } TCGTemp;
521 
522 typedef struct TCGContext TCGContext;
523 
524 typedef struct TCGTempSet {
525     unsigned long l[BITS_TO_LONGS(TCG_MAX_TEMPS)];
526 } TCGTempSet;
527 
528 /* While we limit helpers to 6 arguments, for 32-bit hosts, with padding,
529    this implies a max of 6*2 (64-bit in) + 2 (64-bit out) = 14 operands.
530    There are never more than 2 outputs, which means that we can store all
531    dead + sync data within 16 bits.  */
532 #define DEAD_ARG  4
533 #define SYNC_ARG  1
534 typedef uint16_t TCGLifeData;
535 
536 /* The layout here is designed to avoid a bitfield crossing of
537    a 32-bit boundary, which would cause GCC to add extra padding.  */
538 typedef struct TCGOp {
539     TCGOpcode opc   : 8;        /*  8 */
540 
541     /* Parameters for this opcode.  See below.  */
542     unsigned param1 : 4;        /* 12 */
543     unsigned param2 : 4;        /* 16 */
544 
545     /* Lifetime data of the operands.  */
546     unsigned life   : 16;       /* 32 */
547 
548     /* Next and previous opcodes.  */
549     QTAILQ_ENTRY(TCGOp) link;
550 #ifdef CONFIG_PLUGIN
551     QSIMPLEQ_ENTRY(TCGOp) plugin_link;
552 #endif
553 
554     /* Arguments for the opcode.  */
555     TCGArg args[MAX_OPC_PARAM];
556 
557     /* Register preferences for the output(s).  */
558     TCGRegSet output_pref[2];
559 } TCGOp;
560 
561 #define TCGOP_CALLI(X)    (X)->param1
562 #define TCGOP_CALLO(X)    (X)->param2
563 
564 #define TCGOP_VECL(X)     (X)->param1
565 #define TCGOP_VECE(X)     (X)->param2
566 
567 /* Make sure operands fit in the bitfields above.  */
568 QEMU_BUILD_BUG_ON(NB_OPS > (1 << 8));
569 
570 typedef struct TCGProfile {
571     int64_t cpu_exec_time;
572     int64_t tb_count1;
573     int64_t tb_count;
574     int64_t op_count; /* total insn count */
575     int op_count_max; /* max insn per TB */
576     int temp_count_max;
577     int64_t temp_count;
578     int64_t del_op_count;
579     int64_t code_in_len;
580     int64_t code_out_len;
581     int64_t search_out_len;
582     int64_t interm_time;
583     int64_t code_time;
584     int64_t la_time;
585     int64_t opt_time;
586     int64_t restore_count;
587     int64_t restore_time;
588     int64_t table_op_count[NB_OPS];
589 } TCGProfile;
590 
591 struct TCGContext {
592     uint8_t *pool_cur, *pool_end;
593     TCGPool *pool_first, *pool_current, *pool_first_large;
594     int nb_labels;
595     int nb_globals;
596     int nb_temps;
597     int nb_indirects;
598     int nb_ops;
599 
600     /* goto_tb support */
601     tcg_insn_unit *code_buf;
602     uint16_t *tb_jmp_reset_offset; /* tb->jmp_reset_offset */
603     uintptr_t *tb_jmp_insn_offset; /* tb->jmp_target_arg if direct_jump */
604     uintptr_t *tb_jmp_target_addr; /* tb->jmp_target_arg if !direct_jump */
605 
606     TCGRegSet reserved_regs;
607     uint32_t tb_cflags; /* cflags of the current TB */
608     intptr_t current_frame_offset;
609     intptr_t frame_start;
610     intptr_t frame_end;
611     TCGTemp *frame_temp;
612 
613     tcg_insn_unit *code_ptr;
614 
615 #ifdef CONFIG_PROFILER
616     TCGProfile prof;
617 #endif
618 
619 #ifdef CONFIG_DEBUG_TCG
620     int temps_in_use;
621     int goto_tb_issue_mask;
622     const TCGOpcode *vecop_list;
623 #endif
624 
625     /* Code generation.  Note that we specifically do not use tcg_insn_unit
626        here, because there's too much arithmetic throughout that relies
627        on addition and subtraction working on bytes.  Rely on the GCC
628        extension that allows arithmetic on void*.  */
629     void *code_gen_buffer;
630     size_t code_gen_buffer_size;
631     void *code_gen_ptr;
632     void *data_gen_ptr;
633 
634     /* Threshold to flush the translated code buffer.  */
635     void *code_gen_highwater;
636 
637     size_t tb_phys_invalidate_count;
638 
639     /* Track which vCPU triggers events */
640     CPUState *cpu;                      /* *_trans */
641 
642     /* These structures are private to tcg-target.c.inc.  */
643 #ifdef TCG_TARGET_NEED_LDST_LABELS
644     QSIMPLEQ_HEAD(, TCGLabelQemuLdst) ldst_labels;
645 #endif
646 #ifdef TCG_TARGET_NEED_POOL_LABELS
647     struct TCGLabelPoolData *pool_labels;
648 #endif
649 
650     TCGLabel *exitreq_label;
651 
652 #ifdef CONFIG_PLUGIN
653     /*
654      * We keep one plugin_tb struct per TCGContext. Note that on every TB
655      * translation we clear but do not free its contents; this way we
656      * avoid a lot of malloc/free churn, since after a few TB's it's
657      * unlikely that we'll need to allocate either more instructions or more
658      * space for instructions (for variable-instruction-length ISAs).
659      */
660     struct qemu_plugin_tb *plugin_tb;
661 
662     /* descriptor of the instruction being translated */
663     struct qemu_plugin_insn *plugin_insn;
664 
665     /* list to quickly access the injected ops */
666     QSIMPLEQ_HEAD(, TCGOp) plugin_ops;
667 #endif
668 
669     GHashTable *const_table[TCG_TYPE_COUNT];
670     TCGTempSet free_temps[TCG_TYPE_COUNT * 2];
671     TCGTemp temps[TCG_MAX_TEMPS]; /* globals first, temps after */
672 
673     QTAILQ_HEAD(, TCGOp) ops, free_ops;
674     QSIMPLEQ_HEAD(, TCGLabel) labels;
675 
676     /* Tells which temporary holds a given register.
677        It does not take into account fixed registers */
678     TCGTemp *reg_to_temp[TCG_TARGET_NB_REGS];
679 
680     uint16_t gen_insn_end_off[TCG_MAX_INSNS];
681     target_ulong gen_insn_data[TCG_MAX_INSNS][TARGET_INSN_START_WORDS];
682 
683     /* Exit to translator on overflow. */
684     sigjmp_buf jmp_trans;
685 };
686 
687 static inline bool temp_readonly(TCGTemp *ts)
688 {
689     return ts->kind >= TEMP_FIXED;
690 }
691 
692 extern TCGContext tcg_init_ctx;
693 extern __thread TCGContext *tcg_ctx;
694 extern const void *tcg_code_gen_epilogue;
695 extern uintptr_t tcg_splitwx_diff;
696 extern TCGv_env cpu_env;
697 
698 static inline bool in_code_gen_buffer(const void *p)
699 {
700     const TCGContext *s = &tcg_init_ctx;
701     /*
702      * Much like it is valid to have a pointer to the byte past the
703      * end of an array (so long as you don't dereference it), allow
704      * a pointer to the byte past the end of the code gen buffer.
705      */
706     return (size_t)(p - s->code_gen_buffer) <= s->code_gen_buffer_size;
707 }
708 
709 #ifdef CONFIG_DEBUG_TCG
710 const void *tcg_splitwx_to_rx(void *rw);
711 void *tcg_splitwx_to_rw(const void *rx);
712 #else
713 static inline const void *tcg_splitwx_to_rx(void *rw)
714 {
715     return rw ? rw + tcg_splitwx_diff : NULL;
716 }
717 
718 static inline void *tcg_splitwx_to_rw(const void *rx)
719 {
720     return rx ? (void *)rx - tcg_splitwx_diff : NULL;
721 }
722 #endif
723 
724 static inline size_t temp_idx(TCGTemp *ts)
725 {
726     ptrdiff_t n = ts - tcg_ctx->temps;
727     tcg_debug_assert(n >= 0 && n < tcg_ctx->nb_temps);
728     return n;
729 }
730 
731 static inline TCGArg temp_arg(TCGTemp *ts)
732 {
733     return (uintptr_t)ts;
734 }
735 
736 static inline TCGTemp *arg_temp(TCGArg a)
737 {
738     return (TCGTemp *)(uintptr_t)a;
739 }
740 
741 /* Using the offset of a temporary, relative to TCGContext, rather than
742    its index means that we don't use 0.  That leaves offset 0 free for
743    a NULL representation without having to leave index 0 unused.  */
744 static inline TCGTemp *tcgv_i32_temp(TCGv_i32 v)
745 {
746     uintptr_t o = (uintptr_t)v;
747     TCGTemp *t = (void *)tcg_ctx + o;
748     tcg_debug_assert(offsetof(TCGContext, temps[temp_idx(t)]) == o);
749     return t;
750 }
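/*
 * Illustration (editorial note): a TCGv_i32 value is simply the byte offset
 * of its TCGTemp within TCGContext, so the conversions are cheap and
 * round-trip exactly:
 *
 *     TCGTemp *t = tcgv_i32_temp(v);
 *     assert(temp_tcgv_i32(t) == v);   // offset -> temp -> offset
 *
 * Because temps[] does not sit at the start of TCGContext, offset 0 can
 * never name a temp, which is why it is free to act as a NULL-like value.
 */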
751 
752 static inline TCGTemp *tcgv_i64_temp(TCGv_i64 v)
753 {
754     return tcgv_i32_temp((TCGv_i32)v);
755 }
756 
757 static inline TCGTemp *tcgv_ptr_temp(TCGv_ptr v)
758 {
759     return tcgv_i32_temp((TCGv_i32)v);
760 }
761 
762 static inline TCGTemp *tcgv_vec_temp(TCGv_vec v)
763 {
764     return tcgv_i32_temp((TCGv_i32)v);
765 }
766 
767 static inline TCGArg tcgv_i32_arg(TCGv_i32 v)
768 {
769     return temp_arg(tcgv_i32_temp(v));
770 }
771 
772 static inline TCGArg tcgv_i64_arg(TCGv_i64 v)
773 {
774     return temp_arg(tcgv_i64_temp(v));
775 }
776 
777 static inline TCGArg tcgv_ptr_arg(TCGv_ptr v)
778 {
779     return temp_arg(tcgv_ptr_temp(v));
780 }
781 
782 static inline TCGArg tcgv_vec_arg(TCGv_vec v)
783 {
784     return temp_arg(tcgv_vec_temp(v));
785 }
786 
787 static inline TCGv_i32 temp_tcgv_i32(TCGTemp *t)
788 {
789     (void)temp_idx(t); /* trigger embedded assert */
790     return (TCGv_i32)((void *)t - (void *)tcg_ctx);
791 }
792 
793 static inline TCGv_i64 temp_tcgv_i64(TCGTemp *t)
794 {
795     return (TCGv_i64)temp_tcgv_i32(t);
796 }
797 
798 static inline TCGv_ptr temp_tcgv_ptr(TCGTemp *t)
799 {
800     return (TCGv_ptr)temp_tcgv_i32(t);
801 }
802 
803 static inline TCGv_vec temp_tcgv_vec(TCGTemp *t)
804 {
805     return (TCGv_vec)temp_tcgv_i32(t);
806 }
807 
808 #if TCG_TARGET_REG_BITS == 32
809 static inline TCGv_i32 TCGV_LOW(TCGv_i64 t)
810 {
811     return temp_tcgv_i32(tcgv_i64_temp(t));
812 }
813 
814 static inline TCGv_i32 TCGV_HIGH(TCGv_i64 t)
815 {
816     return temp_tcgv_i32(tcgv_i64_temp(t) + 1);
817 }
818 #endif
819 
820 static inline TCGArg tcg_get_insn_param(TCGOp *op, int arg)
821 {
822     return op->args[arg];
823 }
824 
825 static inline void tcg_set_insn_param(TCGOp *op, int arg, TCGArg v)
826 {
827     op->args[arg] = v;
828 }
829 
830 static inline target_ulong tcg_get_insn_start_param(TCGOp *op, int arg)
831 {
832 #if TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
833     return tcg_get_insn_param(op, arg);
834 #else
835     return tcg_get_insn_param(op, arg * 2) |
836            ((uint64_t)tcg_get_insn_param(op, arg * 2 + 1) << 32);
837 #endif
838 }
839 
840 static inline void tcg_set_insn_start_param(TCGOp *op, int arg, target_ulong v)
841 {
842 #if TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
843     tcg_set_insn_param(op, arg, v);
844 #else
845     tcg_set_insn_param(op, arg * 2, v);
846     tcg_set_insn_param(op, arg * 2 + 1, v >> 32);
847 #endif
848 }
849 
850 /* The last op that was emitted.  */
851 static inline TCGOp *tcg_last_op(void)
852 {
853     return QTAILQ_LAST(&tcg_ctx->ops);
854 }
855 
856 /* Test for whether to terminate the TB for using too many opcodes.  */
857 static inline bool tcg_op_buf_full(void)
858 {
859     /* This is not a hard limit, it merely stops translation when
860      * we have produced "enough" opcodes.  We want to limit TB size
861      * such that a RISC host can reasonably use a 16-bit signed
862      * branch within the TB.  We also need to be mindful of the
863      * 16-bit unsigned offsets, TranslationBlock.jmp_reset_offset[]
864      * and TCGContext.gen_insn_end_off[].
865      */
866     return tcg_ctx->nb_ops >= 4000;
867 }
868 
869 /* pool based memory allocation */
870 
871 /* user-mode: mmap_lock must be held for tcg_malloc_internal. */
872 void *tcg_malloc_internal(TCGContext *s, int size);
873 void tcg_pool_reset(TCGContext *s);
874 TranslationBlock *tcg_tb_alloc(TCGContext *s);
875 
876 void tcg_region_init(size_t tb_size, int splitwx);
877 void tb_destroy(TranslationBlock *tb);
878 void tcg_region_reset_all(void);
879 
880 size_t tcg_code_size(void);
881 size_t tcg_code_capacity(void);
882 
883 void tcg_tb_insert(TranslationBlock *tb);
884 void tcg_tb_remove(TranslationBlock *tb);
885 size_t tcg_tb_phys_invalidate_count(void);
886 TranslationBlock *tcg_tb_lookup(uintptr_t tc_ptr);
887 void tcg_tb_foreach(GTraverseFunc func, gpointer user_data);
888 size_t tcg_nb_tbs(void);
889 
890 /* user-mode: Called with mmap_lock held.  */
891 static inline void *tcg_malloc(int size)
892 {
893     TCGContext *s = tcg_ctx;
894     uint8_t *ptr, *ptr_end;
895 
896     /* ??? This is a weak placeholder for minimum malloc alignment.  */
897     size = QEMU_ALIGN_UP(size, 8);
898 
899     ptr = s->pool_cur;
900     ptr_end = ptr + size;
901     if (unlikely(ptr_end > s->pool_end)) {
902         return tcg_malloc_internal(tcg_ctx, size);
903     } else {
904         s->pool_cur = ptr_end;
905         return ptr;
906     }
907 }
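/*
 * Usage sketch (editorial illustration): pool memory is bump-allocated and
 * released en masse by tcg_pool_reset() when translation of the next TB
 * starts, so callers never free it individually:
 *
 *     uint8_t *scratch = tcg_malloc(64);
 *     // ... use scratch while generating the current TB ...
 */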
908 
909 void tcg_context_init(TCGContext *s);
910 void tcg_register_thread(void);
911 void tcg_prologue_init(TCGContext *s);
912 void tcg_func_start(TCGContext *s);
913 
914 int tcg_gen_code(TCGContext *s, TranslationBlock *tb);
915 
916 void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size);
917 
918 TCGTemp *tcg_global_mem_new_internal(TCGType, TCGv_ptr,
919                                      intptr_t, const char *);
920 TCGTemp *tcg_temp_new_internal(TCGType, bool);
921 void tcg_temp_free_internal(TCGTemp *);
922 TCGv_vec tcg_temp_new_vec(TCGType type);
923 TCGv_vec tcg_temp_new_vec_matching(TCGv_vec match);
924 
925 static inline void tcg_temp_free_i32(TCGv_i32 arg)
926 {
927     tcg_temp_free_internal(tcgv_i32_temp(arg));
928 }
929 
930 static inline void tcg_temp_free_i64(TCGv_i64 arg)
931 {
932     tcg_temp_free_internal(tcgv_i64_temp(arg));
933 }
934 
935 static inline void tcg_temp_free_ptr(TCGv_ptr arg)
936 {
937     tcg_temp_free_internal(tcgv_ptr_temp(arg));
938 }
939 
940 static inline void tcg_temp_free_vec(TCGv_vec arg)
941 {
942     tcg_temp_free_internal(tcgv_vec_temp(arg));
943 }
944 
945 static inline TCGv_i32 tcg_global_mem_new_i32(TCGv_ptr reg, intptr_t offset,
946                                               const char *name)
947 {
948     TCGTemp *t = tcg_global_mem_new_internal(TCG_TYPE_I32, reg, offset, name);
949     return temp_tcgv_i32(t);
950 }
951 
952 static inline TCGv_i32 tcg_temp_new_i32(void)
953 {
954     TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I32, false);
955     return temp_tcgv_i32(t);
956 }
957 
958 static inline TCGv_i32 tcg_temp_local_new_i32(void)
959 {
960     TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I32, true);
961     return temp_tcgv_i32(t);
962 }
963 
964 static inline TCGv_i64 tcg_global_mem_new_i64(TCGv_ptr reg, intptr_t offset,
965                                               const char *name)
966 {
967     TCGTemp *t = tcg_global_mem_new_internal(TCG_TYPE_I64, reg, offset, name);
968     return temp_tcgv_i64(t);
969 }
970 
971 static inline TCGv_i64 tcg_temp_new_i64(void)
972 {
973     TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I64, false);
974     return temp_tcgv_i64(t);
975 }
976 
977 static inline TCGv_i64 tcg_temp_local_new_i64(void)
978 {
979     TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I64, true);
980     return temp_tcgv_i64(t);
981 }
982 
983 static inline TCGv_ptr tcg_global_mem_new_ptr(TCGv_ptr reg, intptr_t offset,
984                                               const char *name)
985 {
986     TCGTemp *t = tcg_global_mem_new_internal(TCG_TYPE_PTR, reg, offset, name);
987     return temp_tcgv_ptr(t);
988 }
989 
990 static inline TCGv_ptr tcg_temp_new_ptr(void)
991 {
992     TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_PTR, false);
993     return temp_tcgv_ptr(t);
994 }
995 
996 static inline TCGv_ptr tcg_temp_local_new_ptr(void)
997 {
998     TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_PTR, true);
999     return temp_tcgv_ptr(t);
1000 }
1001 
1002 #if defined(CONFIG_DEBUG_TCG)
1003 /* If you call tcg_clear_temp_count() at the start of a section of
1004  * code which is not supposed to leak any TCG temporaries, then
1005  * calling tcg_check_temp_count() at the end of the section will
1006  * return 1 if the section did in fact leak a temporary.
1007  */
1008 void tcg_clear_temp_count(void);
1009 int tcg_check_temp_count(void);
1010 #else
1011 #define tcg_clear_temp_count() do { } while (0)
1012 #define tcg_check_temp_count() 0
1013 #endif
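/*
 * Typical usage pattern (editorial sketch, mirroring how a translator loop
 * can wrap per-instruction code generation):
 *
 *     tcg_clear_temp_count();
 *     // ... generate ops for one guest instruction ...
 *     if (tcg_check_temp_count()) {
 *         // a TCG temporary was allocated but never freed
 *     }
 */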
1014 
1015 int64_t tcg_cpu_exec_time(void);
1016 void tcg_dump_info(void);
1017 void tcg_dump_op_count(void);
1018 
1019 #define TCG_CT_CONST  1 /* any constant of register size */
1020 
1021 typedef struct TCGArgConstraint {
1022     unsigned ct : 16;
1023     unsigned alias_index : 4;
1024     unsigned sort_index : 4;
1025     bool oalias : 1;
1026     bool ialias : 1;
1027     bool newreg : 1;
1028     TCGRegSet regs;
1029 } TCGArgConstraint;
1030 
1031 #define TCG_MAX_OP_ARGS 16
1032 
1033 /* Bits for TCGOpDef->flags, 8 bits available, all used.  */
1034 enum {
1035     /* Instruction exits the translation block.  */
1036     TCG_OPF_BB_EXIT      = 0x01,
1037     /* Instruction defines the end of a basic block.  */
1038     TCG_OPF_BB_END       = 0x02,
1039     /* Instruction clobbers call registers and potentially updates globals.  */
1040     TCG_OPF_CALL_CLOBBER = 0x04,
1041     /* Instruction has side effects: it cannot be removed if its outputs
1042        are not used, and might trigger exceptions.  */
1043     TCG_OPF_SIDE_EFFECTS = 0x08,
1044     /* Instruction operands are 64-bits (otherwise 32-bits).  */
1045     TCG_OPF_64BIT        = 0x10,
1046     /* Instruction is optional and not implemented by the host, or insn
1047        is generic and should not be implemented by the host.  */
1048     TCG_OPF_NOT_PRESENT  = 0x20,
1049     /* Instruction operands are vectors.  */
1050     TCG_OPF_VECTOR       = 0x40,
1051     /* Instruction is a conditional branch. */
1052     TCG_OPF_COND_BRANCH  = 0x80
1053 };
1054 
1055 typedef struct TCGOpDef {
1056     const char *name;
1057     uint8_t nb_oargs, nb_iargs, nb_cargs, nb_args;
1058     uint8_t flags;
1059     TCGArgConstraint *args_ct;
1060 } TCGOpDef;
1061 
1062 extern TCGOpDef tcg_op_defs[];
1063 extern const size_t tcg_op_defs_max;
1064 
1065 typedef struct TCGTargetOpDef {
1066     TCGOpcode op;
1067     const char *args_ct_str[TCG_MAX_OP_ARGS];
1068 } TCGTargetOpDef;
1069 
1070 #define tcg_abort() \
1071 do {\
1072     fprintf(stderr, "%s:%d: tcg fatal error\n", __FILE__, __LINE__);\
1073     abort();\
1074 } while (0)
1075 
1076 bool tcg_op_supported(TCGOpcode op);
1077 
1078 void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args);
1079 
1080 TCGOp *tcg_emit_op(TCGOpcode opc);
1081 void tcg_op_remove(TCGContext *s, TCGOp *op);
1082 TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *op, TCGOpcode opc);
1083 TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *op, TCGOpcode opc);
1084 
1085 void tcg_optimize(TCGContext *s);
1086 
1087 /* Allocate a new temporary and initialize it with a constant. */
1088 TCGv_i32 tcg_const_i32(int32_t val);
1089 TCGv_i64 tcg_const_i64(int64_t val);
1090 TCGv_i32 tcg_const_local_i32(int32_t val);
1091 TCGv_i64 tcg_const_local_i64(int64_t val);
1092 TCGv_vec tcg_const_zeros_vec(TCGType);
1093 TCGv_vec tcg_const_ones_vec(TCGType);
1094 TCGv_vec tcg_const_zeros_vec_matching(TCGv_vec);
1095 TCGv_vec tcg_const_ones_vec_matching(TCGv_vec);
1096 
1097 /*
1098  * Locate or create a read-only temporary that is a constant.
1099  * This kind of temporary need not and should not be freed.
1100  */
1101 TCGTemp *tcg_constant_internal(TCGType type, int64_t val);
1102 
1103 static inline TCGv_i32 tcg_constant_i32(int32_t val)
1104 {
1105     return temp_tcgv_i32(tcg_constant_internal(TCG_TYPE_I32, val));
1106 }
1107 
1108 static inline TCGv_i64 tcg_constant_i64(int64_t val)
1109 {
1110     return temp_tcgv_i64(tcg_constant_internal(TCG_TYPE_I64, val));
1111 }
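/*
 * Editorial note: unlike tcg_const_i32()/tcg_const_i64() above, which
 * allocate a fresh temporary the caller must free, tcg_constant_* returns
 * a shared read-only TEMP_CONST temp that must not be freed:
 *
 *     TCGv_i32 one = tcg_constant_i32(1);
 *     tcg_gen_add_i32(dst, src, one);    // no tcg_temp_free_i32(one)
 *
 * (tcg_gen_add_i32() is declared in tcg/tcg-op.h.)
 */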
1112 
1113 TCGv_vec tcg_constant_vec(TCGType type, unsigned vece, int64_t val);
1114 TCGv_vec tcg_constant_vec_matching(TCGv_vec match, unsigned vece, int64_t val);
1115 
1116 #if UINTPTR_MAX == UINT32_MAX
1117 # define tcg_const_ptr(x)        ((TCGv_ptr)tcg_const_i32((intptr_t)(x)))
1118 # define tcg_const_local_ptr(x)  ((TCGv_ptr)tcg_const_local_i32((intptr_t)(x)))
1119 #else
1120 # define tcg_const_ptr(x)        ((TCGv_ptr)tcg_const_i64((intptr_t)(x)))
1121 # define tcg_const_local_ptr(x)  ((TCGv_ptr)tcg_const_local_i64((intptr_t)(x)))
1122 #endif
1123 
1124 TCGLabel *gen_new_label(void);
1125 
1126 /**
1127  * label_arg
1128  * @l: label
1129  *
1130  * Encode a label for storage in the TCG opcode stream.
1131  */
1132 
1133 static inline TCGArg label_arg(TCGLabel *l)
1134 {
1135     return (uintptr_t)l;
1136 }
1137 
1138 /**
1139  * arg_label
1140  * @i: value
1141  *
1142  * The opposite of label_arg.  Retrieve a label from the
1143  * encoding of the TCG opcode stream.
1144  */
1145 
1146 static inline TCGLabel *arg_label(TCGArg i)
1147 {
1148     return (TCGLabel *)(uintptr_t)i;
1149 }
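/*
 * Usage sketch (editorial illustration; tcg_gen_brcondi_i32() and
 * gen_set_label() live in tcg/tcg-op.h):
 *
 *     TCGLabel *over = gen_new_label();
 *     tcg_gen_brcondi_i32(TCG_COND_EQ, val, 0, over);
 *     // ... ops skipped when val == 0 ...
 *     gen_set_label(over);
 *
 * Internally the branch op stores label_arg(over) in its argument array and
 * the backend recovers the label again with arg_label().
 */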
1150 
1151 /**
1152  * tcg_ptr_byte_diff
1153  * @a, @b: addresses to be differenced
1154  *
1155  * There are many places within the TCG backends where we need a byte
1156  * difference between two pointers.  While this can be accomplished
1157  * with local casting, it's easy to get wrong -- especially if one is
1158  * concerned with the signedness of the result.
1159  *
1160  * This version relies on GCC's void pointer arithmetic to get the
1161  * correct result.
1162  */
1163 
1164 static inline ptrdiff_t tcg_ptr_byte_diff(const void *a, const void *b)
1165 {
1166     return a - b;
1167 }
1168 
1169 /**
1170  * tcg_pcrel_diff
1171  * @s: the tcg context
1172  * @target: address of the target
1173  *
1174  * Produce a pc-relative difference, from the current code_ptr
1175  * to the destination address.
1176  */
1177 
1178 static inline ptrdiff_t tcg_pcrel_diff(TCGContext *s, const void *target)
1179 {
1180     return tcg_ptr_byte_diff(target, tcg_splitwx_to_rx(s->code_ptr));
1181 }
1182 
1183 /**
1184  * tcg_tbrel_diff
1185  * @s: the tcg context
1186  * @target: address of the target
1187  *
1188  * Produce a difference, from the beginning of the current TB code
1189  * to the destination address.
1190  */
1191 static inline ptrdiff_t tcg_tbrel_diff(TCGContext *s, const void *target)
1192 {
1193     return tcg_ptr_byte_diff(target, tcg_splitwx_to_rx(s->code_buf));
1194 }
1195 
1196 /**
1197  * tcg_current_code_size
1198  * @s: the tcg context
1199  *
1200  * Compute the current code size within the translation block.
1201  * This is used to fill in qemu's data structures for goto_tb.
1202  */
1203 
1204 static inline size_t tcg_current_code_size(TCGContext *s)
1205 {
1206     return tcg_ptr_byte_diff(s->code_ptr, s->code_buf);
1207 }
1208 
1209 /* Combine the MemOp and mmu_idx parameters into a single value.  */
1210 typedef uint32_t TCGMemOpIdx;
1211 
1212 /**
1213  * make_memop_idx
1214  * @op: memory operation
1215  * @idx: mmu index
1216  *
1217  * Encode these values into a single parameter.
1218  */
1219 static inline TCGMemOpIdx make_memop_idx(MemOp op, unsigned idx)
1220 {
1221     tcg_debug_assert(idx <= 15);
1222     return (op << 4) | idx;
1223 }
1224 
1225 /**
1226  * get_memop
1227  * @oi: combined op/idx parameter
1228  *
1229  * Extract the memory operation from the combined value.
1230  */
1231 static inline MemOp get_memop(TCGMemOpIdx oi)
1232 {
1233     return oi >> 4;
1234 }
1235 
1236 /**
1237  * get_mmuidx
1238  * @oi: combined op/idx parameter
1239  *
1240  * Extract the mmu index from the combined value.
1241  */
1242 static inline unsigned get_mmuidx(TCGMemOpIdx oi)
1243 {
1244     return oi & 15;
1245 }
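/*
 * Example (editorial note): a little-endian, naturally aligned 32-bit load
 * through mmu index 1 packs and unpacks as
 *
 *     TCGMemOpIdx oi = make_memop_idx(MO_LEUL | MO_ALIGN, 1);
 *     MemOp op       = get_memop(oi);     // MO_LEUL | MO_ALIGN
 *     unsigned idx   = get_mmuidx(oi);    // 1
 */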
1246 
1247 /**
1248  * tcg_qemu_tb_exec:
1249  * @env: pointer to CPUArchState for the CPU
1250  * @tb_ptr: address of generated code for the TB to execute
1251  *
1252  * Start executing code from a given translation block.
1253  * Where translation blocks have been linked, execution
1254  * may proceed from the given TB into successive ones.
1255  * Control eventually returns only when some action is needed
1256  * from the top-level loop: either control must pass to a TB
1257  * which has not yet been directly linked, or an asynchronous
1258  * event such as an interrupt needs handling.
1259  *
1260  * Return: The return value is the value passed to the corresponding
1261  * tcg_gen_exit_tb() at translation time of the last TB attempted to execute.
1262  * The value is either zero or a 4-byte aligned pointer to that TB combined
1263  * with additional information in its two least significant bits. The
1264  * additional information is encoded as follows:
1265  *  0, 1: the link between this TB and the next is via the specified
1266  *        TB index (0 or 1). That is, we left the TB via (the equivalent
1267  *        of) "goto_tb <index>". The main loop uses this to determine
1268  *        how to link the TB just executed to the next.
1269  *  2:    we are using instruction counting code generation, and we
1270  *        did not start executing this TB because the instruction counter
1271  *        would hit zero midway through it. In this case the pointer
1272  *        returned is the TB we were about to execute, and the caller must
1273  *        arrange to execute the remaining count of instructions.
1274  *  3:    we stopped because the CPU's exit_request flag was set
1275  *        (usually meaning that there is an interrupt that needs to be
1276  *        handled). The pointer returned is the TB we were about to execute
1277  *        when we noticed the pending exit request.
1278  *
1279  * If the bottom two bits indicate an exit-via-index then the CPU
1280  * state is correctly synchronised and ready for execution of the next
1281  * TB (and in particular the guest PC is the address to execute next).
1282  * Otherwise, we gave up on execution of this TB before it started, and
1283  * the caller must fix up the CPU state by calling the CPU's
1284  * synchronize_from_tb() method with the TB pointer we return (falling
1285  * back to calling the CPU's set_pc method with tb->pc if no
1286  * synchronize_from_tb() method exists).
1287  *
1288  * Note that TCG targets may use a different definition of tcg_qemu_tb_exec
1289  * to this default (which just calls the prologue.code emitted by
1290  * tcg_target_qemu_prologue()).
1291  */
1292 #define TB_EXIT_MASK      3
1293 #define TB_EXIT_IDX0      0
1294 #define TB_EXIT_IDX1      1
1295 #define TB_EXIT_IDXMAX    1
1296 #define TB_EXIT_REQUESTED 3
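/*
 * Decoding sketch (editorial illustration, mirroring what the main loop in
 * cpu-exec.c does with this return value):
 *
 *     uintptr_t ret = tcg_qemu_tb_exec(env, tb_ptr);
 *     TranslationBlock *last_tb = (void *)(ret & ~TB_EXIT_MASK);
 *     int tb_exit = ret & TB_EXIT_MASK;   // TB_EXIT_IDX0/1 or _REQUESTED
 */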
1297 
1298 #ifdef CONFIG_TCG_INTERPRETER
1299 uintptr_t tcg_qemu_tb_exec(CPUArchState *env, const void *tb_ptr);
1300 #else
1301 typedef uintptr_t tcg_prologue_fn(CPUArchState *env, const void *tb_ptr);
1302 extern tcg_prologue_fn *tcg_qemu_tb_exec;
1303 #endif
1304 
1305 void tcg_register_jit(const void *buf, size_t buf_size);
1306 
1307 #if TCG_TARGET_MAYBE_vec
1308 /* Return zero if the tuple (opc, type, vece) is unsupportable;
1309    return > 0 if it is directly supportable;
1310    return < 0 if we must call tcg_expand_vec_op.  */
1311 int tcg_can_emit_vec_op(TCGOpcode, TCGType, unsigned);
1312 #else
1313 static inline int tcg_can_emit_vec_op(TCGOpcode o, TCGType t, unsigned ve)
1314 {
1315     return 0;
1316 }
1317 #endif
1318 
1319 /* Expand the tuple (opc, type, vece) on the given arguments.  */
1320 void tcg_expand_vec_op(TCGOpcode, TCGType, unsigned, TCGArg, ...);
1321 
1322 /* Replicate a constant C according to the log2 of the element size.  */
1323 uint64_t dup_const(unsigned vece, uint64_t c);
1324 
1325 #define dup_const(VECE, C)                                         \
1326     (__builtin_constant_p(VECE)                                    \
1327      ? (  (VECE) == MO_8  ? 0x0101010101010101ull * (uint8_t)(C)   \
1328         : (VECE) == MO_16 ? 0x0001000100010001ull * (uint16_t)(C)  \
1329         : (VECE) == MO_32 ? 0x0000000100000001ull * (uint32_t)(C)  \
1330         : (VECE) == MO_64 ? (uint64_t)(C)                          \
1331         : (qemu_build_not_reached_always(), 0))                    \
1332      : dup_const(VECE, C))
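/*
 * Worked examples (editorial note): replication fills all 64 bits with
 * copies of the low 2**vece bytes of C, e.g.
 *
 *     dup_const(MO_8,  0xab)        == 0xabababababababab
 *     dup_const(MO_16, 0x1234)      == 0x1234123412341234
 *     dup_const(MO_32, 0xdeadbeef)  == 0xdeadbeefdeadbeef
 */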
1333 
1334 
1335 /*
1336  * Memory helpers that will be used by TCG generated code.
1337  */
1338 #ifdef CONFIG_SOFTMMU
1339 /* Value zero-extended to tcg register size.  */
1340 tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
1341                                      TCGMemOpIdx oi, uintptr_t retaddr);
1342 tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
1343                                     TCGMemOpIdx oi, uintptr_t retaddr);
1344 tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
1345                                     TCGMemOpIdx oi, uintptr_t retaddr);
1346 uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
1347                            TCGMemOpIdx oi, uintptr_t retaddr);
1348 tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
1349                                     TCGMemOpIdx oi, uintptr_t retaddr);
1350 tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
1351                                     TCGMemOpIdx oi, uintptr_t retaddr);
1352 uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
1353                            TCGMemOpIdx oi, uintptr_t retaddr);
1354 
1355 /* Value sign-extended to tcg register size.  */
1356 tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
1357                                      TCGMemOpIdx oi, uintptr_t retaddr);
1358 tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
1359                                     TCGMemOpIdx oi, uintptr_t retaddr);
1360 tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
1361                                     TCGMemOpIdx oi, uintptr_t retaddr);
1362 tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
1363                                     TCGMemOpIdx oi, uintptr_t retaddr);
1364 tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
1365                                     TCGMemOpIdx oi, uintptr_t retaddr);
1366 
1367 void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
1368                         TCGMemOpIdx oi, uintptr_t retaddr);
1369 void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
1370                        TCGMemOpIdx oi, uintptr_t retaddr);
1371 void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
1372                        TCGMemOpIdx oi, uintptr_t retaddr);
1373 void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
1374                        TCGMemOpIdx oi, uintptr_t retaddr);
1375 void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
1376                        TCGMemOpIdx oi, uintptr_t retaddr);
1377 void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
1378                        TCGMemOpIdx oi, uintptr_t retaddr);
1379 void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
1380                        TCGMemOpIdx oi, uintptr_t retaddr);
1381 
1382 /* Temporary aliases until backends are converted.  */
1383 #ifdef TARGET_WORDS_BIGENDIAN
1384 # define helper_ret_ldsw_mmu  helper_be_ldsw_mmu
1385 # define helper_ret_lduw_mmu  helper_be_lduw_mmu
1386 # define helper_ret_ldsl_mmu  helper_be_ldsl_mmu
1387 # define helper_ret_ldul_mmu  helper_be_ldul_mmu
1388 # define helper_ret_ldl_mmu   helper_be_ldul_mmu
1389 # define helper_ret_ldq_mmu   helper_be_ldq_mmu
1390 # define helper_ret_stw_mmu   helper_be_stw_mmu
1391 # define helper_ret_stl_mmu   helper_be_stl_mmu
1392 # define helper_ret_stq_mmu   helper_be_stq_mmu
1393 #else
1394 # define helper_ret_ldsw_mmu  helper_le_ldsw_mmu
1395 # define helper_ret_lduw_mmu  helper_le_lduw_mmu
1396 # define helper_ret_ldsl_mmu  helper_le_ldsl_mmu
1397 # define helper_ret_ldul_mmu  helper_le_ldul_mmu
1398 # define helper_ret_ldl_mmu   helper_le_ldul_mmu
1399 # define helper_ret_ldq_mmu   helper_le_ldq_mmu
1400 # define helper_ret_stw_mmu   helper_le_stw_mmu
1401 # define helper_ret_stl_mmu   helper_le_stl_mmu
1402 # define helper_ret_stq_mmu   helper_le_stq_mmu
1403 #endif
1404 
1405 uint32_t helper_atomic_cmpxchgb_mmu(CPUArchState *env, target_ulong addr,
1406                                     uint32_t cmpv, uint32_t newv,
1407                                     TCGMemOpIdx oi, uintptr_t retaddr);
1408 uint32_t helper_atomic_cmpxchgw_le_mmu(CPUArchState *env, target_ulong addr,
1409                                        uint32_t cmpv, uint32_t newv,
1410                                        TCGMemOpIdx oi, uintptr_t retaddr);
1411 uint32_t helper_atomic_cmpxchgl_le_mmu(CPUArchState *env, target_ulong addr,
1412                                        uint32_t cmpv, uint32_t newv,
1413                                        TCGMemOpIdx oi, uintptr_t retaddr);
1414 uint64_t helper_atomic_cmpxchgq_le_mmu(CPUArchState *env, target_ulong addr,
1415                                        uint64_t cmpv, uint64_t newv,
1416                                        TCGMemOpIdx oi, uintptr_t retaddr);
1417 uint32_t helper_atomic_cmpxchgw_be_mmu(CPUArchState *env, target_ulong addr,
1418                                        uint32_t cmpv, uint32_t newv,
1419                                        TCGMemOpIdx oi, uintptr_t retaddr);
1420 uint32_t helper_atomic_cmpxchgl_be_mmu(CPUArchState *env, target_ulong addr,
1421                                        uint32_t cmpv, uint32_t newv,
1422                                        TCGMemOpIdx oi, uintptr_t retaddr);
1423 uint64_t helper_atomic_cmpxchgq_be_mmu(CPUArchState *env, target_ulong addr,
1424                                        uint64_t cmpv, uint64_t newv,
1425                                        TCGMemOpIdx oi, uintptr_t retaddr);
1426 
1427 #define GEN_ATOMIC_HELPER(NAME, TYPE, SUFFIX)         \
1428 TYPE helper_atomic_ ## NAME ## SUFFIX ## _mmu         \
1429     (CPUArchState *env, target_ulong addr, TYPE val,  \
1430      TCGMemOpIdx oi, uintptr_t retaddr);
1431 
1432 #ifdef CONFIG_ATOMIC64
1433 #define GEN_ATOMIC_HELPER_ALL(NAME)          \
1434     GEN_ATOMIC_HELPER(NAME, uint32_t, b)     \
1435     GEN_ATOMIC_HELPER(NAME, uint32_t, w_le)  \
1436     GEN_ATOMIC_HELPER(NAME, uint32_t, w_be)  \
1437     GEN_ATOMIC_HELPER(NAME, uint32_t, l_le)  \
1438     GEN_ATOMIC_HELPER(NAME, uint32_t, l_be)  \
1439     GEN_ATOMIC_HELPER(NAME, uint64_t, q_le)  \
1440     GEN_ATOMIC_HELPER(NAME, uint64_t, q_be)
1441 #else
1442 #define GEN_ATOMIC_HELPER_ALL(NAME)          \
1443     GEN_ATOMIC_HELPER(NAME, uint32_t, b)     \
1444     GEN_ATOMIC_HELPER(NAME, uint32_t, w_le)  \
1445     GEN_ATOMIC_HELPER(NAME, uint32_t, w_be)  \
1446     GEN_ATOMIC_HELPER(NAME, uint32_t, l_le)  \
1447     GEN_ATOMIC_HELPER(NAME, uint32_t, l_be)
1448 #endif
1449 
1450 GEN_ATOMIC_HELPER_ALL(fetch_add)
1451 GEN_ATOMIC_HELPER_ALL(fetch_sub)
1452 GEN_ATOMIC_HELPER_ALL(fetch_and)
1453 GEN_ATOMIC_HELPER_ALL(fetch_or)
1454 GEN_ATOMIC_HELPER_ALL(fetch_xor)
1455 GEN_ATOMIC_HELPER_ALL(fetch_smin)
1456 GEN_ATOMIC_HELPER_ALL(fetch_umin)
1457 GEN_ATOMIC_HELPER_ALL(fetch_smax)
1458 GEN_ATOMIC_HELPER_ALL(fetch_umax)
1459 
1460 GEN_ATOMIC_HELPER_ALL(add_fetch)
1461 GEN_ATOMIC_HELPER_ALL(sub_fetch)
1462 GEN_ATOMIC_HELPER_ALL(and_fetch)
1463 GEN_ATOMIC_HELPER_ALL(or_fetch)
1464 GEN_ATOMIC_HELPER_ALL(xor_fetch)
1465 GEN_ATOMIC_HELPER_ALL(smin_fetch)
1466 GEN_ATOMIC_HELPER_ALL(umin_fetch)
1467 GEN_ATOMIC_HELPER_ALL(smax_fetch)
1468 GEN_ATOMIC_HELPER_ALL(umax_fetch)
1469 
1470 GEN_ATOMIC_HELPER_ALL(xchg)
1471 
1472 #undef GEN_ATOMIC_HELPER_ALL
1473 #undef GEN_ATOMIC_HELPER
1474 #endif /* CONFIG_SOFTMMU */
1475 
1476 /*
1477  * These aren't really "proper" helpers because TCG cannot manage Int128.
1478  * However, use the same format as the others, for use by the backends.
1479  *
1480  * The cmpxchg functions are only defined if HAVE_CMPXCHG128;
1481  * the ld/st functions are only defined if HAVE_ATOMIC128,
1482  * as defined by <qemu/atomic128.h>.
1483  */
1484 Int128 helper_atomic_cmpxchgo_le_mmu(CPUArchState *env, target_ulong addr,
1485                                      Int128 cmpv, Int128 newv,
1486                                      TCGMemOpIdx oi, uintptr_t retaddr);
1487 Int128 helper_atomic_cmpxchgo_be_mmu(CPUArchState *env, target_ulong addr,
1488                                      Int128 cmpv, Int128 newv,
1489                                      TCGMemOpIdx oi, uintptr_t retaddr);
1490 
1491 Int128 helper_atomic_ldo_le_mmu(CPUArchState *env, target_ulong addr,
1492                                 TCGMemOpIdx oi, uintptr_t retaddr);
1493 Int128 helper_atomic_ldo_be_mmu(CPUArchState *env, target_ulong addr,
1494                                 TCGMemOpIdx oi, uintptr_t retaddr);
1495 void helper_atomic_sto_le_mmu(CPUArchState *env, target_ulong addr, Int128 val,
1496                               TCGMemOpIdx oi, uintptr_t retaddr);
1497 void helper_atomic_sto_be_mmu(CPUArchState *env, target_ulong addr, Int128 val,
1498                               TCGMemOpIdx oi, uintptr_t retaddr);
1499 
1500 #ifdef CONFIG_DEBUG_TCG
1501 void tcg_assert_listed_vecop(TCGOpcode);
1502 #else
1503 static inline void tcg_assert_listed_vecop(TCGOpcode op) { }
1504 #endif
1505 
1506 static inline const TCGOpcode *tcg_swap_vecop_list(const TCGOpcode *n)
1507 {
1508 #ifdef CONFIG_DEBUG_TCG
1509     const TCGOpcode *o = tcg_ctx->vecop_list;
1510     tcg_ctx->vecop_list = n;
1511     return o;
1512 #else
1513     return NULL;
1514 #endif
1515 }
1516 
1517 bool tcg_can_emit_vecop_list(const TCGOpcode *, TCGType, unsigned);
1518 
1519 #endif /* TCG_H */
1520