/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#ifndef TCG_H
#define TCG_H

#include "cpu.h"
#include "exec/memop.h"
#include "exec/memopidx.h"
#include "qemu/bitops.h"
#include "qemu/plugin.h"
#include "qemu/queue.h"
#include "tcg/tcg-mo.h"
#include "tcg-target.h"
#include "tcg/tcg-cond.h"

/* XXX: make safe guess about sizes */
#define MAX_OP_PER_INSTR 266

#if HOST_LONG_BITS == 32
#define MAX_OPC_PARAM_PER_ARG 2
#else
#define MAX_OPC_PARAM_PER_ARG 1
#endif
#define MAX_OPC_PARAM_IARGS 7
#define MAX_OPC_PARAM_OARGS 1
#define MAX_OPC_PARAM_ARGS (MAX_OPC_PARAM_IARGS + MAX_OPC_PARAM_OARGS)

/* A Call op needs up to 4 + 2N parameters on 32-bit archs,
 * and up to 4 + N parameters on 64-bit archs
 * (N = number of input arguments + output arguments).  */
#define MAX_OPC_PARAM (4 + (MAX_OPC_PARAM_PER_ARG * MAX_OPC_PARAM_ARGS))

#define CPU_TEMP_BUF_NLONGS 128
#define TCG_STATIC_FRAME_SIZE  (CPU_TEMP_BUF_NLONGS * sizeof(long))

/* Default target word size to pointer size. */
#ifndef TCG_TARGET_REG_BITS
# if UINTPTR_MAX == UINT32_MAX
#  define TCG_TARGET_REG_BITS 32
# elif UINTPTR_MAX == UINT64_MAX
#  define TCG_TARGET_REG_BITS 64
# else
#  error Unknown pointer size for tcg target
# endif
#endif

#if TCG_TARGET_REG_BITS == 32
typedef int32_t tcg_target_long;
typedef uint32_t tcg_target_ulong;
#define TCG_PRIlx PRIx32
#define TCG_PRIld PRId32
#elif TCG_TARGET_REG_BITS == 64
typedef int64_t tcg_target_long;
typedef uint64_t tcg_target_ulong;
#define TCG_PRIlx PRIx64
#define TCG_PRIld PRId64
#else
#error unsupported
#endif

/* Oversized TCG guests make things like MTTCG hard
 * as we can't use atomics for cputlb updates.
 */
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
#define TCG_OVERSIZED_GUEST 1
#else
#define TCG_OVERSIZED_GUEST 0
#endif

#if TCG_TARGET_NB_REGS <= 32
typedef uint32_t TCGRegSet;
#elif TCG_TARGET_NB_REGS <= 64
typedef uint64_t TCGRegSet;
#else
#error unsupported
#endif
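
/*
 * Illustrative sketch (not part of the original header): a TCGRegSet is
 * a plain bitmask over host registers, one bit per register, so typical
 * manipulation via the tcg_regset_* macros defined further below looks
 * like this (TCG_REG_R3 stands in for whatever register name the host's
 * tcg-target.h provides):
 *
 *     TCGRegSet set = 0;
 *     tcg_regset_set_reg(set, TCG_REG_R3);      // set |= 1 << 3
 *     if (tcg_regset_test_reg(set, TCG_REG_R3)) {
 *         tcg_regset_reset_reg(set, TCG_REG_R3); // set &= ~(1 << 3)
 *     }
 */
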
#if TCG_TARGET_REG_BITS == 32
/* Turn some undef macros into false macros.  */
#define TCG_TARGET_HAS_extrl_i64_i32    0
#define TCG_TARGET_HAS_extrh_i64_i32    0
#define TCG_TARGET_HAS_div_i64          0
#define TCG_TARGET_HAS_rem_i64          0
#define TCG_TARGET_HAS_div2_i64         0
#define TCG_TARGET_HAS_rot_i64          0
#define TCG_TARGET_HAS_ext8s_i64        0
#define TCG_TARGET_HAS_ext16s_i64       0
#define TCG_TARGET_HAS_ext32s_i64       0
#define TCG_TARGET_HAS_ext8u_i64        0
#define TCG_TARGET_HAS_ext16u_i64       0
#define TCG_TARGET_HAS_ext32u_i64       0
#define TCG_TARGET_HAS_bswap16_i64      0
#define TCG_TARGET_HAS_bswap32_i64      0
#define TCG_TARGET_HAS_bswap64_i64      0
#define TCG_TARGET_HAS_neg_i64          0
#define TCG_TARGET_HAS_not_i64          0
#define TCG_TARGET_HAS_andc_i64         0
#define TCG_TARGET_HAS_orc_i64          0
#define TCG_TARGET_HAS_eqv_i64          0
#define TCG_TARGET_HAS_nand_i64         0
#define TCG_TARGET_HAS_nor_i64          0
#define TCG_TARGET_HAS_clz_i64          0
#define TCG_TARGET_HAS_ctz_i64          0
#define TCG_TARGET_HAS_ctpop_i64        0
#define TCG_TARGET_HAS_deposit_i64      0
#define TCG_TARGET_HAS_extract_i64      0
#define TCG_TARGET_HAS_sextract_i64     0
#define TCG_TARGET_HAS_extract2_i64     0
#define TCG_TARGET_HAS_movcond_i64      0
#define TCG_TARGET_HAS_add2_i64         0
#define TCG_TARGET_HAS_sub2_i64         0
#define TCG_TARGET_HAS_mulu2_i64        0
#define TCG_TARGET_HAS_muls2_i64        0
#define TCG_TARGET_HAS_muluh_i64        0
#define TCG_TARGET_HAS_mulsh_i64        0
/* Turn some undef macros into true macros.  */
#define TCG_TARGET_HAS_add2_i32         1
#define TCG_TARGET_HAS_sub2_i32         1
#endif

#ifndef TCG_TARGET_deposit_i32_valid
#define TCG_TARGET_deposit_i32_valid(ofs, len) 1
#endif
#ifndef TCG_TARGET_deposit_i64_valid
#define TCG_TARGET_deposit_i64_valid(ofs, len) 1
#endif
#ifndef TCG_TARGET_extract_i32_valid
#define TCG_TARGET_extract_i32_valid(ofs, len) 1
#endif
#ifndef TCG_TARGET_extract_i64_valid
#define TCG_TARGET_extract_i64_valid(ofs, len) 1
#endif

/* Only one of DIV or DIV2 should be defined.  */
#if defined(TCG_TARGET_HAS_div_i32)
#define TCG_TARGET_HAS_div2_i32         0
#elif defined(TCG_TARGET_HAS_div2_i32)
#define TCG_TARGET_HAS_div_i32          0
#define TCG_TARGET_HAS_rem_i32          0
#endif
#if defined(TCG_TARGET_HAS_div_i64)
#define TCG_TARGET_HAS_div2_i64         0
#elif defined(TCG_TARGET_HAS_div2_i64)
#define TCG_TARGET_HAS_div_i64          0
#define TCG_TARGET_HAS_rem_i64          0
#endif
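
/*
 * Illustrative sketch of the DIV/DIV2 convention above: a backend whose
 * host has a combined quotient+remainder instruction would define, in
 * its tcg-target.h, something like
 *
 *     #define TCG_TARGET_HAS_div2_i32  1
 *
 * and leave TCG_TARGET_HAS_div_i32/TCG_TARGET_HAS_rem_i32 undefined;
 * the #if/#elif chain above then defines the unimplemented variants to 0.
 */
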
/* For 32-bit targets, some sort of unsigned widening multiply is required.  */
#if TCG_TARGET_REG_BITS == 32 \
    && !(defined(TCG_TARGET_HAS_mulu2_i32) \
         || defined(TCG_TARGET_HAS_muluh_i32))
# error "Missing unsigned widening multiply"
#endif

#if !defined(TCG_TARGET_HAS_v64) \
    && !defined(TCG_TARGET_HAS_v128) \
    && !defined(TCG_TARGET_HAS_v256)
#define TCG_TARGET_MAYBE_vec            0
#define TCG_TARGET_HAS_abs_vec          0
#define TCG_TARGET_HAS_neg_vec          0
#define TCG_TARGET_HAS_not_vec          0
#define TCG_TARGET_HAS_andc_vec         0
#define TCG_TARGET_HAS_orc_vec          0
#define TCG_TARGET_HAS_nand_vec         0
#define TCG_TARGET_HAS_nor_vec          0
#define TCG_TARGET_HAS_eqv_vec          0
#define TCG_TARGET_HAS_roti_vec         0
#define TCG_TARGET_HAS_rots_vec         0
#define TCG_TARGET_HAS_rotv_vec         0
#define TCG_TARGET_HAS_shi_vec          0
#define TCG_TARGET_HAS_shs_vec          0
#define TCG_TARGET_HAS_shv_vec          0
#define TCG_TARGET_HAS_mul_vec          0
#define TCG_TARGET_HAS_sat_vec          0
#define TCG_TARGET_HAS_minmax_vec       0
#define TCG_TARGET_HAS_bitsel_vec       0
#define TCG_TARGET_HAS_cmpsel_vec       0
#else
#define TCG_TARGET_MAYBE_vec            1
#endif
#ifndef TCG_TARGET_HAS_v64
#define TCG_TARGET_HAS_v64              0
#endif
#ifndef TCG_TARGET_HAS_v128
#define TCG_TARGET_HAS_v128             0
#endif
#ifndef TCG_TARGET_HAS_v256
#define TCG_TARGET_HAS_v256             0
#endif

#ifndef TARGET_INSN_START_EXTRA_WORDS
# define TARGET_INSN_START_WORDS 1
#else
# define TARGET_INSN_START_WORDS (1 + TARGET_INSN_START_EXTRA_WORDS)
#endif

typedef enum TCGOpcode {
#define DEF(name, oargs, iargs, cargs, flags) INDEX_op_ ## name,
#include "tcg/tcg-opc.h"
#undef DEF
    NB_OPS,
} TCGOpcode;

#define tcg_regset_set_reg(d, r)   ((d) |= (TCGRegSet)1 << (r))
#define tcg_regset_reset_reg(d, r) ((d) &= ~((TCGRegSet)1 << (r)))
#define tcg_regset_test_reg(d, r)  (((d) >> (r)) & 1)

#ifndef TCG_TARGET_INSN_UNIT_SIZE
# error "Missing TCG_TARGET_INSN_UNIT_SIZE"
#elif TCG_TARGET_INSN_UNIT_SIZE == 1
typedef uint8_t tcg_insn_unit;
#elif TCG_TARGET_INSN_UNIT_SIZE == 2
typedef uint16_t tcg_insn_unit;
#elif TCG_TARGET_INSN_UNIT_SIZE == 4
typedef uint32_t tcg_insn_unit;
#elif TCG_TARGET_INSN_UNIT_SIZE == 8
typedef uint64_t tcg_insn_unit;
#else
/* The port better have done this.  */
#endif
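
/*
 * Illustrative sketch: tcg_insn_unit lets backend emitters write whole
 * host instructions with simple pointer arithmetic.  On a host with
 * fixed 4-byte instructions (TCG_TARGET_INSN_UNIT_SIZE == 4), emitting
 * one instruction amounts to
 *
 *     *s->code_ptr++ = insn;    // insn is a uint32_t tcg_insn_unit
 */
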
#if defined CONFIG_DEBUG_TCG || defined QEMU_STATIC_ANALYSIS
# define tcg_debug_assert(X) do { assert(X); } while (0)
#else
# define tcg_debug_assert(X) \
    do { if (!(X)) { __builtin_unreachable(); } } while (0)
#endif

typedef struct TCGRelocation TCGRelocation;
struct TCGRelocation {
    QSIMPLEQ_ENTRY(TCGRelocation) next;
    tcg_insn_unit *ptr;
    intptr_t addend;
    int type;
};

typedef struct TCGLabel TCGLabel;
struct TCGLabel {
    unsigned present : 1;
    unsigned has_value : 1;
    unsigned id : 14;
    unsigned refs : 16;
    union {
        uintptr_t value;
        const tcg_insn_unit *value_ptr;
    } u;
    QSIMPLEQ_HEAD(, TCGRelocation) relocs;
    QSIMPLEQ_ENTRY(TCGLabel) next;
};

typedef struct TCGPool {
    struct TCGPool *next;
    int size;
    uint8_t data[] __attribute__ ((aligned));
} TCGPool;

#define TCG_POOL_CHUNK_SIZE 32768

#define TCG_MAX_TEMPS 512
#define TCG_MAX_INSNS 512

/* when the size of the arguments of a called function is smaller than
   this value, they are statically allocated in the TB stack frame */
#define TCG_STATIC_CALL_ARGS_SIZE 128

typedef enum TCGType {
    TCG_TYPE_I32,
    TCG_TYPE_I64,

    TCG_TYPE_V64,
    TCG_TYPE_V128,
    TCG_TYPE_V256,

    TCG_TYPE_COUNT, /* number of different types */

    /* An alias for the size of the host register.  */
#if TCG_TARGET_REG_BITS == 32
    TCG_TYPE_REG = TCG_TYPE_I32,
#else
    TCG_TYPE_REG = TCG_TYPE_I64,
#endif

    /* An alias for the size of the native pointer.  */
#if UINTPTR_MAX == UINT32_MAX
    TCG_TYPE_PTR = TCG_TYPE_I32,
#else
    TCG_TYPE_PTR = TCG_TYPE_I64,
#endif

    /* An alias for the size of the target "long", aka register.  */
#if TARGET_LONG_BITS == 64
    TCG_TYPE_TL = TCG_TYPE_I64,
#else
    TCG_TYPE_TL = TCG_TYPE_I32,
#endif
} TCGType;

/**
 * get_alignment_bits
 * @memop: MemOp value
 *
 * Extract the alignment size from the memop.
 */
static inline unsigned get_alignment_bits(MemOp memop)
{
    unsigned a = memop & MO_AMASK;

    if (a == MO_UNALN) {
        /* No alignment required.  */
        a = 0;
    } else if (a == MO_ALIGN) {
        /* A natural alignment requirement.  */
        a = memop & MO_SIZE;
    } else {
        /* A specific alignment requirement.  */
        a = a >> MO_ASHIFT;
    }
#if defined(CONFIG_SOFTMMU)
    /* The requested alignment cannot overlap the TLB flags.  */
    tcg_debug_assert((TLB_FLAGS_MASK & ((1 << a) - 1)) == 0);
#endif
    return a;
}
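
/*
 * Worked examples for get_alignment_bits() (illustrative only):
 *
 *     get_alignment_bits(MO_32 | MO_ALIGN)    == 2   natural: 4-byte access
 *     get_alignment_bits(MO_64 | MO_ALIGN)    == 3   natural: 8-byte access
 *     get_alignment_bits(MO_32 | MO_ALIGN_16) == 4   explicit 16-byte alignment
 *     get_alignment_bits(MO_32 | MO_UNALN)    == 0   no requirement
 *
 * i.e. the result is log2 of the required alignment in bytes.
 */
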
typedef tcg_target_ulong TCGArg;

/* Define type and accessor macros for TCG variables.

   TCG variables are the inputs and outputs of TCG ops, as described
   in tcg/README. Target CPU front-end code uses these types to deal
   with TCG variables as it emits TCG code via the tcg_gen_* functions.
   They come in several flavours:
    * TCGv_i32 : 32 bit integer type
    * TCGv_i64 : 64 bit integer type
    * TCGv_ptr : a host pointer type
    * TCGv_vec : a host vector type; the exact size is not exposed
                 to the CPU front-end code.
    * TCGv : an integer type the same size as target_ulong
             (an alias for either TCGv_i32 or TCGv_i64)
   The compiler's type checking will complain if you mix them
   up and pass the wrong sized TCGv to a function.

   Users of tcg_gen_* don't need to know about any of the internal
   details of these, and should treat them as opaque types.
   You won't be able to look inside them in a debugger either.

   Internal implementation details follow:

   Note that there is no definition of the structs TCGv_i32_d etc anywhere.
   This is deliberate, because the values we store in variables of type
   TCGv_i32 are not really pointers-to-structures. They're just small
   integers, but keeping them in pointer types like this means that the
   compiler will complain if you accidentally pass a TCGv_i32 to a
   function which takes a TCGv_i64, and so on. Only the internals of
   TCG need to care about the actual contents of the types.  */

typedef struct TCGv_i32_d *TCGv_i32;
typedef struct TCGv_i64_d *TCGv_i64;
typedef struct TCGv_ptr_d *TCGv_ptr;
typedef struct TCGv_vec_d *TCGv_vec;
typedef TCGv_ptr TCGv_env;
#if TARGET_LONG_BITS == 32
#define TCGv TCGv_i32
#elif TARGET_LONG_BITS == 64
#define TCGv TCGv_i64
#else
#error Unhandled TARGET_LONG_BITS value
#endif

/* call flags */
/* Helper does not read globals (either directly or through an exception). It
   implies TCG_CALL_NO_WRITE_GLOBALS. */
#define TCG_CALL_NO_READ_GLOBALS    0x0001
/* Helper does not write globals */
#define TCG_CALL_NO_WRITE_GLOBALS   0x0002
/* Helper can be safely suppressed if the return value is not used. */
#define TCG_CALL_NO_SIDE_EFFECTS    0x0004
/* Helper is QEMU_NORETURN.  */
#define TCG_CALL_NO_RETURN          0x0008

/* convenience version of most used call flags */
#define TCG_CALL_NO_RWG         TCG_CALL_NO_READ_GLOBALS
#define TCG_CALL_NO_WG          TCG_CALL_NO_WRITE_GLOBALS
#define TCG_CALL_NO_SE          TCG_CALL_NO_SIDE_EFFECTS
#define TCG_CALL_NO_RWG_SE      (TCG_CALL_NO_RWG | TCG_CALL_NO_SE)
#define TCG_CALL_NO_WG_SE       (TCG_CALL_NO_WG | TCG_CALL_NO_SE)

/* Used to align parameters.  See the comment before tcgv_i32_temp.  */
#define TCG_CALL_DUMMY_ARG      ((TCGArg)0)

/*
 * Flags for the bswap opcodes.
 * If IZ, the input is zero-extended, otherwise unknown.
 * If OZ or OS, the output is zero- or sign-extended respectively,
 * otherwise the high bits are undefined.
 */
enum {
    TCG_BSWAP_IZ = 1,
    TCG_BSWAP_OZ = 2,
    TCG_BSWAP_OS = 4,
};

typedef enum TCGTempVal {
    TEMP_VAL_DEAD,
    TEMP_VAL_REG,
    TEMP_VAL_MEM,
    TEMP_VAL_CONST,
} TCGTempVal;

typedef enum TCGTempKind {
    /* Temp is dead at the end of all basic blocks. */
    TEMP_NORMAL,
    /* Temp is saved across basic blocks but dead at the end of TBs. */
    TEMP_LOCAL,
    /* Temp is saved across both basic blocks and translation blocks. */
    TEMP_GLOBAL,
    /* Temp is in a fixed register. */
    TEMP_FIXED,
    /* Temp is a fixed constant. */
    TEMP_CONST,
} TCGTempKind;
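
/*
 * Illustrative sketch of the lifetime rules above, as seen from a
 * front end (allocation APIs are declared later in this header):
 *
 *     TCGv_i32 t = tcg_temp_new_i32();        // TEMP_NORMAL
 *     TCGv_i32 l = tcg_temp_local_new_i32();  // TEMP_LOCAL
 *
 * 't' must not be relied upon across a branch or label (the end of a
 * basic block), while 'l' keeps its value across basic blocks within
 * the same translation block.
 */
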
typedef struct TCGTemp {
    TCGReg reg:8;
    TCGTempVal val_type:8;
    TCGType base_type:8;
    TCGType type:8;
    TCGTempKind kind:3;
    unsigned int indirect_reg:1;
    unsigned int indirect_base:1;
    unsigned int mem_coherent:1;
    unsigned int mem_allocated:1;
    unsigned int temp_allocated:1;

    int64_t val;
    struct TCGTemp *mem_base;
    intptr_t mem_offset;
    const char *name;

    /* Pass-specific information that can be stored for a temporary.
       One word worth of integer data, and one pointer to data
       allocated separately.  */
    uintptr_t state;
    void *state_ptr;
} TCGTemp;

typedef struct TCGContext TCGContext;

typedef struct TCGTempSet {
    unsigned long l[BITS_TO_LONGS(TCG_MAX_TEMPS)];
} TCGTempSet;

/* While we limit helpers to 6 arguments, for 32-bit hosts, with padding,
   this implies a max of 6*2 (64-bit in) + 2 (64-bit out) = 14 operands.
   There are never more than 2 outputs, which means that we can store all
   dead + sync data within 16 bits.  */
#define DEAD_ARG  4
#define SYNC_ARG  1
typedef uint16_t TCGLifeData;

/* The layout here is designed to avoid a bitfield crossing of
   a 32-bit boundary, which would cause GCC to add extra padding.  */
typedef struct TCGOp {
    TCGOpcode opc   : 8;        /*  8 */

    /* Parameters for this opcode.  See below.  */
    unsigned param1 : 4;        /* 12 */
    unsigned param2 : 4;        /* 16 */

    /* Lifetime data of the operands.  */
    unsigned life   : 16;       /* 32 */

    /* Next and previous opcodes.  */
    QTAILQ_ENTRY(TCGOp) link;

    /* Arguments for the opcode.  */
    TCGArg args[MAX_OPC_PARAM];

    /* Register preferences for the output(s).  */
    TCGRegSet output_pref[2];
} TCGOp;

#define TCGOP_CALLI(X)    (X)->param1
#define TCGOP_CALLO(X)    (X)->param2

#define TCGOP_VECL(X)     (X)->param1
#define TCGOP_VECE(X)     (X)->param2

/* Make sure operands fit in the bitfields above.  */
QEMU_BUILD_BUG_ON(NB_OPS > (1 << 8));

typedef struct TCGProfile {
    int64_t cpu_exec_time;
    int64_t tb_count1;
    int64_t tb_count;
    int64_t op_count; /* total insn count */
    int op_count_max; /* max insn per TB */
    int temp_count_max;
    int64_t temp_count;
    int64_t del_op_count;
    int64_t code_in_len;
    int64_t code_out_len;
    int64_t search_out_len;
    int64_t interm_time;
    int64_t code_time;
    int64_t la_time;
    int64_t opt_time;
    int64_t restore_count;
    int64_t restore_time;
    int64_t table_op_count[NB_OPS];
} TCGProfile;

struct TCGContext {
    uint8_t *pool_cur, *pool_end;
    TCGPool *pool_first, *pool_current, *pool_first_large;
    int nb_labels;
    int nb_globals;
    int nb_temps;
    int nb_indirects;
    int nb_ops;

    /* goto_tb support */
    tcg_insn_unit *code_buf;
    uint16_t *tb_jmp_reset_offset; /* tb->jmp_reset_offset */
    uintptr_t *tb_jmp_insn_offset; /* tb->jmp_target_arg if direct_jump */
    uintptr_t *tb_jmp_target_addr; /* tb->jmp_target_arg if !direct_jump */

    TCGRegSet reserved_regs;
    uint32_t tb_cflags; /* cflags of the current TB */
    intptr_t current_frame_offset;
    intptr_t frame_start;
    intptr_t frame_end;
    TCGTemp *frame_temp;

    tcg_insn_unit *code_ptr;

#ifdef CONFIG_PROFILER
    TCGProfile prof;
#endif

#ifdef CONFIG_DEBUG_TCG
    int temps_in_use;
    int goto_tb_issue_mask;
    const TCGOpcode *vecop_list;
#endif

    /* Code generation.  Note that we specifically do not use tcg_insn_unit
       here, because there's too much arithmetic throughout that relies
       on addition and subtraction working on bytes.  Rely on the GCC
       extension that allows arithmetic on void*.  */
    void *code_gen_buffer;
    size_t code_gen_buffer_size;
    void *code_gen_ptr;
    void *data_gen_ptr;

    /* Threshold to flush the translated code buffer.  */
    void *code_gen_highwater;
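
    /*
     * Illustrative sketch (not a verbatim quote from tcg.c): the
     * high-water mark sits somewhat below the true end of the buffer,
     * so no single op starting below the mark can overrun it, and a
     * generation loop can test for overflow after emitting each op:
     *
     *     if (unlikely((void *)s->code_ptr > s->code_gen_highwater)) {
     *         return -1;   // buffer nearly full; caller flushes all TBs
     *     }
     */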

    /* Track which vCPU triggers events */
    CPUState *cpu;                      /* *_trans */

    /* These structures are private to tcg-target.c.inc.  */
#ifdef TCG_TARGET_NEED_LDST_LABELS
    QSIMPLEQ_HEAD(, TCGLabelQemuLdst) ldst_labels;
#endif
#ifdef TCG_TARGET_NEED_POOL_LABELS
    struct TCGLabelPoolData *pool_labels;
#endif

    TCGLabel *exitreq_label;

#ifdef CONFIG_PLUGIN
    /*
     * We keep one plugin_tb struct per TCGContext. Note that on every TB
     * translation we clear but do not free its contents; this way we
     * avoid a lot of malloc/free churn, since after a few TB's it's
     * unlikely that we'll need to allocate either more instructions or more
     * space for instructions (for variable-instruction-length ISAs).
     */
    struct qemu_plugin_tb *plugin_tb;

    /* descriptor of the instruction being translated */
    struct qemu_plugin_insn *plugin_insn;
#endif

    GHashTable *const_table[TCG_TYPE_COUNT];
    TCGTempSet free_temps[TCG_TYPE_COUNT * 2];
    TCGTemp temps[TCG_MAX_TEMPS]; /* globals first, temps after */

    QTAILQ_HEAD(, TCGOp) ops, free_ops;
    QSIMPLEQ_HEAD(, TCGLabel) labels;

    /* Tells which temporary holds a given register.
       It does not take into account fixed registers */
    TCGTemp *reg_to_temp[TCG_TARGET_NB_REGS];

    uint16_t gen_insn_end_off[TCG_MAX_INSNS];
    target_ulong gen_insn_data[TCG_MAX_INSNS][TARGET_INSN_START_WORDS];

    /* Exit to translator on overflow.  */
    sigjmp_buf jmp_trans;
};

static inline bool temp_readonly(TCGTemp *ts)
{
    return ts->kind >= TEMP_FIXED;
}

extern __thread TCGContext *tcg_ctx;
extern const void *tcg_code_gen_epilogue;
extern uintptr_t tcg_splitwx_diff;
extern TCGv_env cpu_env;

bool in_code_gen_buffer(const void *p);

#ifdef CONFIG_DEBUG_TCG
const void *tcg_splitwx_to_rx(void *rw);
void *tcg_splitwx_to_rw(const void *rx);
#else
static inline const void *tcg_splitwx_to_rx(void *rw)
{
    return rw ? rw + tcg_splitwx_diff : NULL;
}

static inline void *tcg_splitwx_to_rw(const void *rx)
{
    return rx ? (void *)rx - tcg_splitwx_diff : NULL;
}
#endif

static inline size_t temp_idx(TCGTemp *ts)
{
    ptrdiff_t n = ts - tcg_ctx->temps;
    tcg_debug_assert(n >= 0 && n < tcg_ctx->nb_temps);
    return n;
}

static inline TCGArg temp_arg(TCGTemp *ts)
{
    return (uintptr_t)ts;
}

static inline TCGTemp *arg_temp(TCGArg a)
{
    return (TCGTemp *)(uintptr_t)a;
}
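
/*
 * Illustrative sketch: temp_arg()/arg_temp() show that a TCGArg in an
 * opcode's argument list is just the TCGTemp pointer in disguise:
 *
 *     TCGTemp *ts = arg_temp(op->args[0]);
 *     tcg_debug_assert(temp_arg(ts) == op->args[0]);  // exact round trip
 */
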
/* Using the offset of a temporary, relative to TCGContext, rather than
   its index means that we don't use 0.  That leaves offset 0 free for
   a NULL representation without having to leave index 0 unused.  */
static inline TCGTemp *tcgv_i32_temp(TCGv_i32 v)
{
    uintptr_t o = (uintptr_t)v;
    TCGTemp *t = (void *)tcg_ctx + o;
    tcg_debug_assert(offsetof(TCGContext, temps[temp_idx(t)]) == o);
    return t;
}

static inline TCGTemp *tcgv_i64_temp(TCGv_i64 v)
{
    return tcgv_i32_temp((TCGv_i32)v);
}

static inline TCGTemp *tcgv_ptr_temp(TCGv_ptr v)
{
    return tcgv_i32_temp((TCGv_i32)v);
}

static inline TCGTemp *tcgv_vec_temp(TCGv_vec v)
{
    return tcgv_i32_temp((TCGv_i32)v);
}

static inline TCGArg tcgv_i32_arg(TCGv_i32 v)
{
    return temp_arg(tcgv_i32_temp(v));
}

static inline TCGArg tcgv_i64_arg(TCGv_i64 v)
{
    return temp_arg(tcgv_i64_temp(v));
}

static inline TCGArg tcgv_ptr_arg(TCGv_ptr v)
{
    return temp_arg(tcgv_ptr_temp(v));
}

static inline TCGArg tcgv_vec_arg(TCGv_vec v)
{
    return temp_arg(tcgv_vec_temp(v));
}

static inline TCGv_i32 temp_tcgv_i32(TCGTemp *t)
{
    (void)temp_idx(t); /* trigger embedded assert */
    return (TCGv_i32)((void *)t - (void *)tcg_ctx);
}

static inline TCGv_i64 temp_tcgv_i64(TCGTemp *t)
{
    return (TCGv_i64)temp_tcgv_i32(t);
}

static inline TCGv_ptr temp_tcgv_ptr(TCGTemp *t)
{
    return (TCGv_ptr)temp_tcgv_i32(t);
}

static inline TCGv_vec temp_tcgv_vec(TCGTemp *t)
{
    return (TCGv_vec)temp_tcgv_i32(t);
}

#if TCG_TARGET_REG_BITS == 32
static inline TCGv_i32 TCGV_LOW(TCGv_i64 t)
{
    return temp_tcgv_i32(tcgv_i64_temp(t));
}

static inline TCGv_i32 TCGV_HIGH(TCGv_i64 t)
{
    return temp_tcgv_i32(tcgv_i64_temp(t) + 1);
}
#endif

static inline TCGArg tcg_get_insn_param(TCGOp *op, int arg)
{
    return op->args[arg];
}

static inline void tcg_set_insn_param(TCGOp *op, int arg, TCGArg v)
{
    op->args[arg] = v;
}

static inline target_ulong tcg_get_insn_start_param(TCGOp *op, int arg)
{
#if TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
    return tcg_get_insn_param(op, arg);
#else
    return tcg_get_insn_param(op, arg * 2) |
           ((uint64_t)tcg_get_insn_param(op, arg * 2 + 1) << 32);
#endif
}

static inline void tcg_set_insn_start_param(TCGOp *op, int arg, target_ulong v)
{
#if TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
    tcg_set_insn_param(op, arg, v);
#else
    tcg_set_insn_param(op, arg * 2, v);
    tcg_set_insn_param(op, arg * 2 + 1, v >> 32);
#endif
}
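
/*
 * Illustrative sketch: for a 64-bit guest on a 32-bit host
 * (TARGET_LONG_BITS > TCG_TARGET_REG_BITS), each insn_start parameter
 * occupies two TCGArg slots, which the accessors above hide:
 *
 *     tcg_set_insn_start_param(op, 0, pc);
 *     // stores the low half in args[0] and the high half in args[1];
 *     // tcg_get_insn_start_param(op, 0) reassembles the 64-bit value.
 */
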
/* The last op that was emitted.  */
static inline TCGOp *tcg_last_op(void)
{
    return QTAILQ_LAST(&tcg_ctx->ops);
}

/* Test for whether to terminate the TB for using too many opcodes.  */
static inline bool tcg_op_buf_full(void)
{
    /* This is not a hard limit, it merely stops translation when
     * we have produced "enough" opcodes.  We want to limit TB size
     * such that a RISC host can reasonably use a 16-bit signed
     * branch within the TB.  We also need to be mindful of the
     * 16-bit unsigned offsets, TranslationBlock.jmp_reset_offset[]
     * and TCGContext.gen_insn_end_off[].
     */
    return tcg_ctx->nb_ops >= 4000;
}

/* pool based memory allocation */

/* user-mode: mmap_lock must be held for tcg_malloc_internal. */
void *tcg_malloc_internal(TCGContext *s, int size);
void tcg_pool_reset(TCGContext *s);
TranslationBlock *tcg_tb_alloc(TCGContext *s);

void tcg_region_reset_all(void);

size_t tcg_code_size(void);
size_t tcg_code_capacity(void);

void tcg_tb_insert(TranslationBlock *tb);
void tcg_tb_remove(TranslationBlock *tb);
TranslationBlock *tcg_tb_lookup(uintptr_t tc_ptr);
void tcg_tb_foreach(GTraverseFunc func, gpointer user_data);
size_t tcg_nb_tbs(void);

/* user-mode: Called with mmap_lock held.  */
static inline void *tcg_malloc(int size)
{
    TCGContext *s = tcg_ctx;
    uint8_t *ptr, *ptr_end;

    /* ??? This is a weak placeholder for minimum malloc alignment.  */
    size = QEMU_ALIGN_UP(size, 8);

    ptr = s->pool_cur;
    ptr_end = ptr + size;
    if (unlikely(ptr_end > s->pool_end)) {
        return tcg_malloc_internal(tcg_ctx, size);
    } else {
        s->pool_cur = ptr_end;
        return ptr;
    }
}

void tcg_init(size_t tb_size, int splitwx, unsigned max_cpus);
void tcg_register_thread(void);
void tcg_prologue_init(TCGContext *s);
void tcg_func_start(TCGContext *s);

int tcg_gen_code(TCGContext *s, TranslationBlock *tb);

void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size);

TCGTemp *tcg_global_mem_new_internal(TCGType, TCGv_ptr,
                                     intptr_t, const char *);
TCGTemp *tcg_temp_new_internal(TCGType, bool);
void tcg_temp_free_internal(TCGTemp *);
TCGv_vec tcg_temp_new_vec(TCGType type);
TCGv_vec tcg_temp_new_vec_matching(TCGv_vec match);

static inline void tcg_temp_free_i32(TCGv_i32 arg)
{
    tcg_temp_free_internal(tcgv_i32_temp(arg));
}

static inline void tcg_temp_free_i64(TCGv_i64 arg)
{
    tcg_temp_free_internal(tcgv_i64_temp(arg));
}

static inline void tcg_temp_free_ptr(TCGv_ptr arg)
{
    tcg_temp_free_internal(tcgv_ptr_temp(arg));
}

static inline void tcg_temp_free_vec(TCGv_vec arg)
{
    tcg_temp_free_internal(tcgv_vec_temp(arg));
}

static inline TCGv_i32 tcg_global_mem_new_i32(TCGv_ptr reg, intptr_t offset,
                                              const char *name)
{
    TCGTemp *t = tcg_global_mem_new_internal(TCG_TYPE_I32, reg, offset, name);
    return temp_tcgv_i32(t);
}

static inline TCGv_i32 tcg_temp_new_i32(void)
{
    TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I32, false);
    return temp_tcgv_i32(t);
}

static inline TCGv_i32 tcg_temp_local_new_i32(void)
{
    TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I32, true);
    return temp_tcgv_i32(t);
}

static inline TCGv_i64 tcg_global_mem_new_i64(TCGv_ptr reg, intptr_t offset,
                                              const char *name)
{
    TCGTemp *t = tcg_global_mem_new_internal(TCG_TYPE_I64, reg, offset, name);
    return temp_tcgv_i64(t);
}

static inline TCGv_i64 tcg_temp_new_i64(void)
{
    TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I64, false);
    return temp_tcgv_i64(t);
}

static inline TCGv_i64 tcg_temp_local_new_i64(void)
{
    TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I64, true);
    return temp_tcgv_i64(t);
}
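
/*
 * Illustrative sketch: a target front end typically creates its globals
 * once at translator init time, backed by fields of its CPU state
 * structure (the "pc" field name here is hypothetical):
 *
 *     TCGv_i64 cpu_pc = tcg_global_mem_new_i64(
 *         cpu_env, offsetof(CPUArchState, pc), "pc");
 *
 * The returned TCGv_i64 is a TEMP_GLOBAL and stays valid across TBs.
 */
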
static inline TCGv_ptr tcg_global_mem_new_ptr(TCGv_ptr reg, intptr_t offset,
                                              const char *name)
{
    TCGTemp *t = tcg_global_mem_new_internal(TCG_TYPE_PTR, reg, offset, name);
    return temp_tcgv_ptr(t);
}

static inline TCGv_ptr tcg_temp_new_ptr(void)
{
    TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_PTR, false);
    return temp_tcgv_ptr(t);
}

static inline TCGv_ptr tcg_temp_local_new_ptr(void)
{
    TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_PTR, true);
    return temp_tcgv_ptr(t);
}

#if defined(CONFIG_DEBUG_TCG)
/* If you call tcg_clear_temp_count() at the start of a section of
 * code which is not supposed to leak any TCG temporaries, then
 * calling tcg_check_temp_count() at the end of the section will
 * return 1 if the section did in fact leak a temporary.
 */
void tcg_clear_temp_count(void);
int tcg_check_temp_count(void);
#else
#define tcg_clear_temp_count() do { } while (0)
#define tcg_check_temp_count() 0
#endif

int64_t tcg_cpu_exec_time(void);
void tcg_dump_info(GString *buf);
void tcg_dump_op_count(GString *buf);

#define TCG_CT_CONST  1 /* any constant of register size */

typedef struct TCGArgConstraint {
    unsigned ct : 16;
    unsigned alias_index : 4;
    unsigned sort_index : 4;
    bool oalias : 1;
    bool ialias : 1;
    bool newreg : 1;
    TCGRegSet regs;
} TCGArgConstraint;

#define TCG_MAX_OP_ARGS 16

/* Bits for TCGOpDef->flags, 8 bits available, all used.  */
enum {
    /* Instruction exits the translation block.  */
    TCG_OPF_BB_EXIT      = 0x01,
    /* Instruction defines the end of a basic block.  */
    TCG_OPF_BB_END       = 0x02,
    /* Instruction clobbers call registers and potentially updates globals.  */
    TCG_OPF_CALL_CLOBBER = 0x04,
    /* Instruction has side effects: it cannot be removed if its outputs
       are not used, and might trigger exceptions.  */
    TCG_OPF_SIDE_EFFECTS = 0x08,
    /* Instruction operands are 64-bits (otherwise 32-bits).  */
    TCG_OPF_64BIT        = 0x10,
    /* Instruction is optional and not implemented by the host, or insn
       is generic and should not be implemented by the host.  */
    TCG_OPF_NOT_PRESENT  = 0x20,
    /* Instruction operands are vectors.  */
    TCG_OPF_VECTOR       = 0x40,
    /* Instruction is a conditional branch.  */
    TCG_OPF_COND_BRANCH  = 0x80
};

typedef struct TCGOpDef {
    const char *name;
    uint8_t nb_oargs, nb_iargs, nb_cargs, nb_args;
    uint8_t flags;
    TCGArgConstraint *args_ct;
} TCGOpDef;

extern TCGOpDef tcg_op_defs[];
extern const size_t tcg_op_defs_max;

typedef struct TCGTargetOpDef {
    TCGOpcode op;
    const char *args_ct_str[TCG_MAX_OP_ARGS];
} TCGTargetOpDef;

#define tcg_abort() \
do {\
    fprintf(stderr, "%s:%d: tcg fatal error\n", __FILE__, __LINE__);\
    abort();\
} while (0)

bool tcg_op_supported(TCGOpcode op);

void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args);

TCGOp *tcg_emit_op(TCGOpcode opc);
void tcg_op_remove(TCGContext *s, TCGOp *op);
TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *op, TCGOpcode opc);
TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *op, TCGOpcode opc);

/**
 * tcg_remove_ops_after:
 * @op: target operation
 *
 * Discard any opcodes emitted since @op.  Expected usage is to save
 * a starting point with tcg_last_op(), speculatively emit opcodes,
 * then decide whether or not to keep those opcodes after the fact.
 */
void tcg_remove_ops_after(TCGOp *op);

void tcg_optimize(TCGContext *s);
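
/*
 * Illustrative sketch of the tcg_remove_ops_after() pattern described
 * above:
 *
 *     TCGOp *last = tcg_last_op();
 *     ... speculatively emit opcodes ...
 *     if (!worth_keeping) {
 *         tcg_remove_ops_after(last);  // discard everything just emitted
 *     }
 */
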
/* Allocate a new temporary and initialize it with a constant.  */
TCGv_i32 tcg_const_i32(int32_t val);
TCGv_i64 tcg_const_i64(int64_t val);
TCGv_i32 tcg_const_local_i32(int32_t val);
TCGv_i64 tcg_const_local_i64(int64_t val);
TCGv_vec tcg_const_zeros_vec(TCGType);
TCGv_vec tcg_const_ones_vec(TCGType);
TCGv_vec tcg_const_zeros_vec_matching(TCGv_vec);
TCGv_vec tcg_const_ones_vec_matching(TCGv_vec);

/*
 * Locate or create a read-only temporary that is a constant.
 * This kind of temporary need not be freed, but for convenience
 * will be silently ignored by tcg_temp_free_*.
 */
TCGTemp *tcg_constant_internal(TCGType type, int64_t val);

static inline TCGv_i32 tcg_constant_i32(int32_t val)
{
    return temp_tcgv_i32(tcg_constant_internal(TCG_TYPE_I32, val));
}

static inline TCGv_i64 tcg_constant_i64(int64_t val)
{
    return temp_tcgv_i64(tcg_constant_internal(TCG_TYPE_I64, val));
}

TCGv_vec tcg_constant_vec(TCGType type, unsigned vece, int64_t val);
TCGv_vec tcg_constant_vec_matching(TCGv_vec match, unsigned vece, int64_t val);

#if UINTPTR_MAX == UINT32_MAX
# define tcg_const_ptr(x)        ((TCGv_ptr)tcg_const_i32((intptr_t)(x)))
# define tcg_const_local_ptr(x)  ((TCGv_ptr)tcg_const_local_i32((intptr_t)(x)))
#else
# define tcg_const_ptr(x)        ((TCGv_ptr)tcg_const_i64((intptr_t)(x)))
# define tcg_const_local_ptr(x)  ((TCGv_ptr)tcg_const_local_i64((intptr_t)(x)))
#endif

TCGLabel *gen_new_label(void);

/**
 * label_arg
 * @l: label
 *
 * Encode a label for storage in the TCG opcode stream.
 */

static inline TCGArg label_arg(TCGLabel *l)
{
    return (uintptr_t)l;
}

/**
 * arg_label
 * @i: value
 *
 * The opposite of label_arg.  Retrieve a label from the
 * encoding of the TCG opcode stream.
 */

static inline TCGLabel *arg_label(TCGArg i)
{
    return (TCGLabel *)(uintptr_t)i;
}
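
/*
 * Illustrative sketch: labels travel through the opcode stream as
 * TCGArg values, and label_arg()/arg_label() are exact inverses:
 *
 *     TCGLabel *l = gen_new_label();
 *     TCGArg a = label_arg(l);
 *     tcg_debug_assert(arg_label(a) == l);
 */
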
/**
 * tcg_ptr_byte_diff
 * @a, @b: addresses to be differenced
 *
 * There are many places within the TCG backends where we need a byte
 * difference between two pointers.  While this can be accomplished
 * with local casting, it's easy to get wrong -- especially if one is
 * concerned with the signedness of the result.
 *
 * This version relies on GCC's void pointer arithmetic to get the
 * correct result.
 */

static inline ptrdiff_t tcg_ptr_byte_diff(const void *a, const void *b)
{
    return a - b;
}

/**
 * tcg_pcrel_diff
 * @s: the tcg context
 * @target: address of the target
 *
 * Produce a pc-relative difference, from the current code_ptr
 * to the destination address.
 */

static inline ptrdiff_t tcg_pcrel_diff(TCGContext *s, const void *target)
{
    return tcg_ptr_byte_diff(target, tcg_splitwx_to_rx(s->code_ptr));
}

/**
 * tcg_tbrel_diff
 * @s: the tcg context
 * @target: address of the target
 *
 * Produce a difference, from the beginning of the current TB code
 * to the destination address.
 */
static inline ptrdiff_t tcg_tbrel_diff(TCGContext *s, const void *target)
{
    return tcg_ptr_byte_diff(target, tcg_splitwx_to_rx(s->code_buf));
}

/**
 * tcg_current_code_size
 * @s: the tcg context
 *
 * Compute the current code size within the translation block.
 * This is used to fill in qemu's data structures for goto_tb.
 */

static inline size_t tcg_current_code_size(TCGContext *s)
{
    return tcg_ptr_byte_diff(s->code_ptr, s->code_buf);
}

/**
 * tcg_qemu_tb_exec:
 * @env: pointer to CPUArchState for the CPU
 * @tb_ptr: address of generated code for the TB to execute
 *
 * Start executing code from a given translation block.
 * Where translation blocks have been linked, execution
 * may proceed from the given TB into successive ones.
 * Control eventually returns only when some action is needed
 * from the top-level loop: either control must pass to a TB
 * which has not yet been directly linked, or an asynchronous
 * event such as an interrupt needs handling.
 *
 * Return: The return value is the value passed to the corresponding
 * tcg_gen_exit_tb() at translation time of the last TB attempted to execute.
 * The value is either zero or a 4-byte aligned pointer to that TB combined
 * with additional information in its two least significant bits. The
 * additional information is encoded as follows:
 *  0, 1: the link between this TB and the next is via the specified
 *        TB index (0 or 1). That is, we left the TB via (the equivalent
 *        of) "goto_tb <index>". The main loop uses this to determine
 *        how to link the TB just executed to the next.
 *  2:    we are using instruction counting code generation, and we
 *        did not start executing this TB because the instruction counter
 *        would hit zero midway through it. In this case the pointer
 *        returned is the TB we were about to execute, and the caller must
 *        arrange to execute the remaining count of instructions.
 *  3:    we stopped because the CPU's exit_request flag was set
 *        (usually meaning that there is an interrupt that needs to be
 *        handled). The pointer returned is the TB we were about to execute
 *        when we noticed the pending exit request.
 *
 * If the bottom two bits indicate an exit-via-index then the CPU
 * state is correctly synchronised and ready for execution of the next
 * TB (and in particular the guest PC is the address to execute next).
 * Otherwise, we gave up on execution of this TB before it started, and
 * the caller must fix up the CPU state by calling the CPU's
 * synchronize_from_tb() method with the TB pointer we return (falling
 * back to calling the CPU's set_pc method with tb->pc if no
 * synchronize_from_tb() method exists).
 *
 * Note that TCG targets may use a different definition of tcg_qemu_tb_exec
 * to this default (which just calls the prologue.code emitted by
 * tcg_target_qemu_prologue()).
 */
#define TB_EXIT_MASK      3
#define TB_EXIT_IDX0      0
#define TB_EXIT_IDX1      1
#define TB_EXIT_IDXMAX    1
#define TB_EXIT_REQUESTED 3

#ifdef CONFIG_TCG_INTERPRETER
uintptr_t tcg_qemu_tb_exec(CPUArchState *env, const void *tb_ptr);
#else
typedef uintptr_t tcg_prologue_fn(CPUArchState *env, const void *tb_ptr);
extern tcg_prologue_fn *tcg_qemu_tb_exec;
#endif

void tcg_register_jit(const void *buf, size_t buf_size);
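
/*
 * Illustrative sketch (following the Return: description above) of how
 * a caller decodes the value from tcg_qemu_tb_exec():
 *
 *     uintptr_t ret = tcg_qemu_tb_exec(env, tb_ptr);
 *     TranslationBlock *last_tb = (void *)(ret & ~TB_EXIT_MASK);
 *     int exit_reason = ret & TB_EXIT_MASK;  // TB_EXIT_IDX0/IDX1, 2,
 *                                            // or TB_EXIT_REQUESTED
 */
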
#if TCG_TARGET_MAYBE_vec
/* Return zero if the tuple (opc, type, vece) is unsupportable;
   return > 0 if it is directly supportable;
   return < 0 if we must call tcg_expand_vec_op.  */
int tcg_can_emit_vec_op(TCGOpcode, TCGType, unsigned);
#else
static inline int tcg_can_emit_vec_op(TCGOpcode o, TCGType t, unsigned ve)
{
    return 0;
}
#endif

/* Expand the tuple (opc, type, vece) on the given arguments.  */
void tcg_expand_vec_op(TCGOpcode, TCGType, unsigned, TCGArg, ...);

/* Replicate a constant C according to the log2 of the element size.  */
uint64_t dup_const(unsigned vece, uint64_t c);

#define dup_const(VECE, C)                                         \
    (__builtin_constant_p(VECE)                                    \
     ? (  (VECE) == MO_8  ? 0x0101010101010101ull * (uint8_t)(C)   \
        : (VECE) == MO_16 ? 0x0001000100010001ull * (uint16_t)(C)  \
        : (VECE) == MO_32 ? 0x0000000100000001ull * (uint32_t)(C)  \
        : (VECE) == MO_64 ? (uint64_t)(C)                          \
        : (qemu_build_not_reached_always(), 0))                    \
     : dup_const(VECE, C))

#if TARGET_LONG_BITS == 64
# define dup_const_tl dup_const
#else
# define dup_const_tl(VECE, C)                                     \
    (__builtin_constant_p(VECE)                                    \
     ? (  (VECE) == MO_8  ? 0x01010101ul * (uint8_t)(C)            \
        : (VECE) == MO_16 ? 0x00010001ul * (uint16_t)(C)           \
        : (VECE) == MO_32 ? 0x00000001ul * (uint32_t)(C)           \
        : (qemu_build_not_reached_always(), 0))                    \
     : (target_long)dup_const(VECE, C))
#endif

#ifdef CONFIG_DEBUG_TCG
void tcg_assert_listed_vecop(TCGOpcode);
#else
static inline void tcg_assert_listed_vecop(TCGOpcode op) { }
#endif

static inline const TCGOpcode *tcg_swap_vecop_list(const TCGOpcode *n)
{
#ifdef CONFIG_DEBUG_TCG
    const TCGOpcode *o = tcg_ctx->vecop_list;
    tcg_ctx->vecop_list = n;
    return o;
#else
    return NULL;
#endif
}

bool tcg_can_emit_vecop_list(const TCGOpcode *, TCGType, unsigned);

#endif /* TCG_H */