/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#ifndef TCG_H
#define TCG_H

#include "cpu.h"
#include "exec/memop.h"
#include "exec/tb-context.h"
#include "qemu/bitops.h"
#include "qemu/plugin.h"
#include "qemu/queue.h"
#include "tcg/tcg-mo.h"
#include "tcg-target.h"
#include "qemu/int128.h"

/* XXX: make safe guess about sizes */
#define MAX_OP_PER_INSTR 266

#if HOST_LONG_BITS == 32
#define MAX_OPC_PARAM_PER_ARG 2
#else
#define MAX_OPC_PARAM_PER_ARG 1
#endif
#define MAX_OPC_PARAM_IARGS 6
#define MAX_OPC_PARAM_OARGS 1
#define MAX_OPC_PARAM_ARGS (MAX_OPC_PARAM_IARGS + MAX_OPC_PARAM_OARGS)

/* A Call op needs up to 4 + 2N parameters on 32-bit archs,
 * and up to 4 + N parameters on 64-bit archs
 * (N = number of input arguments + output arguments).  */
#define MAX_OPC_PARAM (4 + (MAX_OPC_PARAM_PER_ARG * MAX_OPC_PARAM_ARGS))
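
/*
 * Illustrative arithmetic (not part of the original header): with the
 * values above, MAX_OPC_PARAM_ARGS is 6 + 1 = 7, so a call op needs at
 * most 4 + 2*7 = 18 argument slots on a 32-bit host (each 64-bit value
 * occupies two slots) and 4 + 1*7 = 11 slots on a 64-bit host.
 */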

#define CPU_TEMP_BUF_NLONGS 128

/* Default target word size to pointer size. */
#ifndef TCG_TARGET_REG_BITS
# if UINTPTR_MAX == UINT32_MAX
#  define TCG_TARGET_REG_BITS 32
# elif UINTPTR_MAX == UINT64_MAX
#  define TCG_TARGET_REG_BITS 64
# else
#  error Unknown pointer size for tcg target
# endif
#endif

#if TCG_TARGET_REG_BITS == 32
typedef int32_t tcg_target_long;
typedef uint32_t tcg_target_ulong;
#define TCG_PRIlx PRIx32
#define TCG_PRIld PRId32
#elif TCG_TARGET_REG_BITS == 64
typedef int64_t tcg_target_long;
typedef uint64_t tcg_target_ulong;
#define TCG_PRIlx PRIx64
#define TCG_PRIld PRId64
#else
#error unsupported
#endif

/* Oversized TCG guests make things like MTTCG hard
 * as we can't use atomics for cputlb updates.
 */
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
#define TCG_OVERSIZED_GUEST 1
#else
#define TCG_OVERSIZED_GUEST 0
#endif

#if TCG_TARGET_NB_REGS <= 32
typedef uint32_t TCGRegSet;
#elif TCG_TARGET_NB_REGS <= 64
typedef uint64_t TCGRegSet;
#else
#error unsupported
#endif

#if TCG_TARGET_REG_BITS == 32
/* Turn some undef macros into false macros.  */
#define TCG_TARGET_HAS_extrl_i64_i32 0
#define TCG_TARGET_HAS_extrh_i64_i32 0
#define TCG_TARGET_HAS_div_i64 0
#define TCG_TARGET_HAS_rem_i64 0
#define TCG_TARGET_HAS_div2_i64 0
#define TCG_TARGET_HAS_rot_i64 0
#define TCG_TARGET_HAS_ext8s_i64 0
#define TCG_TARGET_HAS_ext16s_i64 0
#define TCG_TARGET_HAS_ext32s_i64 0
#define TCG_TARGET_HAS_ext8u_i64 0
#define TCG_TARGET_HAS_ext16u_i64 0
#define TCG_TARGET_HAS_ext32u_i64 0
#define TCG_TARGET_HAS_bswap16_i64 0
#define TCG_TARGET_HAS_bswap32_i64 0
#define TCG_TARGET_HAS_bswap64_i64 0
#define TCG_TARGET_HAS_neg_i64 0
#define TCG_TARGET_HAS_not_i64 0
#define TCG_TARGET_HAS_andc_i64 0
#define TCG_TARGET_HAS_orc_i64 0
#define TCG_TARGET_HAS_eqv_i64 0
#define TCG_TARGET_HAS_nand_i64 0
#define TCG_TARGET_HAS_nor_i64 0
#define TCG_TARGET_HAS_clz_i64 0
#define TCG_TARGET_HAS_ctz_i64 0
#define TCG_TARGET_HAS_ctpop_i64 0
#define TCG_TARGET_HAS_deposit_i64 0
#define TCG_TARGET_HAS_extract_i64 0
#define TCG_TARGET_HAS_sextract_i64 0
#define TCG_TARGET_HAS_extract2_i64 0
#define TCG_TARGET_HAS_movcond_i64 0
#define TCG_TARGET_HAS_add2_i64 0
#define TCG_TARGET_HAS_sub2_i64 0
#define TCG_TARGET_HAS_mulu2_i64 0
#define TCG_TARGET_HAS_muls2_i64 0
#define TCG_TARGET_HAS_muluh_i64 0
#define TCG_TARGET_HAS_mulsh_i64 0
/* Turn some undef macros into true macros.  */
#define TCG_TARGET_HAS_add2_i32 1
#define TCG_TARGET_HAS_sub2_i32 1
#endif

#ifndef TCG_TARGET_deposit_i32_valid
#define TCG_TARGET_deposit_i32_valid(ofs, len) 1
#endif
#ifndef TCG_TARGET_deposit_i64_valid
#define TCG_TARGET_deposit_i64_valid(ofs, len) 1
#endif
#ifndef TCG_TARGET_extract_i32_valid
#define TCG_TARGET_extract_i32_valid(ofs, len) 1
#endif
#ifndef TCG_TARGET_extract_i64_valid
#define TCG_TARGET_extract_i64_valid(ofs, len) 1
#endif

/* Only one of DIV or DIV2 should be defined.  */
#if defined(TCG_TARGET_HAS_div_i32)
#define TCG_TARGET_HAS_div2_i32 0
#elif defined(TCG_TARGET_HAS_div2_i32)
#define TCG_TARGET_HAS_div_i32 0
#define TCG_TARGET_HAS_rem_i32 0
#endif
#if defined(TCG_TARGET_HAS_div_i64)
#define TCG_TARGET_HAS_div2_i64 0
#elif defined(TCG_TARGET_HAS_div2_i64)
#define TCG_TARGET_HAS_div_i64 0
#define TCG_TARGET_HAS_rem_i64 0
#endif
/* For 32-bit targets, some sort of unsigned widening multiply is required.  */
#if TCG_TARGET_REG_BITS == 32 \
    && !(defined(TCG_TARGET_HAS_mulu2_i32) \
         || defined(TCG_TARGET_HAS_muluh_i32))
# error "Missing unsigned widening multiply"
#endif

#if !defined(TCG_TARGET_HAS_v64) \
    && !defined(TCG_TARGET_HAS_v128) \
    && !defined(TCG_TARGET_HAS_v256)
#define TCG_TARGET_MAYBE_vec 0
#define TCG_TARGET_HAS_abs_vec 0
#define TCG_TARGET_HAS_neg_vec 0
#define TCG_TARGET_HAS_not_vec 0
#define TCG_TARGET_HAS_andc_vec 0
#define TCG_TARGET_HAS_orc_vec 0
#define TCG_TARGET_HAS_shi_vec 0
#define TCG_TARGET_HAS_shs_vec 0
#define TCG_TARGET_HAS_shv_vec 0
#define TCG_TARGET_HAS_mul_vec 0
#define TCG_TARGET_HAS_sat_vec 0
#define TCG_TARGET_HAS_minmax_vec 0
#define TCG_TARGET_HAS_bitsel_vec 0
#define TCG_TARGET_HAS_cmpsel_vec 0
#else
#define TCG_TARGET_MAYBE_vec 1
#endif
#ifndef TCG_TARGET_HAS_v64
#define TCG_TARGET_HAS_v64 0
#endif
#ifndef TCG_TARGET_HAS_v128
#define TCG_TARGET_HAS_v128 0
#endif
#ifndef TCG_TARGET_HAS_v256
#define TCG_TARGET_HAS_v256 0
#endif

#ifndef TARGET_INSN_START_EXTRA_WORDS
# define TARGET_INSN_START_WORDS 1
#else
# define TARGET_INSN_START_WORDS (1 + TARGET_INSN_START_EXTRA_WORDS)
#endif

typedef enum TCGOpcode {
#define DEF(name, oargs, iargs, cargs, flags) INDEX_op_ ## name,
#include "tcg/tcg-opc.h"
#undef DEF
    NB_OPS,
} TCGOpcode;

#define tcg_regset_set_reg(d, r)   ((d) |= (TCGRegSet)1 << (r))
#define tcg_regset_reset_reg(d, r) ((d) &= ~((TCGRegSet)1 << (r)))
#define tcg_regset_test_reg(d, r)  (((d) >> (r)) & 1)

#ifndef TCG_TARGET_INSN_UNIT_SIZE
# error "Missing TCG_TARGET_INSN_UNIT_SIZE"
#elif TCG_TARGET_INSN_UNIT_SIZE == 1
typedef uint8_t tcg_insn_unit;
#elif TCG_TARGET_INSN_UNIT_SIZE == 2
typedef uint16_t tcg_insn_unit;
#elif TCG_TARGET_INSN_UNIT_SIZE == 4
typedef uint32_t tcg_insn_unit;
#elif TCG_TARGET_INSN_UNIT_SIZE == 8
typedef uint64_t tcg_insn_unit;
#else
/* The port better have done this.  */
#endif


#if defined CONFIG_DEBUG_TCG || defined QEMU_STATIC_ANALYSIS
# define tcg_debug_assert(X) do { assert(X); } while (0)
#else
# define tcg_debug_assert(X) \
    do { if (!(X)) { __builtin_unreachable(); } } while (0)
#endif

typedef struct TCGRelocation TCGRelocation;
struct TCGRelocation {
    QSIMPLEQ_ENTRY(TCGRelocation) next;
    tcg_insn_unit *ptr;
    intptr_t addend;
    int type;
};

typedef struct TCGLabel TCGLabel;
struct TCGLabel {
    unsigned present : 1;
    unsigned has_value : 1;
    unsigned id : 14;
    unsigned refs : 16;
    union {
        uintptr_t value;
        tcg_insn_unit *value_ptr;
    } u;
    QSIMPLEQ_HEAD(, TCGRelocation) relocs;
    QSIMPLEQ_ENTRY(TCGLabel) next;
};

typedef struct TCGPool {
    struct TCGPool *next;
    int size;
    uint8_t data[] __attribute__ ((aligned));
} TCGPool;

#define TCG_POOL_CHUNK_SIZE 32768

#define TCG_MAX_TEMPS 512
#define TCG_MAX_INSNS 512

/* when the size of the arguments of a called function is smaller than
   this value, they are statically allocated in the TB stack frame */
#define TCG_STATIC_CALL_ARGS_SIZE 128

typedef enum TCGType {
    TCG_TYPE_I32,
    TCG_TYPE_I64,

    TCG_TYPE_V64,
    TCG_TYPE_V128,
    TCG_TYPE_V256,

    TCG_TYPE_COUNT, /* number of different types */

    /* An alias for the size of the host register.  */
#if TCG_TARGET_REG_BITS == 32
    TCG_TYPE_REG = TCG_TYPE_I32,
#else
    TCG_TYPE_REG = TCG_TYPE_I64,
#endif

    /* An alias for the size of the native pointer.  */
#if UINTPTR_MAX == UINT32_MAX
    TCG_TYPE_PTR = TCG_TYPE_I32,
#else
    TCG_TYPE_PTR = TCG_TYPE_I64,
#endif

    /* An alias for the size of the target "long", aka register.  */
#if TARGET_LONG_BITS == 64
    TCG_TYPE_TL = TCG_TYPE_I64,
#else
    TCG_TYPE_TL = TCG_TYPE_I32,
#endif
} TCGType;

/**
 * get_alignment_bits
 * @memop: MemOp value
 *
 * Extract the alignment size from the memop.
 */
static inline unsigned get_alignment_bits(MemOp memop)
{
    unsigned a = memop & MO_AMASK;

    if (a == MO_UNALN) {
        /* No alignment required.  */
        a = 0;
    } else if (a == MO_ALIGN) {
        /* A natural alignment requirement.  */
        a = memop & MO_SIZE;
    } else {
        /* A specific alignment requirement.  */
        a = a >> MO_ASHIFT;
    }
#if defined(CONFIG_SOFTMMU)
    /* The requested alignment cannot overlap the TLB flags.  */
    tcg_debug_assert((TLB_FLAGS_MASK & ((1 << a) - 1)) == 0);
#endif
    return a;
}
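
/*
 * Illustrative sketch (not part of the original header); the MemOp values
 * below assume the definitions in "exec/memop.h":
 *
 *   get_alignment_bits(MO_32 | MO_UNALN)    -> 0  (no alignment required)
 *   get_alignment_bits(MO_32 | MO_ALIGN)    -> 2  (natural 4-byte alignment)
 *   get_alignment_bits(MO_64 | MO_ALIGN_16) -> 4  (explicit 16-byte alignment)
 *
 * The result is the log2 of the required alignment in bytes.
 */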

typedef tcg_target_ulong TCGArg;

/* Define type and accessor macros for TCG variables.

   TCG variables are the inputs and outputs of TCG ops, as described
   in tcg/README. Target CPU front-end code uses these types to deal
   with TCG variables as it emits TCG code via the tcg_gen_* functions.
   They come in several flavours:
    * TCGv_i32 : 32 bit integer type
    * TCGv_i64 : 64 bit integer type
    * TCGv_ptr : a host pointer type
    * TCGv_vec : a host vector type; the exact size is not exposed
                 to the CPU front-end code.
    * TCGv : an integer type the same size as target_ulong
             (an alias for either TCGv_i32 or TCGv_i64)
   The compiler's type checking will complain if you mix them
   up and pass the wrong sized TCGv to a function.

   Users of tcg_gen_* don't need to know about any of the internal
   details of these, and should treat them as opaque types.
   You won't be able to look inside them in a debugger either.

   Internal implementation details follow:

   Note that there is no definition of the structs TCGv_i32_d etc anywhere.
   This is deliberate, because the values we store in variables of type
   TCGv_i32 are not really pointers-to-structures. They're just small
   integers, but keeping them in pointer types like this means that the
   compiler will complain if you accidentally pass a TCGv_i32 to a
   function which takes a TCGv_i64, and so on. Only the internals of
   TCG need to care about the actual contents of the types.  */

typedef struct TCGv_i32_d *TCGv_i32;
typedef struct TCGv_i64_d *TCGv_i64;
typedef struct TCGv_ptr_d *TCGv_ptr;
typedef struct TCGv_vec_d *TCGv_vec;
typedef TCGv_ptr TCGv_env;
#if TARGET_LONG_BITS == 32
#define TCGv TCGv_i32
#elif TARGET_LONG_BITS == 64
#define TCGv TCGv_i64
#else
#error Unhandled TARGET_LONG_BITS value
#endif

/* call flags */
/* Helper does not read globals (either directly or through an exception). It
   implies TCG_CALL_NO_WRITE_GLOBALS. */
#define TCG_CALL_NO_READ_GLOBALS 0x0001
/* Helper does not write globals */
#define TCG_CALL_NO_WRITE_GLOBALS 0x0002
/* Helper can be safely suppressed if the return value is not used. */
#define TCG_CALL_NO_SIDE_EFFECTS 0x0004
/* Helper is QEMU_NORETURN.  */
#define TCG_CALL_NO_RETURN 0x0008

/* convenience version of most used call flags */
#define TCG_CALL_NO_RWG    TCG_CALL_NO_READ_GLOBALS
#define TCG_CALL_NO_WG     TCG_CALL_NO_WRITE_GLOBALS
#define TCG_CALL_NO_SE     TCG_CALL_NO_SIDE_EFFECTS
#define TCG_CALL_NO_RWG_SE (TCG_CALL_NO_RWG | TCG_CALL_NO_SE)
#define TCG_CALL_NO_WG_SE  (TCG_CALL_NO_WG | TCG_CALL_NO_SE)

/* Used to align parameters.  See the comment before tcgv_i32_temp.  */
#define TCG_CALL_DUMMY_ARG ((TCGArg)0)

/* Conditions.  Note that these are laid out for easy manipulation by
   the functions below:
     bit 0 is used for inverting;
     bit 1 is signed,
     bit 2 is unsigned,
     bit 3 is used with bit 0 for swapping signed/unsigned.  */
typedef enum {
    /* non-signed */
    TCG_COND_NEVER  = 0 | 0 | 0 | 0,
    TCG_COND_ALWAYS = 0 | 0 | 0 | 1,
    TCG_COND_EQ     = 8 | 0 | 0 | 0,
    TCG_COND_NE     = 8 | 0 | 0 | 1,
    /* signed */
    TCG_COND_LT     = 0 | 0 | 2 | 0,
    TCG_COND_GE     = 0 | 0 | 2 | 1,
    TCG_COND_LE     = 8 | 0 | 2 | 0,
    TCG_COND_GT     = 8 | 0 | 2 | 1,
    /* unsigned */
    TCG_COND_LTU    = 0 | 4 | 0 | 0,
    TCG_COND_GEU    = 0 | 4 | 0 | 1,
    TCG_COND_LEU    = 8 | 4 | 0 | 0,
    TCG_COND_GTU    = 8 | 4 | 0 | 1,
} TCGCond;

/* Invert the sense of the comparison.  */
static inline TCGCond tcg_invert_cond(TCGCond c)
{
    return (TCGCond)(c ^ 1);
}

/* Swap the operands in a comparison.  */
static inline TCGCond tcg_swap_cond(TCGCond c)
{
    return c & 6 ? (TCGCond)(c ^ 9) : c;
}

/* Create an "unsigned" version of a "signed" comparison.  */
static inline TCGCond tcg_unsigned_cond(TCGCond c)
{
    return c & 2 ? (TCGCond)(c ^ 6) : c;
}

/* Create a "signed" version of an "unsigned" comparison.  */
static inline TCGCond tcg_signed_cond(TCGCond c)
{
    return c & 4 ? (TCGCond)(c ^ 6) : c;
}

/* Must a comparison be considered unsigned?  */
static inline bool is_unsigned_cond(TCGCond c)
{
    return (c & 4) != 0;
}

/* Create a "high" version of a double-word comparison.
   This removes equality from a LTE or GTE comparison.  */
static inline TCGCond tcg_high_cond(TCGCond c)
{
    switch (c) {
    case TCG_COND_GE:
    case TCG_COND_LE:
    case TCG_COND_GEU:
    case TCG_COND_LEU:
        return (TCGCond)(c ^ 8);
    default:
        return c;
    }
}
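
/*
 * Illustrative sketch (not part of the original header), showing how the
 * bit layout above drives these helpers:
 *
 *   tcg_invert_cond(TCG_COND_EQ)   == TCG_COND_NE   (flip bit 0)
 *   tcg_swap_cond(TCG_COND_LT)     == TCG_COND_GT   (c ^ 9 toggles bits 0, 3)
 *   tcg_unsigned_cond(TCG_COND_LT) == TCG_COND_LTU  (bit 1 moved to bit 2)
 *   tcg_high_cond(TCG_COND_LE)     == TCG_COND_LT   (drop the equality)
 *
 * Conditions without an ordering component (EQ, NE, ALWAYS, NEVER) are
 * returned unchanged by tcg_swap_cond, tcg_unsigned_cond and tcg_signed_cond.
 */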

typedef enum TCGTempVal {
    TEMP_VAL_DEAD,
    TEMP_VAL_REG,
    TEMP_VAL_MEM,
    TEMP_VAL_CONST,
} TCGTempVal;

typedef struct TCGTemp {
    TCGReg reg:8;
    TCGTempVal val_type:8;
    TCGType base_type:8;
    TCGType type:8;
    unsigned int fixed_reg:1;
    unsigned int indirect_reg:1;
    unsigned int indirect_base:1;
    unsigned int mem_coherent:1;
    unsigned int mem_allocated:1;
    /* If true, the temp is saved across both basic blocks and
       translation blocks.  */
    unsigned int temp_global:1;
    /* If true, the temp is saved across basic blocks but dead
       at the end of translation blocks.  If false, the temp is
       dead at the end of basic blocks.  */
    unsigned int temp_local:1;
    unsigned int temp_allocated:1;

    tcg_target_long val;
    struct TCGTemp *mem_base;
    intptr_t mem_offset;
    const char *name;

    /* Pass-specific information that can be stored for a temporary.
       One word worth of integer data, and one pointer to data
       allocated separately.  */
    uintptr_t state;
    void *state_ptr;
} TCGTemp;

typedef struct TCGContext TCGContext;

typedef struct TCGTempSet {
    unsigned long l[BITS_TO_LONGS(TCG_MAX_TEMPS)];
} TCGTempSet;

/* While we limit helpers to 6 arguments, for 32-bit hosts, with padding,
   this implies a max of 6*2 (64-bit in) + 2 (64-bit out) = 14 operands.
   There are never more than 2 outputs, which means that we can store all
   dead + sync data within 16 bits.  */
#define DEAD_ARG 4
#define SYNC_ARG 1
typedef uint16_t TCGLifeData;

/* The layout here is designed to avoid a bitfield crossing of
   a 32-bit boundary, which would cause GCC to add extra padding.  */
typedef struct TCGOp {
    TCGOpcode opc   : 8;        /*  8 */

    /* Parameters for this opcode.  See below.  */
    unsigned param1 : 4;        /* 12 */
    unsigned param2 : 4;        /* 16 */

    /* Lifetime data of the operands.  */
    unsigned life   : 16;       /* 32 */

    /* Next and previous opcodes.  */
    QTAILQ_ENTRY(TCGOp) link;
#ifdef CONFIG_PLUGIN
    QSIMPLEQ_ENTRY(TCGOp) plugin_link;
#endif

    /* Arguments for the opcode.  */
    TCGArg args[MAX_OPC_PARAM];

    /* Register preferences for the output(s).  */
    TCGRegSet output_pref[2];
} TCGOp;

#define TCGOP_CALLI(X) (X)->param1
#define TCGOP_CALLO(X) (X)->param2

#define TCGOP_VECL(X)  (X)->param1
#define TCGOP_VECE(X)  (X)->param2

/* Make sure operands fit in the bitfields above.  */
QEMU_BUILD_BUG_ON(NB_OPS > (1 << 8));

typedef struct TCGProfile {
    int64_t cpu_exec_time;
    int64_t tb_count1;
    int64_t tb_count;
    int64_t op_count; /* total insn count */
    int op_count_max; /* max insn per TB */
    int temp_count_max;
    int64_t temp_count;
    int64_t del_op_count;
    int64_t code_in_len;
    int64_t code_out_len;
    int64_t search_out_len;
    int64_t interm_time;
    int64_t code_time;
    int64_t la_time;
    int64_t opt_time;
    int64_t restore_count;
    int64_t restore_time;
    int64_t table_op_count[NB_OPS];
} TCGProfile;

struct TCGContext {
    uint8_t *pool_cur, *pool_end;
    TCGPool *pool_first, *pool_current, *pool_first_large;
    int nb_labels;
    int nb_globals;
    int nb_temps;
    int nb_indirects;
    int nb_ops;

    /* goto_tb support */
    tcg_insn_unit *code_buf;
    uint16_t *tb_jmp_reset_offset; /* tb->jmp_reset_offset */
    uintptr_t *tb_jmp_insn_offset; /* tb->jmp_target_arg if direct_jump */
    uintptr_t *tb_jmp_target_addr; /* tb->jmp_target_arg if !direct_jump */

    TCGRegSet reserved_regs;
    uint32_t tb_cflags; /* cflags of the current TB */
    intptr_t current_frame_offset;
    intptr_t frame_start;
    intptr_t frame_end;
    TCGTemp *frame_temp;

    tcg_insn_unit *code_ptr;

#ifdef CONFIG_PROFILER
    TCGProfile prof;
#endif

#ifdef CONFIG_DEBUG_TCG
    int temps_in_use;
    int goto_tb_issue_mask;
    const TCGOpcode *vecop_list;
#endif

    /* Code generation.  Note that we specifically do not use tcg_insn_unit
       here, because there's too much arithmetic throughout that relies
       on addition and subtraction working on bytes.  Rely on the GCC
       extension that allows arithmetic on void*.  */
    void *code_gen_prologue;
    void *code_gen_epilogue;
    void *code_gen_buffer;
    size_t code_gen_buffer_size;
    void *code_gen_ptr;
    void *data_gen_ptr;

    /* Threshold to flush the translated code buffer.  */
    void *code_gen_highwater;

    size_t tb_phys_invalidate_count;

    /* Track which vCPU triggers events */
    CPUState *cpu; /* *_trans */

    /* These structures are private to tcg-target.inc.c.  */
#ifdef TCG_TARGET_NEED_LDST_LABELS
    QSIMPLEQ_HEAD(, TCGLabelQemuLdst) ldst_labels;
#endif
#ifdef TCG_TARGET_NEED_POOL_LABELS
    struct TCGLabelPoolData *pool_labels;
#endif

    TCGLabel *exitreq_label;

#ifdef CONFIG_PLUGIN
    /*
     * We keep one plugin_tb struct per TCGContext.  Note that on every TB
     * translation we clear but do not free its contents; this way we
     * avoid a lot of malloc/free churn, since after a few TB's it's
     * unlikely that we'll need to allocate either more instructions or more
     * space for instructions (for variable-instruction-length ISAs).
     */
    struct qemu_plugin_tb *plugin_tb;

    /* descriptor of the instruction being translated */
    struct qemu_plugin_insn *plugin_insn;

    /* list to quickly access the injected ops */
    QSIMPLEQ_HEAD(, TCGOp) plugin_ops;
#endif

    TCGTempSet free_temps[TCG_TYPE_COUNT * 2];
    TCGTemp temps[TCG_MAX_TEMPS]; /* globals first, temps after */

    QTAILQ_HEAD(, TCGOp) ops, free_ops;
    QSIMPLEQ_HEAD(, TCGLabel) labels;

    /* Tells which temporary holds a given register.
       It does not take into account fixed registers */
    TCGTemp *reg_to_temp[TCG_TARGET_NB_REGS];

    uint16_t gen_insn_end_off[TCG_MAX_INSNS];
    target_ulong gen_insn_data[TCG_MAX_INSNS][TARGET_INSN_START_WORDS];
};

extern TCGContext tcg_init_ctx;
extern __thread TCGContext *tcg_ctx;
extern TCGv_env cpu_env;

static inline size_t temp_idx(TCGTemp *ts)
{
    ptrdiff_t n = ts - tcg_ctx->temps;
    tcg_debug_assert(n >= 0 && n < tcg_ctx->nb_temps);
    return n;
}

static inline TCGArg temp_arg(TCGTemp *ts)
{
    return (uintptr_t)ts;
}

static inline TCGTemp *arg_temp(TCGArg a)
{
    return (TCGTemp *)(uintptr_t)a;
}

/* Using the offset of a temporary, relative to TCGContext, rather than
   its index means that we don't use 0.  That leaves offset 0 free for
   a NULL representation without having to leave index 0 unused.  */
static inline TCGTemp *tcgv_i32_temp(TCGv_i32 v)
{
    uintptr_t o = (uintptr_t)v;
    TCGTemp *t = (void *)tcg_ctx + o;
    tcg_debug_assert(offsetof(TCGContext, temps[temp_idx(t)]) == o);
    return t;
}
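
/*
 * Illustrative sketch (not part of the original header): a TCGv_i32 value
 * is nothing more than the byte offset of its TCGTemp within *tcg_ctx,
 * cast to an opaque pointer type, so the two directions are inverses:
 *
 *   TCGTemp  *ts = tcgv_i32_temp(v);     offset -> &tcg_ctx->temps[i]
 *   TCGv_i32  v2 = temp_tcgv_i32(ts);    &tcg_ctx->temps[i] -> offset
 *
 * and v2 == v.  Because temps[] starts at a non-zero offset inside
 * TCGContext, a zero value never names a real temporary and can serve as
 * NULL / TCG_CALL_DUMMY_ARG.
 */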

static inline TCGTemp *tcgv_i64_temp(TCGv_i64 v)
{
    return tcgv_i32_temp((TCGv_i32)v);
}

static inline TCGTemp *tcgv_ptr_temp(TCGv_ptr v)
{
    return tcgv_i32_temp((TCGv_i32)v);
}

static inline TCGTemp *tcgv_vec_temp(TCGv_vec v)
{
    return tcgv_i32_temp((TCGv_i32)v);
}

static inline TCGArg tcgv_i32_arg(TCGv_i32 v)
{
    return temp_arg(tcgv_i32_temp(v));
}

static inline TCGArg tcgv_i64_arg(TCGv_i64 v)
{
    return temp_arg(tcgv_i64_temp(v));
}

static inline TCGArg tcgv_ptr_arg(TCGv_ptr v)
{
    return temp_arg(tcgv_ptr_temp(v));
}

static inline TCGArg tcgv_vec_arg(TCGv_vec v)
{
    return temp_arg(tcgv_vec_temp(v));
}

static inline TCGv_i32 temp_tcgv_i32(TCGTemp *t)
{
    (void)temp_idx(t); /* trigger embedded assert */
    return (TCGv_i32)((void *)t - (void *)tcg_ctx);
}

static inline TCGv_i64 temp_tcgv_i64(TCGTemp *t)
{
    return (TCGv_i64)temp_tcgv_i32(t);
}

static inline TCGv_ptr temp_tcgv_ptr(TCGTemp *t)
{
    return (TCGv_ptr)temp_tcgv_i32(t);
}

static inline TCGv_vec temp_tcgv_vec(TCGTemp *t)
{
    return (TCGv_vec)temp_tcgv_i32(t);
}

#if TCG_TARGET_REG_BITS == 32
static inline TCGv_i32 TCGV_LOW(TCGv_i64 t)
{
    return temp_tcgv_i32(tcgv_i64_temp(t));
}

static inline TCGv_i32 TCGV_HIGH(TCGv_i64 t)
{
    return temp_tcgv_i32(tcgv_i64_temp(t) + 1);
}
#endif

static inline void tcg_set_insn_param(TCGOp *op, int arg, TCGArg v)
{
    op->args[arg] = v;
}

static inline void tcg_set_insn_start_param(TCGOp *op, int arg, target_ulong v)
{
#if TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
    tcg_set_insn_param(op, arg, v);
#else
    tcg_set_insn_param(op, arg * 2, v);
    tcg_set_insn_param(op, arg * 2 + 1, v >> 32);
#endif
}

/* The last op that was emitted.  */
static inline TCGOp *tcg_last_op(void)
{
    return QTAILQ_LAST(&tcg_ctx->ops);
}

/* Test for whether to terminate the TB for using too many opcodes.  */
static inline bool tcg_op_buf_full(void)
{
    /* This is not a hard limit, it merely stops translation when
     * we have produced "enough" opcodes.  We want to limit TB size
     * such that a RISC host can reasonably use a 16-bit signed
     * branch within the TB.  We also need to be mindful of the
     * 16-bit unsigned offsets, TranslationBlock.jmp_reset_offset[]
     * and TCGContext.gen_insn_end_off[].
     */
    return tcg_ctx->nb_ops >= 4000;
}

/* pool based memory allocation */

/* user-mode: mmap_lock must be held for tcg_malloc_internal.  */
void *tcg_malloc_internal(TCGContext *s, int size);
void tcg_pool_reset(TCGContext *s);
TranslationBlock *tcg_tb_alloc(TCGContext *s);

void tcg_region_init(void);
void tcg_region_reset_all(void);

size_t tcg_code_size(void);
size_t tcg_code_capacity(void);

void tcg_tb_insert(TranslationBlock *tb);
void tcg_tb_remove(TranslationBlock *tb);
size_t tcg_tb_phys_invalidate_count(void);
TranslationBlock *tcg_tb_lookup(uintptr_t tc_ptr);
void tcg_tb_foreach(GTraverseFunc func, gpointer user_data);
size_t tcg_nb_tbs(void);

/* user-mode: Called with mmap_lock held.  */
static inline void *tcg_malloc(int size)
{
    TCGContext *s = tcg_ctx;
    uint8_t *ptr, *ptr_end;

    /* ??? This is a weak placeholder for minimum malloc alignment.  */
    size = QEMU_ALIGN_UP(size, 8);

    ptr = s->pool_cur;
    ptr_end = ptr + size;
    if (unlikely(ptr_end > s->pool_end)) {
        return tcg_malloc_internal(tcg_ctx, size);
    } else {
        s->pool_cur = ptr_end;
        return ptr;
    }
}

void tcg_context_init(TCGContext *s);
void tcg_register_thread(void);
void tcg_prologue_init(TCGContext *s);
void tcg_func_start(TCGContext *s);

int tcg_gen_code(TCGContext *s, TranslationBlock *tb);

void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size);

TCGTemp *tcg_global_mem_new_internal(TCGType, TCGv_ptr,
                                     intptr_t, const char *);
TCGTemp *tcg_temp_new_internal(TCGType, bool);
void tcg_temp_free_internal(TCGTemp *);
TCGv_vec tcg_temp_new_vec(TCGType type);
TCGv_vec tcg_temp_new_vec_matching(TCGv_vec match);

static inline void tcg_temp_free_i32(TCGv_i32 arg)
{
    tcg_temp_free_internal(tcgv_i32_temp(arg));
}

static inline void tcg_temp_free_i64(TCGv_i64 arg)
{
    tcg_temp_free_internal(tcgv_i64_temp(arg));
}

static inline void tcg_temp_free_ptr(TCGv_ptr arg)
{
    tcg_temp_free_internal(tcgv_ptr_temp(arg));
}

static inline void tcg_temp_free_vec(TCGv_vec arg)
{
    tcg_temp_free_internal(tcgv_vec_temp(arg));
}

static inline TCGv_i32 tcg_global_mem_new_i32(TCGv_ptr reg, intptr_t offset,
                                              const char *name)
{
    TCGTemp *t = tcg_global_mem_new_internal(TCG_TYPE_I32, reg, offset, name);
    return temp_tcgv_i32(t);
}

static inline TCGv_i32 tcg_temp_new_i32(void)
{
    TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I32, false);
    return temp_tcgv_i32(t);
}

static inline TCGv_i32 tcg_temp_local_new_i32(void)
{
    TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I32, true);
    return temp_tcgv_i32(t);
}

static inline TCGv_i64 tcg_global_mem_new_i64(TCGv_ptr reg, intptr_t offset,
                                              const char *name)
{
    TCGTemp *t = tcg_global_mem_new_internal(TCG_TYPE_I64, reg, offset, name);
    return temp_tcgv_i64(t);
}

static inline TCGv_i64 tcg_temp_new_i64(void)
{
    TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I64, false);
    return temp_tcgv_i64(t);
}

static inline TCGv_i64 tcg_temp_local_new_i64(void)
{
    TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I64, true);
    return temp_tcgv_i64(t);
}

static inline TCGv_ptr tcg_global_mem_new_ptr(TCGv_ptr reg, intptr_t offset,
                                              const char *name)
{
    TCGTemp *t = tcg_global_mem_new_internal(TCG_TYPE_PTR, reg, offset, name);
    return temp_tcgv_ptr(t);
}

static inline TCGv_ptr tcg_temp_new_ptr(void)
{
    TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_PTR, false);
    return temp_tcgv_ptr(t);
}

static inline TCGv_ptr tcg_temp_local_new_ptr(void)
{
    TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_PTR, true);
    return temp_tcgv_ptr(t);
}

#if defined(CONFIG_DEBUG_TCG)
/* If you call tcg_clear_temp_count() at the start of a section of
 * code which is not supposed to leak any TCG temporaries, then
 * calling tcg_check_temp_count() at the end of the section will
 * return 1 if the section did in fact leak a temporary.
 */
void tcg_clear_temp_count(void);
int tcg_check_temp_count(void);
#else
#define tcg_clear_temp_count() do { } while (0)
#define tcg_check_temp_count() 0
#endif
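
/*
 * Illustrative sketch (not part of the original header): typical front-end
 * usage of the allocation helpers above.  The function name and the use of
 * tcg_gen_add_i32 (from tcg-op.h) are only for illustration.
 *
 *   static void gen_example(TCGv_i32 dst, TCGv_i32 src)
 *   {
 *       TCGv_i32 tmp;
 *
 *       tcg_clear_temp_count();            (start of a leak-checked section)
 *       tmp = tcg_temp_new_i32();
 *       tcg_gen_add_i32(tmp, src, src);
 *       tcg_gen_add_i32(dst, tmp, src);
 *       tcg_temp_free_i32(tmp);            (every temp must be freed)
 *       assert(!tcg_check_temp_count());   (no temporaries leaked)
 *   }
 *
 * Globals live for the whole translation and are created once at init time,
 * e.g. tcg_global_mem_new_i32(cpu_env, offsetof(CPUFooState, reg), "reg"),
 * where CPUFooState stands in for the target's CPU state structure.
 */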

int64_t tcg_cpu_exec_time(void);
void tcg_dump_info(void);
void tcg_dump_op_count(void);

#define TCG_CT_ALIAS  0x80
#define TCG_CT_IALIAS 0x40
#define TCG_CT_NEWREG 0x20 /* output requires a new register */
#define TCG_CT_REG    0x01
#define TCG_CT_CONST  0x02 /* any constant of register size */

typedef struct TCGArgConstraint {
    uint16_t ct;
    uint8_t alias_index;
    union {
        TCGRegSet regs;
    } u;
} TCGArgConstraint;

#define TCG_MAX_OP_ARGS 16

/* Bits for TCGOpDef->flags, 8 bits available.  */
enum {
    /* Instruction exits the translation block.  */
    TCG_OPF_BB_EXIT      = 0x01,
    /* Instruction defines the end of a basic block.  */
    TCG_OPF_BB_END       = 0x02,
    /* Instruction clobbers call registers and potentially updates globals.  */
    TCG_OPF_CALL_CLOBBER = 0x04,
    /* Instruction has side effects: it cannot be removed if its outputs
       are not used, and might trigger exceptions.  */
    TCG_OPF_SIDE_EFFECTS = 0x08,
    /* Instruction operands are 64-bits (otherwise 32-bits).  */
    TCG_OPF_64BIT        = 0x10,
    /* Instruction is optional and not implemented by the host, or insn
       is generic and should not be implemented by the host.  */
    TCG_OPF_NOT_PRESENT  = 0x20,
    /* Instruction operands are vectors.  */
    TCG_OPF_VECTOR       = 0x40,
};

typedef struct TCGOpDef {
    const char *name;
    uint8_t nb_oargs, nb_iargs, nb_cargs, nb_args;
    uint8_t flags;
    TCGArgConstraint *args_ct;
    int *sorted_args;
#if defined(CONFIG_DEBUG_TCG)
    int used;
#endif
} TCGOpDef;

extern TCGOpDef tcg_op_defs[];
extern const size_t tcg_op_defs_max;

typedef struct TCGTargetOpDef {
    TCGOpcode op;
    const char *args_ct_str[TCG_MAX_OP_ARGS];
} TCGTargetOpDef;

#define tcg_abort() \
do {\
    fprintf(stderr, "%s:%d: tcg fatal error\n", __FILE__, __LINE__);\
    abort();\
} while (0)

bool tcg_op_supported(TCGOpcode op);

void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args);

TCGOp *tcg_emit_op(TCGOpcode opc);
void tcg_op_remove(TCGContext *s, TCGOp *op);
TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *op, TCGOpcode opc);
TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *op, TCGOpcode opc);

void tcg_optimize(TCGContext *s);

TCGv_i32 tcg_const_i32(int32_t val);
TCGv_i64 tcg_const_i64(int64_t val);
TCGv_i32 tcg_const_local_i32(int32_t val);
TCGv_i64 tcg_const_local_i64(int64_t val);
TCGv_vec tcg_const_zeros_vec(TCGType);
TCGv_vec tcg_const_ones_vec(TCGType);
TCGv_vec tcg_const_zeros_vec_matching(TCGv_vec);
TCGv_vec tcg_const_ones_vec_matching(TCGv_vec);

#if UINTPTR_MAX == UINT32_MAX
# define tcg_const_ptr(x)       ((TCGv_ptr)tcg_const_i32((intptr_t)(x)))
# define tcg_const_local_ptr(x) ((TCGv_ptr)tcg_const_local_i32((intptr_t)(x)))
#else
# define tcg_const_ptr(x)       ((TCGv_ptr)tcg_const_i64((intptr_t)(x)))
# define tcg_const_local_ptr(x) ((TCGv_ptr)tcg_const_local_i64((intptr_t)(x)))
#endif

TCGLabel *gen_new_label(void);

/**
 * label_arg
 * @l: label
 *
 * Encode a label for storage in the TCG opcode stream.
 */

static inline TCGArg label_arg(TCGLabel *l)
{
    return (uintptr_t)l;
}

/**
 * arg_label
 * @i: value
 *
 * The opposite of label_arg.  Retrieve a label from the
 * encoding of the TCG opcode stream.
 */

static inline TCGLabel *arg_label(TCGArg i)
{
    return (TCGLabel *)(uintptr_t)i;
}

/**
 * tcg_ptr_byte_diff
 * @a, @b: addresses to be differenced
 *
 * There are many places within the TCG backends where we need a byte
 * difference between two pointers.  While this can be accomplished
 * with local casting, it's easy to get wrong -- especially if one is
 * concerned with the signedness of the result.
 *
 * This version relies on GCC's void pointer arithmetic to get the
 * correct result.
 */

static inline ptrdiff_t tcg_ptr_byte_diff(void *a, void *b)
{
    return a - b;
}

/**
 * tcg_pcrel_diff
 * @s: the tcg context
 * @target: address of the target
 *
 * Produce a pc-relative difference, from the current code_ptr
 * to the destination address.
 */

static inline ptrdiff_t tcg_pcrel_diff(TCGContext *s, void *target)
{
    return tcg_ptr_byte_diff(target, s->code_ptr);
}

/**
 * tcg_current_code_size
 * @s: the tcg context
 *
 * Compute the current code size within the translation block.
 * This is used to fill in qemu's data structures for goto_tb.
 */

static inline size_t tcg_current_code_size(TCGContext *s)
{
    return tcg_ptr_byte_diff(s->code_ptr, s->code_buf);
}

/* Combine the MemOp and mmu_idx parameters into a single value.  */
typedef uint32_t TCGMemOpIdx;

/**
 * make_memop_idx
 * @op: memory operation
 * @idx: mmu index
 *
 * Encode these values into a single parameter.
 */
static inline TCGMemOpIdx make_memop_idx(MemOp op, unsigned idx)
{
    tcg_debug_assert(idx <= 15);
    return (op << 4) | idx;
}

/**
 * get_memop
 * @oi: combined op/idx parameter
 *
 * Extract the memory operation from the combined value.
 */
static inline MemOp get_memop(TCGMemOpIdx oi)
{
    return oi >> 4;
}

/**
 * get_mmuidx
 * @oi: combined op/idx parameter
 *
 * Extract the mmu index from the combined value.
 */
static inline unsigned get_mmuidx(TCGMemOpIdx oi)
{
    return oi & 15;
}
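
/*
 * Illustrative sketch (not part of the original header): the encoding is a
 * simple shift-and-or, so the three helpers round-trip, e.g.
 *
 *   TCGMemOpIdx oi = make_memop_idx(MO_LEUL | MO_ALIGN, 3);
 *   get_memop(oi)  == MO_LEUL | MO_ALIGN    (the MemOp, from bits 4 and up)
 *   get_mmuidx(oi) == 3                     (the mmu index, in the low 4 bits)
 *
 * The assert in make_memop_idx is what enforces the 4-bit limit on mmu_idx.
 */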

/**
 * tcg_qemu_tb_exec:
 * @env: pointer to CPUArchState for the CPU
 * @tb_ptr: address of generated code for the TB to execute
 *
 * Start executing code from a given translation block.
 * Where translation blocks have been linked, execution
 * may proceed from the given TB into successive ones.
 * Control eventually returns only when some action is needed
 * from the top-level loop: either control must pass to a TB
 * which has not yet been directly linked, or an asynchronous
 * event such as an interrupt needs handling.
 *
 * Return: The return value is the value passed to the corresponding
 * tcg_gen_exit_tb() at translation time of the last TB attempted to execute.
 * The value is either zero or a 4-byte aligned pointer to that TB combined
 * with additional information in its two least significant bits. The
 * additional information is encoded as follows:
 *  0, 1: the link between this TB and the next is via the specified
 *        TB index (0 or 1). That is, we left the TB via (the equivalent
 *        of) "goto_tb <index>". The main loop uses this to determine
 *        how to link the TB just executed to the next.
 *  2:    we are using instruction counting code generation, and we
 *        did not start executing this TB because the instruction counter
 *        would hit zero midway through it. In this case the pointer
 *        returned is the TB we were about to execute, and the caller must
 *        arrange to execute the remaining count of instructions.
 *  3:    we stopped because the CPU's exit_request flag was set
 *        (usually meaning that there is an interrupt that needs to be
 *        handled). The pointer returned is the TB we were about to execute
 *        when we noticed the pending exit request.
 *
 * If the bottom two bits indicate an exit-via-index then the CPU
 * state is correctly synchronised and ready for execution of the next
 * TB (and in particular the guest PC is the address to execute next).
 * Otherwise, we gave up on execution of this TB before it started, and
 * the caller must fix up the CPU state by calling the CPU's
 * synchronize_from_tb() method with the TB pointer we return (falling
 * back to calling the CPU's set_pc method with tb->pc if no
 * synchronize_from_tb() method exists).
 *
 * Note that TCG targets may use a different definition of tcg_qemu_tb_exec
 * to this default (which just calls the prologue.code emitted by
 * tcg_target_qemu_prologue()).
 */
#define TB_EXIT_MASK      3
#define TB_EXIT_IDX0      0
#define TB_EXIT_IDX1      1
#define TB_EXIT_IDXMAX    1
#define TB_EXIT_REQUESTED 3

#ifdef HAVE_TCG_QEMU_TB_EXEC
uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr);
#else
# define tcg_qemu_tb_exec(env, tb_ptr) \
    ((uintptr_t (*)(void *, void *))tcg_ctx->code_gen_prologue)(env, tb_ptr)
#endif
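
/*
 * Illustrative sketch (not part of the original header): how a main loop
 * can decode the return value described above.  "last_tb" and "tb_exit"
 * are just local names for the two components.
 *
 *   uintptr_t ret = tcg_qemu_tb_exec(env, tb_ptr);
 *   TranslationBlock *last_tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
 *   int tb_exit = ret & TB_EXIT_MASK;
 *
 * If tb_exit <= TB_EXIT_IDXMAX we left via "goto_tb <tb_exit>" and the CPU
 * state is already synchronised; otherwise (TB_EXIT_REQUESTED, or 2 for the
 * icount case) the caller must synchronise the state from last_tb first.
 */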

void tcg_register_jit(void *buf, size_t buf_size);

#if TCG_TARGET_MAYBE_vec
/* Return zero if the tuple (opc, type, vece) is unsupportable;
   return > 0 if it is directly supportable;
   return < 0 if we must call tcg_expand_vec_op.  */
int tcg_can_emit_vec_op(TCGOpcode, TCGType, unsigned);
#else
static inline int tcg_can_emit_vec_op(TCGOpcode o, TCGType t, unsigned ve)
{
    return 0;
}
#endif

/* Expand the tuple (opc, type, vece) on the given arguments.  */
void tcg_expand_vec_op(TCGOpcode, TCGType, unsigned, TCGArg, ...);

/* Replicate a constant C according to the log2 of the element size.  */
uint64_t dup_const(unsigned vece, uint64_t c);

#define dup_const(VECE, C)                                         \
    (__builtin_constant_p(VECE)                                    \
     ? (  (VECE) == MO_8  ? 0x0101010101010101ull * (uint8_t)(C)   \
        : (VECE) == MO_16 ? 0x0001000100010001ull * (uint16_t)(C)  \
        : (VECE) == MO_32 ? 0x0000000100000001ull * (uint32_t)(C)  \
        : dup_const(VECE, C))                                      \
     : dup_const(VECE, C))
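
/*
 * Illustrative sketch (not part of the original header): dup_const fills a
 * 64-bit value with copies of the element, using the constant-folded
 * multiply when the element size is known at compile time, e.g.
 *
 *   dup_const(MO_8,  0xab)       == 0xababababababababull
 *   dup_const(MO_16, 0x1234)     == 0x1234123412341234ull
 *   dup_const(MO_32, 0xdeadbeef) == 0xdeadbeefdeadbeefull
 *   dup_const(MO_64, x)          == x  (handled by the out-of-line function)
 */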

/*
 * Memory helpers that will be used by TCG generated code.
 */
#ifdef CONFIG_SOFTMMU
/* Value zero-extended to tcg register size.  */
tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
                                     TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
                           TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
                           TCGMemOpIdx oi, uintptr_t retaddr);

/* Value sign-extended to tcg register size.  */
tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
                                     TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);

void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
                        TCGMemOpIdx oi, uintptr_t retaddr);
void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr);
void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr);
void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr);
void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr);
void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr);
void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr);

/* Temporary aliases until backends are converted.  */
#ifdef TARGET_WORDS_BIGENDIAN
# define helper_ret_ldsw_mmu helper_be_ldsw_mmu
# define helper_ret_lduw_mmu helper_be_lduw_mmu
# define helper_ret_ldsl_mmu helper_be_ldsl_mmu
# define helper_ret_ldul_mmu helper_be_ldul_mmu
# define helper_ret_ldl_mmu  helper_be_ldul_mmu
# define helper_ret_ldq_mmu  helper_be_ldq_mmu
# define helper_ret_stw_mmu  helper_be_stw_mmu
# define helper_ret_stl_mmu  helper_be_stl_mmu
# define helper_ret_stq_mmu  helper_be_stq_mmu
#else
# define helper_ret_ldsw_mmu helper_le_ldsw_mmu
# define helper_ret_lduw_mmu helper_le_lduw_mmu
# define helper_ret_ldsl_mmu helper_le_ldsl_mmu
# define helper_ret_ldul_mmu helper_le_ldul_mmu
# define helper_ret_ldl_mmu  helper_le_ldul_mmu
# define helper_ret_ldq_mmu  helper_le_ldq_mmu
# define helper_ret_stw_mmu  helper_le_stw_mmu
# define helper_ret_stl_mmu  helper_le_stl_mmu
# define helper_ret_stq_mmu  helper_le_stq_mmu
#endif

uint32_t helper_atomic_cmpxchgb_mmu(CPUArchState *env, target_ulong addr,
                                    uint32_t cmpv, uint32_t newv,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t helper_atomic_cmpxchgw_le_mmu(CPUArchState *env, target_ulong addr,
                                       uint32_t cmpv, uint32_t newv,
                                       TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t helper_atomic_cmpxchgl_le_mmu(CPUArchState *env, target_ulong addr,
                                       uint32_t cmpv, uint32_t newv,
                                       TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t helper_atomic_cmpxchgq_le_mmu(CPUArchState *env, target_ulong addr,
                                       uint64_t cmpv, uint64_t newv,
                                       TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t helper_atomic_cmpxchgw_be_mmu(CPUArchState *env, target_ulong addr,
                                       uint32_t cmpv, uint32_t newv,
                                       TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t helper_atomic_cmpxchgl_be_mmu(CPUArchState *env, target_ulong addr,
                                       uint32_t cmpv, uint32_t newv,
                                       TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t helper_atomic_cmpxchgq_be_mmu(CPUArchState *env, target_ulong addr,
                                       uint64_t cmpv, uint64_t newv,
                                       TCGMemOpIdx oi, uintptr_t retaddr);

#define GEN_ATOMIC_HELPER(NAME, TYPE, SUFFIX)         \
    TYPE helper_atomic_ ## NAME ## SUFFIX ## _mmu     \
    (CPUArchState *env, target_ulong addr, TYPE val,  \
     TCGMemOpIdx oi, uintptr_t retaddr);

#ifdef CONFIG_ATOMIC64
#define GEN_ATOMIC_HELPER_ALL(NAME)          \
    GEN_ATOMIC_HELPER(NAME, uint32_t, b)     \
    GEN_ATOMIC_HELPER(NAME, uint32_t, w_le)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, w_be)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, l_le)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, l_be)  \
    GEN_ATOMIC_HELPER(NAME, uint64_t, q_le)  \
    GEN_ATOMIC_HELPER(NAME, uint64_t, q_be)
#else
#define GEN_ATOMIC_HELPER_ALL(NAME)          \
    GEN_ATOMIC_HELPER(NAME, uint32_t, b)     \
    GEN_ATOMIC_HELPER(NAME, uint32_t, w_le)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, w_be)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, l_le)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, l_be)
#endif
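
/*
 * Illustrative expansion (not part of the original header): for example,
 * GEN_ATOMIC_HELPER_ALL(fetch_add) below declares one prototype per size
 * and endianness; the "l_le" instance expands to
 *
 *   uint32_t helper_atomic_fetch_addl_le_mmu(CPUArchState *env,
 *                                            target_ulong addr, uint32_t val,
 *                                            TCGMemOpIdx oi, uintptr_t retaddr);
 *
 * The 64-bit (q_le/q_be) variants are only declared when CONFIG_ATOMIC64
 * is set, as per the #ifdef above.
 */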

GEN_ATOMIC_HELPER_ALL(fetch_add)
GEN_ATOMIC_HELPER_ALL(fetch_sub)
GEN_ATOMIC_HELPER_ALL(fetch_and)
GEN_ATOMIC_HELPER_ALL(fetch_or)
GEN_ATOMIC_HELPER_ALL(fetch_xor)
GEN_ATOMIC_HELPER_ALL(fetch_smin)
GEN_ATOMIC_HELPER_ALL(fetch_umin)
GEN_ATOMIC_HELPER_ALL(fetch_smax)
GEN_ATOMIC_HELPER_ALL(fetch_umax)

GEN_ATOMIC_HELPER_ALL(add_fetch)
GEN_ATOMIC_HELPER_ALL(sub_fetch)
GEN_ATOMIC_HELPER_ALL(and_fetch)
GEN_ATOMIC_HELPER_ALL(or_fetch)
GEN_ATOMIC_HELPER_ALL(xor_fetch)
GEN_ATOMIC_HELPER_ALL(smin_fetch)
GEN_ATOMIC_HELPER_ALL(umin_fetch)
GEN_ATOMIC_HELPER_ALL(smax_fetch)
GEN_ATOMIC_HELPER_ALL(umax_fetch)

GEN_ATOMIC_HELPER_ALL(xchg)

#undef GEN_ATOMIC_HELPER_ALL
#undef GEN_ATOMIC_HELPER
#endif /* CONFIG_SOFTMMU */

/*
 * These aren't really "proper" helpers because TCG cannot manage Int128.
 * However, use the same format as the others, for use by the backends.
 *
 * The cmpxchg functions are only defined if HAVE_CMPXCHG128;
 * the ld/st functions are only defined if HAVE_ATOMIC128,
 * as defined by <qemu/atomic128.h>.
 */
Int128 helper_atomic_cmpxchgo_le_mmu(CPUArchState *env, target_ulong addr,
                                     Int128 cmpv, Int128 newv,
                                     TCGMemOpIdx oi, uintptr_t retaddr);
Int128 helper_atomic_cmpxchgo_be_mmu(CPUArchState *env, target_ulong addr,
                                     Int128 cmpv, Int128 newv,
                                     TCGMemOpIdx oi, uintptr_t retaddr);

Int128 helper_atomic_ldo_le_mmu(CPUArchState *env, target_ulong addr,
                                TCGMemOpIdx oi, uintptr_t retaddr);
Int128 helper_atomic_ldo_be_mmu(CPUArchState *env, target_ulong addr,
                                TCGMemOpIdx oi, uintptr_t retaddr);
void helper_atomic_sto_le_mmu(CPUArchState *env, target_ulong addr, Int128 val,
                              TCGMemOpIdx oi, uintptr_t retaddr);
void helper_atomic_sto_be_mmu(CPUArchState *env, target_ulong addr, Int128 val,
                              TCGMemOpIdx oi, uintptr_t retaddr);

#ifdef CONFIG_DEBUG_TCG
void tcg_assert_listed_vecop(TCGOpcode);
#else
static inline void tcg_assert_listed_vecop(TCGOpcode op) { }
#endif

static inline const TCGOpcode *tcg_swap_vecop_list(const TCGOpcode *n)
{
#ifdef CONFIG_DEBUG_TCG
    const TCGOpcode *o = tcg_ctx->vecop_list;
    tcg_ctx->vecop_list = n;
    return o;
#else
    return NULL;
#endif
}

bool tcg_can_emit_vecop_list(const TCGOpcode *, TCGType, unsigned);

#endif /* TCG_H */