/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* define it to use liveness analysis (better code) */
#define USE_TCG_OPTIMIZATIONS

#include "qemu/osdep.h"

/* Define to dump the ELF file used to communicate with GDB. */
#undef DEBUG_JIT

#include "qemu/error-report.h"
#include "qemu/cutils.h"
#include "qemu/host-utils.h"
#include "qemu/qemu-print.h"
#include "qemu/timer.h"
#include "qemu/cacheflush.h"
#include "qemu/cacheinfo.h"

/* Note: the long term plan is to reduce the dependencies on the QEMU
   CPU definitions. Currently they are used for qemu_ld/st
   instructions. */
#define NO_CPU_IO_DEFS

#include "exec/exec-all.h"
#include "tcg/tcg-op.h"

#if UINTPTR_MAX == UINT32_MAX
# define ELF_CLASS  ELFCLASS32
#else
# define ELF_CLASS  ELFCLASS64
#endif
#if HOST_BIG_ENDIAN
# define ELF_DATA   ELFDATA2MSB
#else
# define ELF_DATA   ELFDATA2LSB
#endif

#include "elf.h"
#include "exec/log.h"
#include "tcg/tcg-ldst.h"
#include "tcg-internal.h"
#include "accel/tcg/perf.h"

/* Forward declarations for functions declared in tcg-target.c.inc and
   used here. */
static void tcg_target_init(TCGContext *s);
static void tcg_target_qemu_prologue(TCGContext *s);
static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend);

/* The CIE and FDE header definitions will be common to all hosts. */
typedef struct {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t id;
    uint8_t version;
    char augmentation[1];
    uint8_t code_align;
    uint8_t data_align;
    uint8_t return_column;
} DebugFrameCIE;

typedef struct QEMU_PACKED {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t cie_offset;
    uintptr_t func_start;
    uintptr_t func_len;
} DebugFrameFDEHeader;

typedef struct QEMU_PACKED {
    DebugFrameCIE cie;
    DebugFrameFDEHeader fde;
} DebugFrameHeader;

static void tcg_register_jit_int(const void *buf, size_t size,
                                 const void *debug_frame,
                                 size_t debug_frame_size)
    __attribute__((unused));

/* Forward declarations for functions declared and used in tcg-target.c.inc. */
static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1,
                       intptr_t arg2);
static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg);
static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg);
static void tcg_out_goto_tb(TCGContext *s, int which);
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS]);
#if TCG_TARGET_MAYBE_vec
static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
                            TCGReg dst, TCGReg src);
static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg dst, TCGReg base, intptr_t offset);
static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg dst, int64_t arg);
static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
                           unsigned vecl, unsigned vece,
                           const TCGArg args[TCG_MAX_OP_ARGS],
                           const int const_args[TCG_MAX_OP_ARGS]);
#else
static inline bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
                                   TCGReg dst, TCGReg src)
{
    g_assert_not_reached();
}
static inline bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
                                    TCGReg dst, TCGReg base, intptr_t offset)
{
    g_assert_not_reached();
}
static inline void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
                                    TCGReg dst, int64_t arg)
{
    g_assert_not_reached();
}
static inline void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
                                  unsigned vecl, unsigned vece,
                                  const TCGArg args[TCG_MAX_OP_ARGS],
                                  const int const_args[TCG_MAX_OP_ARGS])
{
    g_assert_not_reached();
}
#endif
static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1,
                       intptr_t arg2);
static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                        TCGReg base, intptr_t ofs);
static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target,
                         const TCGHelperInfo *info);
static bool tcg_target_const_match(int64_t val, TCGType type, int ct);
#ifdef TCG_TARGET_NEED_LDST_LABELS
static int tcg_out_ldst_finalize(TCGContext *s);
#endif

TCGContext tcg_init_ctx;
__thread TCGContext *tcg_ctx;

TCGContext **tcg_ctxs;
unsigned int tcg_cur_ctxs;
unsigned int tcg_max_ctxs;
TCGv_env cpu_env = 0;
const void *tcg_code_gen_epilogue;
uintptr_t tcg_splitwx_diff;

#ifndef CONFIG_TCG_INTERPRETER
tcg_prologue_fn *tcg_qemu_tb_exec;
#endif

static TCGRegSet tcg_target_available_regs[TCG_TYPE_COUNT];
static TCGRegSet tcg_target_call_clobber_regs;

#if TCG_TARGET_INSN_UNIT_SIZE == 1
static __attribute__((unused)) inline void tcg_out8(TCGContext *s, uint8_t v)
{
    *s->code_ptr++ = v;
}

static __attribute__((unused)) inline void tcg_patch8(tcg_insn_unit *p,
                                                      uint8_t v)
{
    *p = v;
}
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 2
static __attribute__((unused)) inline void tcg_out16(TCGContext *s, uint16_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (2 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}
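/*
 * Each tcg_outN() here emits an N-byte quantity in units of tcg_insn_unit.
 * For example, with TCG_TARGET_INSN_UNIT_SIZE == 1 (e.g. i386), tcg_out32()
 * below takes the memcpy path and advances code_ptr by four one-byte units,
 * while with a four-byte unit (e.g. aarch64) it is a single store of one
 * unit.
 */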
static __attribute__((unused)) inline void tcg_patch16(tcg_insn_unit *p,
                                                       uint16_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 4
static __attribute__((unused)) inline void tcg_out32(TCGContext *s, uint32_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (4 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch32(tcg_insn_unit *p,
                                                       uint32_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 8
static __attribute__((unused)) inline void tcg_out64(TCGContext *s, uint64_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (8 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch64(tcg_insn_unit *p,
                                                       uint64_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif

/* label relocation processing */

static void tcg_out_reloc(TCGContext *s, tcg_insn_unit *code_ptr, int type,
                          TCGLabel *l, intptr_t addend)
{
    TCGRelocation *r = tcg_malloc(sizeof(TCGRelocation));

    r->type = type;
    r->ptr = code_ptr;
    r->addend = addend;
    QSIMPLEQ_INSERT_TAIL(&l->relocs, r, next);
}

static void tcg_out_label(TCGContext *s, TCGLabel *l)
{
    tcg_debug_assert(!l->has_value);
    l->has_value = 1;
    l->u.value_ptr = tcg_splitwx_to_rx(s->code_ptr);
}

TCGLabel *gen_new_label(void)
{
    TCGContext *s = tcg_ctx;
    TCGLabel *l = tcg_malloc(sizeof(TCGLabel));

    memset(l, 0, sizeof(TCGLabel));
    l->id = s->nb_labels++;
    QSIMPLEQ_INIT(&l->relocs);

    QSIMPLEQ_INSERT_TAIL(&s->labels, l, next);

    return l;
}

static bool tcg_resolve_relocs(TCGContext *s)
{
    TCGLabel *l;

    QSIMPLEQ_FOREACH(l, &s->labels, next) {
        TCGRelocation *r;
        uintptr_t value = l->u.value;

        QSIMPLEQ_FOREACH(r, &l->relocs, next) {
            if (!patch_reloc(r->ptr, r->type, value, r->addend)) {
                return false;
            }
        }
    }
    return true;
}
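/*
 * To summarize the flow above: a branch to a not-yet-defined label emits a
 * placeholder and queues a TCGRelocation via tcg_out_reloc(); once
 * tcg_out_label() assigns the label's value, tcg_resolve_relocs() walks
 * each queue and lets the backend's patch_reloc() rewrite the placeholder,
 * returning false if the displacement cannot be encoded.
 */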
static void set_jmp_reset_offset(TCGContext *s, int which)
{
    /*
     * We will check for overflow at the end of the opcode loop in
     * tcg_gen_code, where we bound tcg_current_code_size to UINT16_MAX.
     */
    s->gen_tb->jmp_reset_offset[which] = tcg_current_code_size(s);
}

static void G_GNUC_UNUSED set_jmp_insn_offset(TCGContext *s, int which)
{
    /*
     * We will check for overflow at the end of the opcode loop in
     * tcg_gen_code, where we bound tcg_current_code_size to UINT16_MAX.
     */
    s->gen_tb->jmp_insn_offset[which] = tcg_current_code_size(s);
}

static uintptr_t G_GNUC_UNUSED get_jmp_target_addr(TCGContext *s, int which)
{
    /*
     * Return the read-execute version of the pointer, for the benefit
     * of any pc-relative addressing mode.
     */
    return (uintptr_t)tcg_splitwx_to_rx(&s->gen_tb->jmp_target_addr[which]);
}

/* Signal overflow, starting over with fewer guest insns. */
static G_NORETURN
void tcg_raise_tb_overflow(TCGContext *s)
{
    siglongjmp(s->jmp_trans, -2);
}

#define C_PFX1(P, A)                    P##A
#define C_PFX2(P, A, B)                 P##A##_##B
#define C_PFX3(P, A, B, C)              P##A##_##B##_##C
#define C_PFX4(P, A, B, C, D)           P##A##_##B##_##C##_##D
#define C_PFX5(P, A, B, C, D, E)        P##A##_##B##_##C##_##D##_##E
#define C_PFX6(P, A, B, C, D, E, F)     P##A##_##B##_##C##_##D##_##E##_##F

/* Define an enumeration for the various combinations. */

#define C_O0_I1(I1)                     C_PFX1(c_o0_i1_, I1),
#define C_O0_I2(I1, I2)                 C_PFX2(c_o0_i2_, I1, I2),
#define C_O0_I3(I1, I2, I3)             C_PFX3(c_o0_i3_, I1, I2, I3),
#define C_O0_I4(I1, I2, I3, I4)         C_PFX4(c_o0_i4_, I1, I2, I3, I4),

#define C_O1_I1(O1, I1)                 C_PFX2(c_o1_i1_, O1, I1),
#define C_O1_I2(O1, I1, I2)             C_PFX3(c_o1_i2_, O1, I1, I2),
#define C_O1_I3(O1, I1, I2, I3)         C_PFX4(c_o1_i3_, O1, I1, I2, I3),
#define C_O1_I4(O1, I1, I2, I3, I4)     C_PFX5(c_o1_i4_, O1, I1, I2, I3, I4),

#define C_N1_I2(O1, I1, I2)             C_PFX3(c_n1_i2_, O1, I1, I2),

#define C_O2_I1(O1, O2, I1)             C_PFX3(c_o2_i1_, O1, O2, I1),
#define C_O2_I2(O1, O2, I1, I2)         C_PFX4(c_o2_i2_, O1, O2, I1, I2),
#define C_O2_I3(O1, O2, I1, I2, I3)     C_PFX5(c_o2_i3_, O1, O2, I1, I2, I3),
#define C_O2_I4(O1, O2, I1, I2, I3, I4) C_PFX6(c_o2_i4_, O1, O2, I1, I2, I3, I4),

typedef enum {
#include "tcg-target-con-set.h"
} TCGConstraintSetIndex;

static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode);

#undef C_O0_I1
#undef C_O0_I2
#undef C_O0_I3
#undef C_O0_I4
#undef C_O1_I1
#undef C_O1_I2
#undef C_O1_I3
#undef C_O1_I4
#undef C_N1_I2
#undef C_O2_I1
#undef C_O2_I2
#undef C_O2_I3
#undef C_O2_I4

/* Put all of the constraint sets into an array, indexed by the enum. */

#define C_O0_I1(I1)                     { .args_ct_str = { #I1 } },
#define C_O0_I2(I1, I2)                 { .args_ct_str = { #I1, #I2 } },
#define C_O0_I3(I1, I2, I3)             { .args_ct_str = { #I1, #I2, #I3 } },
#define C_O0_I4(I1, I2, I3, I4)         { .args_ct_str = { #I1, #I2, #I3, #I4 } },

#define C_O1_I1(O1, I1)                 { .args_ct_str = { #O1, #I1 } },
#define C_O1_I2(O1, I1, I2)             { .args_ct_str = { #O1, #I1, #I2 } },
#define C_O1_I3(O1, I1, I2, I3)         { .args_ct_str = { #O1, #I1, #I2, #I3 } },
#define C_O1_I4(O1, I1, I2, I3, I4)     { .args_ct_str = { #O1, #I1, #I2, #I3, #I4 } },

#define C_N1_I2(O1, I1, I2)             { .args_ct_str = { "&" #O1, #I1, #I2 } },

#define C_O2_I1(O1, O2, I1)             { .args_ct_str = { #O1, #O2, #I1 } },
#define C_O2_I2(O1, O2, I1, I2)         { .args_ct_str = { #O1, #O2, #I1, #I2 } },
#define C_O2_I3(O1, O2, I1, I2, I3)     { .args_ct_str = { #O1, #O2, #I1, #I2, #I3 } },
#define C_O2_I4(O1, O2, I1, I2, I3, I4) { .args_ct_str = { #O1, #O2, #I1, #I2, #I3, #I4 } },

static const TCGTargetOpDef constraint_sets[] = {
#include "tcg-target-con-set.h"
};

#undef C_O0_I1
#undef C_O0_I2
#undef C_O0_I3
#undef C_O0_I4
#undef C_O1_I1
#undef C_O1_I2
#undef C_O1_I3
#undef C_O1_I4
#undef C_N1_I2
#undef C_O2_I1
#undef C_O2_I2
#undef C_O2_I3
#undef C_O2_I4
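/*
 * For example, a constraint set declared as C_O1_I2(r, r, ri) in
 * tcg-target-con-set.h is expanded three times: first into the enumerator
 * c_o1_i2_r_r_ri, then into the constraint_sets[] entry
 * { .args_ct_str = { "r", "r", "ri" } }, and finally (below) back into
 * the enumerator, so that tcg_target_op_def() can return it as the index
 * of that entry.
 */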
/* Expand the enumerator to be returned from tcg_target_op_def(). */

#define C_O0_I1(I1)                     C_PFX1(c_o0_i1_, I1)
#define C_O0_I2(I1, I2)                 C_PFX2(c_o0_i2_, I1, I2)
#define C_O0_I3(I1, I2, I3)             C_PFX3(c_o0_i3_, I1, I2, I3)
#define C_O0_I4(I1, I2, I3, I4)         C_PFX4(c_o0_i4_, I1, I2, I3, I4)

#define C_O1_I1(O1, I1)                 C_PFX2(c_o1_i1_, O1, I1)
#define C_O1_I2(O1, I1, I2)             C_PFX3(c_o1_i2_, O1, I1, I2)
#define C_O1_I3(O1, I1, I2, I3)         C_PFX4(c_o1_i3_, O1, I1, I2, I3)
#define C_O1_I4(O1, I1, I2, I3, I4)     C_PFX5(c_o1_i4_, O1, I1, I2, I3, I4)

#define C_N1_I2(O1, I1, I2)             C_PFX3(c_n1_i2_, O1, I1, I2)

#define C_O2_I1(O1, O2, I1)             C_PFX3(c_o2_i1_, O1, O2, I1)
#define C_O2_I2(O1, O2, I1, I2)         C_PFX4(c_o2_i2_, O1, O2, I1, I2)
#define C_O2_I3(O1, O2, I1, I2, I3)     C_PFX5(c_o2_i3_, O1, O2, I1, I2, I3)
#define C_O2_I4(O1, O2, I1, I2, I3, I4) C_PFX6(c_o2_i4_, O1, O2, I1, I2, I3, I4)

#include "tcg-target.c.inc"

static void alloc_tcg_plugin_context(TCGContext *s)
{
#ifdef CONFIG_PLUGIN
    s->plugin_tb = g_new0(struct qemu_plugin_tb, 1);
    s->plugin_tb->insns =
        g_ptr_array_new_with_free_func(qemu_plugin_insn_cleanup_fn);
#endif
}

/*
 * All TCG threads except the parent (i.e. the one that called tcg_context_init
 * and registered the target's TCG globals) must register with this function
 * before initiating translation.
 *
 * In user-mode we just point tcg_ctx to tcg_init_ctx. See the documentation
 * of tcg_region_init() for the reasoning behind this.
 *
 * In softmmu each caller registers its context in tcg_ctxs[]. Note that in
 * softmmu tcg_ctxs[] does not track tcg_init_ctx, since the initial context
 * is not used anymore for translation once this function is called.
 *
 * Not tracking tcg_init_ctx in tcg_ctxs[] in softmmu keeps code that iterates
 * over the array (e.g. tcg_code_size()) the same for both softmmu and
 * user-mode.
 */
#ifdef CONFIG_USER_ONLY
void tcg_register_thread(void)
{
    tcg_ctx = &tcg_init_ctx;
}
#else
void tcg_register_thread(void)
{
    TCGContext *s = g_malloc(sizeof(*s));
    unsigned int i, n;

    *s = tcg_init_ctx;

    /* Relink mem_base. */
    for (i = 0, n = tcg_init_ctx.nb_globals; i < n; ++i) {
        if (tcg_init_ctx.temps[i].mem_base) {
            ptrdiff_t b = tcg_init_ctx.temps[i].mem_base - tcg_init_ctx.temps;
            tcg_debug_assert(b >= 0 && b < n);
            s->temps[i].mem_base = &s->temps[b];
        }
    }

    /* Claim an entry in tcg_ctxs */
    n = qatomic_fetch_inc(&tcg_cur_ctxs);
    g_assert(n < tcg_max_ctxs);
    qatomic_set(&tcg_ctxs[n], s);

    if (n > 0) {
        alloc_tcg_plugin_context(s);
        tcg_region_initial_alloc(s);
    }

    tcg_ctx = s;
}
#endif /* !CONFIG_USER_ONLY */

/* pool based memory allocation */
void *tcg_malloc_internal(TCGContext *s, int size)
{
    TCGPool *p;
    int pool_size;

    if (size > TCG_POOL_CHUNK_SIZE) {
        /* big malloc: insert a new pool (XXX: could optimize) */
        p = g_malloc(sizeof(TCGPool) + size);
        p->size = size;
        p->next = s->pool_first_large;
        s->pool_first_large = p;
        return p->data;
    } else {
        p = s->pool_current;
        if (!p) {
            p = s->pool_first;
            if (!p) {
                goto new_pool;
            }
        } else {
            if (!p->next) {
            new_pool:
                pool_size = TCG_POOL_CHUNK_SIZE;
                p = g_malloc(sizeof(TCGPool) + pool_size);
                p->size = pool_size;
                p->next = NULL;
                if (s->pool_current) {
                    s->pool_current->next = p;
                } else {
                    s->pool_first = p;
                }
            } else {
                p = p->next;
            }
        }
    }
    s->pool_current = p;
    s->pool_cur = p->data + size;
    s->pool_end = p->data + p->size;
    return p->data;
}

void tcg_pool_reset(TCGContext *s)
{
    TCGPool *p, *t;
    for (p = s->pool_first_large; p; p = t) {
        t = p->next;
        g_free(p);
    }
    s->pool_first_large = NULL;
    s->pool_cur = s->pool_end = NULL;
    s->pool_current = NULL;
}

#include "exec/helper-proto.h"

static TCGHelperInfo all_helpers[] = {
#include "exec/helper-tcg.h"
};
static GHashTable *helper_table;

#ifdef CONFIG_TCG_INTERPRETER
static ffi_type *typecode_to_ffi(int argmask)
{
    switch (argmask) {
    case dh_typecode_void:
        return &ffi_type_void;
    case dh_typecode_i32:
        return &ffi_type_uint32;
    case dh_typecode_s32:
        return &ffi_type_sint32;
    case dh_typecode_i64:
        return &ffi_type_uint64;
    case dh_typecode_s64:
        return &ffi_type_sint64;
    case dh_typecode_ptr:
        return &ffi_type_pointer;
    }
    g_assert_not_reached();
}

static void init_ffi_layouts(void)
{
    /* g_direct_hash/equal for direct comparisons on uint32_t. */
    GHashTable *ffi_table = g_hash_table_new(NULL, NULL);

    for (int i = 0; i < ARRAY_SIZE(all_helpers); ++i) {
        TCGHelperInfo *info = &all_helpers[i];
        unsigned typemask = info->typemask;
        gpointer hash = (gpointer)(uintptr_t)typemask;
        struct {
            ffi_cif cif;
            ffi_type *args[];
        } *ca;
        ffi_status status;
        int nargs;
        ffi_cif *cif;

        cif = g_hash_table_lookup(ffi_table, hash);
        if (cif) {
            info->cif = cif;
            continue;
        }

        /* Ignoring the return type, find the last non-zero field. */
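        /*
         * Illustration: typemask packs one 3-bit typecode per value, with
         * the return type in bits [2:0] and argument N starting at bit
         * (N + 1) * 3.  For a hypothetical helper taking (i32, i64) and
         * returning i64, typemask >> 3 has its last non-zero field at
         * index 1, so the computation below yields nargs = 2.
         */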
        nargs = 32 - clz32(typemask >> 3);
        nargs = DIV_ROUND_UP(nargs, 3);

        ca = g_malloc0(sizeof(*ca) + nargs * sizeof(ffi_type *));
        ca->cif.rtype = typecode_to_ffi(typemask & 7);
        ca->cif.nargs = nargs;

        if (nargs != 0) {
            ca->cif.arg_types = ca->args;
            for (int j = 0; j < nargs; ++j) {
                int typecode = extract32(typemask, (j + 1) * 3, 3);
                ca->args[j] = typecode_to_ffi(typecode);
            }
        }

        status = ffi_prep_cif(&ca->cif, FFI_DEFAULT_ABI, nargs,
                              ca->cif.rtype, ca->cif.arg_types);
        assert(status == FFI_OK);

        cif = &ca->cif;
        info->cif = cif;
        g_hash_table_insert(ffi_table, hash, (gpointer)cif);
    }

    g_hash_table_destroy(ffi_table);
}
#endif /* CONFIG_TCG_INTERPRETER */

typedef struct TCGCumulativeArgs {
    int arg_idx;                /* tcg_gen_callN args[] */
    int info_in_idx;            /* TCGHelperInfo in[] */
    int arg_slot;               /* regs+stack slot */
    int ref_slot;               /* stack slots for references */
} TCGCumulativeArgs;

static void layout_arg_even(TCGCumulativeArgs *cum)
{
    cum->arg_slot += cum->arg_slot & 1;
}

static void layout_arg_1(TCGCumulativeArgs *cum, TCGHelperInfo *info,
                         TCGCallArgumentKind kind)
{
    TCGCallArgumentLoc *loc = &info->in[cum->info_in_idx];

    *loc = (TCGCallArgumentLoc){
        .kind = kind,
        .arg_idx = cum->arg_idx,
        .arg_slot = cum->arg_slot,
    };
    cum->info_in_idx++;
    cum->arg_slot++;
}

static void layout_arg_normal_n(TCGCumulativeArgs *cum,
                                TCGHelperInfo *info, int n)
{
    TCGCallArgumentLoc *loc = &info->in[cum->info_in_idx];

    for (int i = 0; i < n; ++i) {
        /* Layout all using the same arg_idx, adjusting the subindex. */
        loc[i] = (TCGCallArgumentLoc){
            .kind = TCG_CALL_ARG_NORMAL,
            .arg_idx = cum->arg_idx,
            .tmp_subindex = i,
            .arg_slot = cum->arg_slot + i,
        };
    }
    cum->info_in_idx += n;
    cum->arg_slot += n;
}
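/*
 * Example: on a 32-bit host with TCG_TARGET_CALL_ARG_I64 ==
 * TCG_CALL_ARG_EVEN (e.g. arm), a helper taking (i32, i64) places the
 * i32 in slot 0, then layout_arg_even() skips slot 1 so that the i64
 * occupies the aligned slot pair 2/3 via layout_arg_normal_n(..., 2).
 */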
718 */ 719 for (typemask >>= 3; typemask; typemask >>= 3, cum.arg_idx++) { 720 TCGCallArgumentKind kind; 721 TCGType type; 722 723 typecode = typemask & 7; 724 switch (typecode) { 725 case dh_typecode_i32: 726 case dh_typecode_s32: 727 type = TCG_TYPE_I32; 728 break; 729 case dh_typecode_i64: 730 case dh_typecode_s64: 731 type = TCG_TYPE_I64; 732 break; 733 case dh_typecode_ptr: 734 type = TCG_TYPE_PTR; 735 break; 736 default: 737 g_assert_not_reached(); 738 } 739 740 switch (type) { 741 case TCG_TYPE_I32: 742 switch (TCG_TARGET_CALL_ARG_I32) { 743 case TCG_CALL_ARG_EVEN: 744 layout_arg_even(&cum); 745 /* fall through */ 746 case TCG_CALL_ARG_NORMAL: 747 layout_arg_1(&cum, info, TCG_CALL_ARG_NORMAL); 748 break; 749 case TCG_CALL_ARG_EXTEND: 750 kind = TCG_CALL_ARG_EXTEND_U + (typecode & 1); 751 layout_arg_1(&cum, info, kind); 752 break; 753 default: 754 qemu_build_not_reached(); 755 } 756 break; 757 758 case TCG_TYPE_I64: 759 switch (TCG_TARGET_CALL_ARG_I64) { 760 case TCG_CALL_ARG_EVEN: 761 layout_arg_even(&cum); 762 /* fall through */ 763 case TCG_CALL_ARG_NORMAL: 764 if (TCG_TARGET_REG_BITS == 32) { 765 layout_arg_normal_n(&cum, info, 2); 766 } else { 767 layout_arg_1(&cum, info, TCG_CALL_ARG_NORMAL); 768 } 769 break; 770 default: 771 qemu_build_not_reached(); 772 } 773 break; 774 775 default: 776 g_assert_not_reached(); 777 } 778 } 779 info->nr_in = cum.info_in_idx; 780 781 /* Validate that we didn't overrun the input array. */ 782 assert(cum.info_in_idx <= ARRAY_SIZE(info->in)); 783 /* Validate the backend has enough argument space. */ 784 assert(cum.arg_slot <= max_reg_slots + max_stk_slots); 785 assert(cum.ref_slot <= max_stk_slots); 786 } 787 788 static int indirect_reg_alloc_order[ARRAY_SIZE(tcg_target_reg_alloc_order)]; 789 static void process_op_defs(TCGContext *s); 790 static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type, 791 TCGReg reg, const char *name); 792 793 static void tcg_context_init(unsigned max_cpus) 794 { 795 TCGContext *s = &tcg_init_ctx; 796 int op, total_args, n, i; 797 TCGOpDef *def; 798 TCGArgConstraint *args_ct; 799 TCGTemp *ts; 800 801 memset(s, 0, sizeof(*s)); 802 s->nb_globals = 0; 803 804 /* Count total number of arguments and allocate the corresponding 805 space */ 806 total_args = 0; 807 for(op = 0; op < NB_OPS; op++) { 808 def = &tcg_op_defs[op]; 809 n = def->nb_iargs + def->nb_oargs; 810 total_args += n; 811 } 812 813 args_ct = g_new0(TCGArgConstraint, total_args); 814 815 for(op = 0; op < NB_OPS; op++) { 816 def = &tcg_op_defs[op]; 817 def->args_ct = args_ct; 818 n = def->nb_iargs + def->nb_oargs; 819 args_ct += n; 820 } 821 822 /* Register helpers. */ 823 /* Use g_direct_hash/equal for direct pointer comparisons on func. */ 824 helper_table = g_hash_table_new(NULL, NULL); 825 826 for (i = 0; i < ARRAY_SIZE(all_helpers); ++i) { 827 init_call_layout(&all_helpers[i]); 828 g_hash_table_insert(helper_table, (gpointer)all_helpers[i].func, 829 (gpointer)&all_helpers[i]); 830 } 831 832 #ifdef CONFIG_TCG_INTERPRETER 833 init_ffi_layouts(); 834 #endif 835 836 tcg_target_init(s); 837 process_op_defs(s); 838 839 /* Reverse the order of the saved registers, assuming they're all at 840 the start of tcg_target_reg_alloc_order. 
    for (n = 0; n < ARRAY_SIZE(tcg_target_reg_alloc_order); ++n) {
        int r = tcg_target_reg_alloc_order[n];
        if (tcg_regset_test_reg(tcg_target_call_clobber_regs, r)) {
            break;
        }
    }
    for (i = 0; i < n; ++i) {
        indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[n - 1 - i];
    }
    for (; i < ARRAY_SIZE(tcg_target_reg_alloc_order); ++i) {
        indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[i];
    }

    alloc_tcg_plugin_context(s);

    tcg_ctx = s;
    /*
     * In user-mode we simply share the init context among threads, since we
     * use a single region. See the documentation of tcg_region_init() for
     * the reasoning behind this.
     * In softmmu we will have at most max_cpus TCG threads.
     */
#ifdef CONFIG_USER_ONLY
    tcg_ctxs = &tcg_ctx;
    tcg_cur_ctxs = 1;
    tcg_max_ctxs = 1;
#else
    tcg_max_ctxs = max_cpus;
    tcg_ctxs = g_new0(TCGContext *, max_cpus);
#endif

    tcg_debug_assert(!tcg_regset_test_reg(s->reserved_regs, TCG_AREG0));
    ts = tcg_global_reg_new_internal(s, TCG_TYPE_PTR, TCG_AREG0, "env");
    cpu_env = temp_tcgv_ptr(ts);
}

void tcg_init(size_t tb_size, int splitwx, unsigned max_cpus)
{
    tcg_context_init(max_cpus);
    tcg_region_init(tb_size, splitwx, max_cpus);
}

/*
 * Allocate TBs right before their corresponding translated code, making
 * sure that TBs and code are on different cache lines.
 */
TranslationBlock *tcg_tb_alloc(TCGContext *s)
{
    uintptr_t align = qemu_icache_linesize;
    TranslationBlock *tb;
    void *next;

 retry:
    tb = (void *)ROUND_UP((uintptr_t)s->code_gen_ptr, align);
    next = (void *)ROUND_UP((uintptr_t)(tb + 1), align);

    if (unlikely(next > s->code_gen_highwater)) {
        if (tcg_region_alloc(s)) {
            return NULL;
        }
        goto retry;
    }
    qatomic_set(&s->code_gen_ptr, next);
    s->data_gen_ptr = NULL;
    return tb;
}
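/*
 * Note for what follows: with split-wx enabled, code is emitted through
 * the writable mapping (s->code_ptr / s->code_buf), while
 * tcg_splitwx_to_rx() yields the executable alias of such a pointer,
 * which is what tcg_qemu_tb_exec and flush_idcache_range() use below.
 */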
void tcg_prologue_init(TCGContext *s)
{
    size_t prologue_size;

    s->code_ptr = s->code_gen_ptr;
    s->code_buf = s->code_gen_ptr;
    s->data_gen_ptr = NULL;

#ifndef CONFIG_TCG_INTERPRETER
    tcg_qemu_tb_exec = (tcg_prologue_fn *)tcg_splitwx_to_rx(s->code_ptr);
#endif

#ifdef TCG_TARGET_NEED_POOL_LABELS
    s->pool_labels = NULL;
#endif

    qemu_thread_jit_write();
    /* Generate the prologue. */
    tcg_target_qemu_prologue(s);

#ifdef TCG_TARGET_NEED_POOL_LABELS
    /* Allow the prologue to put e.g. guest_base into a pool entry. */
    {
        int result = tcg_out_pool_finalize(s);
        tcg_debug_assert(result == 0);
    }
#endif

    prologue_size = tcg_current_code_size(s);
    perf_report_prologue(s->code_gen_ptr, prologue_size);

#ifndef CONFIG_TCG_INTERPRETER
    flush_idcache_range((uintptr_t)tcg_splitwx_to_rx(s->code_buf),
                        (uintptr_t)s->code_buf, prologue_size);
#endif

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
        FILE *logfile = qemu_log_trylock();
        if (logfile) {
            fprintf(logfile, "PROLOGUE: [size=%zu]\n", prologue_size);
            if (s->data_gen_ptr) {
                size_t code_size = s->data_gen_ptr - s->code_gen_ptr;
                size_t data_size = prologue_size - code_size;
                size_t i;

                disas(logfile, s->code_gen_ptr, code_size);

                for (i = 0; i < data_size; i += sizeof(tcg_target_ulong)) {
                    if (sizeof(tcg_target_ulong) == 8) {
                        fprintf(logfile,
                                "0x%08" PRIxPTR ": .quad 0x%016" PRIx64 "\n",
                                (uintptr_t)s->data_gen_ptr + i,
                                *(uint64_t *)(s->data_gen_ptr + i));
                    } else {
                        fprintf(logfile,
                                "0x%08" PRIxPTR ": .long 0x%08x\n",
                                (uintptr_t)s->data_gen_ptr + i,
                                *(uint32_t *)(s->data_gen_ptr + i));
                    }
                }
            } else {
                disas(logfile, s->code_gen_ptr, prologue_size);
            }
            fprintf(logfile, "\n");
            qemu_log_unlock(logfile);
        }
    }
#endif

#ifndef CONFIG_TCG_INTERPRETER
    /*
     * Assert that goto_ptr is implemented completely, setting an epilogue.
     * For tci, we use NULL as the signal to return from the interpreter,
     * so skip this check.
     */
    tcg_debug_assert(tcg_code_gen_epilogue != NULL);
#endif

    tcg_region_prologue_set(s);
}
void tcg_func_start(TCGContext *s)
{
    tcg_pool_reset(s);
    s->nb_temps = s->nb_globals;

    /* No temps have been previously allocated for size or locality. */
    memset(s->free_temps, 0, sizeof(s->free_temps));

    /* No constant temps have been previously allocated. */
    for (int i = 0; i < TCG_TYPE_COUNT; ++i) {
        if (s->const_table[i]) {
            g_hash_table_remove_all(s->const_table[i]);
        }
    }

    s->nb_ops = 0;
    s->nb_labels = 0;
    s->current_frame_offset = s->frame_start;

#ifdef CONFIG_DEBUG_TCG
    s->goto_tb_issue_mask = 0;
#endif

    QTAILQ_INIT(&s->ops);
    QTAILQ_INIT(&s->free_ops);
    QSIMPLEQ_INIT(&s->labels);
}

static TCGTemp *tcg_temp_alloc(TCGContext *s)
{
    int n = s->nb_temps++;

    if (n >= TCG_MAX_TEMPS) {
        tcg_raise_tb_overflow(s);
    }
    return memset(&s->temps[n], 0, sizeof(TCGTemp));
}

static TCGTemp *tcg_global_alloc(TCGContext *s)
{
    TCGTemp *ts;

    tcg_debug_assert(s->nb_globals == s->nb_temps);
    tcg_debug_assert(s->nb_globals < TCG_MAX_TEMPS);
    s->nb_globals++;
    ts = tcg_temp_alloc(s);
    ts->kind = TEMP_GLOBAL;

    return ts;
}

static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type,
                                            TCGReg reg, const char *name)
{
    TCGTemp *ts;

    if (TCG_TARGET_REG_BITS == 32 && type != TCG_TYPE_I32) {
        tcg_abort();
    }

    ts = tcg_global_alloc(s);
    ts->base_type = type;
    ts->type = type;
    ts->kind = TEMP_FIXED;
    ts->reg = reg;
    ts->name = name;
    tcg_regset_set_reg(s->reserved_regs, reg);

    return ts;
}

void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size)
{
    s->frame_start = start;
    s->frame_end = start + size;
    s->frame_temp
        = tcg_global_reg_new_internal(s, TCG_TYPE_PTR, reg, "_frame");
}

TCGTemp *tcg_global_mem_new_internal(TCGType type, TCGv_ptr base,
                                     intptr_t offset, const char *name)
{
    TCGContext *s = tcg_ctx;
    TCGTemp *base_ts = tcgv_ptr_temp(base);
    TCGTemp *ts = tcg_global_alloc(s);
    int indirect_reg = 0;

    switch (base_ts->kind) {
    case TEMP_FIXED:
        break;
    case TEMP_GLOBAL:
        /* We do not support double-indirect registers. */
        tcg_debug_assert(!base_ts->indirect_reg);
        base_ts->indirect_base = 1;
        s->nb_indirects += (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64
                            ? 2 : 1);
        indirect_reg = 1;
        break;
    default:
        g_assert_not_reached();
    }

    if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
        TCGTemp *ts2 = tcg_global_alloc(s);
        char buf[64];

        ts->base_type = TCG_TYPE_I64;
        ts->type = TCG_TYPE_I32;
        ts->indirect_reg = indirect_reg;
        ts->mem_allocated = 1;
        ts->mem_base = base_ts;
        ts->mem_offset = offset;
        pstrcpy(buf, sizeof(buf), name);
        pstrcat(buf, sizeof(buf), "_0");
        ts->name = strdup(buf);

        tcg_debug_assert(ts2 == ts + 1);
        ts2->base_type = TCG_TYPE_I64;
        ts2->type = TCG_TYPE_I32;
        ts2->indirect_reg = indirect_reg;
        ts2->mem_allocated = 1;
        ts2->mem_base = base_ts;
        ts2->mem_offset = offset + 4;
        ts2->temp_subindex = 1;
        pstrcpy(buf, sizeof(buf), name);
        pstrcat(buf, sizeof(buf), "_1");
        ts2->name = strdup(buf);
    } else {
        ts->base_type = type;
        ts->type = type;
        ts->indirect_reg = indirect_reg;
        ts->mem_allocated = 1;
        ts->mem_base = base_ts;
        ts->mem_offset = offset;
        ts->name = name;
    }
    return ts;
}
TCGTemp *tcg_temp_new_internal(TCGType type, bool temp_local)
{
    TCGContext *s = tcg_ctx;
    TCGTempKind kind = temp_local ? TEMP_LOCAL : TEMP_NORMAL;
    TCGTemp *ts;
    int idx, k;

    k = type + (temp_local ? TCG_TYPE_COUNT : 0);
    idx = find_first_bit(s->free_temps[k].l, TCG_MAX_TEMPS);
    if (idx < TCG_MAX_TEMPS) {
        /* There is already an available temp with the right type. */
        clear_bit(idx, s->free_temps[k].l);

        ts = &s->temps[idx];
        ts->temp_allocated = 1;
        tcg_debug_assert(ts->base_type == type);
        tcg_debug_assert(ts->kind == kind);
    } else {
        ts = tcg_temp_alloc(s);
        if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
            TCGTemp *ts2 = tcg_temp_alloc(s);

            ts->base_type = type;
            ts->type = TCG_TYPE_I32;
            ts->temp_allocated = 1;
            ts->kind = kind;

            tcg_debug_assert(ts2 == ts + 1);
            ts2->base_type = TCG_TYPE_I64;
            ts2->type = TCG_TYPE_I32;
            ts2->temp_allocated = 1;
            ts2->temp_subindex = 1;
            ts2->kind = kind;
        } else {
            ts->base_type = type;
            ts->type = type;
            ts->temp_allocated = 1;
            ts->kind = kind;
        }
    }

#if defined(CONFIG_DEBUG_TCG)
    s->temps_in_use++;
#endif
    return ts;
}

TCGv_vec tcg_temp_new_vec(TCGType type)
{
    TCGTemp *t;

#ifdef CONFIG_DEBUG_TCG
    switch (type) {
    case TCG_TYPE_V64:
        assert(TCG_TARGET_HAS_v64);
        break;
    case TCG_TYPE_V128:
        assert(TCG_TARGET_HAS_v128);
        break;
    case TCG_TYPE_V256:
        assert(TCG_TARGET_HAS_v256);
        break;
    default:
        g_assert_not_reached();
    }
#endif

    t = tcg_temp_new_internal(type, 0);
    return temp_tcgv_vec(t);
}

/* Create a new temp of the same type as an existing temp. */
TCGv_vec tcg_temp_new_vec_matching(TCGv_vec match)
{
    TCGTemp *t = tcgv_vec_temp(match);

    tcg_debug_assert(t->temp_allocated != 0);

    t = tcg_temp_new_internal(t->base_type, 0);
    return temp_tcgv_vec(t);
}
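/*
 * Note that free temps are banked by base type, with TEMP_LOCAL temps in
 * a second bank at k = type + TCG_TYPE_COUNT: a freed local i64 can only
 * be recycled by a later request for another local i64, never by a
 * request for a plain i64 temp.
 */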
void tcg_temp_free_internal(TCGTemp *ts)
{
    TCGContext *s = tcg_ctx;
    int k, idx;

    switch (ts->kind) {
    case TEMP_CONST:
        /*
         * In order to simplify users of tcg_constant_*,
         * silently ignore free.
         */
        return;
    case TEMP_NORMAL:
    case TEMP_LOCAL:
        break;
    default:
        g_assert_not_reached();
    }

#if defined(CONFIG_DEBUG_TCG)
    s->temps_in_use--;
    if (s->temps_in_use < 0) {
        fprintf(stderr, "More temporaries freed than allocated!\n");
    }
#endif

    tcg_debug_assert(ts->temp_allocated != 0);
    ts->temp_allocated = 0;

    idx = temp_idx(ts);
    k = ts->base_type + (ts->kind == TEMP_NORMAL ? 0 : TCG_TYPE_COUNT);
    set_bit(idx, s->free_temps[k].l);
}

TCGTemp *tcg_constant_internal(TCGType type, int64_t val)
{
    TCGContext *s = tcg_ctx;
    GHashTable *h = s->const_table[type];
    TCGTemp *ts;

    if (h == NULL) {
        h = g_hash_table_new(g_int64_hash, g_int64_equal);
        s->const_table[type] = h;
    }

    ts = g_hash_table_lookup(h, &val);
    if (ts == NULL) {
        int64_t *val_ptr;

        ts = tcg_temp_alloc(s);

        if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
            TCGTemp *ts2 = tcg_temp_alloc(s);

            tcg_debug_assert(ts2 == ts + 1);

            ts->base_type = TCG_TYPE_I64;
            ts->type = TCG_TYPE_I32;
            ts->kind = TEMP_CONST;
            ts->temp_allocated = 1;

            ts2->base_type = TCG_TYPE_I64;
            ts2->type = TCG_TYPE_I32;
            ts2->kind = TEMP_CONST;
            ts2->temp_allocated = 1;
            ts2->temp_subindex = 1;

            /*
             * Retain the full value of the 64-bit constant in the low
             * part, so that the hash table works. Actual uses will
             * truncate the value to the low part.
             */
            ts[HOST_BIG_ENDIAN].val = val;
            ts[!HOST_BIG_ENDIAN].val = val >> 32;
            val_ptr = &ts[HOST_BIG_ENDIAN].val;
        } else {
            ts->base_type = type;
            ts->type = type;
            ts->kind = TEMP_CONST;
            ts->temp_allocated = 1;
            ts->val = val;
            val_ptr = &ts->val;
        }
        g_hash_table_insert(h, val_ptr, ts);
    }

    return ts;
}

TCGv_vec tcg_constant_vec(TCGType type, unsigned vece, int64_t val)
{
    val = dup_const(vece, val);
    return temp_tcgv_vec(tcg_constant_internal(type, val));
}

TCGv_vec tcg_constant_vec_matching(TCGv_vec match, unsigned vece, int64_t val)
{
    TCGTemp *t = tcgv_vec_temp(match);

    tcg_debug_assert(t->temp_allocated != 0);
    return tcg_constant_vec(t->base_type, vece, val);
}

TCGv_i32 tcg_const_i32(int32_t val)
{
    TCGv_i32 t0;
    t0 = tcg_temp_new_i32();
    tcg_gen_movi_i32(t0, val);
    return t0;
}

TCGv_i64 tcg_const_i64(int64_t val)
{
    TCGv_i64 t0;
    t0 = tcg_temp_new_i64();
    tcg_gen_movi_i64(t0, val);
    return t0;
}

TCGv_i32 tcg_const_local_i32(int32_t val)
{
    TCGv_i32 t0;
    t0 = tcg_temp_local_new_i32();
    tcg_gen_movi_i32(t0, val);
    return t0;
}

TCGv_i64 tcg_const_local_i64(int64_t val)
{
    TCGv_i64 t0;
    t0 = tcg_temp_local_new_i64();
    tcg_gen_movi_i64(t0, val);
    return t0;
}

#if defined(CONFIG_DEBUG_TCG)
void tcg_clear_temp_count(void)
{
    TCGContext *s = tcg_ctx;
    s->temps_in_use = 0;
}

int tcg_check_temp_count(void)
{
    TCGContext *s = tcg_ctx;
    if (s->temps_in_use) {
        /* Clear the count so that we don't give another
         * warning immediately next time around.
         */
        s->temps_in_use = 0;
        return 1;
    }
    return 0;
}
#endif
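/*
 * Note the distinction above: tcg_constant_internal() returns a pooled,
 * read-only TEMP_CONST, deduplicated per type through a hash table, while
 * the legacy tcg_const_*() helpers allocate a fresh mutable temp and
 * initialize it with a movi op.
 */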
/* Return true if OP may appear in the opcode stream.
   Test the runtime variable that controls each opcode. */
bool tcg_op_supported(TCGOpcode op)
{
    const bool have_vec
        = TCG_TARGET_HAS_v64 | TCG_TARGET_HAS_v128 | TCG_TARGET_HAS_v256;

    switch (op) {
    case INDEX_op_discard:
    case INDEX_op_set_label:
    case INDEX_op_call:
    case INDEX_op_br:
    case INDEX_op_mb:
    case INDEX_op_insn_start:
    case INDEX_op_exit_tb:
    case INDEX_op_goto_tb:
    case INDEX_op_goto_ptr:
    case INDEX_op_qemu_ld_i32:
    case INDEX_op_qemu_st_i32:
    case INDEX_op_qemu_ld_i64:
    case INDEX_op_qemu_st_i64:
        return true;

    case INDEX_op_qemu_st8_i32:
        return TCG_TARGET_HAS_qemu_st8_i32;

    case INDEX_op_mov_i32:
    case INDEX_op_setcond_i32:
    case INDEX_op_brcond_i32:
    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
    case INDEX_op_add_i32:
    case INDEX_op_sub_i32:
    case INDEX_op_mul_i32:
    case INDEX_op_and_i32:
    case INDEX_op_or_i32:
    case INDEX_op_xor_i32:
    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
        return true;

    case INDEX_op_movcond_i32:
        return TCG_TARGET_HAS_movcond_i32;
    case INDEX_op_div_i32:
    case INDEX_op_divu_i32:
        return TCG_TARGET_HAS_div_i32;
    case INDEX_op_rem_i32:
    case INDEX_op_remu_i32:
        return TCG_TARGET_HAS_rem_i32;
    case INDEX_op_div2_i32:
    case INDEX_op_divu2_i32:
        return TCG_TARGET_HAS_div2_i32;
    case INDEX_op_rotl_i32:
    case INDEX_op_rotr_i32:
        return TCG_TARGET_HAS_rot_i32;
    case INDEX_op_deposit_i32:
        return TCG_TARGET_HAS_deposit_i32;
    case INDEX_op_extract_i32:
        return TCG_TARGET_HAS_extract_i32;
    case INDEX_op_sextract_i32:
        return TCG_TARGET_HAS_sextract_i32;
    case INDEX_op_extract2_i32:
        return TCG_TARGET_HAS_extract2_i32;
    case INDEX_op_add2_i32:
        return TCG_TARGET_HAS_add2_i32;
    case INDEX_op_sub2_i32:
        return TCG_TARGET_HAS_sub2_i32;
    case INDEX_op_mulu2_i32:
        return TCG_TARGET_HAS_mulu2_i32;
    case INDEX_op_muls2_i32:
        return TCG_TARGET_HAS_muls2_i32;
    case INDEX_op_muluh_i32:
        return TCG_TARGET_HAS_muluh_i32;
    case INDEX_op_mulsh_i32:
        return TCG_TARGET_HAS_mulsh_i32;
    case INDEX_op_ext8s_i32:
        return TCG_TARGET_HAS_ext8s_i32;
    case INDEX_op_ext16s_i32:
        return TCG_TARGET_HAS_ext16s_i32;
    case INDEX_op_ext8u_i32:
        return TCG_TARGET_HAS_ext8u_i32;
    case INDEX_op_ext16u_i32:
        return TCG_TARGET_HAS_ext16u_i32;
    case INDEX_op_bswap16_i32:
        return TCG_TARGET_HAS_bswap16_i32;
    case INDEX_op_bswap32_i32:
        return TCG_TARGET_HAS_bswap32_i32;
    case INDEX_op_not_i32:
        return TCG_TARGET_HAS_not_i32;
    case INDEX_op_neg_i32:
        return TCG_TARGET_HAS_neg_i32;
    case INDEX_op_andc_i32:
        return TCG_TARGET_HAS_andc_i32;
    case INDEX_op_orc_i32:
        return TCG_TARGET_HAS_orc_i32;
    case INDEX_op_eqv_i32:
        return TCG_TARGET_HAS_eqv_i32;
    case INDEX_op_nand_i32:
        return TCG_TARGET_HAS_nand_i32;
    case INDEX_op_nor_i32:
        return TCG_TARGET_HAS_nor_i32;
    case INDEX_op_clz_i32:
        return TCG_TARGET_HAS_clz_i32;
    case INDEX_op_ctz_i32:
        return TCG_TARGET_HAS_ctz_i32;
    case INDEX_op_ctpop_i32:
        return TCG_TARGET_HAS_ctpop_i32;

    case INDEX_op_brcond2_i32:
    case INDEX_op_setcond2_i32:
        return TCG_TARGET_REG_BITS == 32;
    case INDEX_op_mov_i64:
    case INDEX_op_setcond_i64:
    case INDEX_op_brcond_i64:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
    case INDEX_op_add_i64:
    case INDEX_op_sub_i64:
    case INDEX_op_mul_i64:
    case INDEX_op_and_i64:
    case INDEX_op_or_i64:
    case INDEX_op_xor_i64:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
        return TCG_TARGET_REG_BITS == 64;

    case INDEX_op_movcond_i64:
        return TCG_TARGET_HAS_movcond_i64;
    case INDEX_op_div_i64:
    case INDEX_op_divu_i64:
        return TCG_TARGET_HAS_div_i64;
    case INDEX_op_rem_i64:
    case INDEX_op_remu_i64:
        return TCG_TARGET_HAS_rem_i64;
    case INDEX_op_div2_i64:
    case INDEX_op_divu2_i64:
        return TCG_TARGET_HAS_div2_i64;
    case INDEX_op_rotl_i64:
    case INDEX_op_rotr_i64:
        return TCG_TARGET_HAS_rot_i64;
    case INDEX_op_deposit_i64:
        return TCG_TARGET_HAS_deposit_i64;
    case INDEX_op_extract_i64:
        return TCG_TARGET_HAS_extract_i64;
    case INDEX_op_sextract_i64:
        return TCG_TARGET_HAS_sextract_i64;
    case INDEX_op_extract2_i64:
        return TCG_TARGET_HAS_extract2_i64;
    case INDEX_op_extrl_i64_i32:
        return TCG_TARGET_HAS_extrl_i64_i32;
    case INDEX_op_extrh_i64_i32:
        return TCG_TARGET_HAS_extrh_i64_i32;
    case INDEX_op_ext8s_i64:
        return TCG_TARGET_HAS_ext8s_i64;
    case INDEX_op_ext16s_i64:
        return TCG_TARGET_HAS_ext16s_i64;
    case INDEX_op_ext32s_i64:
        return TCG_TARGET_HAS_ext32s_i64;
    case INDEX_op_ext8u_i64:
        return TCG_TARGET_HAS_ext8u_i64;
    case INDEX_op_ext16u_i64:
        return TCG_TARGET_HAS_ext16u_i64;
    case INDEX_op_ext32u_i64:
        return TCG_TARGET_HAS_ext32u_i64;
    case INDEX_op_bswap16_i64:
        return TCG_TARGET_HAS_bswap16_i64;
    case INDEX_op_bswap32_i64:
        return TCG_TARGET_HAS_bswap32_i64;
    case INDEX_op_bswap64_i64:
        return TCG_TARGET_HAS_bswap64_i64;
    case INDEX_op_not_i64:
        return TCG_TARGET_HAS_not_i64;
    case INDEX_op_neg_i64:
        return TCG_TARGET_HAS_neg_i64;
    case INDEX_op_andc_i64:
        return TCG_TARGET_HAS_andc_i64;
    case INDEX_op_orc_i64:
        return TCG_TARGET_HAS_orc_i64;
    case INDEX_op_eqv_i64:
        return TCG_TARGET_HAS_eqv_i64;
    case INDEX_op_nand_i64:
        return TCG_TARGET_HAS_nand_i64;
    case INDEX_op_nor_i64:
        return TCG_TARGET_HAS_nor_i64;
    case INDEX_op_clz_i64:
        return TCG_TARGET_HAS_clz_i64;
    case INDEX_op_ctz_i64:
        return TCG_TARGET_HAS_ctz_i64;
    case INDEX_op_ctpop_i64:
        return TCG_TARGET_HAS_ctpop_i64;
    case INDEX_op_add2_i64:
        return TCG_TARGET_HAS_add2_i64;
    case INDEX_op_sub2_i64:
        return TCG_TARGET_HAS_sub2_i64;
    case INDEX_op_mulu2_i64:
        return TCG_TARGET_HAS_mulu2_i64;
    case INDEX_op_muls2_i64:
        return TCG_TARGET_HAS_muls2_i64;
    case INDEX_op_muluh_i64:
        return TCG_TARGET_HAS_muluh_i64;
    case INDEX_op_mulsh_i64:
        return TCG_TARGET_HAS_mulsh_i64;
    case INDEX_op_mov_vec:
    case INDEX_op_dup_vec:
    case INDEX_op_dupm_vec:
    case INDEX_op_ld_vec:
    case INDEX_op_st_vec:
    case INDEX_op_add_vec:
    case INDEX_op_sub_vec:
    case INDEX_op_and_vec:
    case INDEX_op_or_vec:
    case INDEX_op_xor_vec:
    case INDEX_op_cmp_vec:
        return have_vec;
    case INDEX_op_dup2_vec:
        return have_vec && TCG_TARGET_REG_BITS == 32;
    case INDEX_op_not_vec:
        return have_vec && TCG_TARGET_HAS_not_vec;
    case INDEX_op_neg_vec:
        return have_vec && TCG_TARGET_HAS_neg_vec;
    case INDEX_op_abs_vec:
        return have_vec && TCG_TARGET_HAS_abs_vec;
    case INDEX_op_andc_vec:
        return have_vec && TCG_TARGET_HAS_andc_vec;
    case INDEX_op_orc_vec:
        return have_vec && TCG_TARGET_HAS_orc_vec;
    case INDEX_op_nand_vec:
        return have_vec && TCG_TARGET_HAS_nand_vec;
    case INDEX_op_nor_vec:
        return have_vec && TCG_TARGET_HAS_nor_vec;
    case INDEX_op_eqv_vec:
        return have_vec && TCG_TARGET_HAS_eqv_vec;
    case INDEX_op_mul_vec:
        return have_vec && TCG_TARGET_HAS_mul_vec;
    case INDEX_op_shli_vec:
    case INDEX_op_shri_vec:
    case INDEX_op_sari_vec:
        return have_vec && TCG_TARGET_HAS_shi_vec;
    case INDEX_op_shls_vec:
    case INDEX_op_shrs_vec:
    case INDEX_op_sars_vec:
        return have_vec && TCG_TARGET_HAS_shs_vec;
    case INDEX_op_shlv_vec:
    case INDEX_op_shrv_vec:
    case INDEX_op_sarv_vec:
        return have_vec && TCG_TARGET_HAS_shv_vec;
    case INDEX_op_rotli_vec:
        return have_vec && TCG_TARGET_HAS_roti_vec;
    case INDEX_op_rotls_vec:
        return have_vec && TCG_TARGET_HAS_rots_vec;
    case INDEX_op_rotlv_vec:
    case INDEX_op_rotrv_vec:
        return have_vec && TCG_TARGET_HAS_rotv_vec;
    case INDEX_op_ssadd_vec:
    case INDEX_op_usadd_vec:
    case INDEX_op_sssub_vec:
    case INDEX_op_ussub_vec:
        return have_vec && TCG_TARGET_HAS_sat_vec;
    case INDEX_op_smin_vec:
    case INDEX_op_umin_vec:
    case INDEX_op_smax_vec:
    case INDEX_op_umax_vec:
        return have_vec && TCG_TARGET_HAS_minmax_vec;
    case INDEX_op_bitsel_vec:
        return have_vec && TCG_TARGET_HAS_bitsel_vec;
    case INDEX_op_cmpsel_vec:
        return have_vec && TCG_TARGET_HAS_cmpsel_vec;

    default:
        tcg_debug_assert(op > INDEX_op_last_generic && op < NB_OPS);
        return true;
    }
}

static TCGOp *tcg_op_alloc(TCGOpcode opc, unsigned nargs);
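/*
 * tcg_gen_callN() below packs a variable-length call op.  The resulting
 * args[] layout is:
 *   args[0 .. nr_out-1]                 output temps
 *   args[nr_out .. nr_out+nr_in-1]      input temps, in TCGHelperInfo order
 *   args[nr_out+nr_in]                  the function pointer
 *   args[nr_out+nr_in+1]                the TCGHelperInfo pointer
 * the last two being retrieved via tcg_call_func() and tcg_call_info().
 */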
void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args)
{
    const TCGHelperInfo *info;
    TCGv_i64 extend_free[MAX_CALL_IARGS];
    int n_extend = 0;
    TCGOp *op;
    int i, n, pi = 0, total_args;

    info = g_hash_table_lookup(helper_table, (gpointer)func);
    total_args = info->nr_out + info->nr_in + 2;
    op = tcg_op_alloc(INDEX_op_call, total_args);

#ifdef CONFIG_PLUGIN
    /* detect non-plugin helpers */
    if (tcg_ctx->plugin_insn && unlikely(strncmp(info->name, "plugin_", 7))) {
        tcg_ctx->plugin_insn->calls_helpers = true;
    }
#endif

    TCGOP_CALLO(op) = n = info->nr_out;
    switch (n) {
    case 0:
        tcg_debug_assert(ret == NULL);
        break;
    case 1:
        tcg_debug_assert(ret != NULL);
        op->args[pi++] = temp_arg(ret);
        break;
    case 2:
        tcg_debug_assert(ret != NULL);
        tcg_debug_assert(ret->base_type == ret->type + 1);
        tcg_debug_assert(ret->temp_subindex == 0);
        op->args[pi++] = temp_arg(ret);
        op->args[pi++] = temp_arg(ret + 1);
        break;
    default:
        g_assert_not_reached();
    }

    TCGOP_CALLI(op) = n = info->nr_in;
    for (i = 0; i < n; i++) {
        const TCGCallArgumentLoc *loc = &info->in[i];
        TCGTemp *ts = args[loc->arg_idx] + loc->tmp_subindex;

        switch (loc->kind) {
        case TCG_CALL_ARG_NORMAL:
            op->args[pi++] = temp_arg(ts);
            break;

        case TCG_CALL_ARG_EXTEND_U:
        case TCG_CALL_ARG_EXTEND_S:
            {
                TCGv_i64 temp = tcg_temp_new_i64();
                TCGv_i32 orig = temp_tcgv_i32(ts);

                if (loc->kind == TCG_CALL_ARG_EXTEND_S) {
                    tcg_gen_ext_i32_i64(temp, orig);
                } else {
                    tcg_gen_extu_i32_i64(temp, orig);
                }
                op->args[pi++] = tcgv_i64_arg(temp);
                extend_free[n_extend++] = temp;
            }
            break;

        default:
            g_assert_not_reached();
        }
    }
    op->args[pi++] = (uintptr_t)func;
    op->args[pi++] = (uintptr_t)info;
    tcg_debug_assert(pi == total_args);

    QTAILQ_INSERT_TAIL(&tcg_ctx->ops, op, link);

    tcg_debug_assert(n_extend < ARRAY_SIZE(extend_free));
    for (i = 0; i < n_extend; ++i) {
        tcg_temp_free_i64(extend_free[i]);
    }
}

static void tcg_reg_alloc_start(TCGContext *s)
{
    int i, n;

    for (i = 0, n = s->nb_temps; i < n; i++) {
        TCGTemp *ts = &s->temps[i];
        TCGTempVal val = TEMP_VAL_MEM;

        switch (ts->kind) {
        case TEMP_CONST:
            val = TEMP_VAL_CONST;
            break;
        case TEMP_FIXED:
            val = TEMP_VAL_REG;
            break;
        case TEMP_GLOBAL:
            break;
        case TEMP_NORMAL:
        case TEMP_EBB:
            val = TEMP_VAL_DEAD;
            /* fall through */
        case TEMP_LOCAL:
            ts->mem_allocated = 0;
            break;
        default:
            g_assert_not_reached();
        }
        ts->val_type = val;
    }

    memset(s->reg_to_temp, 0, sizeof(s->reg_to_temp));
}

static char *tcg_get_arg_str_ptr(TCGContext *s, char *buf, int buf_size,
                                 TCGTemp *ts)
{
    int idx = temp_idx(ts);

    switch (ts->kind) {
    case TEMP_FIXED:
    case TEMP_GLOBAL:
        pstrcpy(buf, buf_size, ts->name);
        break;
    case TEMP_LOCAL:
        snprintf(buf, buf_size, "loc%d", idx - s->nb_globals);
        break;
    case TEMP_EBB:
        snprintf(buf, buf_size, "ebb%d", idx - s->nb_globals);
        break;
    case TEMP_NORMAL:
        snprintf(buf, buf_size, "tmp%d", idx - s->nb_globals);
        break;
    case TEMP_CONST:
        switch (ts->type) {
        case TCG_TYPE_I32:
            snprintf(buf, buf_size, "$0x%x", (int32_t)ts->val);
            break;
#if TCG_TARGET_REG_BITS > 32
        case TCG_TYPE_I64:
            snprintf(buf, buf_size, "$0x%" PRIx64, ts->val);
            break;
#endif
        case TCG_TYPE_V64:
        case TCG_TYPE_V128:
        case TCG_TYPE_V256:
            snprintf(buf, buf_size, "v%d$0x%" PRIx64,
                     64 << (ts->type - TCG_TYPE_V64), ts->val);
            break;
        default:
            g_assert_not_reached();
        }
        break;
    }
    return buf;
}

static char *tcg_get_arg_str(TCGContext *s, char *buf,
                             int buf_size, TCGArg arg)
{
    return tcg_get_arg_str_ptr(s, buf, buf_size, arg_temp(arg));
}

static const char * const cond_name[] =
{
    [TCG_COND_NEVER] = "never",
    [TCG_COND_ALWAYS] = "always",
    [TCG_COND_EQ] = "eq",
    [TCG_COND_NE] = "ne",
    [TCG_COND_LT] = "lt",
    [TCG_COND_GE] = "ge",
    [TCG_COND_LE] = "le",
    [TCG_COND_GT] = "gt",
    [TCG_COND_LTU] = "ltu",
    [TCG_COND_GEU] = "geu",
    [TCG_COND_LEU] = "leu",
    [TCG_COND_GTU] = "gtu"
};
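/*
 * These name tables feed tcg_dump_ops(), which prints one op per line;
 * an illustrative line might look like
 *    add_i32 tmp2,tmp0,tmp1              dead: 1 2  pref=all
 * with liveness and register-preference annotations starting at column
 * 40 when present.
 */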
static const char * const ldst_name[] =
{
    [MO_UB]   = "ub",
    [MO_SB]   = "sb",
    [MO_LEUW] = "leuw",
    [MO_LESW] = "lesw",
    [MO_LEUL] = "leul",
    [MO_LESL] = "lesl",
    [MO_LEUQ] = "leq",
    [MO_BEUW] = "beuw",
    [MO_BESW] = "besw",
    [MO_BEUL] = "beul",
    [MO_BESL] = "besl",
    [MO_BEUQ] = "beq",
};

static const char * const alignment_name[(MO_AMASK >> MO_ASHIFT) + 1] = {
#ifdef TARGET_ALIGNED_ONLY
    [MO_UNALN >> MO_ASHIFT]    = "un+",
    [MO_ALIGN >> MO_ASHIFT]    = "",
#else
    [MO_UNALN >> MO_ASHIFT]    = "",
    [MO_ALIGN >> MO_ASHIFT]    = "al+",
#endif
    [MO_ALIGN_2 >> MO_ASHIFT]  = "al2+",
    [MO_ALIGN_4 >> MO_ASHIFT]  = "al4+",
    [MO_ALIGN_8 >> MO_ASHIFT]  = "al8+",
    [MO_ALIGN_16 >> MO_ASHIFT] = "al16+",
    [MO_ALIGN_32 >> MO_ASHIFT] = "al32+",
    [MO_ALIGN_64 >> MO_ASHIFT] = "al64+",
};

static const char bswap_flag_name[][6] = {
    [TCG_BSWAP_IZ]                = "iz",
    [TCG_BSWAP_OZ]                = "oz",
    [TCG_BSWAP_OS]                = "os",
    [TCG_BSWAP_IZ | TCG_BSWAP_OZ] = "iz,oz",
    [TCG_BSWAP_IZ | TCG_BSWAP_OS] = "iz,os",
};

static inline bool tcg_regset_single(TCGRegSet d)
{
    return (d & (d - 1)) == 0;
}

static inline TCGReg tcg_regset_first(TCGRegSet d)
{
    if (TCG_TARGET_NB_REGS <= 32) {
        return ctz32(d);
    } else {
        return ctz64(d);
    }
}

/* Return only the number of characters output -- no error return. */
#define ne_fprintf(...) \
    ({ int ret_ = fprintf(__VA_ARGS__); ret_ >= 0 ? ret_ : 0; })

static void tcg_dump_ops(TCGContext *s, FILE *f, bool have_prefs)
{
    char buf[128];
    TCGOp *op;

    QTAILQ_FOREACH(op, &s->ops, link) {
        int i, k, nb_oargs, nb_iargs, nb_cargs;
        const TCGOpDef *def;
        TCGOpcode c;
        int col = 0;

        c = op->opc;
        def = &tcg_op_defs[c];

        if (c == INDEX_op_insn_start) {
            nb_oargs = 0;
            col += ne_fprintf(f, "\n ----");

            for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
                target_ulong a;
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
                a = deposit64(op->args[i * 2], 32, 32, op->args[i * 2 + 1]);
#else
                a = op->args[i];
#endif
                col += ne_fprintf(f, " " TARGET_FMT_lx, a);
            }
        } else if (c == INDEX_op_call) {
            const TCGHelperInfo *info = tcg_call_info(op);
            void *func = tcg_call_func(op);

            /* variable number of arguments */
            nb_oargs = TCGOP_CALLO(op);
            nb_iargs = TCGOP_CALLI(op);
            nb_cargs = def->nb_cargs;

            col += ne_fprintf(f, " %s ", def->name);

            /*
             * Print the function name from TCGHelperInfo, if available.
             * Note that plugins have a template function for the info,
             * but the actual function pointer comes from the plugin.
             */
            if (func == info->func) {
                col += ne_fprintf(f, "%s", info->name);
            } else {
                col += ne_fprintf(f, "plugin(%p)", func);
            }

            col += ne_fprintf(f, ",$0x%x,$%d", info->flags, nb_oargs);
            for (i = 0; i < nb_oargs; i++) {
                col += ne_fprintf(f, ",%s", tcg_get_arg_str(s, buf, sizeof(buf),
                                                            op->args[i]));
            }
            for (i = 0; i < nb_iargs; i++) {
                TCGArg arg = op->args[nb_oargs + i];
                const char *t = tcg_get_arg_str(s, buf, sizeof(buf), arg);
                col += ne_fprintf(f, ",%s", t);
            }
        } else {
            col += ne_fprintf(f, " %s ", def->name);

            nb_oargs = def->nb_oargs;
            nb_iargs = def->nb_iargs;
            nb_cargs = def->nb_cargs;

            if (def->flags & TCG_OPF_VECTOR) {
                col += ne_fprintf(f, "v%d,e%d,", 64 << TCGOP_VECL(op),
                                  8 << TCGOP_VECE(op));
            }

            k = 0;
            for (i = 0; i < nb_oargs; i++) {
                const char *sep = k ? "," : "";
                col += ne_fprintf(f, "%s%s", sep,
                                  tcg_get_arg_str(s, buf, sizeof(buf),
                                                  op->args[k++]));
            }
            for (i = 0; i < nb_iargs; i++) {
                const char *sep = k ? "," : "";
                col += ne_fprintf(f, "%s%s", sep,
                                  tcg_get_arg_str(s, buf, sizeof(buf),
                                                  op->args[k++]));
            }
            switch (c) {
            case INDEX_op_brcond_i32:
            case INDEX_op_setcond_i32:
            case INDEX_op_movcond_i32:
            case INDEX_op_brcond2_i32:
            case INDEX_op_setcond2_i32:
            case INDEX_op_brcond_i64:
            case INDEX_op_setcond_i64:
            case INDEX_op_movcond_i64:
            case INDEX_op_cmp_vec:
            case INDEX_op_cmpsel_vec:
                if (op->args[k] < ARRAY_SIZE(cond_name)
                    && cond_name[op->args[k]]) {
                    col += ne_fprintf(f, ",%s", cond_name[op->args[k++]]);
                } else {
                    col += ne_fprintf(f, ",$0x%" TCG_PRIlx, op->args[k++]);
                }
                i = 1;
                break;
            case INDEX_op_qemu_ld_i32:
            case INDEX_op_qemu_st_i32:
            case INDEX_op_qemu_st8_i32:
            case INDEX_op_qemu_ld_i64:
            case INDEX_op_qemu_st_i64:
                {
                    MemOpIdx oi = op->args[k++];
                    MemOp op = get_memop(oi);
                    unsigned ix = get_mmuidx(oi);

                    if (op & ~(MO_AMASK | MO_BSWAP | MO_SSIZE)) {
                        col += ne_fprintf(f, ",$0x%x,%u", op, ix);
                    } else {
                        const char *s_al, *s_op;
                        s_al = alignment_name[(op & MO_AMASK) >> MO_ASHIFT];
                        s_op = ldst_name[op & (MO_BSWAP | MO_SSIZE)];
                        col += ne_fprintf(f, ",%s%s,%u", s_al, s_op, ix);
                    }
                    i = 1;
                }
                break;
            case INDEX_op_bswap16_i32:
            case INDEX_op_bswap16_i64:
            case INDEX_op_bswap32_i32:
            case INDEX_op_bswap32_i64:
            case INDEX_op_bswap64_i64:
                {
                    TCGArg flags = op->args[k];
                    const char *name = NULL;

                    if (flags < ARRAY_SIZE(bswap_flag_name)) {
                        name = bswap_flag_name[flags];
                    }
                    if (name) {
                        col += ne_fprintf(f, ",%s", name);
                    } else {
                        col += ne_fprintf(f, ",$0x%" TCG_PRIlx, flags);
                    }
                    i = k = 1;
                }
                break;
            default:
                i = 0;
                break;
            }
            switch (c) {
            case INDEX_op_set_label:
            case INDEX_op_br:
            case INDEX_op_brcond_i32:
            case INDEX_op_brcond_i64:
            case INDEX_op_brcond2_i32:
                col += ne_fprintf(f, "%s$L%d", k ? "," : "",
                                  arg_label(op->args[k])->id);
                i++, k++;
                break;
            default:
                break;
            }
            for (; i < nb_cargs; i++, k++) {
                col += ne_fprintf(f, "%s$0x%" TCG_PRIlx, k ? "," : "",
                                  op->args[k]);
            }
        }

        if (have_prefs || op->life) {
            for (; col < 40; ++col) {
                putc(' ', f);
            }
        }

        if (op->life) {
            unsigned life = op->life;

            if (life & (SYNC_ARG * 3)) {
                ne_fprintf(f, " sync:");
                for (i = 0; i < 2; ++i) {
                    if (life & (SYNC_ARG << i)) {
                        ne_fprintf(f, " %d", i);
                    }
                }
            }
            life /= DEAD_ARG;
            if (life) {
                ne_fprintf(f, " dead:");
                for (i = 0; life; ++i, life >>= 1) {
                    if (life & 1) {
                        ne_fprintf(f, " %d", i);
                    }
                }
            }
        }

        if (have_prefs) {
            for (i = 0; i < nb_oargs; ++i) {
                TCGRegSet set = output_pref(op, i);

                if (i == 0) {
                    ne_fprintf(f, " pref=");
                } else {
                    ne_fprintf(f, ",");
                }
                if (set == 0) {
                    ne_fprintf(f, "none");
                } else if (set == MAKE_64BIT_MASK(0, TCG_TARGET_NB_REGS)) {
                    ne_fprintf(f, "all");
#ifdef CONFIG_DEBUG_TCG
                } else if (tcg_regset_single(set)) {
                    TCGReg reg = tcg_regset_first(set);
                    ne_fprintf(f, "%s", tcg_target_reg_names[reg]);
#endif
                } else if (TCG_TARGET_NB_REGS <= 32) {
                    ne_fprintf(f, "0x%x", (uint32_t)set);
                } else {
                    ne_fprintf(f, "0x%" PRIx64, (uint64_t)set);
                }
            }
        }

        putc('\n', f);
    }
}

/* we give more priority to constraints with fewer registers */
static int get_constraint_priority(const TCGOpDef *def, int k)
{
    const TCGArgConstraint *arg_ct = &def->args_ct[k];
    int n = ctpop64(arg_ct->regs);

    /*
     * Sort constraints of a single register first, which includes output
     * aliases (which must exactly match the input already allocated).
     */
    if (n == 1 || arg_ct->oalias) {
        return INT_MAX;
    }

    /*
     * Sort register pairs next, first then second immediately after.
     * Arbitrarily sort multiple pairs by the index of the first reg;
     * there shouldn't be many pairs.
     */
    switch (arg_ct->pair) {
    case 1:
    case 3:
        return (k + 1) * 2;
    case 2:
        return (arg_ct->pair_index + 1) * 2 - 1;
    }

    /* Finally, sort by decreasing register count. */
    assert(n > 1);
    return -n;
}

/* sort from highest priority to lowest */
static void sort_constraints(TCGOpDef *def, int start, int n)
{
    int i, j;
    TCGArgConstraint *a = def->args_ct;

    for (i = 0; i < n; i++) {
        a[start + i].sort_index = start + i;
    }
    if (n <= 1) {
        return;
    }
    for (i = 0; i < n - 1; i++) {
        for (j = i + 1; j < n; j++) {
            int p1 = get_constraint_priority(def, a[start + i].sort_index);
            int p2 = get_constraint_priority(def, a[start + j].sort_index);
            if (p1 < p2) {
                int tmp = a[start + i].sort_index;
                a[start + i].sort_index = a[start + j].sort_index;
                a[start + j].sort_index = tmp;
            }
        }
    }
}
Since the signedness of an enum
2205 * is implementation defined, force the result to unsigned.
2206 */
2207 unsigned con_set = tcg_target_op_def(op);
2208 tcg_debug_assert(con_set < ARRAY_SIZE(constraint_sets));
2209 tdefs = &constraint_sets[con_set];
2210
2211 for (i = 0; i < nb_args; i++) {
2212 const char *ct_str = tdefs->args_ct_str[i];
2213 bool input_p = i >= def->nb_oargs;
2214
2215 /* Incomplete TCGTargetOpDef entry. */
2216 tcg_debug_assert(ct_str != NULL);
2217
2218 switch (*ct_str) {
2219 case '0' ... '9':
2220 o = *ct_str - '0';
2221 tcg_debug_assert(input_p);
2222 tcg_debug_assert(o < def->nb_oargs);
2223 tcg_debug_assert(def->args_ct[o].regs != 0);
2224 tcg_debug_assert(!def->args_ct[o].oalias);
2225 def->args_ct[i] = def->args_ct[o];
2226 /* The output sets oalias. */
2227 def->args_ct[o].oalias = 1;
2228 def->args_ct[o].alias_index = i;
2229 /* The input sets ialias. */
2230 def->args_ct[i].ialias = 1;
2231 def->args_ct[i].alias_index = o;
2232 if (def->args_ct[i].pair) {
2233 saw_alias_pair = true;
2234 }
2235 tcg_debug_assert(ct_str[1] == '\0');
2236 continue;
2237
2238 case '&':
2239 tcg_debug_assert(!input_p);
2240 def->args_ct[i].newreg = true;
2241 ct_str++;
2242 break;
2243
2244 case 'p': /* plus */
2245 /* Allocate to the register after the previous. */
2246 tcg_debug_assert(i > (input_p ? def->nb_oargs : 0));
2247 o = i - 1;
2248 tcg_debug_assert(!def->args_ct[o].pair);
2249 tcg_debug_assert(!def->args_ct[o].ct);
2250 def->args_ct[i] = (TCGArgConstraint){
2251 .pair = 2,
2252 .pair_index = o,
2253 .regs = def->args_ct[o].regs << 1,
2254 };
2255 def->args_ct[o].pair = 1;
2256 def->args_ct[o].pair_index = i;
2257 tcg_debug_assert(ct_str[1] == '\0');
2258 continue;
2259
2260 case 'm': /* minus */
2261 /* Allocate to the register before the previous. */
2262 tcg_debug_assert(i > (input_p ? def->nb_oargs : 0));
2263 o = i - 1;
2264 tcg_debug_assert(!def->args_ct[o].pair);
2265 tcg_debug_assert(!def->args_ct[o].ct);
2266 def->args_ct[i] = (TCGArgConstraint){
2267 .pair = 1,
2268 .pair_index = o,
2269 .regs = def->args_ct[o].regs >> 1,
2270 };
2271 def->args_ct[o].pair = 2;
2272 def->args_ct[o].pair_index = i;
2273 tcg_debug_assert(ct_str[1] == '\0');
2274 continue;
2275 }
2276
2277 do {
2278 switch (*ct_str) {
2279 case 'i':
2280 def->args_ct[i].ct |= TCG_CT_CONST;
2281 break;
2282
2283 /* Include all of the target-specific constraints. */
2284
2285 #undef CONST
2286 #define CONST(CASE, MASK) \
2287 case CASE: def->args_ct[i].ct |= MASK; break;
2288 #define REGS(CASE, MASK) \
2289 case CASE: def->args_ct[i].regs |= MASK; break;
2290
2291 #include "tcg-target-con-str.h"
2292
2293 #undef REGS
2294 #undef CONST
2295 default:
2296 case '0' ... '9':
2297 case '&':
2298 case 'p':
2299 case 'm':
2300 /* Typo in TCGTargetOpDef constraint. */
2301 g_assert_not_reached();
2302 }
2303 } while (*++ct_str != '\0');
2304 }
2305
2306 /* TCGTargetOpDef entry with too much information? */
2307 tcg_debug_assert(i == TCG_MAX_OP_ARGS || tdefs->args_ct_str[i] == NULL);
2308
2309 /*
2310 * Fix up output pairs that are aliased with inputs.
2311 * When we created the alias, we copied pair from the output.
2312 * There are three cases:
2313 * (1a) Pairs of inputs alias pairs of outputs.
2314 * (1b) One input aliases the first of a pair of outputs.
2315 * (2) One input aliases the second of a pair of outputs.
2316 *
2317 * Case 1a is handled by making sure that the pair_index'es are
2318 * properly updated so that they appear the same as a pair of inputs.
2319 *
2320 * Case 1b is handled by setting the pair_index of the input to
2321 * itself, simply so it doesn't point to an unrelated argument.
2322 * Since we don't encounter the "second" during the input allocation
2323 * phase, nothing happens with the second half of the input pair.
2324 *
2325 * Case 2 is handled by setting the second input to pair=3, the
2326 * first output to pair=3, and the pair_index'es to match.
2327 */
2328 if (saw_alias_pair) {
2329 for (i = def->nb_oargs; i < nb_args; i++) {
2330 /*
2331 * Since [0-9pm] must be alone in the constraint string,
2332 * the only way they can both be set is if the pair comes
2333 * from the output alias.
2334 */
2335 if (!def->args_ct[i].ialias) {
2336 continue;
2337 }
2338 switch (def->args_ct[i].pair) {
2339 case 0:
2340 break;
2341 case 1:
2342 o = def->args_ct[i].alias_index;
2343 o2 = def->args_ct[o].pair_index;
2344 tcg_debug_assert(def->args_ct[o].pair == 1);
2345 tcg_debug_assert(def->args_ct[o2].pair == 2);
2346 if (def->args_ct[o2].oalias) {
2347 /* Case 1a */
2348 i2 = def->args_ct[o2].alias_index;
2349 tcg_debug_assert(def->args_ct[i2].pair == 2);
2350 def->args_ct[i2].pair_index = i;
2351 def->args_ct[i].pair_index = i2;
2352 } else {
2353 /* Case 1b */
2354 def->args_ct[i].pair_index = i;
2355 }
2356 break;
2357 case 2:
2358 o = def->args_ct[i].alias_index;
2359 o2 = def->args_ct[o].pair_index;
2360 tcg_debug_assert(def->args_ct[o].pair == 2);
2361 tcg_debug_assert(def->args_ct[o2].pair == 1);
2362 if (def->args_ct[o2].oalias) {
2363 /* Case 1a */
2364 i2 = def->args_ct[o2].alias_index;
2365 tcg_debug_assert(def->args_ct[i2].pair == 1);
2366 def->args_ct[i2].pair_index = i;
2367 def->args_ct[i].pair_index = i2;
2368 } else {
2369 /* Case 2 */
2370 def->args_ct[i].pair = 3;
2371 def->args_ct[o2].pair = 3;
2372 def->args_ct[i].pair_index = o2;
2373 def->args_ct[o2].pair_index = i;
2374 }
2375 break;
2376 default:
2377 g_assert_not_reached();
2378 }
2379 }
2380 }
2381
2382 /* sort the constraints (XXX: this is just a heuristic) */
2383 sort_constraints(def, 0, def->nb_oargs);
2384 sort_constraints(def, def->nb_oargs, def->nb_iargs);
2385 }
2386 }
2387
2388 void tcg_op_remove(TCGContext *s, TCGOp *op)
2389 {
2390 TCGLabel *label;
2391
2392 switch (op->opc) {
2393 case INDEX_op_br:
2394 label = arg_label(op->args[0]);
2395 label->refs--;
2396 break;
2397 case INDEX_op_brcond_i32:
2398 case INDEX_op_brcond_i64:
2399 label = arg_label(op->args[3]);
2400 label->refs--;
2401 break;
2402 case INDEX_op_brcond2_i32:
2403 label = arg_label(op->args[5]);
2404 label->refs--;
2405 break;
2406 default:
2407 break;
2408 }
2409
2410 QTAILQ_REMOVE(&s->ops, op, link);
2411 QTAILQ_INSERT_TAIL(&s->free_ops, op, link);
2412 s->nb_ops--;
2413
2414 #ifdef CONFIG_PROFILER
2415 qatomic_set(&s->prof.del_op_count, s->prof.del_op_count + 1);
2416 #endif
2417 }
2418
2419 void tcg_remove_ops_after(TCGOp *op)
2420 {
2421 TCGContext *s = tcg_ctx;
2422
2423 while (true) {
2424 TCGOp *last = tcg_last_op();
2425 if (last == op) {
2426 return;
2427 }
2428 tcg_op_remove(s, last);
2429 }
2430 }
2431
2432 static TCGOp *tcg_op_alloc(TCGOpcode opc, unsigned nargs)
2433 {
2434 TCGContext *s = tcg_ctx;
2435 TCGOp *op = NULL;
2436
2437 if (unlikely(!QTAILQ_EMPTY(&s->free_ops))) {
2438 QTAILQ_FOREACH(op, &s->free_ops, link) {
2439 if (nargs <= op->nargs) {
2440 QTAILQ_REMOVE(&s->free_ops, op, link);
2441 nargs = op->nargs;
2442 goto found;
2443 }
2444 }
2445 }
2446
2447 /* Most opcodes have 3 or 4 operands: reduce fragmentation.
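   For example, a 3-argument request is rounded up to 4 below, so the
   entry freed by tcg_op_remove() for a typical 3-operand arithmetic op
   can later be recycled for any op needing up to 4 arguments, instead
   of leaving odd-sized entries on the free list.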
*/ 2448 nargs = MAX(4, nargs); 2449 op = tcg_malloc(sizeof(TCGOp) + sizeof(TCGArg) * nargs); 2450 2451 found: 2452 memset(op, 0, offsetof(TCGOp, link)); 2453 op->opc = opc; 2454 op->nargs = nargs; 2455 2456 /* Check for bitfield overflow. */ 2457 tcg_debug_assert(op->nargs == nargs); 2458 2459 s->nb_ops++; 2460 return op; 2461 } 2462 2463 TCGOp *tcg_emit_op(TCGOpcode opc, unsigned nargs) 2464 { 2465 TCGOp *op = tcg_op_alloc(opc, nargs); 2466 QTAILQ_INSERT_TAIL(&tcg_ctx->ops, op, link); 2467 return op; 2468 } 2469 2470 TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *old_op, 2471 TCGOpcode opc, unsigned nargs) 2472 { 2473 TCGOp *new_op = tcg_op_alloc(opc, nargs); 2474 QTAILQ_INSERT_BEFORE(old_op, new_op, link); 2475 return new_op; 2476 } 2477 2478 TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *old_op, 2479 TCGOpcode opc, unsigned nargs) 2480 { 2481 TCGOp *new_op = tcg_op_alloc(opc, nargs); 2482 QTAILQ_INSERT_AFTER(&s->ops, old_op, new_op, link); 2483 return new_op; 2484 } 2485 2486 /* Reachable analysis : remove unreachable code. */ 2487 static void reachable_code_pass(TCGContext *s) 2488 { 2489 TCGOp *op, *op_next; 2490 bool dead = false; 2491 2492 QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) { 2493 bool remove = dead; 2494 TCGLabel *label; 2495 2496 switch (op->opc) { 2497 case INDEX_op_set_label: 2498 label = arg_label(op->args[0]); 2499 if (label->refs == 0) { 2500 /* 2501 * While there is an occasional backward branch, virtually 2502 * all branches generated by the translators are forward. 2503 * Which means that generally we will have already removed 2504 * all references to the label that will be, and there is 2505 * little to be gained by iterating. 2506 */ 2507 remove = true; 2508 } else { 2509 /* Once we see a label, insns become live again. */ 2510 dead = false; 2511 remove = false; 2512 2513 /* 2514 * Optimization can fold conditional branches to unconditional. 2515 * If we find a label with one reference which is preceded by 2516 * an unconditional branch to it, remove both. This needed to 2517 * wait until the dead code in between them was removed. 2518 */ 2519 if (label->refs == 1) { 2520 TCGOp *op_prev = QTAILQ_PREV(op, link); 2521 if (op_prev->opc == INDEX_op_br && 2522 label == arg_label(op_prev->args[0])) { 2523 tcg_op_remove(s, op_prev); 2524 remove = true; 2525 } 2526 } 2527 } 2528 break; 2529 2530 case INDEX_op_br: 2531 case INDEX_op_exit_tb: 2532 case INDEX_op_goto_ptr: 2533 /* Unconditional branches; everything following is dead. */ 2534 dead = true; 2535 break; 2536 2537 case INDEX_op_call: 2538 /* Notice noreturn helper calls, raising exceptions. */ 2539 if (tcg_call_flags(op) & TCG_CALL_NO_RETURN) { 2540 dead = true; 2541 } 2542 break; 2543 2544 case INDEX_op_insn_start: 2545 /* Never remove -- we need to keep these for unwind. */ 2546 remove = false; 2547 break; 2548 2549 default: 2550 break; 2551 } 2552 2553 if (remove) { 2554 tcg_op_remove(s, op); 2555 } 2556 } 2557 } 2558 2559 #define TS_DEAD 1 2560 #define TS_MEM 2 2561 2562 #define IS_DEAD_ARG(n) (arg_life & (DEAD_ARG << (n))) 2563 #define NEED_SYNC_ARG(n) (arg_life & (SYNC_ARG << (n))) 2564 2565 /* For liveness_pass_1, the register preferences for a given temp. */ 2566 static inline TCGRegSet *la_temp_pref(TCGTemp *ts) 2567 { 2568 return ts->state_ptr; 2569 } 2570 2571 /* For liveness_pass_1, reset the preferences for a given temp to the 2572 * maximal regset for its type. 2573 */ 2574 static inline void la_reset_pref(TCGTemp *ts) 2575 { 2576 *la_temp_pref(ts) 2577 = (ts->state == TS_DEAD ? 
0 : tcg_target_available_regs[ts->type]);
2578 }
2579
2580 /* liveness analysis: end of function: all temps are dead, and globals
2581 should be in memory. */
2582 static void la_func_end(TCGContext *s, int ng, int nt)
2583 {
2584 int i;
2585
2586 for (i = 0; i < ng; ++i) {
2587 s->temps[i].state = TS_DEAD | TS_MEM;
2588 la_reset_pref(&s->temps[i]);
2589 }
2590 for (i = ng; i < nt; ++i) {
2591 s->temps[i].state = TS_DEAD;
2592 la_reset_pref(&s->temps[i]);
2593 }
2594 }
2595
2596 /* liveness analysis: end of basic block: all temps are dead, globals
2597 and local temps should be in memory. */
2598 static void la_bb_end(TCGContext *s, int ng, int nt)
2599 {
2600 int i;
2601
2602 for (i = 0; i < nt; ++i) {
2603 TCGTemp *ts = &s->temps[i];
2604 int state;
2605
2606 switch (ts->kind) {
2607 case TEMP_FIXED:
2608 case TEMP_GLOBAL:
2609 case TEMP_LOCAL:
2610 state = TS_DEAD | TS_MEM;
2611 break;
2612 case TEMP_NORMAL:
2613 case TEMP_EBB:
2614 case TEMP_CONST:
2615 state = TS_DEAD;
2616 break;
2617 default:
2618 g_assert_not_reached();
2619 }
2620 ts->state = state;
2621 la_reset_pref(ts);
2622 }
2623 }
2624
2625 /* liveness analysis: sync globals back to memory. */
2626 static void la_global_sync(TCGContext *s, int ng)
2627 {
2628 int i;
2629
2630 for (i = 0; i < ng; ++i) {
2631 int state = s->temps[i].state;
2632 s->temps[i].state = state | TS_MEM;
2633 if (state == TS_DEAD) {
2634 /* If the global was previously dead, reset prefs. */
2635 la_reset_pref(&s->temps[i]);
2636 }
2637 }
2638 }
2639
2640 /*
2641 * liveness analysis: conditional branch: all temps are dead unless
2642 * explicitly live-across-conditional-branch, globals and local temps
2643 * should be synced.
2644 */
2645 static void la_bb_sync(TCGContext *s, int ng, int nt)
2646 {
2647 la_global_sync(s, ng);
2648
2649 for (int i = ng; i < nt; ++i) {
2650 TCGTemp *ts = &s->temps[i];
2651 int state;
2652
2653 switch (ts->kind) {
2654 case TEMP_LOCAL:
2655 state = ts->state;
2656 ts->state = state | TS_MEM;
2657 if (state != TS_DEAD) {
2658 continue;
2659 }
2660 break;
2661 case TEMP_NORMAL:
2662 s->temps[i].state = TS_DEAD;
2663 break;
2664 case TEMP_EBB:
2665 case TEMP_CONST:
2666 continue;
2667 default:
2668 g_assert_not_reached();
2669 }
2670 la_reset_pref(&s->temps[i]);
2671 }
2672 }
2673
2674 /* liveness analysis: sync globals back to memory and kill. */
2675 static void la_global_kill(TCGContext *s, int ng)
2676 {
2677 int i;
2678
2679 for (i = 0; i < ng; i++) {
2680 s->temps[i].state = TS_DEAD | TS_MEM;
2681 la_reset_pref(&s->temps[i]);
2682 }
2683 }
2684
2685 /* liveness analysis: note live globals crossing calls. */
2686 static void la_cross_call(TCGContext *s, int nt)
2687 {
2688 TCGRegSet mask = ~tcg_target_call_clobber_regs;
2689 int i;
2690
2691 for (i = 0; i < nt; i++) {
2692 TCGTemp *ts = &s->temps[i];
2693 if (!(ts->state & TS_DEAD)) {
2694 TCGRegSet *pset = la_temp_pref(ts);
2695 TCGRegSet set = *pset;
2696
2697 set &= mask;
2698 /* If the combination is not possible, restart. */
2699 if (set == 0) {
2700 set = tcg_target_available_regs[ts->type] & mask;
2701 }
2702 *pset = set;
2703 }
2704 }
2705 }
2706
2707 /* Liveness analysis: update the opc_arg_life array to tell whether a
2708 given input argument is dead. Instructions updating dead
2709 temporaries are removed.
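   The result is encoded per argument in op->life: bit (DEAD_ARG << n)
   set means argument n dies at this op, and (SYNC_ARG << n) means it
   must be synced back to its memory slot. For instance, for an op with
   one output and two inputs whose second input is unused afterwards,
   this pass sets DEAD_ARG << 2, which the register allocator later
   tests with IS_DEAD_ARG(2).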
*/ 2710 static void liveness_pass_1(TCGContext *s) 2711 { 2712 int nb_globals = s->nb_globals; 2713 int nb_temps = s->nb_temps; 2714 TCGOp *op, *op_prev; 2715 TCGRegSet *prefs; 2716 int i; 2717 2718 prefs = tcg_malloc(sizeof(TCGRegSet) * nb_temps); 2719 for (i = 0; i < nb_temps; ++i) { 2720 s->temps[i].state_ptr = prefs + i; 2721 } 2722 2723 /* ??? Should be redundant with the exit_tb that ends the TB. */ 2724 la_func_end(s, nb_globals, nb_temps); 2725 2726 QTAILQ_FOREACH_REVERSE_SAFE(op, &s->ops, link, op_prev) { 2727 int nb_iargs, nb_oargs; 2728 TCGOpcode opc_new, opc_new2; 2729 bool have_opc_new2; 2730 TCGLifeData arg_life = 0; 2731 TCGTemp *ts; 2732 TCGOpcode opc = op->opc; 2733 const TCGOpDef *def = &tcg_op_defs[opc]; 2734 2735 switch (opc) { 2736 case INDEX_op_call: 2737 { 2738 const TCGHelperInfo *info = tcg_call_info(op); 2739 int call_flags = tcg_call_flags(op); 2740 2741 nb_oargs = TCGOP_CALLO(op); 2742 nb_iargs = TCGOP_CALLI(op); 2743 2744 /* pure functions can be removed if their result is unused */ 2745 if (call_flags & TCG_CALL_NO_SIDE_EFFECTS) { 2746 for (i = 0; i < nb_oargs; i++) { 2747 ts = arg_temp(op->args[i]); 2748 if (ts->state != TS_DEAD) { 2749 goto do_not_remove_call; 2750 } 2751 } 2752 goto do_remove; 2753 } 2754 do_not_remove_call: 2755 2756 /* Output args are dead. */ 2757 for (i = 0; i < nb_oargs; i++) { 2758 ts = arg_temp(op->args[i]); 2759 if (ts->state & TS_DEAD) { 2760 arg_life |= DEAD_ARG << i; 2761 } 2762 if (ts->state & TS_MEM) { 2763 arg_life |= SYNC_ARG << i; 2764 } 2765 ts->state = TS_DEAD; 2766 la_reset_pref(ts); 2767 } 2768 2769 /* Not used -- it will be tcg_target_call_oarg_reg(). */ 2770 memset(op->output_pref, 0, sizeof(op->output_pref)); 2771 2772 if (!(call_flags & (TCG_CALL_NO_WRITE_GLOBALS | 2773 TCG_CALL_NO_READ_GLOBALS))) { 2774 la_global_kill(s, nb_globals); 2775 } else if (!(call_flags & TCG_CALL_NO_READ_GLOBALS)) { 2776 la_global_sync(s, nb_globals); 2777 } 2778 2779 /* Record arguments that die in this helper. */ 2780 for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) { 2781 ts = arg_temp(op->args[i]); 2782 if (ts->state & TS_DEAD) { 2783 arg_life |= DEAD_ARG << i; 2784 } 2785 } 2786 2787 /* For all live registers, remove call-clobbered prefs. */ 2788 la_cross_call(s, nb_temps); 2789 2790 /* 2791 * Input arguments are live for preceding opcodes. 2792 * 2793 * For those arguments that die, and will be allocated in 2794 * registers, clear the register set for that arg, to be 2795 * filled in below. For args that will be on the stack, 2796 * reset to any available reg. Process arguments in reverse 2797 * order so that if a temp is used more than once, the stack 2798 * reset to max happens before the register reset to 0. 2799 */ 2800 for (i = nb_iargs - 1; i >= 0; i--) { 2801 const TCGCallArgumentLoc *loc = &info->in[i]; 2802 ts = arg_temp(op->args[nb_oargs + i]); 2803 2804 if (ts->state & TS_DEAD) { 2805 switch (loc->kind) { 2806 case TCG_CALL_ARG_NORMAL: 2807 case TCG_CALL_ARG_EXTEND_U: 2808 case TCG_CALL_ARG_EXTEND_S: 2809 if (REG_P(loc)) { 2810 *la_temp_pref(ts) = 0; 2811 break; 2812 } 2813 /* fall through */ 2814 default: 2815 *la_temp_pref(ts) = 2816 tcg_target_available_regs[ts->type]; 2817 break; 2818 } 2819 ts->state &= ~TS_DEAD; 2820 } 2821 } 2822 2823 /* 2824 * For each input argument, add its input register to prefs. 2825 * If a temp is used once, this produces a single set bit; 2826 * if a temp is used multiple times, this produces a set. 
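 * For example, assuming each argument occupies a single register
 * slot, a temp passed as both the first and third arguments of a
 * call ends up preferring { tcg_target_call_iarg_regs[0],
 * tcg_target_call_iarg_regs[2] }, either of which avoids a move
 * before the call.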
2827 */ 2828 for (i = 0; i < nb_iargs; i++) { 2829 const TCGCallArgumentLoc *loc = &info->in[i]; 2830 ts = arg_temp(op->args[nb_oargs + i]); 2831 2832 switch (loc->kind) { 2833 case TCG_CALL_ARG_NORMAL: 2834 case TCG_CALL_ARG_EXTEND_U: 2835 case TCG_CALL_ARG_EXTEND_S: 2836 if (REG_P(loc)) { 2837 tcg_regset_set_reg(*la_temp_pref(ts), 2838 tcg_target_call_iarg_regs[loc->arg_slot]); 2839 } 2840 break; 2841 default: 2842 break; 2843 } 2844 } 2845 } 2846 break; 2847 case INDEX_op_insn_start: 2848 break; 2849 case INDEX_op_discard: 2850 /* mark the temporary as dead */ 2851 ts = arg_temp(op->args[0]); 2852 ts->state = TS_DEAD; 2853 la_reset_pref(ts); 2854 break; 2855 2856 case INDEX_op_add2_i32: 2857 opc_new = INDEX_op_add_i32; 2858 goto do_addsub2; 2859 case INDEX_op_sub2_i32: 2860 opc_new = INDEX_op_sub_i32; 2861 goto do_addsub2; 2862 case INDEX_op_add2_i64: 2863 opc_new = INDEX_op_add_i64; 2864 goto do_addsub2; 2865 case INDEX_op_sub2_i64: 2866 opc_new = INDEX_op_sub_i64; 2867 do_addsub2: 2868 nb_iargs = 4; 2869 nb_oargs = 2; 2870 /* Test if the high part of the operation is dead, but not 2871 the low part. The result can be optimized to a simple 2872 add or sub. This happens often for x86_64 guest when the 2873 cpu mode is set to 32 bit. */ 2874 if (arg_temp(op->args[1])->state == TS_DEAD) { 2875 if (arg_temp(op->args[0])->state == TS_DEAD) { 2876 goto do_remove; 2877 } 2878 /* Replace the opcode and adjust the args in place, 2879 leaving 3 unused args at the end. */ 2880 op->opc = opc = opc_new; 2881 op->args[1] = op->args[2]; 2882 op->args[2] = op->args[4]; 2883 /* Fall through and mark the single-word operation live. */ 2884 nb_iargs = 2; 2885 nb_oargs = 1; 2886 } 2887 goto do_not_remove; 2888 2889 case INDEX_op_mulu2_i32: 2890 opc_new = INDEX_op_mul_i32; 2891 opc_new2 = INDEX_op_muluh_i32; 2892 have_opc_new2 = TCG_TARGET_HAS_muluh_i32; 2893 goto do_mul2; 2894 case INDEX_op_muls2_i32: 2895 opc_new = INDEX_op_mul_i32; 2896 opc_new2 = INDEX_op_mulsh_i32; 2897 have_opc_new2 = TCG_TARGET_HAS_mulsh_i32; 2898 goto do_mul2; 2899 case INDEX_op_mulu2_i64: 2900 opc_new = INDEX_op_mul_i64; 2901 opc_new2 = INDEX_op_muluh_i64; 2902 have_opc_new2 = TCG_TARGET_HAS_muluh_i64; 2903 goto do_mul2; 2904 case INDEX_op_muls2_i64: 2905 opc_new = INDEX_op_mul_i64; 2906 opc_new2 = INDEX_op_mulsh_i64; 2907 have_opc_new2 = TCG_TARGET_HAS_mulsh_i64; 2908 goto do_mul2; 2909 do_mul2: 2910 nb_iargs = 2; 2911 nb_oargs = 2; 2912 if (arg_temp(op->args[1])->state == TS_DEAD) { 2913 if (arg_temp(op->args[0])->state == TS_DEAD) { 2914 /* Both parts of the operation are dead. */ 2915 goto do_remove; 2916 } 2917 /* The high part of the operation is dead; generate the low. */ 2918 op->opc = opc = opc_new; 2919 op->args[1] = op->args[2]; 2920 op->args[2] = op->args[3]; 2921 } else if (arg_temp(op->args[0])->state == TS_DEAD && have_opc_new2) { 2922 /* The low part of the operation is dead; generate the high. */ 2923 op->opc = opc = opc_new2; 2924 op->args[0] = op->args[1]; 2925 op->args[1] = op->args[2]; 2926 op->args[2] = op->args[3]; 2927 } else { 2928 goto do_not_remove; 2929 } 2930 /* Mark the single-word operation live. */ 2931 nb_oargs = 1; 2932 goto do_not_remove; 2933 2934 default: 2935 /* XXX: optimize by hardcoding common cases (e.g. triadic ops) */ 2936 nb_iargs = def->nb_iargs; 2937 nb_oargs = def->nb_oargs; 2938 2939 /* Test if the operation can be removed because all 2940 its outputs are dead. 
We assume that nb_oargs == 0 2941 implies side effects */ 2942 if (!(def->flags & TCG_OPF_SIDE_EFFECTS) && nb_oargs != 0) { 2943 for (i = 0; i < nb_oargs; i++) { 2944 if (arg_temp(op->args[i])->state != TS_DEAD) { 2945 goto do_not_remove; 2946 } 2947 } 2948 goto do_remove; 2949 } 2950 goto do_not_remove; 2951 2952 do_remove: 2953 tcg_op_remove(s, op); 2954 break; 2955 2956 do_not_remove: 2957 for (i = 0; i < nb_oargs; i++) { 2958 ts = arg_temp(op->args[i]); 2959 2960 /* Remember the preference of the uses that followed. */ 2961 if (i < ARRAY_SIZE(op->output_pref)) { 2962 op->output_pref[i] = *la_temp_pref(ts); 2963 } 2964 2965 /* Output args are dead. */ 2966 if (ts->state & TS_DEAD) { 2967 arg_life |= DEAD_ARG << i; 2968 } 2969 if (ts->state & TS_MEM) { 2970 arg_life |= SYNC_ARG << i; 2971 } 2972 ts->state = TS_DEAD; 2973 la_reset_pref(ts); 2974 } 2975 2976 /* If end of basic block, update. */ 2977 if (def->flags & TCG_OPF_BB_EXIT) { 2978 la_func_end(s, nb_globals, nb_temps); 2979 } else if (def->flags & TCG_OPF_COND_BRANCH) { 2980 la_bb_sync(s, nb_globals, nb_temps); 2981 } else if (def->flags & TCG_OPF_BB_END) { 2982 la_bb_end(s, nb_globals, nb_temps); 2983 } else if (def->flags & TCG_OPF_SIDE_EFFECTS) { 2984 la_global_sync(s, nb_globals); 2985 if (def->flags & TCG_OPF_CALL_CLOBBER) { 2986 la_cross_call(s, nb_temps); 2987 } 2988 } 2989 2990 /* Record arguments that die in this opcode. */ 2991 for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) { 2992 ts = arg_temp(op->args[i]); 2993 if (ts->state & TS_DEAD) { 2994 arg_life |= DEAD_ARG << i; 2995 } 2996 } 2997 2998 /* Input arguments are live for preceding opcodes. */ 2999 for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) { 3000 ts = arg_temp(op->args[i]); 3001 if (ts->state & TS_DEAD) { 3002 /* For operands that were dead, initially allow 3003 all regs for the type. */ 3004 *la_temp_pref(ts) = tcg_target_available_regs[ts->type]; 3005 ts->state &= ~TS_DEAD; 3006 } 3007 } 3008 3009 /* Incorporate constraints for this operand. */ 3010 switch (opc) { 3011 case INDEX_op_mov_i32: 3012 case INDEX_op_mov_i64: 3013 /* Note that these are TCG_OPF_NOT_PRESENT and do not 3014 have proper constraints. That said, special case 3015 moves to propagate preferences backward. */ 3016 if (IS_DEAD_ARG(1)) { 3017 *la_temp_pref(arg_temp(op->args[0])) 3018 = *la_temp_pref(arg_temp(op->args[1])); 3019 } 3020 break; 3021 3022 default: 3023 for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) { 3024 const TCGArgConstraint *ct = &def->args_ct[i]; 3025 TCGRegSet set, *pset; 3026 3027 ts = arg_temp(op->args[i]); 3028 pset = la_temp_pref(ts); 3029 set = *pset; 3030 3031 set &= ct->regs; 3032 if (ct->ialias) { 3033 set &= output_pref(op, ct->alias_index); 3034 } 3035 /* If the combination is not possible, restart. */ 3036 if (set == 0) { 3037 set = ct->regs; 3038 } 3039 *pset = set; 3040 } 3041 break; 3042 } 3043 break; 3044 } 3045 op->life = arg_life; 3046 } 3047 } 3048 3049 /* Liveness analysis: Convert indirect regs to direct temporaries. */ 3050 static bool liveness_pass_2(TCGContext *s) 3051 { 3052 int nb_globals = s->nb_globals; 3053 int nb_temps, i; 3054 bool changes = false; 3055 TCGOp *op, *op_next; 3056 3057 /* Create a temporary for each indirect global. 
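   Every use of an indirect global is rewritten below to use its
   "direct" shadow temp instead, with explicit ld/st ops inserted to
   load the value before use and store it back after modification;
   the TS_DEAD / TS_MEM state tracks whether the shadow currently
   holds the value.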
*/ 3058 for (i = 0; i < nb_globals; ++i) { 3059 TCGTemp *its = &s->temps[i]; 3060 if (its->indirect_reg) { 3061 TCGTemp *dts = tcg_temp_alloc(s); 3062 dts->type = its->type; 3063 dts->base_type = its->base_type; 3064 dts->kind = TEMP_EBB; 3065 its->state_ptr = dts; 3066 } else { 3067 its->state_ptr = NULL; 3068 } 3069 /* All globals begin dead. */ 3070 its->state = TS_DEAD; 3071 } 3072 for (nb_temps = s->nb_temps; i < nb_temps; ++i) { 3073 TCGTemp *its = &s->temps[i]; 3074 its->state_ptr = NULL; 3075 its->state = TS_DEAD; 3076 } 3077 3078 QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) { 3079 TCGOpcode opc = op->opc; 3080 const TCGOpDef *def = &tcg_op_defs[opc]; 3081 TCGLifeData arg_life = op->life; 3082 int nb_iargs, nb_oargs, call_flags; 3083 TCGTemp *arg_ts, *dir_ts; 3084 3085 if (opc == INDEX_op_call) { 3086 nb_oargs = TCGOP_CALLO(op); 3087 nb_iargs = TCGOP_CALLI(op); 3088 call_flags = tcg_call_flags(op); 3089 } else { 3090 nb_iargs = def->nb_iargs; 3091 nb_oargs = def->nb_oargs; 3092 3093 /* Set flags similar to how calls require. */ 3094 if (def->flags & TCG_OPF_COND_BRANCH) { 3095 /* Like reading globals: sync_globals */ 3096 call_flags = TCG_CALL_NO_WRITE_GLOBALS; 3097 } else if (def->flags & TCG_OPF_BB_END) { 3098 /* Like writing globals: save_globals */ 3099 call_flags = 0; 3100 } else if (def->flags & TCG_OPF_SIDE_EFFECTS) { 3101 /* Like reading globals: sync_globals */ 3102 call_flags = TCG_CALL_NO_WRITE_GLOBALS; 3103 } else { 3104 /* No effect on globals. */ 3105 call_flags = (TCG_CALL_NO_READ_GLOBALS | 3106 TCG_CALL_NO_WRITE_GLOBALS); 3107 } 3108 } 3109 3110 /* Make sure that input arguments are available. */ 3111 for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) { 3112 arg_ts = arg_temp(op->args[i]); 3113 dir_ts = arg_ts->state_ptr; 3114 if (dir_ts && arg_ts->state == TS_DEAD) { 3115 TCGOpcode lopc = (arg_ts->type == TCG_TYPE_I32 3116 ? INDEX_op_ld_i32 3117 : INDEX_op_ld_i64); 3118 TCGOp *lop = tcg_op_insert_before(s, op, lopc, 3); 3119 3120 lop->args[0] = temp_arg(dir_ts); 3121 lop->args[1] = temp_arg(arg_ts->mem_base); 3122 lop->args[2] = arg_ts->mem_offset; 3123 3124 /* Loaded, but synced with memory. */ 3125 arg_ts->state = TS_MEM; 3126 } 3127 } 3128 3129 /* Perform input replacement, and mark inputs that became dead. 3130 No action is required except keeping temp_state up to date 3131 so that we reload when needed. */ 3132 for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) { 3133 arg_ts = arg_temp(op->args[i]); 3134 dir_ts = arg_ts->state_ptr; 3135 if (dir_ts) { 3136 op->args[i] = temp_arg(dir_ts); 3137 changes = true; 3138 if (IS_DEAD_ARG(i)) { 3139 arg_ts->state = TS_DEAD; 3140 } 3141 } 3142 } 3143 3144 /* Liveness analysis should ensure that the following are 3145 all correct, for call sites and basic block end points. */ 3146 if (call_flags & TCG_CALL_NO_READ_GLOBALS) { 3147 /* Nothing to do */ 3148 } else if (call_flags & TCG_CALL_NO_WRITE_GLOBALS) { 3149 for (i = 0; i < nb_globals; ++i) { 3150 /* Liveness should see that globals are synced back, 3151 that is, either TS_DEAD or TS_MEM. */ 3152 arg_ts = &s->temps[i]; 3153 tcg_debug_assert(arg_ts->state_ptr == 0 3154 || arg_ts->state != 0); 3155 } 3156 } else { 3157 for (i = 0; i < nb_globals; ++i) { 3158 /* Liveness should see that globals are saved back, 3159 that is, TS_DEAD, waiting to be reloaded. */ 3160 arg_ts = &s->temps[i]; 3161 tcg_debug_assert(arg_ts->state_ptr == 0 3162 || arg_ts->state == TS_DEAD); 3163 } 3164 } 3165 3166 /* Outputs become available. 
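   Note the special case below: a mov into an indirect global that is
   both synced and dead collapses into a plain store of the mov's
   input, and the mov itself is removed.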
*/ 3167 if (opc == INDEX_op_mov_i32 || opc == INDEX_op_mov_i64) { 3168 arg_ts = arg_temp(op->args[0]); 3169 dir_ts = arg_ts->state_ptr; 3170 if (dir_ts) { 3171 op->args[0] = temp_arg(dir_ts); 3172 changes = true; 3173 3174 /* The output is now live and modified. */ 3175 arg_ts->state = 0; 3176 3177 if (NEED_SYNC_ARG(0)) { 3178 TCGOpcode sopc = (arg_ts->type == TCG_TYPE_I32 3179 ? INDEX_op_st_i32 3180 : INDEX_op_st_i64); 3181 TCGOp *sop = tcg_op_insert_after(s, op, sopc, 3); 3182 TCGTemp *out_ts = dir_ts; 3183 3184 if (IS_DEAD_ARG(0)) { 3185 out_ts = arg_temp(op->args[1]); 3186 arg_ts->state = TS_DEAD; 3187 tcg_op_remove(s, op); 3188 } else { 3189 arg_ts->state = TS_MEM; 3190 } 3191 3192 sop->args[0] = temp_arg(out_ts); 3193 sop->args[1] = temp_arg(arg_ts->mem_base); 3194 sop->args[2] = arg_ts->mem_offset; 3195 } else { 3196 tcg_debug_assert(!IS_DEAD_ARG(0)); 3197 } 3198 } 3199 } else { 3200 for (i = 0; i < nb_oargs; i++) { 3201 arg_ts = arg_temp(op->args[i]); 3202 dir_ts = arg_ts->state_ptr; 3203 if (!dir_ts) { 3204 continue; 3205 } 3206 op->args[i] = temp_arg(dir_ts); 3207 changes = true; 3208 3209 /* The output is now live and modified. */ 3210 arg_ts->state = 0; 3211 3212 /* Sync outputs upon their last write. */ 3213 if (NEED_SYNC_ARG(i)) { 3214 TCGOpcode sopc = (arg_ts->type == TCG_TYPE_I32 3215 ? INDEX_op_st_i32 3216 : INDEX_op_st_i64); 3217 TCGOp *sop = tcg_op_insert_after(s, op, sopc, 3); 3218 3219 sop->args[0] = temp_arg(dir_ts); 3220 sop->args[1] = temp_arg(arg_ts->mem_base); 3221 sop->args[2] = arg_ts->mem_offset; 3222 3223 arg_ts->state = TS_MEM; 3224 } 3225 /* Drop outputs that are dead. */ 3226 if (IS_DEAD_ARG(i)) { 3227 arg_ts->state = TS_DEAD; 3228 } 3229 } 3230 } 3231 } 3232 3233 return changes; 3234 } 3235 3236 static void temp_allocate_frame(TCGContext *s, TCGTemp *ts) 3237 { 3238 int size = tcg_type_size(ts->type); 3239 int align; 3240 intptr_t off; 3241 3242 switch (ts->type) { 3243 case TCG_TYPE_I32: 3244 align = 4; 3245 break; 3246 case TCG_TYPE_I64: 3247 case TCG_TYPE_V64: 3248 align = 8; 3249 break; 3250 case TCG_TYPE_V128: 3251 case TCG_TYPE_V256: 3252 /* Note that we do not require aligned storage for V256. */ 3253 align = 16; 3254 break; 3255 default: 3256 g_assert_not_reached(); 3257 } 3258 3259 /* 3260 * Assume the stack is sufficiently aligned. 3261 * This affects e.g. ARM NEON, where we have 8 byte stack alignment 3262 * and do not require 16 byte vector alignment. This seems slightly 3263 * easier than fully parameterizing the above switch statement. 3264 */ 3265 align = MIN(TCG_TARGET_STACK_ALIGN, align); 3266 off = ROUND_UP(s->current_frame_offset, align); 3267 3268 /* If we've exhausted the stack frame, restart with a smaller TB. */ 3269 if (off + size > s->frame_end) { 3270 tcg_raise_tb_overflow(s); 3271 } 3272 s->current_frame_offset = off + size; 3273 3274 ts->mem_offset = off; 3275 #if defined(__sparc__) 3276 ts->mem_offset += TCG_TARGET_STACK_BIAS; 3277 #endif 3278 ts->mem_base = s->frame_temp; 3279 ts->mem_allocated = 1; 3280 } 3281 3282 /* Assign @reg to @ts, and update reg_to_temp[]. 
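   Any previous register binding of @ts is released first, so that
   reg_to_temp[] remains a one-to-one map from host registers to the
   temps currently resident in them.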
*/
3283 static void set_temp_val_reg(TCGContext *s, TCGTemp *ts, TCGReg reg)
3284 {
3285 if (ts->val_type == TEMP_VAL_REG) {
3286 TCGReg old = ts->reg;
3287 tcg_debug_assert(s->reg_to_temp[old] == ts);
3288 if (old == reg) {
3289 return;
3290 }
3291 s->reg_to_temp[old] = NULL;
3292 }
3293 tcg_debug_assert(s->reg_to_temp[reg] == NULL);
3294 s->reg_to_temp[reg] = ts;
3295 ts->val_type = TEMP_VAL_REG;
3296 ts->reg = reg;
3297 }
3298
3299 /* Assign a non-register value type to @ts, and update reg_to_temp[]. */
3300 static void set_temp_val_nonreg(TCGContext *s, TCGTemp *ts, TCGTempVal type)
3301 {
3302 tcg_debug_assert(type != TEMP_VAL_REG);
3303 if (ts->val_type == TEMP_VAL_REG) {
3304 TCGReg reg = ts->reg;
3305 tcg_debug_assert(s->reg_to_temp[reg] == ts);
3306 s->reg_to_temp[reg] = NULL;
3307 }
3308 ts->val_type = type;
3309 }
3310
3311 static void temp_load(TCGContext *, TCGTemp *, TCGRegSet, TCGRegSet, TCGRegSet);
3312
3313 /* Mark a temporary as free or dead. If 'free_or_dead' is negative,
3314 mark it free; otherwise mark it dead. */
3315 static void temp_free_or_dead(TCGContext *s, TCGTemp *ts, int free_or_dead)
3316 {
3317 TCGTempVal new_type;
3318
3319 switch (ts->kind) {
3320 case TEMP_FIXED:
3321 return;
3322 case TEMP_GLOBAL:
3323 case TEMP_LOCAL:
3324 new_type = TEMP_VAL_MEM;
3325 break;
3326 case TEMP_NORMAL:
3327 case TEMP_EBB:
3328 new_type = free_or_dead < 0 ? TEMP_VAL_MEM : TEMP_VAL_DEAD;
3329 break;
3330 case TEMP_CONST:
3331 new_type = TEMP_VAL_CONST;
3332 break;
3333 default:
3334 g_assert_not_reached();
3335 }
3336 set_temp_val_nonreg(s, ts, new_type);
3337 }
3338
3339 /* Mark a temporary as dead. */
3340 static inline void temp_dead(TCGContext *s, TCGTemp *ts)
3341 {
3342 temp_free_or_dead(s, ts, 1);
3343 }
3344
3345 /* Sync a temporary to memory. 'allocated_regs' is used in case a temporary
3346 register needs to be allocated to store a constant. If 'free_or_dead'
3347 is non-zero, subsequently release the temporary; if it is positive, the
3348 temp is dead; if it is negative, the temp is free. */
3349 static void temp_sync(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs,
3350 TCGRegSet preferred_regs, int free_or_dead)
3351 {
3352 if (!temp_readonly(ts) && !ts->mem_coherent) {
3353 if (!ts->mem_allocated) {
3354 temp_allocate_frame(s, ts);
3355 }
3356 switch (ts->val_type) {
3357 case TEMP_VAL_CONST:
3358 /* If we're going to free the temp immediately, then we won't
3359 require it later in a register, so attempt to store the
3360 constant to memory directly. */
3361 if (free_or_dead
3362 && tcg_out_sti(s, ts->type, ts->val,
3363 ts->mem_base->reg, ts->mem_offset)) {
3364 break;
3365 }
3366 temp_load(s, ts, tcg_target_available_regs[ts->type],
3367 allocated_regs, preferred_regs);
3368 /* fallthrough */
3369
3370 case TEMP_VAL_REG:
3371 tcg_out_st(s, ts->type, ts->reg,
3372 ts->mem_base->reg, ts->mem_offset);
3373 break;
3374
3375 case TEMP_VAL_MEM:
3376 break;
3377
3378 case TEMP_VAL_DEAD:
3379 default:
3380 tcg_abort();
3381 }
3382 ts->mem_coherent = 1;
3383 }
3384 if (free_or_dead) {
3385 temp_free_or_dead(s, ts, free_or_dead);
3386 }
3387 }
3388
3389 /* free register 'reg' by spilling the corresponding temporary if necessary */
3390 static void tcg_reg_free(TCGContext *s, TCGReg reg, TCGRegSet allocated_regs)
3391 {
3392 TCGTemp *ts = s->reg_to_temp[reg];
3393 if (ts != NULL) {
3394 temp_sync(s, ts, allocated_regs, 0, -1);
3395 }
3396 }
3397
3398 /**
3399 * tcg_reg_alloc:
3400 * @required_regs: Set of registers in which we must allocate.
3401 * @allocated_regs: Set of registers which must be avoided. 3402 * @preferred_regs: Set of registers we should prefer. 3403 * @rev: True if we search the registers in "indirect" order. 3404 * 3405 * The allocated register must be in @required_regs & ~@allocated_regs, 3406 * but if we can put it in @preferred_regs we may save a move later. 3407 */ 3408 static TCGReg tcg_reg_alloc(TCGContext *s, TCGRegSet required_regs, 3409 TCGRegSet allocated_regs, 3410 TCGRegSet preferred_regs, bool rev) 3411 { 3412 int i, j, f, n = ARRAY_SIZE(tcg_target_reg_alloc_order); 3413 TCGRegSet reg_ct[2]; 3414 const int *order; 3415 3416 reg_ct[1] = required_regs & ~allocated_regs; 3417 tcg_debug_assert(reg_ct[1] != 0); 3418 reg_ct[0] = reg_ct[1] & preferred_regs; 3419 3420 /* Skip the preferred_regs option if it cannot be satisfied, 3421 or if the preference made no difference. */ 3422 f = reg_ct[0] == 0 || reg_ct[0] == reg_ct[1]; 3423 3424 order = rev ? indirect_reg_alloc_order : tcg_target_reg_alloc_order; 3425 3426 /* Try free registers, preferences first. */ 3427 for (j = f; j < 2; j++) { 3428 TCGRegSet set = reg_ct[j]; 3429 3430 if (tcg_regset_single(set)) { 3431 /* One register in the set. */ 3432 TCGReg reg = tcg_regset_first(set); 3433 if (s->reg_to_temp[reg] == NULL) { 3434 return reg; 3435 } 3436 } else { 3437 for (i = 0; i < n; i++) { 3438 TCGReg reg = order[i]; 3439 if (s->reg_to_temp[reg] == NULL && 3440 tcg_regset_test_reg(set, reg)) { 3441 return reg; 3442 } 3443 } 3444 } 3445 } 3446 3447 /* We must spill something. */ 3448 for (j = f; j < 2; j++) { 3449 TCGRegSet set = reg_ct[j]; 3450 3451 if (tcg_regset_single(set)) { 3452 /* One register in the set. */ 3453 TCGReg reg = tcg_regset_first(set); 3454 tcg_reg_free(s, reg, allocated_regs); 3455 return reg; 3456 } else { 3457 for (i = 0; i < n; i++) { 3458 TCGReg reg = order[i]; 3459 if (tcg_regset_test_reg(set, reg)) { 3460 tcg_reg_free(s, reg, allocated_regs); 3461 return reg; 3462 } 3463 } 3464 } 3465 } 3466 3467 tcg_abort(); 3468 } 3469 3470 static TCGReg tcg_reg_alloc_pair(TCGContext *s, TCGRegSet required_regs, 3471 TCGRegSet allocated_regs, 3472 TCGRegSet preferred_regs, bool rev) 3473 { 3474 int i, j, k, fmin, n = ARRAY_SIZE(tcg_target_reg_alloc_order); 3475 TCGRegSet reg_ct[2]; 3476 const int *order; 3477 3478 /* Ensure that if I is not in allocated_regs, I+1 is not either. */ 3479 reg_ct[1] = required_regs & ~(allocated_regs | (allocated_regs >> 1)); 3480 tcg_debug_assert(reg_ct[1] != 0); 3481 reg_ct[0] = reg_ct[1] & preferred_regs; 3482 3483 order = rev ? indirect_reg_alloc_order : tcg_target_reg_alloc_order; 3484 3485 /* 3486 * Skip the preferred_regs option if it cannot be satisfied, 3487 * or if the preference made no difference. 3488 */ 3489 k = reg_ct[0] == 0 || reg_ct[0] == reg_ct[1]; 3490 3491 /* 3492 * Minimize the number of flushes by looking for 2 free registers first, 3493 * then a single flush, then two flushes. 3494 */ 3495 for (fmin = 2; fmin >= 0; fmin--) { 3496 for (j = k; j < 2; j++) { 3497 TCGRegSet set = reg_ct[j]; 3498 3499 for (i = 0; i < n; i++) { 3500 TCGReg reg = order[i]; 3501 3502 if (tcg_regset_test_reg(set, reg)) { 3503 int f = !s->reg_to_temp[reg] + !s->reg_to_temp[reg + 1]; 3504 if (f >= fmin) { 3505 tcg_reg_free(s, reg, allocated_regs); 3506 tcg_reg_free(s, reg + 1, allocated_regs); 3507 return reg; 3508 } 3509 } 3510 } 3511 } 3512 } 3513 tcg_abort(); 3514 } 3515 3516 /* Make sure the temporary is in a register. If needed, allocate the register 3517 from DESIRED while avoiding ALLOCATED. 
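   A constant is materialized with movi (or with dupi for vector
   types, using the smallest element size that replicates the value);
   a value in memory is reloaded from its canonical slot and becomes
   coherent with it.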
*/
3518 static void temp_load(TCGContext *s, TCGTemp *ts, TCGRegSet desired_regs,
3519 TCGRegSet allocated_regs, TCGRegSet preferred_regs)
3520 {
3521 TCGReg reg;
3522
3523 switch (ts->val_type) {
3524 case TEMP_VAL_REG:
3525 return;
3526 case TEMP_VAL_CONST:
3527 reg = tcg_reg_alloc(s, desired_regs, allocated_regs,
3528 preferred_regs, ts->indirect_base);
3529 if (ts->type <= TCG_TYPE_I64) {
3530 tcg_out_movi(s, ts->type, reg, ts->val);
3531 } else {
3532 uint64_t val = ts->val;
3533 MemOp vece = MO_64;
3534
3535 /*
3536 * Find the minimal vector element that matches the constant.
3537 * The targets will, in general, have to do this search anyway,
3538 * so do this generically.
3539 */
3540 if (val == dup_const(MO_8, val)) {
3541 vece = MO_8;
3542 } else if (val == dup_const(MO_16, val)) {
3543 vece = MO_16;
3544 } else if (val == dup_const(MO_32, val)) {
3545 vece = MO_32;
3546 }
3547
3548 tcg_out_dupi_vec(s, ts->type, vece, reg, ts->val);
3549 }
3550 ts->mem_coherent = 0;
3551 break;
3552 case TEMP_VAL_MEM:
3553 reg = tcg_reg_alloc(s, desired_regs, allocated_regs,
3554 preferred_regs, ts->indirect_base);
3555 tcg_out_ld(s, ts->type, reg, ts->mem_base->reg, ts->mem_offset);
3556 ts->mem_coherent = 1;
3557 break;
3558 case TEMP_VAL_DEAD:
3559 default:
3560 tcg_abort();
3561 }
3562 set_temp_val_reg(s, ts, reg);
3563 }
3564
3565 /* Save a temporary to memory. 'allocated_regs' is used in case a
3566 temporary register needs to be allocated to store a constant. */
3567 static void temp_save(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs)
3568 {
3569 /* The liveness analysis already ensures that globals are back
3570 in memory. Keep a tcg_debug_assert for safety. */
3571 tcg_debug_assert(ts->val_type == TEMP_VAL_MEM || temp_readonly(ts));
3572 }
3573
3574 /* save globals to their canonical location and assume they can be
3575 modified by the following code. 'allocated_regs' is used in case a
3576 temporary register needs to be allocated to store a constant. */
3577 static void save_globals(TCGContext *s, TCGRegSet allocated_regs)
3578 {
3579 int i, n;
3580
3581 for (i = 0, n = s->nb_globals; i < n; i++) {
3582 temp_save(s, &s->temps[i], allocated_regs);
3583 }
3584 }
3585
3586 /* sync globals to their canonical location and assume they can be
3587 read by the following code. 'allocated_regs' is used in case a
3588 temporary register needs to be allocated to store a constant. */
3589 static void sync_globals(TCGContext *s, TCGRegSet allocated_regs)
3590 {
3591 int i, n;
3592
3593 for (i = 0, n = s->nb_globals; i < n; i++) {
3594 TCGTemp *ts = &s->temps[i];
3595 tcg_debug_assert(ts->val_type != TEMP_VAL_REG
3596 || ts->kind == TEMP_FIXED
3597 || ts->mem_coherent);
3598 }
3599 }
3600
3601 /* at the end of a basic block, we assume all temporaries are dead and
3602 all globals are stored at their canonical location. */
3603 static void tcg_reg_alloc_bb_end(TCGContext *s, TCGRegSet allocated_regs)
3604 {
3605 int i;
3606
3607 for (i = s->nb_globals; i < s->nb_temps; i++) {
3608 TCGTemp *ts = &s->temps[i];
3609
3610 switch (ts->kind) {
3611 case TEMP_LOCAL:
3612 temp_save(s, ts, allocated_regs);
3613 break;
3614 case TEMP_NORMAL:
3615 case TEMP_EBB:
3616 /* The liveness analysis already ensures that temps are dead.
3617 Keep a tcg_debug_assert for safety. */
3618 tcg_debug_assert(ts->val_type == TEMP_VAL_DEAD);
3619 break;
3620 case TEMP_CONST:
3621 /* Similarly, we should have freed any allocated register.
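   (temp_free_or_dead() resets a TEMP_CONST temp to TEMP_VAL_CONST,
   so finding one still bound to a register here would indicate a
   missed kill.)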
*/ 3622 tcg_debug_assert(ts->val_type == TEMP_VAL_CONST); 3623 break; 3624 default: 3625 g_assert_not_reached(); 3626 } 3627 } 3628 3629 save_globals(s, allocated_regs); 3630 } 3631 3632 /* 3633 * At a conditional branch, we assume all temporaries are dead unless 3634 * explicitly live-across-conditional-branch; all globals and local 3635 * temps are synced to their location. 3636 */ 3637 static void tcg_reg_alloc_cbranch(TCGContext *s, TCGRegSet allocated_regs) 3638 { 3639 sync_globals(s, allocated_regs); 3640 3641 for (int i = s->nb_globals; i < s->nb_temps; i++) { 3642 TCGTemp *ts = &s->temps[i]; 3643 /* 3644 * The liveness analysis already ensures that temps are dead. 3645 * Keep tcg_debug_asserts for safety. 3646 */ 3647 switch (ts->kind) { 3648 case TEMP_LOCAL: 3649 tcg_debug_assert(ts->val_type != TEMP_VAL_REG || ts->mem_coherent); 3650 break; 3651 case TEMP_NORMAL: 3652 tcg_debug_assert(ts->val_type == TEMP_VAL_DEAD); 3653 break; 3654 case TEMP_EBB: 3655 case TEMP_CONST: 3656 break; 3657 default: 3658 g_assert_not_reached(); 3659 } 3660 } 3661 } 3662 3663 /* 3664 * Specialized code generation for INDEX_op_mov_* with a constant. 3665 */ 3666 static void tcg_reg_alloc_do_movi(TCGContext *s, TCGTemp *ots, 3667 tcg_target_ulong val, TCGLifeData arg_life, 3668 TCGRegSet preferred_regs) 3669 { 3670 /* ENV should not be modified. */ 3671 tcg_debug_assert(!temp_readonly(ots)); 3672 3673 /* The movi is not explicitly generated here. */ 3674 set_temp_val_nonreg(s, ots, TEMP_VAL_CONST); 3675 ots->val = val; 3676 ots->mem_coherent = 0; 3677 if (NEED_SYNC_ARG(0)) { 3678 temp_sync(s, ots, s->reserved_regs, preferred_regs, IS_DEAD_ARG(0)); 3679 } else if (IS_DEAD_ARG(0)) { 3680 temp_dead(s, ots); 3681 } 3682 } 3683 3684 /* 3685 * Specialized code generation for INDEX_op_mov_*. 3686 */ 3687 static void tcg_reg_alloc_mov(TCGContext *s, const TCGOp *op) 3688 { 3689 const TCGLifeData arg_life = op->life; 3690 TCGRegSet allocated_regs, preferred_regs; 3691 TCGTemp *ts, *ots; 3692 TCGType otype, itype; 3693 TCGReg oreg, ireg; 3694 3695 allocated_regs = s->reserved_regs; 3696 preferred_regs = output_pref(op, 0); 3697 ots = arg_temp(op->args[0]); 3698 ts = arg_temp(op->args[1]); 3699 3700 /* ENV should not be modified. */ 3701 tcg_debug_assert(!temp_readonly(ots)); 3702 3703 /* Note that otype != itype for no-op truncation. */ 3704 otype = ots->type; 3705 itype = ts->type; 3706 3707 if (ts->val_type == TEMP_VAL_CONST) { 3708 /* propagate constant or generate sti */ 3709 tcg_target_ulong val = ts->val; 3710 if (IS_DEAD_ARG(1)) { 3711 temp_dead(s, ts); 3712 } 3713 tcg_reg_alloc_do_movi(s, ots, val, arg_life, preferred_regs); 3714 return; 3715 } 3716 3717 /* If the source value is in memory we're going to be forced 3718 to have it in a register in order to perform the copy. Copy 3719 the SOURCE value into its own register first, that way we 3720 don't have to reload SOURCE the next time it is used. */ 3721 if (ts->val_type == TEMP_VAL_MEM) { 3722 temp_load(s, ts, tcg_target_available_regs[itype], 3723 allocated_regs, preferred_regs); 3724 } 3725 tcg_debug_assert(ts->val_type == TEMP_VAL_REG); 3726 ireg = ts->reg; 3727 3728 if (IS_DEAD_ARG(0)) { 3729 /* mov to a non-saved dead register makes no sense (even with 3730 liveness analysis disabled). 
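   Liveness marked the output as dead, so its value is only wanted in
   its memory slot: store the input straight to that slot and never
   allocate a register for the output.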
*/
3731 tcg_debug_assert(NEED_SYNC_ARG(0));
3732 if (!ots->mem_allocated) {
3733 temp_allocate_frame(s, ots);
3734 }
3735 tcg_out_st(s, otype, ireg, ots->mem_base->reg, ots->mem_offset);
3736 if (IS_DEAD_ARG(1)) {
3737 temp_dead(s, ts);
3738 }
3739 temp_dead(s, ots);
3740 return;
3741 }
3742
3743 if (IS_DEAD_ARG(1) && ts->kind != TEMP_FIXED) {
3744 /*
3745 * The mov can be suppressed. Kill input first, so that it
3746 * is unlinked from reg_to_temp, then set the output to the
3747 * reg that we saved from the input.
3748 */
3749 temp_dead(s, ts);
3750 oreg = ireg;
3751 } else {
3752 if (ots->val_type == TEMP_VAL_REG) {
3753 oreg = ots->reg;
3754 } else {
3755 /* Make sure to not spill the input register during allocation. */
3756 oreg = tcg_reg_alloc(s, tcg_target_available_regs[otype],
3757 allocated_regs | ((TCGRegSet)1 << ireg),
3758 preferred_regs, ots->indirect_base);
3759 }
3760 if (!tcg_out_mov(s, otype, oreg, ireg)) {
3761 /*
3762 * Cross register class move not supported.
3763 * Store the source register into the destination slot
3764 * and leave the destination temp as TEMP_VAL_MEM.
3765 */
3766 assert(!temp_readonly(ots));
3767 if (!ots->mem_allocated) {
3768 temp_allocate_frame(s, ots);
3769 }
3770 tcg_out_st(s, ts->type, ireg, ots->mem_base->reg, ots->mem_offset);
3771 set_temp_val_nonreg(s, ots, TEMP_VAL_MEM);
3772 ots->mem_coherent = 1;
3773 return;
3774 }
3775 }
3776 set_temp_val_reg(s, ots, oreg);
3777 ots->mem_coherent = 0;
3778
3779 if (NEED_SYNC_ARG(0)) {
3780 temp_sync(s, ots, allocated_regs, 0, 0);
3781 }
3782 }
3783
3784 /*
3785 * Specialized code generation for INDEX_op_dup_vec.
3786 */
3787 static void tcg_reg_alloc_dup(TCGContext *s, const TCGOp *op)
3788 {
3789 const TCGLifeData arg_life = op->life;
3790 TCGRegSet dup_out_regs, dup_in_regs;
3791 TCGTemp *its, *ots;
3792 TCGType itype, vtype;
3793 unsigned vece;
3794 int lowpart_ofs;
3795 bool ok;
3796
3797 ots = arg_temp(op->args[0]);
3798 its = arg_temp(op->args[1]);
3799
3800 /* ENV should not be modified. */
3801 tcg_debug_assert(!temp_readonly(ots));
3802
3803 itype = its->type;
3804 vece = TCGOP_VECE(op);
3805 vtype = TCGOP_VECL(op) + TCG_TYPE_V64;
3806
3807 if (its->val_type == TEMP_VAL_CONST) {
3808 /* Propagate constant via movi -> dupi. */
3809 tcg_target_ulong val = its->val;
3810 if (IS_DEAD_ARG(1)) {
3811 temp_dead(s, its);
3812 }
3813 tcg_reg_alloc_do_movi(s, ots, val, arg_life, output_pref(op, 0));
3814 return;
3815 }
3816
3817 dup_out_regs = tcg_op_defs[INDEX_op_dup_vec].args_ct[0].regs;
3818 dup_in_regs = tcg_op_defs[INDEX_op_dup_vec].args_ct[1].regs;
3819
3820 /* Allocate the output register now. */
3821 if (ots->val_type != TEMP_VAL_REG) {
3822 TCGRegSet allocated_regs = s->reserved_regs;
3823 TCGReg oreg;
3824
3825 if (!IS_DEAD_ARG(1) && its->val_type == TEMP_VAL_REG) {
3826 /* Make sure to not spill the input register. */
3827 tcg_regset_set_reg(allocated_regs, its->reg);
3828 }
3829 oreg = tcg_reg_alloc(s, dup_out_regs, allocated_regs,
3830 output_pref(op, 0), ots->indirect_base);
3831 set_temp_val_reg(s, ots, oreg);
3832 }
3833
3834 switch (its->val_type) {
3835 case TEMP_VAL_REG:
3836 /*
3837 * The dup constraints must be broad, covering all possible VECE.
3838 * However, tcg_out_dup_vec() gets to see the VECE and we allow it
3839 * to fail, indicating that extra moves are required for that case.
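 * (A backend might, say, only support dup from a general register
 * for 32- and 64-bit elements; returning false routes the value
 * through the integer-to-vector move or in-memory dupm fallbacks
 * below.)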
3840 */ 3841 if (tcg_regset_test_reg(dup_in_regs, its->reg)) { 3842 if (tcg_out_dup_vec(s, vtype, vece, ots->reg, its->reg)) { 3843 goto done; 3844 } 3845 /* Try again from memory or a vector input register. */ 3846 } 3847 if (!its->mem_coherent) { 3848 /* 3849 * The input register is not synced, and so an extra store 3850 * would be required to use memory. Attempt an integer-vector 3851 * register move first. We do not have a TCGRegSet for this. 3852 */ 3853 if (tcg_out_mov(s, itype, ots->reg, its->reg)) { 3854 break; 3855 } 3856 /* Sync the temp back to its slot and load from there. */ 3857 temp_sync(s, its, s->reserved_regs, 0, 0); 3858 } 3859 /* fall through */ 3860 3861 case TEMP_VAL_MEM: 3862 lowpart_ofs = 0; 3863 if (HOST_BIG_ENDIAN) { 3864 lowpart_ofs = tcg_type_size(itype) - (1 << vece); 3865 } 3866 if (tcg_out_dupm_vec(s, vtype, vece, ots->reg, its->mem_base->reg, 3867 its->mem_offset + lowpart_ofs)) { 3868 goto done; 3869 } 3870 /* Load the input into the destination vector register. */ 3871 tcg_out_ld(s, itype, ots->reg, its->mem_base->reg, its->mem_offset); 3872 break; 3873 3874 default: 3875 g_assert_not_reached(); 3876 } 3877 3878 /* We now have a vector input register, so dup must succeed. */ 3879 ok = tcg_out_dup_vec(s, vtype, vece, ots->reg, ots->reg); 3880 tcg_debug_assert(ok); 3881 3882 done: 3883 ots->mem_coherent = 0; 3884 if (IS_DEAD_ARG(1)) { 3885 temp_dead(s, its); 3886 } 3887 if (NEED_SYNC_ARG(0)) { 3888 temp_sync(s, ots, s->reserved_regs, 0, 0); 3889 } 3890 if (IS_DEAD_ARG(0)) { 3891 temp_dead(s, ots); 3892 } 3893 } 3894 3895 static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op) 3896 { 3897 const TCGLifeData arg_life = op->life; 3898 const TCGOpDef * const def = &tcg_op_defs[op->opc]; 3899 TCGRegSet i_allocated_regs; 3900 TCGRegSet o_allocated_regs; 3901 int i, k, nb_iargs, nb_oargs; 3902 TCGReg reg; 3903 TCGArg arg; 3904 const TCGArgConstraint *arg_ct; 3905 TCGTemp *ts; 3906 TCGArg new_args[TCG_MAX_OP_ARGS]; 3907 int const_args[TCG_MAX_OP_ARGS]; 3908 3909 nb_oargs = def->nb_oargs; 3910 nb_iargs = def->nb_iargs; 3911 3912 /* copy constants */ 3913 memcpy(new_args + nb_oargs + nb_iargs, 3914 op->args + nb_oargs + nb_iargs, 3915 sizeof(TCGArg) * def->nb_cargs); 3916 3917 i_allocated_regs = s->reserved_regs; 3918 o_allocated_regs = s->reserved_regs; 3919 3920 /* satisfy input constraints */ 3921 for (k = 0; k < nb_iargs; k++) { 3922 TCGRegSet i_preferred_regs, i_required_regs; 3923 bool allocate_new_reg, copyto_new_reg; 3924 TCGTemp *ts2; 3925 int i1, i2; 3926 3927 i = def->args_ct[nb_oargs + k].sort_index; 3928 arg = op->args[i]; 3929 arg_ct = &def->args_ct[i]; 3930 ts = arg_temp(arg); 3931 3932 if (ts->val_type == TEMP_VAL_CONST 3933 && tcg_target_const_match(ts->val, ts->type, arg_ct->ct)) { 3934 /* constant is OK for instruction */ 3935 const_args[i] = 1; 3936 new_args[i] = ts->val; 3937 continue; 3938 } 3939 3940 reg = ts->reg; 3941 i_preferred_regs = 0; 3942 i_required_regs = arg_ct->regs; 3943 allocate_new_reg = false; 3944 copyto_new_reg = false; 3945 3946 switch (arg_ct->pair) { 3947 case 0: /* not paired */ 3948 if (arg_ct->ialias) { 3949 i_preferred_regs = output_pref(op, arg_ct->alias_index); 3950 3951 /* 3952 * If the input is readonly, then it cannot also be an 3953 * output and aliased to itself. If the input is not 3954 * dead after the instruction, we must allocate a new 3955 * register and move it. 
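 * The classic case is a matching constraint on a two-address host
 * op: if the aliased input is still live after the op, writing the
 * output over it would clobber a needed value.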
3956 */ 3957 if (temp_readonly(ts) || !IS_DEAD_ARG(i)) { 3958 allocate_new_reg = true; 3959 } else if (ts->val_type == TEMP_VAL_REG) { 3960 /* 3961 * Check if the current register has already been 3962 * allocated for another input. 3963 */ 3964 allocate_new_reg = 3965 tcg_regset_test_reg(i_allocated_regs, reg); 3966 } 3967 } 3968 if (!allocate_new_reg) { 3969 temp_load(s, ts, i_required_regs, i_allocated_regs, 3970 i_preferred_regs); 3971 reg = ts->reg; 3972 allocate_new_reg = !tcg_regset_test_reg(i_required_regs, reg); 3973 } 3974 if (allocate_new_reg) { 3975 /* 3976 * Allocate a new register matching the constraint 3977 * and move the temporary register into it. 3978 */ 3979 temp_load(s, ts, tcg_target_available_regs[ts->type], 3980 i_allocated_regs, 0); 3981 reg = tcg_reg_alloc(s, i_required_regs, i_allocated_regs, 3982 i_preferred_regs, ts->indirect_base); 3983 copyto_new_reg = true; 3984 } 3985 break; 3986 3987 case 1: 3988 /* First of an input pair; if i1 == i2, the second is an output. */ 3989 i1 = i; 3990 i2 = arg_ct->pair_index; 3991 ts2 = i1 != i2 ? arg_temp(op->args[i2]) : NULL; 3992 3993 /* 3994 * It is easier to default to allocating a new pair 3995 * and to identify a few cases where it's not required. 3996 */ 3997 if (arg_ct->ialias) { 3998 i_preferred_regs = output_pref(op, arg_ct->alias_index); 3999 if (IS_DEAD_ARG(i1) && 4000 IS_DEAD_ARG(i2) && 4001 !temp_readonly(ts) && 4002 ts->val_type == TEMP_VAL_REG && 4003 ts->reg < TCG_TARGET_NB_REGS - 1 && 4004 tcg_regset_test_reg(i_required_regs, reg) && 4005 !tcg_regset_test_reg(i_allocated_regs, reg) && 4006 !tcg_regset_test_reg(i_allocated_regs, reg + 1) && 4007 (ts2 4008 ? ts2->val_type == TEMP_VAL_REG && 4009 ts2->reg == reg + 1 && 4010 !temp_readonly(ts2) 4011 : s->reg_to_temp[reg + 1] == NULL)) { 4012 break; 4013 } 4014 } else { 4015 /* Without aliasing, the pair must also be an input. */ 4016 tcg_debug_assert(ts2); 4017 if (ts->val_type == TEMP_VAL_REG && 4018 ts2->val_type == TEMP_VAL_REG && 4019 ts2->reg == reg + 1 && 4020 tcg_regset_test_reg(i_required_regs, reg)) { 4021 break; 4022 } 4023 } 4024 reg = tcg_reg_alloc_pair(s, i_required_regs, i_allocated_regs, 4025 0, ts->indirect_base); 4026 goto do_pair; 4027 4028 case 2: /* pair second */ 4029 reg = new_args[arg_ct->pair_index] + 1; 4030 goto do_pair; 4031 4032 case 3: /* ialias with second output, no first input */ 4033 tcg_debug_assert(arg_ct->ialias); 4034 i_preferred_regs = output_pref(op, arg_ct->alias_index); 4035 4036 if (IS_DEAD_ARG(i) && 4037 !temp_readonly(ts) && 4038 ts->val_type == TEMP_VAL_REG && 4039 reg > 0 && 4040 s->reg_to_temp[reg - 1] == NULL && 4041 tcg_regset_test_reg(i_required_regs, reg) && 4042 !tcg_regset_test_reg(i_allocated_regs, reg) && 4043 !tcg_regset_test_reg(i_allocated_regs, reg - 1)) { 4044 tcg_regset_set_reg(i_allocated_regs, reg - 1); 4045 break; 4046 } 4047 reg = tcg_reg_alloc_pair(s, i_required_regs >> 1, 4048 i_allocated_regs, 0, 4049 ts->indirect_base); 4050 tcg_regset_set_reg(i_allocated_regs, reg); 4051 reg += 1; 4052 goto do_pair; 4053 4054 do_pair: 4055 /* 4056 * If an aliased input is not dead after the instruction, 4057 * we must allocate a new register and move it. 4058 */ 4059 if (arg_ct->ialias && (!IS_DEAD_ARG(i) || temp_readonly(ts))) { 4060 TCGRegSet t_allocated_regs = i_allocated_regs; 4061 4062 /* 4063 * Because of the alias, and the continued life, make sure 4064 * that the temp is somewhere *other* than the reg pair, 4065 * and we get a copy in reg. 
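 * Both halves of the pair are marked allocated below, so the load or
 * copy of the surviving temp cannot land in either register that the
 * op is about to overwrite.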
                 */
                tcg_regset_set_reg(t_allocated_regs, reg);
                tcg_regset_set_reg(t_allocated_regs, reg + 1);
                if (ts->val_type == TEMP_VAL_REG && ts->reg == reg) {
                    /* If ts was already in reg, copy it somewhere else. */
                    TCGReg nr;
                    bool ok;

                    tcg_debug_assert(ts->kind != TEMP_FIXED);
                    nr = tcg_reg_alloc(s, tcg_target_available_regs[ts->type],
                                       t_allocated_regs, 0, ts->indirect_base);
                    ok = tcg_out_mov(s, ts->type, nr, reg);
                    tcg_debug_assert(ok);

                    set_temp_val_reg(s, ts, nr);
                } else {
                    temp_load(s, ts, tcg_target_available_regs[ts->type],
                              t_allocated_regs, 0);
                    copyto_new_reg = true;
                }
            } else {
                /* Preferably allocate to reg, otherwise copy. */
                i_required_regs = (TCGRegSet)1 << reg;
                temp_load(s, ts, i_required_regs, i_allocated_regs,
                          i_preferred_regs);
                copyto_new_reg = ts->reg != reg;
            }
            break;

        default:
            g_assert_not_reached();
        }

        if (copyto_new_reg) {
            if (!tcg_out_mov(s, ts->type, reg, ts->reg)) {
                /*
                 * Cross register class move not supported.  Sync the
                 * temp back to its slot and load from there.
                 */
                temp_sync(s, ts, i_allocated_regs, 0, 0);
                tcg_out_ld(s, ts->type, reg,
                           ts->mem_base->reg, ts->mem_offset);
            }
        }
        new_args[i] = reg;
        const_args[i] = 0;
        tcg_regset_set_reg(i_allocated_regs, reg);
    }

    /* mark dead temporaries and free the associated registers */
    for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
        if (IS_DEAD_ARG(i)) {
            temp_dead(s, arg_temp(op->args[i]));
        }
    }

    if (def->flags & TCG_OPF_COND_BRANCH) {
        tcg_reg_alloc_cbranch(s, i_allocated_regs);
    } else if (def->flags & TCG_OPF_BB_END) {
        tcg_reg_alloc_bb_end(s, i_allocated_regs);
    } else {
        if (def->flags & TCG_OPF_CALL_CLOBBER) {
            /* XXX: permit generic clobber register list? */
            for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
                if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) {
                    tcg_reg_free(s, i, i_allocated_regs);
                }
            }
        }
        if (def->flags & TCG_OPF_SIDE_EFFECTS) {
            /* Sync globals if the op has side effects and might trigger
               an exception. */
            sync_globals(s, i_allocated_regs);
        }

        /* satisfy the output constraints */
        for (k = 0; k < nb_oargs; k++) {
            i = def->args_ct[k].sort_index;
            arg = op->args[i];
            arg_ct = &def->args_ct[i];
            ts = arg_temp(arg);

            /* ENV should not be modified. */
            tcg_debug_assert(!temp_readonly(ts));

            switch (arg_ct->pair) {
            case 0: /* not paired */
                if (arg_ct->oalias && !const_args[arg_ct->alias_index]) {
                    reg = new_args[arg_ct->alias_index];
                } else if (arg_ct->newreg) {
                    reg = tcg_reg_alloc(s, arg_ct->regs,
                                        i_allocated_regs | o_allocated_regs,
                                        output_pref(op, k), ts->indirect_base);
                } else {
                    reg = tcg_reg_alloc(s, arg_ct->regs, o_allocated_regs,
                                        output_pref(op, k), ts->indirect_base);
                }
                break;

            case 1: /* first of pair */
                tcg_debug_assert(!arg_ct->newreg);
                if (arg_ct->oalias) {
                    reg = new_args[arg_ct->alias_index];
                    break;
                }
                reg = tcg_reg_alloc_pair(s, arg_ct->regs, o_allocated_regs,
                                         output_pref(op, k), ts->indirect_base);
                break;

            case 2: /* second of pair */
                tcg_debug_assert(!arg_ct->newreg);
                if (arg_ct->oalias) {
                    reg = new_args[arg_ct->alias_index];
                } else {
                    reg = new_args[arg_ct->pair_index] + 1;
                }
                break;

            case 3: /* first of pair, aliasing with a second input */
                tcg_debug_assert(!arg_ct->newreg);
                reg = new_args[arg_ct->pair_index] - 1;
                break;

            default:
                g_assert_not_reached();
            }
            tcg_regset_set_reg(o_allocated_regs, reg);
            set_temp_val_reg(s, ts, reg);
            ts->mem_coherent = 0;
            new_args[i] = reg;
        }
    }

    /* emit instruction */
    if (def->flags & TCG_OPF_VECTOR) {
        tcg_out_vec_op(s, op->opc, TCGOP_VECL(op), TCGOP_VECE(op),
                       new_args, const_args);
    } else {
        tcg_out_op(s, op->opc, new_args, const_args);
    }

    /* move the outputs in the correct register if needed */
    for (i = 0; i < nb_oargs; i++) {
        ts = arg_temp(op->args[i]);

        /* ENV should not be modified. */
        tcg_debug_assert(!temp_readonly(ts));

        if (NEED_SYNC_ARG(i)) {
            temp_sync(s, ts, o_allocated_regs, 0, IS_DEAD_ARG(i));
        } else if (IS_DEAD_ARG(i)) {
            temp_dead(s, ts);
        }
    }
}

static bool tcg_reg_alloc_dup2(TCGContext *s, const TCGOp *op)
{
    const TCGLifeData arg_life = op->life;
    TCGTemp *ots, *itsl, *itsh;
    TCGType vtype = TCGOP_VECL(op) + TCG_TYPE_V64;

    /* This opcode is only valid for 32-bit hosts, for 64-bit elements. */
    tcg_debug_assert(TCG_TARGET_REG_BITS == 32);
    tcg_debug_assert(TCGOP_VECE(op) == MO_64);

    ots = arg_temp(op->args[0]);
    itsl = arg_temp(op->args[1]);
    itsh = arg_temp(op->args[2]);

    /* ENV should not be modified. */
    tcg_debug_assert(!temp_readonly(ots));

    /* Allocate the output register now. */
    if (ots->val_type != TEMP_VAL_REG) {
        TCGRegSet allocated_regs = s->reserved_regs;
        TCGRegSet dup_out_regs =
            tcg_op_defs[INDEX_op_dup_vec].args_ct[0].regs;
        TCGReg oreg;

        /* Make sure to not spill the input registers. */
        if (!IS_DEAD_ARG(1) && itsl->val_type == TEMP_VAL_REG) {
            tcg_regset_set_reg(allocated_regs, itsl->reg);
        }
        if (!IS_DEAD_ARG(2) && itsh->val_type == TEMP_VAL_REG) {
            tcg_regset_set_reg(allocated_regs, itsh->reg);
        }

        oreg = tcg_reg_alloc(s, dup_out_regs, allocated_regs,
                             output_pref(op, 0), ots->indirect_base);
        set_temp_val_reg(s, ots, oreg);
    }

    /* Promote dup2 of immediates to dupi_vec. */
    if (itsl->val_type == TEMP_VAL_CONST && itsh->val_type == TEMP_VAL_CONST) {
        uint64_t val = deposit64(itsl->val, 32, 32, itsh->val);
        MemOp vece = MO_64;

        if (val == dup_const(MO_8, val)) {
            vece = MO_8;
        } else if (val == dup_const(MO_16, val)) {
            vece = MO_16;
        } else if (val == dup_const(MO_32, val)) {
            vece = MO_32;
        }

        tcg_out_dupi_vec(s, vtype, vece, ots->reg, val);
        goto done;
    }

    /* If the two inputs form one 64-bit value, try dupm_vec. */
    if (itsl->temp_subindex == HOST_BIG_ENDIAN &&
        itsh->temp_subindex == !HOST_BIG_ENDIAN &&
        itsl == itsh + (HOST_BIG_ENDIAN ? 1 : -1)) {
        TCGTemp *its = itsl - HOST_BIG_ENDIAN;

        temp_sync(s, its + 0, s->reserved_regs, 0, 0);
        temp_sync(s, its + 1, s->reserved_regs, 0, 0);

        if (tcg_out_dupm_vec(s, vtype, MO_64, ots->reg,
                             its->mem_base->reg, its->mem_offset)) {
            goto done;
        }
    }

    /* Fall back to generic expansion. */
    return false;

 done:
    ots->mem_coherent = 0;
    if (IS_DEAD_ARG(1)) {
        temp_dead(s, itsl);
    }
    if (IS_DEAD_ARG(2)) {
        temp_dead(s, itsh);
    }
    if (NEED_SYNC_ARG(0)) {
        temp_sync(s, ots, s->reserved_regs, 0, IS_DEAD_ARG(0));
    } else if (IS_DEAD_ARG(0)) {
        temp_dead(s, ots);
    }
    return true;
}
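
/*
 * For illustration only (nothing below is referenced by the code): the
 * promotion above picks the narrowest replication width that reproduces
 * the constant.  E.g. with itsl->val == 0x2a2a2a2a and itsh->val ==
 * 0x2a2a2a2a,
 *
 *     val = deposit64(0x2a2a2a2a, 32, 32, 0x2a2a2a2a)
 *         = 0x2a2a2a2a2a2a2a2a
 *
 * satisfies val == dup_const(MO_8, val), so the dup2 is emitted as a
 * byte-wise dupi_vec of 0x2a, which backends can usually encode more
 * cheaply than a full 64-bit immediate.
 */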

static void load_arg_reg(TCGContext *s, TCGReg reg, TCGTemp *ts,
                         TCGRegSet allocated_regs)
{
    if (ts->val_type == TEMP_VAL_REG) {
        if (ts->reg != reg) {
            tcg_reg_free(s, reg, allocated_regs);
            if (!tcg_out_mov(s, ts->type, reg, ts->reg)) {
                /*
                 * Cross register class move not supported.  Sync the
                 * temp back to its slot and load from there.
                 */
                temp_sync(s, ts, allocated_regs, 0, 0);
                tcg_out_ld(s, ts->type, reg,
                           ts->mem_base->reg, ts->mem_offset);
            }
        }
    } else {
        TCGRegSet arg_set = 0;

        tcg_reg_free(s, reg, allocated_regs);
        tcg_regset_set_reg(arg_set, reg);
        temp_load(s, ts, arg_set, allocated_regs, 0);
    }
}

static void load_arg_stk(TCGContext *s, int stk_slot, TCGTemp *ts,
                         TCGRegSet allocated_regs)
{
    /*
     * When the destination is on the stack, load up the temp and store.
     * If there are many call-saved registers, the temp might live to
     * see another use; otherwise it'll be discarded.
     */
    temp_load(s, ts, tcg_target_available_regs[ts->type], allocated_regs, 0);
    tcg_out_st(s, ts->type, ts->reg, TCG_REG_CALL_STACK,
               TCG_TARGET_CALL_STACK_OFFSET +
               stk_slot * sizeof(tcg_target_long));
}

static void load_arg_normal(TCGContext *s, const TCGCallArgumentLoc *l,
                            TCGTemp *ts, TCGRegSet *allocated_regs)
{
    if (REG_P(l)) {
        TCGReg reg = tcg_target_call_iarg_regs[l->arg_slot];
        load_arg_reg(s, reg, ts, *allocated_regs);
        tcg_regset_set_reg(*allocated_regs, reg);
    } else {
        load_arg_stk(s, l->arg_slot - ARRAY_SIZE(tcg_target_call_iarg_regs),
                     ts, *allocated_regs);
    }
}
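
/*
 * A worked example of the slot arithmetic above, assuming a host ABI
 * with six integer argument registers: argument slots 0-5 are passed in
 * tcg_target_call_iarg_regs[0..5], while slot 7 becomes stack slot
 * 7 - 6 = 1, so the store in load_arg_stk() lands at byte offset
 * TCG_TARGET_CALL_STACK_OFFSET + 1 * sizeof(tcg_target_long).
 */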

static void tcg_reg_alloc_call(TCGContext *s, TCGOp *op)
{
    const int nb_oargs = TCGOP_CALLO(op);
    const int nb_iargs = TCGOP_CALLI(op);
    const TCGLifeData arg_life = op->life;
    const TCGHelperInfo *info = tcg_call_info(op);
    TCGRegSet allocated_regs = s->reserved_regs;
    int i;

    /*
     * Move inputs into place in reverse order,
     * so that we place stacked arguments first.
     */
    for (i = nb_iargs - 1; i >= 0; --i) {
        const TCGCallArgumentLoc *loc = &info->in[i];
        TCGTemp *ts = arg_temp(op->args[nb_oargs + i]);

        switch (loc->kind) {
        case TCG_CALL_ARG_NORMAL:
        case TCG_CALL_ARG_EXTEND_U:
        case TCG_CALL_ARG_EXTEND_S:
            load_arg_normal(s, loc, ts, &allocated_regs);
            break;
        default:
            g_assert_not_reached();
        }
    }

    /* Mark dead temporaries and free the associated registers. */
    for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
        if (IS_DEAD_ARG(i)) {
            temp_dead(s, arg_temp(op->args[i]));
        }
    }

    /* Clobber call registers. */
    for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
        if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) {
            tcg_reg_free(s, i, allocated_regs);
        }
    }

    /*
     * Save globals if they might be written by the helper,
     * sync them if they might be read.
     */
    if (info->flags & TCG_CALL_NO_READ_GLOBALS) {
        /* Nothing to do */
    } else if (info->flags & TCG_CALL_NO_WRITE_GLOBALS) {
        sync_globals(s, allocated_regs);
    } else {
        save_globals(s, allocated_regs);
    }

    tcg_out_call(s, tcg_call_func(op), info);

    /* Assign output registers and emit moves if needed. */
    switch (info->out_kind) {
    case TCG_CALL_RET_NORMAL:
        for (i = 0; i < nb_oargs; i++) {
            TCGTemp *ts = arg_temp(op->args[i]);
            TCGReg reg = tcg_target_call_oarg_regs[i];

            /* ENV should not be modified. */
            tcg_debug_assert(!temp_readonly(ts));

            set_temp_val_reg(s, ts, reg);
            ts->mem_coherent = 0;
        }
        break;
    default:
        g_assert_not_reached();
    }

    /* Flush or discard output registers as needed. */
    for (i = 0; i < nb_oargs; i++) {
        TCGTemp *ts = arg_temp(op->args[i]);
        if (NEED_SYNC_ARG(i)) {
            temp_sync(s, ts, s->reserved_regs, 0, IS_DEAD_ARG(i));
        } else if (IS_DEAD_ARG(i)) {
            temp_dead(s, ts);
        }
    }
}

#ifdef CONFIG_PROFILER

/* avoid copy/paste errors */
#define PROF_ADD(to, from, field)                                       \
    do {                                                                \
        (to)->field += qatomic_read(&((from)->field));                  \
    } while (0)

#define PROF_MAX(to, from, field)                                       \
    do {                                                                \
        typeof((from)->field) val__ = qatomic_read(&((from)->field));   \
        if (val__ > (to)->field) {                                      \
            (to)->field = val__;                                        \
        }                                                               \
    } while (0)
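
/*
 * For illustration, PROF_ADD(prof, orig, tb_count) expands to
 *
 *     do {
 *         (prof)->tb_count += qatomic_read(&((orig)->tb_count));
 *     } while (0)
 *
 * The do/while (0) wrapper makes each use behave as a single statement,
 * so the macros compose safely with unbraced if/else at the call site.
 */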

/* Pass in a zero'ed @prof */
static inline
void tcg_profile_snapshot(TCGProfile *prof, bool counters, bool table)
{
    unsigned int n_ctxs = qatomic_read(&tcg_cur_ctxs);
    unsigned int i;

    for (i = 0; i < n_ctxs; i++) {
        TCGContext *s = qatomic_read(&tcg_ctxs[i]);
        const TCGProfile *orig = &s->prof;

        if (counters) {
            PROF_ADD(prof, orig, cpu_exec_time);
            PROF_ADD(prof, orig, tb_count1);
            PROF_ADD(prof, orig, tb_count);
            PROF_ADD(prof, orig, op_count);
            PROF_MAX(prof, orig, op_count_max);
            PROF_ADD(prof, orig, temp_count);
            PROF_MAX(prof, orig, temp_count_max);
            PROF_ADD(prof, orig, del_op_count);
            PROF_ADD(prof, orig, code_in_len);
            PROF_ADD(prof, orig, code_out_len);
            PROF_ADD(prof, orig, search_out_len);
            PROF_ADD(prof, orig, interm_time);
            PROF_ADD(prof, orig, code_time);
            PROF_ADD(prof, orig, la_time);
            PROF_ADD(prof, orig, opt_time);
            PROF_ADD(prof, orig, restore_count);
            PROF_ADD(prof, orig, restore_time);
        }
        if (table) {
            int i;

            for (i = 0; i < NB_OPS; i++) {
                PROF_ADD(prof, orig, table_op_count[i]);
            }
        }
    }
}

#undef PROF_ADD
#undef PROF_MAX

static void tcg_profile_snapshot_counters(TCGProfile *prof)
{
    tcg_profile_snapshot(prof, true, false);
}

static void tcg_profile_snapshot_table(TCGProfile *prof)
{
    tcg_profile_snapshot(prof, false, true);
}

void tcg_dump_op_count(GString *buf)
{
    TCGProfile prof = {};
    int i;

    tcg_profile_snapshot_table(&prof);
    for (i = 0; i < NB_OPS; i++) {
        g_string_append_printf(buf, "%s %" PRId64 "\n", tcg_op_defs[i].name,
                               prof.table_op_count[i]);
    }
}

int64_t tcg_cpu_exec_time(void)
{
    unsigned int n_ctxs = qatomic_read(&tcg_cur_ctxs);
    unsigned int i;
    int64_t ret = 0;

    for (i = 0; i < n_ctxs; i++) {
        const TCGContext *s = qatomic_read(&tcg_ctxs[i]);
        const TCGProfile *prof = &s->prof;

        ret += qatomic_read(&prof->cpu_exec_time);
    }
    return ret;
}
#else
void tcg_dump_op_count(GString *buf)
{
    g_string_append_printf(buf, "[TCG profiler not compiled]\n");
}

int64_t tcg_cpu_exec_time(void)
{
    error_report("%s: TCG profiler not compiled", __func__);
    exit(EXIT_FAILURE);
}
#endif


int tcg_gen_code(TCGContext *s, TranslationBlock *tb, target_ulong pc_start)
{
#ifdef CONFIG_PROFILER
    TCGProfile *prof = &s->prof;
#endif
    int i, num_insns;
    TCGOp *op;

#ifdef CONFIG_PROFILER
    {
        int n = 0;

        QTAILQ_FOREACH(op, &s->ops, link) {
            n++;
        }
        qatomic_set(&prof->op_count, prof->op_count + n);
        if (n > prof->op_count_max) {
            qatomic_set(&prof->op_count_max, n);
        }

        n = s->nb_temps;
        qatomic_set(&prof->temp_count, prof->temp_count + n);
        if (n > prof->temp_count_max) {
            qatomic_set(&prof->temp_count_max, n);
        }
    }
#endif

#ifdef DEBUG_DISAS
    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)
                 && qemu_log_in_addr_range(pc_start))) {
        FILE *logfile = qemu_log_trylock();
        if (logfile) {
            fprintf(logfile, "OP:\n");
            tcg_dump_ops(s, logfile, false);
            fprintf(logfile, "\n");
            qemu_log_unlock(logfile);
        }
    }
#endif

#ifdef CONFIG_DEBUG_TCG
    /* Ensure all labels referenced have been emitted. */
    {
        TCGLabel *l;
        bool error = false;

        QSIMPLEQ_FOREACH(l, &s->labels, next) {
            if (unlikely(!l->present) && l->refs) {
                qemu_log_mask(CPU_LOG_TB_OP,
                              "$L%d referenced but not present.\n", l->id);
                error = true;
            }
        }
        assert(!error);
    }
#endif

#ifdef CONFIG_PROFILER
    qatomic_set(&prof->opt_time, prof->opt_time - profile_getclock());
#endif

#ifdef USE_TCG_OPTIMIZATIONS
    tcg_optimize(s);
#endif

#ifdef CONFIG_PROFILER
    qatomic_set(&prof->opt_time, prof->opt_time + profile_getclock());
    qatomic_set(&prof->la_time, prof->la_time - profile_getclock());
#endif

    reachable_code_pass(s);
    liveness_pass_1(s);

    if (s->nb_indirects > 0) {
#ifdef DEBUG_DISAS
        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_IND)
                     && qemu_log_in_addr_range(pc_start))) {
            FILE *logfile = qemu_log_trylock();
            if (logfile) {
                fprintf(logfile, "OP before indirect lowering:\n");
                tcg_dump_ops(s, logfile, false);
                fprintf(logfile, "\n");
                qemu_log_unlock(logfile);
            }
        }
#endif
        /* Replace indirect temps with direct temps. */
        if (liveness_pass_2(s)) {
            /* If changes were made, re-run liveness. */
            liveness_pass_1(s);
        }
    }

#ifdef CONFIG_PROFILER
    qatomic_set(&prof->la_time, prof->la_time + profile_getclock());
#endif

#ifdef DEBUG_DISAS
    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_OPT)
                 && qemu_log_in_addr_range(pc_start))) {
        FILE *logfile = qemu_log_trylock();
        if (logfile) {
            fprintf(logfile, "OP after optimization and liveness analysis:\n");
            tcg_dump_ops(s, logfile, true);
            fprintf(logfile, "\n");
            qemu_log_unlock(logfile);
        }
    }
#endif

    /* Initialize goto_tb jump offsets. */
    tb->jmp_reset_offset[0] = TB_JMP_OFFSET_INVALID;
    tb->jmp_reset_offset[1] = TB_JMP_OFFSET_INVALID;
    tb->jmp_insn_offset[0] = TB_JMP_OFFSET_INVALID;
    tb->jmp_insn_offset[1] = TB_JMP_OFFSET_INVALID;

    tcg_reg_alloc_start(s);

    /*
     * Reset the buffer pointers when restarting after overflow.
     * TODO: Move this into translate-all.c with the rest of the
     * buffer management.  Having only this done here is confusing.
     */
    s->code_buf = tcg_splitwx_to_rw(tb->tc.ptr);
    s->code_ptr = s->code_buf;

#ifdef TCG_TARGET_NEED_LDST_LABELS
    QSIMPLEQ_INIT(&s->ldst_labels);
#endif
#ifdef TCG_TARGET_NEED_POOL_LABELS
    s->pool_labels = NULL;
#endif
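
    /*
     * Bookkeeping for the loop below: for each guest instruction,
     * INDEX_op_insn_start records TARGET_INSN_START_WORDS words of guest
     * state (the PC and friends) in s->gen_insn_data[], and the host code
     * offset at which the next instruction begins in s->gen_insn_end_off[].
     * Together these form the raw material for the search data that maps
     * a host PC back to guest state when an exception is raised.
     */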

    num_insns = -1;
    QTAILQ_FOREACH(op, &s->ops, link) {
        TCGOpcode opc = op->opc;

#ifdef CONFIG_PROFILER
        qatomic_set(&prof->table_op_count[opc], prof->table_op_count[opc] + 1);
#endif

        switch (opc) {
        case INDEX_op_mov_i32:
        case INDEX_op_mov_i64:
        case INDEX_op_mov_vec:
            tcg_reg_alloc_mov(s, op);
            break;
        case INDEX_op_dup_vec:
            tcg_reg_alloc_dup(s, op);
            break;
        case INDEX_op_insn_start:
            if (num_insns >= 0) {
                size_t off = tcg_current_code_size(s);
                s->gen_insn_end_off[num_insns] = off;
                /* Assert that we do not overflow our stored offset. */
                assert(s->gen_insn_end_off[num_insns] == off);
            }
            num_insns++;
            for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
                target_ulong a;
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
                a = deposit64(op->args[i * 2], 32, 32, op->args[i * 2 + 1]);
#else
                a = op->args[i];
#endif
                s->gen_insn_data[num_insns][i] = a;
            }
            break;
        case INDEX_op_discard:
            temp_dead(s, arg_temp(op->args[0]));
            break;
        case INDEX_op_set_label:
            tcg_reg_alloc_bb_end(s, s->reserved_regs);
            tcg_out_label(s, arg_label(op->args[0]));
            break;
        case INDEX_op_call:
            tcg_reg_alloc_call(s, op);
            break;
        case INDEX_op_exit_tb:
            tcg_out_exit_tb(s, op->args[0]);
            break;
        case INDEX_op_goto_tb:
            tcg_out_goto_tb(s, op->args[0]);
            break;
        case INDEX_op_dup2_vec:
            if (tcg_reg_alloc_dup2(s, op)) {
                break;
            }
            /* fall through */
        default:
            /* Sanity check that we've not introduced any unhandled opcodes. */
            tcg_debug_assert(tcg_op_supported(opc));
            /* Note: in order to speed up the code, it would be much
               faster to have specialized register allocator functions for
               some common argument patterns */
            tcg_reg_alloc_op(s, op);
            break;
        }
        /* Test for (pending) buffer overflow.  The assumption is that any
           one operation beginning below the high water mark cannot overrun
           the buffer completely.  Thus we can test for overflow after
           generating code without having to check during generation. */
        if (unlikely((void *)s->code_ptr > s->code_gen_highwater)) {
            return -1;
        }
        /* Test for TB overflow, as seen by gen_insn_end_off. */
        if (unlikely(tcg_current_code_size(s) > UINT16_MAX)) {
            return -2;
        }
    }
    tcg_debug_assert(num_insns >= 0);
    s->gen_insn_end_off[num_insns] = tcg_current_code_size(s);

    /* Generate TB finalization at the end of block */
#ifdef TCG_TARGET_NEED_LDST_LABELS
    i = tcg_out_ldst_finalize(s);
    if (i < 0) {
        return i;
    }
#endif
#ifdef TCG_TARGET_NEED_POOL_LABELS
    i = tcg_out_pool_finalize(s);
    if (i < 0) {
        return i;
    }
#endif
    if (!tcg_resolve_relocs(s)) {
        return -2;
    }

#ifndef CONFIG_TCG_INTERPRETER
    /* flush instruction cache */
    flush_idcache_range((uintptr_t)tcg_splitwx_to_rx(s->code_buf),
                        (uintptr_t)s->code_buf,
                        tcg_ptr_byte_diff(s->code_ptr, s->code_buf));
#endif

    return tcg_current_code_size(s);
}
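
/*
 * A sketch of the expected calling pattern, as seen from the translator
 * loop (the precise recovery action belongs to the caller):
 *
 *     gen_code_size = tcg_gen_code(tcg_ctx, tb, pc);
 *     if (unlikely(gen_code_size < 0)) {
 *         ...the TB did not fit: discard it, make room in the
 *         code buffer, and retry the translation...
 *     }
 *
 * A negative return is only ever a "retry" indication; on success the
 * return value is the number of bytes of host code generated.
 */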

#ifdef CONFIG_PROFILER
void tcg_dump_info(GString *buf)
{
    TCGProfile prof = {};
    const TCGProfile *s;
    int64_t tb_count;
    int64_t tb_div_count;
    int64_t tot;

    tcg_profile_snapshot_counters(&prof);
    s = &prof;
    tb_count = s->tb_count;
    tb_div_count = tb_count ? tb_count : 1;
    tot = s->interm_time + s->code_time;

    g_string_append_printf(buf, "JIT cycles          %" PRId64
                           " (%0.3f s at 2.4 GHz)\n",
                           tot, tot / 2.4e9);
    g_string_append_printf(buf, "translated TBs      %" PRId64
                           " (aborted=%" PRId64 " %0.1f%%)\n",
                           tb_count, s->tb_count1 - tb_count,
                           (double)(s->tb_count1 - s->tb_count)
                           / (s->tb_count1 ? s->tb_count1 : 1) * 100.0);
    g_string_append_printf(buf, "avg ops/TB          %0.1f max=%d\n",
                           (double)s->op_count / tb_div_count, s->op_count_max);
    g_string_append_printf(buf, "deleted ops/TB      %0.2f\n",
                           (double)s->del_op_count / tb_div_count);
    g_string_append_printf(buf, "avg temps/TB        %0.2f max=%d\n",
                           (double)s->temp_count / tb_div_count,
                           s->temp_count_max);
    g_string_append_printf(buf, "avg host code/TB    %0.1f\n",
                           (double)s->code_out_len / tb_div_count);
    g_string_append_printf(buf, "avg search data/TB  %0.1f\n",
                           (double)s->search_out_len / tb_div_count);

    g_string_append_printf(buf, "cycles/op           %0.1f\n",
                           s->op_count ? (double)tot / s->op_count : 0);
    g_string_append_printf(buf, "cycles/in byte      %0.1f\n",
                           s->code_in_len ? (double)tot / s->code_in_len : 0);
    g_string_append_printf(buf, "cycles/out byte     %0.1f\n",
                           s->code_out_len ? (double)tot / s->code_out_len : 0);
    g_string_append_printf(buf, "cycles/search byte  %0.1f\n",
                           s->search_out_len ?
                           (double)tot / s->search_out_len : 0);
    if (tot == 0) {
        tot = 1;
    }
    g_string_append_printf(buf, "  gen_interm time   %0.1f%%\n",
                           (double)s->interm_time / tot * 100.0);
    g_string_append_printf(buf, "  gen_code time     %0.1f%%\n",
                           (double)s->code_time / tot * 100.0);
    g_string_append_printf(buf, "optim./code time    %0.1f%%\n",
                           (double)s->opt_time / (s->code_time ?
                                                  s->code_time : 1)
                           * 100.0);
    g_string_append_printf(buf, "liveness/code time  %0.1f%%\n",
                           (double)s->la_time / (s->code_time ?
                                                 s->code_time : 1) * 100.0);
    g_string_append_printf(buf, "cpu_restore count   %" PRId64 "\n",
                           s->restore_count);
    g_string_append_printf(buf, "  avg cycles        %0.1f\n",
                           s->restore_count ?
                           (double)s->restore_time / s->restore_count : 0);
}
#else
void tcg_dump_info(GString *buf)
{
    g_string_append_printf(buf, "[TCG profiler not compiled]\n");
}
#endif

#ifdef ELF_HOST_MACHINE
/* In order to use this feature, the backend needs to do three things:

   (1) Define ELF_HOST_MACHINE to indicate both what value to
       put into the ELF image and to indicate support for the feature.

   (2) Define tcg_register_jit.  This should create a buffer containing
       the contents of a .debug_frame section that describes the post-
       prologue unwind info for the tcg machine.

   (3) Call tcg_register_jit_int, with the constructed .debug_frame.
*/
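
/*
 * For example, a backend's tcg_register_jit() typically just hands a
 * prebuilt .debug_frame image to the common code.  A minimal sketch,
 * assuming the backend defines a suitable DebugFrame structure for its
 * own unwind info:
 *
 *     static const DebugFrame debug_frame = {
 *         ...CIE and FDE fields describing the prologue's frame...
 *     };
 *
 *     void tcg_register_jit(const void *buf, size_t buf_size)
 *     {
 *         tcg_register_jit_int(buf, buf_size,
 *                              &debug_frame, sizeof(debug_frame));
 *     }
 */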

/* Begin GDB interface.  THE FOLLOWING MUST MATCH GDB DOCS. */
typedef enum {
    JIT_NOACTION = 0,
    JIT_REGISTER_FN,
    JIT_UNREGISTER_FN
} jit_actions_t;

struct jit_code_entry {
    struct jit_code_entry *next_entry;
    struct jit_code_entry *prev_entry;
    const void *symfile_addr;
    uint64_t symfile_size;
};

struct jit_descriptor {
    uint32_t version;
    uint32_t action_flag;
    struct jit_code_entry *relevant_entry;
    struct jit_code_entry *first_entry;
};

void __jit_debug_register_code(void) __attribute__((noinline));
void __jit_debug_register_code(void)
{
    asm("");
}

/* Must statically initialize the version, because GDB may check
   the version before we can set it. */
struct jit_descriptor __jit_debug_descriptor = { 1, 0, 0, 0 };

/* End GDB interface. */

/*
 * Return the offset of @str within @strtab.  @str must be present in
 * the table; otherwise this loop would run off the end of it.
 */
static int find_string(const char *strtab, const char *str)
{
    const char *p = strtab + 1;

    while (1) {
        if (strcmp(p, str) == 0) {
            return p - strtab;
        }
        p += strlen(p) + 1;
    }
}

static void tcg_register_jit_int(const void *buf_ptr, size_t buf_size,
                                 const void *debug_frame,
                                 size_t debug_frame_size)
{
    struct __attribute__((packed)) DebugInfo {
        uint32_t  len;
        uint16_t  version;
        uint32_t  abbrev;
        uint8_t   ptr_size;
        uint8_t   cu_die;
        uint16_t  cu_lang;
        uintptr_t cu_low_pc;
        uintptr_t cu_high_pc;
        uint8_t   fn_die;
        char      fn_name[16];
        uintptr_t fn_low_pc;
        uintptr_t fn_high_pc;
        uint8_t   cu_eoc;
    };

    struct ElfImage {
        ElfW(Ehdr) ehdr;
        ElfW(Phdr) phdr;
        ElfW(Shdr) shdr[7];
        ElfW(Sym)  sym[2];
        struct DebugInfo di;
        uint8_t    da[24];
        char       str[80];
    };

    struct ElfImage *img;

    static const struct ElfImage img_template = {
        .ehdr = {
            .e_ident[EI_MAG0] = ELFMAG0,
            .e_ident[EI_MAG1] = ELFMAG1,
            .e_ident[EI_MAG2] = ELFMAG2,
            .e_ident[EI_MAG3] = ELFMAG3,
            .e_ident[EI_CLASS] = ELF_CLASS,
            .e_ident[EI_DATA] = ELF_DATA,
            .e_ident[EI_VERSION] = EV_CURRENT,
            .e_type = ET_EXEC,
            .e_machine = ELF_HOST_MACHINE,
            .e_version = EV_CURRENT,
            .e_phoff = offsetof(struct ElfImage, phdr),
            .e_shoff = offsetof(struct ElfImage, shdr),
            .e_ehsize = sizeof(ElfW(Ehdr)),
            .e_phentsize = sizeof(ElfW(Phdr)),
            .e_phnum = 1,
            .e_shentsize = sizeof(ElfW(Shdr)),
            .e_shnum = ARRAY_SIZE(img->shdr),
            .e_shstrndx = ARRAY_SIZE(img->shdr) - 1,
#ifdef ELF_HOST_FLAGS
            .e_flags = ELF_HOST_FLAGS,
#endif
#ifdef ELF_OSABI
            .e_ident[EI_OSABI] = ELF_OSABI,
#endif
        },
        .phdr = {
            .p_type = PT_LOAD,
            .p_flags = PF_X,
        },
        .shdr = {
            [0] = { .sh_type = SHT_NULL },
            /* Trick: The contents of code_gen_buffer are not present in
               this fake ELF file; that got allocated elsewhere.  Therefore
               we mark .text as SHT_NOBITS (similar to .bss) so that readers
               will not look for contents.  We can record any address. */
            [1] = { /* .text */
                .sh_type = SHT_NOBITS,
                .sh_flags = SHF_EXECINSTR | SHF_ALLOC,
            },
            [2] = { /* .debug_info */
                .sh_type = SHT_PROGBITS,
                .sh_offset = offsetof(struct ElfImage, di),
                .sh_size = sizeof(struct DebugInfo),
            },
            [3] = { /* .debug_abbrev */
                .sh_type = SHT_PROGBITS,
                .sh_offset = offsetof(struct ElfImage, da),
                .sh_size = sizeof(img->da),
            },
            [4] = { /* .debug_frame */
                .sh_type = SHT_PROGBITS,
                .sh_offset = sizeof(struct ElfImage),
            },
            [5] = { /* .symtab */
                .sh_type = SHT_SYMTAB,
                .sh_offset = offsetof(struct ElfImage, sym),
                .sh_size = sizeof(img->sym),
                .sh_info = 1,
                .sh_link = ARRAY_SIZE(img->shdr) - 1,
                .sh_entsize = sizeof(ElfW(Sym)),
            },
            [6] = { /* .strtab */
                .sh_type = SHT_STRTAB,
                .sh_offset = offsetof(struct ElfImage, str),
                .sh_size = sizeof(img->str),
            }
        },
        .sym = {
            [1] = { /* code_gen_buffer */
                .st_info = ELF_ST_INFO(STB_GLOBAL, STT_FUNC),
                .st_shndx = 1,
            }
        },
        .di = {
            .len = sizeof(struct DebugInfo) - 4,
            .version = 2,
            .ptr_size = sizeof(void *),
            .cu_die = 1,
            .cu_lang = 0x8001,  /* DW_LANG_Mips_Assembler */
            .fn_die = 2,
            .fn_name = "code_gen_buffer"
        },
        .da = {
            1,          /* abbrev number (the cu) */
            0x11, 1,    /* DW_TAG_compile_unit, has children */
            0x13, 0x5,  /* DW_AT_language, DW_FORM_data2 */
            0x11, 0x1,  /* DW_AT_low_pc, DW_FORM_addr */
            0x12, 0x1,  /* DW_AT_high_pc, DW_FORM_addr */
            0, 0,       /* end of abbrev */
            2,          /* abbrev number (the fn) */
            0x2e, 0,    /* DW_TAG_subprogram, no children */
            0x3, 0x8,   /* DW_AT_name, DW_FORM_string */
            0x11, 0x1,  /* DW_AT_low_pc, DW_FORM_addr */
            0x12, 0x1,  /* DW_AT_high_pc, DW_FORM_addr */
            0, 0,       /* end of abbrev */
            0           /* no more abbrev */
        },
        .str = "\0" ".text\0" ".debug_info\0" ".debug_abbrev\0"
               ".debug_frame\0" ".symtab\0" ".strtab\0" "code_gen_buffer",
    };

    /* We only need a single jit entry; statically allocate it. */
    static struct jit_code_entry one_entry;

    uintptr_t buf = (uintptr_t)buf_ptr;
    size_t img_size = sizeof(struct ElfImage) + debug_frame_size;
    DebugFrameHeader *dfh;

    img = g_malloc(img_size);
    *img = img_template;

    img->phdr.p_vaddr = buf;
    img->phdr.p_paddr = buf;
    img->phdr.p_memsz = buf_size;

    img->shdr[1].sh_name = find_string(img->str, ".text");
    img->shdr[1].sh_addr = buf;
    img->shdr[1].sh_size = buf_size;

    img->shdr[2].sh_name = find_string(img->str, ".debug_info");
    img->shdr[3].sh_name = find_string(img->str, ".debug_abbrev");

    img->shdr[4].sh_name = find_string(img->str, ".debug_frame");
    img->shdr[4].sh_size = debug_frame_size;

    img->shdr[5].sh_name = find_string(img->str, ".symtab");
    img->shdr[6].sh_name = find_string(img->str, ".strtab");

    img->sym[1].st_name = find_string(img->str, "code_gen_buffer");
    img->sym[1].st_value = buf;
    img->sym[1].st_size = buf_size;

    img->di.cu_low_pc = buf;
    img->di.cu_high_pc = buf + buf_size;
    img->di.fn_low_pc = buf;
    img->di.fn_high_pc = buf + buf_size;

    dfh = (DebugFrameHeader *)(img + 1);
    memcpy(dfh, debug_frame, debug_frame_size);
    dfh->fde.func_start = buf;
    dfh->fde.func_len = buf_size;

#ifdef DEBUG_JIT
    /* Enable this block to be able to debug the ELF image file creation.
       One can use readelf, objdump, or other inspection utilities. */
    {
        g_autofree char *jit = g_strdup_printf("%s/qemu.jit", g_get_tmp_dir());
        FILE *f = fopen(jit, "w+b");
        if (f) {
            /* fwrite returns the number of items written, here 0 or 1;
               the empty body merely consumes the return value to avoid
               an unused-result warning. */
            if (fwrite(img, img_size, 1, f) != 1) {
                /* Avoid stupid unused return value warning for fwrite. */
            }
            fclose(f);
        }
    }
#endif

    one_entry.symfile_addr = img;
    one_entry.symfile_size = img_size;

    __jit_debug_descriptor.action_flag = JIT_REGISTER_FN;
    __jit_debug_descriptor.relevant_entry = &one_entry;
    __jit_debug_descriptor.first_entry = &one_entry;
    __jit_debug_register_code();
}
#else
/* No support for the feature.  Provide the entry point expected by exec.c,
   and implement the internal function we declared earlier. */

static void tcg_register_jit_int(const void *buf, size_t size,
                                 const void *debug_frame,
                                 size_t debug_frame_size)
{
}

void tcg_register_jit(const void *buf, size_t buf_size)
{
}
#endif /* ELF_HOST_MACHINE */

#if !TCG_TARGET_MAYBE_vec
void tcg_expand_vec_op(TCGOpcode o, TCGType t, unsigned e, TCGArg a0, ...)
{
    g_assert_not_reached();
}
#endif