/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2009, 2011 Stefan Weil
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* Used for function call generation. */
#define TCG_TARGET_CALL_STACK_OFFSET 0
#define TCG_TARGET_STACK_ALIGN 8
#if TCG_TARGET_REG_BITS == 32
# define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_EVEN
# define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_EVEN
# define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_EVEN
#else
# define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_NORMAL
# define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_NORMAL
# define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_NORMAL
#endif
#define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_NORMAL

/*
 * Return the register-constraint set for opcodes not covered by the
 * per-op outop_* descriptors below.  The interpreter accepts any
 * register anywhere, so everything is plain "r" constraints.
 */
static TCGConstraintSetIndex
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
{
    switch (op) {
    case INDEX_op_goto_ptr:
        return C_O0_I1(r);

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
        return C_O1_I1(r, r);

    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
        return C_O0_I2(r, r);

    case INDEX_op_qemu_ld_i32:
        return C_O1_I1(r, r);
    case INDEX_op_qemu_ld_i64:
        /* On 32-bit hosts a 64-bit value occupies two output registers. */
        return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O2_I1(r, r, r);
    case INDEX_op_qemu_st_i32:
        return C_O0_I2(r, r);
    case INDEX_op_qemu_st_i64:
        return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r) : C_O0_I3(r, r, r);

    default:
        return C_NotImplemented;
    }
}

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R12,
    TCG_REG_R13,
    TCG_REG_R14,
    TCG_REG_R15,
    /* Either 2 or 4 of these are call clobbered, so use them last. */
    TCG_REG_R3,
    TCG_REG_R2,
    TCG_REG_R1,
    TCG_REG_R0,
};

/* No call arguments via registers. All will be stored on the "stack". */
static const int tcg_target_call_iarg_regs[] = { };

/* Return values land in R0..Rn, one register slot per TCG_TARGET_REG_BITS. */
static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
{
    tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
    tcg_debug_assert(slot >= 0 && slot < 128 / TCG_TARGET_REG_BITS);
    return TCG_REG_R0 + slot;
}

#ifdef CONFIG_DEBUG_TCG
static const char *const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "r00",
    "r01",
    "r02",
    "r03",
    "r04",
    "r05",
    "r06",
    "r07",
    "r08",
    "r09",
    "r10",
    "r11",
    "r12",
    "r13",
    "r14",
    "r15",
};
#endif

/*
 * Patch a pc-relative branch displacement into an already-emitted insn.
 * The only relocation type used by this backend is a 20-bit signed
 * displacement stored in the top bits of the 32-bit insn word
 * (bits [32-type, 32)).  Returns false if the displacement overflows.
 */
static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    /* Displacement is relative to the end of the insn being patched. */
    intptr_t diff = value - (intptr_t)(code_ptr + 1);

    tcg_debug_assert(addend == 0);
    tcg_debug_assert(type == 20);

    if (diff == sextract32(diff, 0, type)) {
        tcg_patch32(code_ptr, deposit32(*code_ptr, 32 - type, type, diff));
        return true;
    }
    return false;
}

/* Assert that a stack-frame access stays within the static frame. */
static void stack_bounds_check(TCGReg base, intptr_t offset)
{
    if (base == TCG_REG_CALL_STACK) {
        tcg_debug_assert(offset >= 0);
        tcg_debug_assert(offset < (TCG_STATIC_CALL_ARGS_SIZE +
                                   TCG_STATIC_FRAME_SIZE));
    }
}

/*
 * Insn emitters.  All insns are one 32-bit word: the opcode lives in
 * bits [0,8); register operands are 4-bit fields following it.  The
 * suffix of each tcg_out_op_* helper names the operand fields.
 */

/* op + label: 20-bit branch displacement filled in by patch_reloc. */
static void tcg_out_op_l(TCGContext *s, TCGOpcode op, TCGLabel *l0)
{
    tcg_insn_unit insn = 0;

    tcg_out_reloc(s, s->code_ptr, 20, l0, 0);
    insn = deposit32(insn, 0, 8, op);
    tcg_out32(s, insn);
}

/* op + absolute pointer, encoded as a 20-bit pc-relative displacement. */
static void tcg_out_op_p(TCGContext *s, TCGOpcode op, void *p0)
{
    tcg_insn_unit insn = 0;
    intptr_t diff;

    /* Special case for exit_tb: map null -> 0. */
    if (p0 == NULL) {
        diff = 0;
    } else {
        diff = p0 - (void *)(s->code_ptr + 1);
        tcg_debug_assert(diff != 0);
        if (diff != sextract32(diff, 0, 20)) {
            tcg_raise_tb_overflow(s);
        }
    }
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 12, 20, diff);
    tcg_out32(s, insn);
}

/* op + one register. */
static void tcg_out_op_r(TCGContext *s, TCGOpcode op, TCGReg r0)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    tcg_out32(s, insn);
}

/* op with no operands. */
static void tcg_out_op_v(TCGContext *s, TCGOpcode op)
{
    tcg_out32(s, (uint8_t)op);
}

/* op + register + 20-bit signed immediate. */
static void tcg_out_op_ri(TCGContext *s, TCGOpcode op, TCGReg r0, int32_t i1)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(i1 == sextract32(i1, 0, 20));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 20, i1);
    tcg_out32(s, insn);
}

/* op + register + label (20-bit reloc). */
static void tcg_out_op_rl(TCGContext *s, TCGOpcode op, TCGReg r0, TCGLabel *l1)
{
    tcg_insn_unit insn = 0;

    tcg_out_reloc(s, s->code_ptr, 20, l1, 0);
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    tcg_out32(s, insn);
}

/* op + two registers. */
static void tcg_out_op_rr(TCGContext *s, TCGOpcode op, TCGReg r0, TCGReg r1)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    tcg_out32(s, insn);
}

/* op + two registers + 16-bit MemOpIdx. */
static void tcg_out_op_rrm(TCGContext *s, TCGOpcode op,
                           TCGReg r0, TCGReg r1, TCGArg m2)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(m2 == extract32(m2, 0, 16));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 16, m2);
    tcg_out32(s, insn);
}

/* op + three registers. */
static void tcg_out_op_rrr(TCGContext *s, TCGOpcode op,
                           TCGReg r0, TCGReg r1, TCGReg r2)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    tcg_out32(s, insn);
}

/* op + two registers + 16-bit signed offset (loads/stores). */
static void tcg_out_op_rrs(TCGContext *s, TCGOpcode op,
                           TCGReg r0, TCGReg r1, intptr_t i2)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(i2 == sextract32(i2, 0, 16));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 16, i2);
    tcg_out32(s, insn);
}

/* op + two registers + two 6-bit immediates (extract pos/len). */
static void tcg_out_op_rrbb(TCGContext *s, TCGOpcode op, TCGReg r0,
                            TCGReg r1, uint8_t b2, uint8_t b3)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(b2 == extract32(b2, 0, 6));
    tcg_debug_assert(b3 == extract32(b3, 0, 6));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 6, b2);
    insn = deposit32(insn, 22, 6, b3);
    tcg_out32(s, insn);
}

/* op + three registers + 4-bit condition code. */
static void tcg_out_op_rrrc(TCGContext *s, TCGOpcode op,
                            TCGReg r0, TCGReg r1, TCGReg r2, TCGCond c3)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 4, c3);
    tcg_out32(s, insn);
}

/* op + three registers + two 6-bit immediates (deposit ofs/len). */
static void tcg_out_op_rrrbb(TCGContext *s, TCGOpcode op, TCGReg r0,
                             TCGReg r1, TCGReg r2, uint8_t b3, uint8_t b4)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(b3 == extract32(b3, 0, 6));
    tcg_debug_assert(b4 == extract32(b4, 0, 6));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 6, b3);
    insn = deposit32(insn, 26, 6, b4);
    tcg_out32(s, insn);
}

/* op + four registers. */
static void tcg_out_op_rrrr(TCGContext *s, TCGOpcode op,
                            TCGReg r0, TCGReg r1, TCGReg r2, TCGReg r3)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 4, r3);
    tcg_out32(s, insn);
}

/* op + five registers + condition code; fills the entire insn word. */
static void tcg_out_op_rrrrrc(TCGContext *s, TCGOpcode op,
                              TCGReg r0, TCGReg r1, TCGReg r2,
                              TCGReg r3, TCGReg r4, TCGCond c5)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 4, r3);
    insn = deposit32(insn, 24, 4, r4);
    insn = deposit32(insn, 28, 4, c5);
    tcg_out32(s, insn);
}

/*
 * Emit a load or store.  Offsets that do not fit the 16-bit field are
 * materialized into TCG_REG_TMP and folded into the base register.
 */
static void tcg_out_ldst(TCGContext *s, TCGOpcode op, TCGReg val,
                         TCGReg base, intptr_t offset)
{
    stack_bounds_check(base, offset);
    if (offset != sextract32(offset, 0, 16)) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, offset);
        tcg_out_op_rrr(s, INDEX_op_add, TCG_REG_TMP, TCG_REG_TMP, base);
        base = TCG_REG_TMP;
        offset = 0;
    }
    tcg_out_op_rrs(s, op, val, base, offset);
}

static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg val, TCGReg base,
                       intptr_t offset)
{
    switch (type) {
    case TCG_TYPE_I32:
        tcg_out_ldst(s, INDEX_op_ld_i32, val, base, offset);
        break;
#if TCG_TARGET_REG_BITS == 64
    case TCG_TYPE_I64:
        tcg_out_ldst(s, INDEX_op_ld_i64, val, base, offset);
        break;
#endif
    default:
        g_assert_not_reached();
    }
}

static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    tcg_out_op_rr(s, INDEX_op_mov, ret, arg);
    return true;
}

/*
 * Load an immediate.  Values fitting in 20 signed bits use an inline
 * tci_movi; larger values go through the constant pool via tci_movl.
 */
static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg)
{
    switch (type) {
    case TCG_TYPE_I32:
#if TCG_TARGET_REG_BITS == 64
        arg = (int32_t)arg;
        /* fall through */
    case TCG_TYPE_I64:
#endif
        break;
    default:
        g_assert_not_reached();
    }

    if (arg == sextract32(arg, 0, 20)) {
        tcg_out_op_ri(s, INDEX_op_tci_movi, ret, arg);
    } else {
        tcg_insn_unit insn = 0;

        new_pool_label(s, arg, 20, s->code_ptr, 0);
        insn = deposit32(insn, 0, 8, INDEX_op_tci_movl);
        insn = deposit32(insn, 8, 4, ret);
        tcg_out32(s, insn);
    }
}

static void tcg_out_extract(TCGContext *s, TCGType type, TCGReg rd,
                            TCGReg rs, unsigned pos, unsigned len)
{
    tcg_out_op_rrbb(s, INDEX_op_extract, rd, rs, pos, len);
}

static const TCGOutOpExtract outop_extract = {
    .base.static_constraint = C_O1_I1(r, r),
    .out_rr = tcg_out_extract,
};

static void tcg_out_sextract(TCGContext *s, TCGType type, TCGReg rd,
                             TCGReg rs, unsigned pos, unsigned len)
{
    tcg_out_op_rrbb(s, INDEX_op_sextract, rd, rs, pos, len);
}

static const TCGOutOpExtract outop_sextract = {
    .base.static_constraint = C_O1_I1(r, r),
    .out_rr = tcg_out_sextract,
};

static const TCGOutOpExtract2 outop_extract2 = {
    .base.static_constraint = C_NotImplemented,
};

/* All fixed-width extensions are implemented via (s)extract. */
static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
{
    tcg_out_sextract(s, type, rd, rs, 0, 8);
}

static void tcg_out_ext8u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_extract(s, TCG_TYPE_REG, rd, rs, 0, 8);
}

static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
{
    tcg_out_sextract(s, type, rd, rs, 0, 16);
}

static void tcg_out_ext16u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_extract(s, TCG_TYPE_REG, rd, rs, 0, 16);
}

static void tcg_out_ext32s(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
    tcg_out_sextract(s, TCG_TYPE_I64, rd, rs, 0, 32);
}

static void tcg_out_ext32u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
    tcg_out_extract(s, TCG_TYPE_I64, rd, rs, 0, 32);
}

static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_ext32s(s, rd, rs);
}

static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_ext32u(s, rd, rs);
}

static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
    tcg_out_mov(s, TCG_TYPE_I32, rd, rs);
}

/* No exchange insn; tell the register allocator we cannot do it. */
static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
{
    return false;
}

static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                             tcg_target_long imm)
{
    /* This function is only used for passing structs by reference. */
    g_assert_not_reached();
}

/*
 * Emit a helper call.  The function pointer and its ffi_cif go into the
 * constant pool as a pair; the insn carries only a 2-bit return-size
 * selector: 0 = void, 1/2/3 = 4/8/16-byte return.
 */
static void tcg_out_call(TCGContext *s, const tcg_insn_unit *func,
                         const TCGHelperInfo *info)
{
    ffi_cif *cif = info->cif;
    tcg_insn_unit insn = 0;
    uint8_t which;

    if (cif->rtype == &ffi_type_void) {
        which = 0;
    } else {
        tcg_debug_assert(cif->rtype->size == 4 ||
                         cif->rtype->size == 8 ||
                         cif->rtype->size == 16);
        which = ctz32(cif->rtype->size) - 1;
    }
    new_pool_l2(s, 20, s->code_ptr, 0, (uintptr_t)func, (uintptr_t)cif);
    insn = deposit32(insn, 0, 8, INDEX_op_call);
    insn = deposit32(insn, 8, 4, which);
    tcg_out32(s, insn);
}

#if TCG_TARGET_REG_BITS == 64
# define CASE_32_64(x) \
        case glue(glue(INDEX_op_, x), _i64): \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x) \
        case glue(glue(INDEX_op_, x), _i64):
#else
# define CASE_32_64(x) \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x)
#endif

static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg)
{
    tcg_out_op_p(s, INDEX_op_exit_tb, (void *)arg);
}

static void tcg_out_goto_tb(TCGContext *s, int which)
{
    /* indirect jump method. */
    tcg_out_op_p(s, INDEX_op_goto_tb, (void *)get_jmp_target_addr(s, which));
    set_jmp_reset_offset(s, which);
}

void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
                              uintptr_t jmp_rx, uintptr_t jmp_rw)
{
    /* Always indirect, nothing to do */
}

static void tgen_add(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_add, a0, a1, a2);
}

static const TCGOutOpBinary outop_add = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_add,
};

/* Carry ops are only provided at the native register width. */
static TCGConstraintSetIndex cset_addsubcarry(TCGType type, unsigned flags)
{
    return type == TCG_TYPE_REG ? C_O1_I2(r, r, r) : C_NotImplemented;
}

static void tgen_addco(TCGContext *s, TCGType type,
                       TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_addco, a0, a1, a2);
}

static const TCGOutOpBinary outop_addco = {
    .base.static_constraint = C_Dynamic,
    .base.dynamic_constraint = cset_addsubcarry,
    .out_rrr = tgen_addco,
};

static void tgen_addci(TCGContext *s, TCGType type,
                       TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_addci, a0, a1, a2);
}

static const TCGOutOpAddSubCarry outop_addci = {
    .base.static_constraint = C_Dynamic,
    .base.dynamic_constraint = cset_addsubcarry,
    .out_rrr = tgen_addci,
};

static void tgen_addcio(TCGContext *s, TCGType type,
                        TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_addcio, a0, a1, a2);
}

static const TCGOutOpBinary outop_addcio = {
    .base.static_constraint = C_Dynamic,
    .base.dynamic_constraint = cset_addsubcarry,
    .out_rrr = tgen_addcio,
};

static void tcg_out_set_carry(TCGContext *s)
{
    tcg_out_op_v(s, INDEX_op_tci_setcarry);
}

static void tgen_and(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_and, a0, a1, a2);
}

static const TCGOutOpBinary outop_and = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_and,
};

static void tgen_andc(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_andc, a0, a1, a2);
}

static const TCGOutOpBinary outop_andc = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_andc,
};

/* clz/ctz use dedicated 32-bit tci opcodes for the narrow type. */
static void tgen_clz(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    TCGOpcode opc = (type == TCG_TYPE_I32
                     ? INDEX_op_tci_clz32
                     : INDEX_op_clz);
    tcg_out_op_rrr(s, opc, a0, a1, a2);
}

static const TCGOutOpBinary outop_clz = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_clz,
};

static void tgen_ctz(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    TCGOpcode opc = (type == TCG_TYPE_I32
                     ? INDEX_op_tci_ctz32
                     : INDEX_op_ctz);
    tcg_out_op_rrr(s, opc, a0, a1, a2);
}

static const TCGOutOpBinary outop_ctz = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_ctz,
};

static void tgen_deposit(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
                         TCGReg a2, unsigned ofs, unsigned len)
{
    tcg_out_op_rrrbb(s, INDEX_op_deposit, a0, a1, a2, ofs, len);
}

static const TCGOutOpDeposit outop_deposit = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_deposit,
};

static void tgen_divs(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    TCGOpcode opc = (type == TCG_TYPE_I32
                     ? INDEX_op_tci_divs32
                     : INDEX_op_divs);
    tcg_out_op_rrr(s, opc, a0, a1, a2);
}

static const TCGOutOpBinary outop_divs = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_divs,
};

static const TCGOutOpDivRem outop_divs2 = {
    .base.static_constraint = C_NotImplemented,
};

static void tgen_divu(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    TCGOpcode opc = (type == TCG_TYPE_I32
                     ? INDEX_op_tci_divu32
                     : INDEX_op_divu);
    tcg_out_op_rrr(s, opc, a0, a1, a2);
}

static const TCGOutOpBinary outop_divu = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_divu,
};

static const TCGOutOpDivRem outop_divu2 = {
    .base.static_constraint = C_NotImplemented,
};

static void tgen_eqv(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_eqv, a0, a1, a2);
}

static const TCGOutOpBinary outop_eqv = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_eqv,
};

#if TCG_TARGET_REG_BITS == 64
/* High half of a 64-bit value: extract bits [32,64). */
static void tgen_extrh_i64_i32(TCGContext *s, TCGType t, TCGReg a0, TCGReg a1)
{
    tcg_out_extract(s, TCG_TYPE_I64, a0, a1, 32, 32);
}

static const TCGOutOpUnary outop_extrh_i64_i32 = {
    .base.static_constraint = C_O1_I1(r, r),
    .out_rr = tgen_extrh_i64_i32,
};
#endif

static void tgen_mul(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_mul, a0, a1, a2);
}

static const TCGOutOpBinary outop_mul = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_mul,
};

/* Double-width multiply is only provided at the native register width. */
static TCGConstraintSetIndex cset_mul2(TCGType type, unsigned flags)
{
    return type == TCG_TYPE_REG ? C_O2_I2(r, r, r, r) : C_NotImplemented;
}

static void tgen_muls2(TCGContext *s, TCGType type,
                       TCGReg a0, TCGReg a1, TCGReg a2, TCGReg a3)
{
    tcg_out_op_rrrr(s, INDEX_op_muls2, a0, a1, a2, a3);
}

static const TCGOutOpMul2 outop_muls2 = {
    .base.static_constraint = C_Dynamic,
    .base.dynamic_constraint = cset_mul2,
    .out_rrrr = tgen_muls2,
};

static const TCGOutOpBinary outop_mulsh = {
    .base.static_constraint = C_NotImplemented,
};

static void tgen_mulu2(TCGContext *s, TCGType type,
                       TCGReg a0, TCGReg a1, TCGReg a2, TCGReg a3)
{
    tcg_out_op_rrrr(s, INDEX_op_mulu2, a0, a1, a2, a3);
}

static const TCGOutOpMul2 outop_mulu2 = {
    .base.static_constraint = C_Dynamic,
    .base.dynamic_constraint = cset_mul2,
    .out_rrrr = tgen_mulu2,
};

static const TCGOutOpBinary outop_muluh = {
    .base.static_constraint = C_NotImplemented,
};

static void tgen_nand(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_nand, a0, a1, a2);
}

static const TCGOutOpBinary outop_nand = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_nand,
};

static void tgen_nor(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_nor, a0, a1, a2);
}

static const TCGOutOpBinary outop_nor = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_nor,
};

static void tgen_or(TCGContext *s, TCGType type,
                    TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_or, a0, a1, a2);
}

static const TCGOutOpBinary outop_or = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_or,
};

static void tgen_orc(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_orc, a0, a1, a2);
}

static const TCGOutOpBinary outop_orc = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_orc,
};

static void tgen_rems(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    TCGOpcode opc = (type == TCG_TYPE_I32
                     ? INDEX_op_tci_rems32
                     : INDEX_op_rems);
    tcg_out_op_rrr(s, opc, a0, a1, a2);
}

static const TCGOutOpBinary outop_rems = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_rems,
};

static void tgen_remu(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    TCGOpcode opc = (type == TCG_TYPE_I32
                     ? INDEX_op_tci_remu32
                     : INDEX_op_remu);
    tcg_out_op_rrr(s, opc, a0, a1, a2);
}

static const TCGOutOpBinary outop_remu = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_remu,
};

static void tgen_rotl(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    TCGOpcode opc = (type == TCG_TYPE_I32
                     ? INDEX_op_tci_rotl32
                     : INDEX_op_rotl);
    tcg_out_op_rrr(s, opc, a0, a1, a2);
}

static const TCGOutOpBinary outop_rotl = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_rotl,
};

static void tgen_rotr(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    TCGOpcode opc = (type == TCG_TYPE_I32
                     ? INDEX_op_tci_rotr32
                     : INDEX_op_rotr);
    tcg_out_op_rrr(s, opc, a0, a1, a2);
}

static const TCGOutOpBinary outop_rotr = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_rotr,
};

static void tgen_sar(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    /* Narrow input: sign-extend so the full-width shift sees the sign. */
    if (type < TCG_TYPE_REG) {
        tcg_out_ext32s(s, TCG_REG_TMP, a1);
        a1 = TCG_REG_TMP;
    }
    tcg_out_op_rrr(s, INDEX_op_sar, a0, a1, a2);
}

static const TCGOutOpBinary outop_sar = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_sar,
};

static void tgen_shl(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_shl, a0, a1, a2);
}

static const TCGOutOpBinary outop_shl = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_shl,
};

static void tgen_shr(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    /* Narrow input: zero-extend so the full-width shift brings in zeros. */
    if (type < TCG_TYPE_REG) {
        tcg_out_ext32u(s, TCG_REG_TMP, a1);
        a1 = TCG_REG_TMP;
    }
    tcg_out_op_rrr(s, INDEX_op_shr, a0, a1, a2);
}

static const TCGOutOpBinary outop_shr = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_shr,
};

static void tgen_sub(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_sub, a0, a1, a2);
}

static const TCGOutOpSubtract outop_sub = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_sub,
};

static void tgen_subbo(TCGContext *s, TCGType type,
                       TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_subbo, a0, a1, a2);
}

static const TCGOutOpAddSubCarry outop_subbo = {
    .base.static_constraint = C_Dynamic,
    .base.dynamic_constraint = cset_addsubcarry,
    .out_rrr = tgen_subbo,
};

static void tgen_subbi(TCGContext *s, TCGType type,
                       TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_subbi, a0, a1, a2);
}

static const TCGOutOpAddSubCarry outop_subbi = {
    .base.static_constraint = C_Dynamic,
    .base.dynamic_constraint = cset_addsubcarry,
    .out_rrr = tgen_subbi,
};

static void tgen_subbio(TCGContext *s, TCGType type,
                        TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_subbio, a0, a1, a2);
}

static const TCGOutOpAddSubCarry outop_subbio = {
    .base.static_constraint = C_Dynamic,
    .base.dynamic_constraint = cset_addsubcarry,
    .out_rrr = tgen_subbio,
};

static void tcg_out_set_borrow(TCGContext *s)
{
    tcg_out_op_v(s, INDEX_op_tci_setcarry);  /* borrow == carry */
}

static void tgen_xor(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_xor, a0, a1, a2);
}

static const TCGOutOpBinary outop_xor = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_xor,
};

static void tgen_ctpop(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
    tcg_out_op_rr(s, INDEX_op_ctpop, a0, a1);
}

/* ctpop only at native register width. */
static TCGConstraintSetIndex cset_ctpop(TCGType type, unsigned flags)
{
    return type == TCG_TYPE_REG ? C_O1_I1(r, r) : C_NotImplemented;
}

static const TCGOutOpUnary outop_ctpop = {
    .base.static_constraint = C_Dynamic,
    .base.dynamic_constraint = cset_ctpop,
    .out_rr = tgen_ctpop,
};

static void tgen_bswap16(TCGContext *s, TCGType type,
                         TCGReg a0, TCGReg a1, unsigned flags)
{
    tcg_out_op_rr(s, INDEX_op_bswap16, a0, a1);
    /* Honor a request to sign-extend the 16-bit result. */
    if (flags & TCG_BSWAP_OS) {
        tcg_out_sextract(s, TCG_TYPE_REG, a0, a0, 0, 16);
    }
}

static const TCGOutOpBswap outop_bswap16 = {
    .base.static_constraint = C_O1_I1(r, r),
    .out_rr = tgen_bswap16,
};

static void tgen_bswap32(TCGContext *s, TCGType type,
                         TCGReg a0, TCGReg a1, unsigned flags)
{
    tcg_out_op_rr(s, INDEX_op_bswap32, a0, a1);
    /* Honor a request to sign-extend the 32-bit result. */
    if (flags & TCG_BSWAP_OS) {
        tcg_out_sextract(s, TCG_TYPE_REG, a0, a0, 0, 32);
    }
}

static const TCGOutOpBswap outop_bswap32 = {
    .base.static_constraint = C_O1_I1(r, r),
    .out_rr = tgen_bswap32,
};

#if TCG_TARGET_REG_BITS == 64
static void tgen_bswap64(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
    tcg_out_op_rr(s, INDEX_op_bswap64, a0, a1);
}

static const TCGOutOpUnary outop_bswap64 = {
    .base.static_constraint = C_O1_I1(r, r),
    .out_rr = tgen_bswap64,
};
#endif

static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
    tcg_out_op_rr(s, INDEX_op_neg, a0, a1);
}

static const TCGOutOpUnary outop_neg = {
    .base.static_constraint = C_O1_I1(r, r),
    .out_rr = tgen_neg,
};

static void tgen_not(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
    tcg_out_op_rr(s, INDEX_op_not, a0, a1);
}

static const TCGOutOpUnary outop_not = {
    .base.static_constraint = C_O1_I1(r, r),
    .out_rr = tgen_not,
};

static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond,
                         TCGReg dest, TCGReg arg1, TCGReg arg2)
{
    TCGOpcode opc = (type == TCG_TYPE_I32
                     ? INDEX_op_tci_setcond32
                     : INDEX_op_setcond);
    tcg_out_op_rrrc(s, opc, dest, arg1, arg2, cond);
}

static const TCGOutOpSetcond outop_setcond = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_setcond,
};

/* negsetcond = setcond followed by negation of the 0/1 result. */
static void tgen_negsetcond(TCGContext *s, TCGType type, TCGCond cond,
                            TCGReg dest, TCGReg arg1, TCGReg arg2)
{
    tgen_setcond(s, type, cond, dest, arg1, arg2);
    tgen_neg(s, type, dest, dest);
}

static const TCGOutOpSetcond outop_negsetcond = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_negsetcond,
};

/* brcond = setcond into TMP, then branch on TMP. */
static void tgen_brcond(TCGContext *s, TCGType type, TCGCond cond,
                        TCGReg arg0, TCGReg arg1, TCGLabel *l)
{
    tgen_setcond(s, type, cond, TCG_REG_TMP, arg0, arg1);
    tcg_out_op_rl(s, INDEX_op_brcond, TCG_REG_TMP, l);
}

static const TCGOutOpBrcond outop_brcond = {
    .base.static_constraint = C_O0_I2(r, r),
    .out_rr = tgen_brcond,
};

/*
 * NOTE(review): parameter "consf_vf" looks like a typo for "const_vf";
 * it is unused here, so behavior is unaffected.  All operands are
 * register-only per the constraint, so const_c2/const_vt are ignored.
 */
static void tgen_movcond(TCGContext *s, TCGType type, TCGCond cond,
                         TCGReg ret, TCGReg c1, TCGArg c2, bool const_c2,
                         TCGArg vt, bool const_vt, TCGArg vf, bool consf_vf)
{
    TCGOpcode opc = (type == TCG_TYPE_I32
                     ? INDEX_op_tci_movcond32
                     : INDEX_op_movcond);
    tcg_out_op_rrrrrc(s, opc, ret, c1, c2, vt, vf, cond);
}

static const TCGOutOpMovcond outop_movcond = {
    .base.static_constraint = C_O1_I4(r, r, r, r, r),
    .out = tgen_movcond,
};

/* Double-word compare-and-branch, used only on 32-bit hosts. */
static void tgen_brcond2(TCGContext *s, TCGCond cond, TCGReg al, TCGReg ah,
                         TCGArg bl, bool const_bl,
                         TCGArg bh, bool const_bh, TCGLabel *l)
{
    tcg_out_op_rrrrrc(s, INDEX_op_setcond2_i32, TCG_REG_TMP,
                      al, ah, bl, bh, cond);
    tcg_out_op_rl(s, INDEX_op_brcond, TCG_REG_TMP, l);
}

#if TCG_TARGET_REG_BITS != 32
__attribute__((unused))
#endif
static const TCGOutOpBrcond2 outop_brcond2 = {
    .base.static_constraint = C_O0_I4(r, r, r, r),
    .out = tgen_brcond2,
};

static void tgen_setcond2(TCGContext *s, TCGCond cond, TCGReg ret,
                          TCGReg al, TCGReg ah,
                          TCGArg bl, bool const_bl,
                          TCGArg bh, bool const_bh)
{
    tcg_out_op_rrrrrc(s, INDEX_op_setcond2_i32, ret, al, ah, bl, bh, cond);
}

#if TCG_TARGET_REG_BITS != 32
__attribute__((unused))
#endif
static const TCGOutOpSetcond2 outop_setcond2 = {
    .base.static_constraint = C_O1_I4(r, r, r, r, r),
    .out = tgen_setcond2,
};

/* Emit one TCG op not already handled by a dedicated emitter above. */
static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS])
{
    switch (opc) {
    case INDEX_op_goto_ptr:
        tcg_out_op_r(s, opc, args[0]);
        break;

    case INDEX_op_br:
        tcg_out_op_l(s, opc, arg_label(args[0]));
        break;

    CASE_32_64(ld8u)
    CASE_32_64(ld8s)
    CASE_32_64(ld16u)
    CASE_32_64(ld16s)
    case INDEX_op_ld_i32:
    CASE_64(ld32u)
    CASE_64(ld32s)
    CASE_64(ld)
    CASE_32_64(st8)
    CASE_32_64(st16)
    case INDEX_op_st_i32:
    CASE_64(st32)
    CASE_64(st)
        tcg_out_ldst(s, opc, args[0], args[1], args[2]);
        break;

    case INDEX_op_qemu_ld_i64:
    case INDEX_op_qemu_st_i64:
        if (TCG_TARGET_REG_BITS == 32) {
            /* 32-bit host: MemOpIdx won't fit the rrm form; pass via TMP. */
            tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP, args[3]);
            tcg_out_op_rrrr(s, opc, args[0], args[1], args[2], TCG_REG_TMP);
            break;
        }
        /* fall through */
    case INDEX_op_qemu_ld_i32:
    case INDEX_op_qemu_st_i32:
        if (TCG_TARGET_REG_BITS == 64 && s->addr_type == TCG_TYPE_I32) {
            /* Zero-extend a 32-bit guest address on a 64-bit host. */
            tcg_out_ext32u(s, TCG_REG_TMP, args[1]);
            tcg_out_op_rrm(s, opc, args[0], TCG_REG_TMP, args[2]);
        } else {
            tcg_out_op_rrm(s, opc, args[0], args[1], args[2]);
        }
        break;

    case INDEX_op_mb:
        tcg_out_op_v(s, opc);
        break;

    case INDEX_op_call:     /* Always emitted via tcg_out_call. */
    case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb. */
    case INDEX_op_goto_tb:  /* Always emitted via tcg_out_goto_tb. */
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_st(TCGContext *s, TCGType type, TCGReg val, TCGReg base,
                       intptr_t offset)
{
    switch (type) {
    case TCG_TYPE_I32:
        tcg_out_ldst(s, INDEX_op_st_i32, val, base, offset);
        break;
#if TCG_TARGET_REG_BITS == 64
    case TCG_TYPE_I64:
        tcg_out_ldst(s, INDEX_op_st_i64, val, base, offset);
        break;
#endif
    default:
        g_assert_not_reached();
    }
}

/* No store-immediate; force the value through a register. */
static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                               TCGReg base, intptr_t ofs)
{
    return false;
}

/* Test if a constant matches the constraint. */
static bool tcg_target_const_match(int64_t val, int ct,
                                   TCGType type, TCGCond cond, int vece)
{
    return ct & TCG_CT_CONST;
}

static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
    memset(p, 0, sizeof(*p) * count);
}

static void tcg_target_init(TCGContext *s)
{
    /* The current code uses uint8_t for tcg operations. */
    tcg_debug_assert(tcg_op_defs_max <= UINT8_MAX);

    /* Registers available for 32 bit operations. */
    tcg_target_available_regs[TCG_TYPE_I32] = BIT(TCG_TARGET_NB_REGS) - 1;
    /* Registers available for 64 bit operations. */
    tcg_target_available_regs[TCG_TYPE_I64] = BIT(TCG_TARGET_NB_REGS) - 1;
    /*
     * The interpreter "registers" are in the local stack frame and
     * cannot be clobbered by the called helper functions. However,
     * the interpreter assumes a 128-bit return value and assigns to
     * the return value registers.
     */
    tcg_target_call_clobber_regs =
        MAKE_64BIT_MASK(TCG_REG_R0, 128 / TCG_TARGET_REG_BITS);

    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);

    /* The call arguments come first, followed by the temp storage. */
    tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE,
                  TCG_STATIC_FRAME_SIZE);
}

/* Generate global QEMU prologue and epilogue code. */
static inline void tcg_target_qemu_prologue(TCGContext *s)
{
}

static void tcg_out_tb_start(TCGContext *s)
{
    /* nothing to do */
}

bool tcg_target_has_memory_bswap(MemOp memop)
{
    return true;
}

/* The interpreter has no out-of-line qemu_ld/st slow paths. */
static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    g_assert_not_reached();
}

static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    g_assert_not_reached();
}