/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2009, 2011 Stefan Weil
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* Used for function call generation. */
#define TCG_TARGET_CALL_STACK_OFFSET 0
#define TCG_TARGET_STACK_ALIGN 8
#if TCG_TARGET_REG_BITS == 32
# define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_EVEN
# define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_EVEN
# define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_EVEN
#else
# define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_NORMAL
# define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_NORMAL
# define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_NORMAL
#endif
#define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_NORMAL

static TCGConstraintSetIndex
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
{
    switch (op) {
    case INDEX_op_goto_ptr:
        return C_O0_I1(r);

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap16_i64:
    case INDEX_op_bswap32_i32:
    case INDEX_op_bswap32_i64:
    case INDEX_op_bswap64_i64:
    case INDEX_op_extract_i32:
    case INDEX_op_extract_i64:
    case INDEX_op_sextract_i32:
    case INDEX_op_sextract_i64:
        return C_O1_I1(r, r);

    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
        return C_O0_I2(r, r);

    case INDEX_op_deposit_i32:
    case INDEX_op_deposit_i64:
        return C_O1_I2(r, r, r);

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        return C_O0_I2(r, r);

    case INDEX_op_add2_i32:
    case INDEX_op_add2_i64:
    case INDEX_op_sub2_i32:
    case INDEX_op_sub2_i64:
        return C_O2_I4(r, r, r, r, r, r);

#if TCG_TARGET_REG_BITS == 32
    case INDEX_op_brcond2_i32:
        return C_O0_I4(r, r, r, r);
#endif

    case INDEX_op_movcond_i32:
    case INDEX_op_movcond_i64:
    case INDEX_op_setcond2_i32:
        return C_O1_I4(r, r, r, r, r);

    case INDEX_op_qemu_ld_i32:
        return C_O1_I1(r, r);
    case INDEX_op_qemu_ld_i64:
        return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O2_I1(r, r, r);
    case INDEX_op_qemu_st_i32:
        return C_O0_I2(r, r);
    case INDEX_op_qemu_st_i64:
        return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r) : C_O0_I3(r, r, r);

    default:
        return C_NotImplemented;
    }
}
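
/*
 * A note on the C_* names above, summarizing the generic TCG
 * constraint-set convention (not something specific to this backend):
 * C_Om_In(...) describes an opcode with m outputs and n inputs, and
 * each letter is one operand's constraint, "r" meaning any general
 * register.  C_O1_I2(r, r, r) is thus a typical three-address ALU op:
 * one register output and two register inputs.
 */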

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R12,
    TCG_REG_R13,
    TCG_REG_R14,
    TCG_REG_R15,
    /* Either 2 or 4 of these are call clobbered, so use them last. */
    TCG_REG_R3,
    TCG_REG_R2,
    TCG_REG_R1,
    TCG_REG_R0,
};

/* No call arguments via registers.  All will be stored on the "stack". */
static const int tcg_target_call_iarg_regs[] = { };

static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
{
    tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
    tcg_debug_assert(slot >= 0 && slot < 128 / TCG_TARGET_REG_BITS);
    return TCG_REG_R0 + slot;
}

#ifdef CONFIG_DEBUG_TCG
static const char *const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "r00",
    "r01",
    "r02",
    "r03",
    "r04",
    "r05",
    "r06",
    "r07",
    "r08",
    "r09",
    "r10",
    "r11",
    "r12",
    "r13",
    "r14",
    "r15",
};
#endif

static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    intptr_t diff = value - (intptr_t)(code_ptr + 1);

    tcg_debug_assert(addend == 0);
    tcg_debug_assert(type == 20);

    if (diff == sextract32(diff, 0, type)) {
        tcg_patch32(code_ptr, deposit32(*code_ptr, 32 - type, type, diff));
        return true;
    }
    return false;
}

static void stack_bounds_check(TCGReg base, intptr_t offset)
{
    if (base == TCG_REG_CALL_STACK) {
        tcg_debug_assert(offset >= 0);
        tcg_debug_assert(offset < (TCG_STATIC_CALL_ARGS_SIZE +
                                   TCG_STATIC_FRAME_SIZE));
    }
}

static void tcg_out_op_l(TCGContext *s, TCGOpcode op, TCGLabel *l0)
{
    tcg_insn_unit insn = 0;

    tcg_out_reloc(s, s->code_ptr, 20, l0, 0);
    insn = deposit32(insn, 0, 8, op);
    tcg_out32(s, insn);
}
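
/*
 * Sketch of the 32-bit instruction word the tcg_out_op_* helpers build
 * (inferred from the deposit32 calls here; the authoritative decoder is
 * the interpreter in tcg/tci.c):
 *
 *   bits  0..7    opcode
 *   bits  8..11   first register operand, when present
 *   bits 12..15   second register operand, when present
 *   bits 12..31   20-bit signed immediate or pc-relative displacement
 *                 for the ri/rl/p/l formats -- the same field that
 *                 patch_reloc() above fills at bit position 32 - 20 = 12
 *
 * For example, tcg_out_op_rr(s, INDEX_op_not, TCG_REG_R2, TCG_REG_R7)
 * emits op | (2 << 8) | (7 << 12).
 */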

static void tcg_out_op_p(TCGContext *s, TCGOpcode op, void *p0)
{
    tcg_insn_unit insn = 0;
    intptr_t diff;

    /* Special case for exit_tb: map null -> 0. */
    if (p0 == NULL) {
        diff = 0;
    } else {
        diff = p0 - (void *)(s->code_ptr + 1);
        tcg_debug_assert(diff != 0);
        if (diff != sextract32(diff, 0, 20)) {
            tcg_raise_tb_overflow(s);
        }
    }
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 12, 20, diff);
    tcg_out32(s, insn);
}

static void tcg_out_op_r(TCGContext *s, TCGOpcode op, TCGReg r0)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    tcg_out32(s, insn);
}

static void tcg_out_op_v(TCGContext *s, TCGOpcode op)
{
    tcg_out32(s, (uint8_t)op);
}

static void tcg_out_op_ri(TCGContext *s, TCGOpcode op, TCGReg r0, int32_t i1)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(i1 == sextract32(i1, 0, 20));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 20, i1);
    tcg_out32(s, insn);
}

static void tcg_out_op_rl(TCGContext *s, TCGOpcode op, TCGReg r0, TCGLabel *l1)
{
    tcg_insn_unit insn = 0;

    tcg_out_reloc(s, s->code_ptr, 20, l1, 0);
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    tcg_out32(s, insn);
}

static void tcg_out_op_rr(TCGContext *s, TCGOpcode op, TCGReg r0, TCGReg r1)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrm(TCGContext *s, TCGOpcode op,
                           TCGReg r0, TCGReg r1, TCGArg m2)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(m2 == extract32(m2, 0, 16));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 16, m2);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrr(TCGContext *s, TCGOpcode op,
                           TCGReg r0, TCGReg r1, TCGReg r2)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrs(TCGContext *s, TCGOpcode op,
                           TCGReg r0, TCGReg r1, intptr_t i2)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(i2 == sextract32(i2, 0, 16));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 16, i2);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrbb(TCGContext *s, TCGOpcode op, TCGReg r0,
                            TCGReg r1, uint8_t b2, uint8_t b3)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(b2 == extract32(b2, 0, 6));
    tcg_debug_assert(b3 == extract32(b3, 0, 6));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 6, b2);
    insn = deposit32(insn, 22, 6, b3);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrrc(TCGContext *s, TCGOpcode op,
                            TCGReg r0, TCGReg r1, TCGReg r2, TCGCond c3)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 4, c3);
    tcg_out32(s, insn);
}
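
/*
 * The "bb" formats above (rrbb) and below (rrrbb) pack a bitfield
 * position and length as two 6-bit fields, asserted with
 * extract32(..., 0, 6), so both values are limited to 0..63 -- enough
 * for any position or length within a 64-bit word.  E.g. for rrbb, an
 * extract with pos=8 len=16 lands as 8 in bits 16..21 and 16 in
 * bits 22..27.
 */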

static void tcg_out_op_rrrbb(TCGContext *s, TCGOpcode op, TCGReg r0,
                             TCGReg r1, TCGReg r2, uint8_t b3, uint8_t b4)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(b3 == extract32(b3, 0, 6));
    tcg_debug_assert(b4 == extract32(b4, 0, 6));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 6, b3);
    insn = deposit32(insn, 26, 6, b4);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrrr(TCGContext *s, TCGOpcode op,
                            TCGReg r0, TCGReg r1, TCGReg r2, TCGReg r3)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 4, r3);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrrrrc(TCGContext *s, TCGOpcode op,
                              TCGReg r0, TCGReg r1, TCGReg r2,
                              TCGReg r3, TCGReg r4, TCGCond c5)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 4, r3);
    insn = deposit32(insn, 24, 4, r4);
    insn = deposit32(insn, 28, 4, c5);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrrrrr(TCGContext *s, TCGOpcode op,
                              TCGReg r0, TCGReg r1, TCGReg r2,
                              TCGReg r3, TCGReg r4, TCGReg r5)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 4, r3);
    insn = deposit32(insn, 24, 4, r4);
    insn = deposit32(insn, 28, 4, r5);
    tcg_out32(s, insn);
}

static void tcg_out_ldst(TCGContext *s, TCGOpcode op, TCGReg val,
                         TCGReg base, intptr_t offset)
{
    stack_bounds_check(base, offset);
    if (offset != sextract32(offset, 0, 16)) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, offset);
        tcg_out_op_rrr(s, INDEX_op_add, TCG_REG_TMP, TCG_REG_TMP, base);
        base = TCG_REG_TMP;
        offset = 0;
    }
    tcg_out_op_rrs(s, op, val, base, offset);
}

static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg val, TCGReg base,
                       intptr_t offset)
{
    switch (type) {
    case TCG_TYPE_I32:
        tcg_out_ldst(s, INDEX_op_ld_i32, val, base, offset);
        break;
#if TCG_TARGET_REG_BITS == 64
    case TCG_TYPE_I64:
        tcg_out_ldst(s, INDEX_op_ld_i64, val, base, offset);
        break;
#endif
    default:
        g_assert_not_reached();
    }
}

static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    tcg_out_op_rr(s, INDEX_op_mov, ret, arg);
    return true;
}

static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg)
{
    switch (type) {
    case TCG_TYPE_I32:
#if TCG_TARGET_REG_BITS == 64
        arg = (int32_t)arg;
        /* fall through */
    case TCG_TYPE_I64:
#endif
        break;
    default:
        g_assert_not_reached();
    }

    if (arg == sextract32(arg, 0, 20)) {
        tcg_out_op_ri(s, INDEX_op_tci_movi, ret, arg);
    } else {
        tcg_insn_unit insn = 0;

        new_pool_label(s, arg, 20, s->code_ptr, 0);
        insn = deposit32(insn, 0, 8, INDEX_op_tci_movl);
        insn = deposit32(insn, 8, 4, ret);
        tcg_out32(s, insn);
    }
}
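
/*
 * To illustrate the two movi encodings chosen above: a value such as
 * 0x42 satisfies the sextract32(arg, 0, 20) test and becomes a single
 * inline tci_movi, while 0x12345678 does not fit in the signed 20-bit
 * field, so the value goes into the constant pool (new_pool_label) and
 * a tci_movl insn is emitted whose 20-bit displacement is later patched
 * to point at that pool entry.
 */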

static void tcg_out_extract(TCGContext *s, TCGType type, TCGReg rd,
                            TCGReg rs, unsigned pos, unsigned len)
{
    TCGOpcode opc = (type == TCG_TYPE_I32
                     ? INDEX_op_extract_i32
                     : INDEX_op_extract_i64);
    tcg_out_op_rrbb(s, opc, rd, rs, pos, len);
}

static void tcg_out_sextract(TCGContext *s, TCGType type, TCGReg rd,
                             TCGReg rs, unsigned pos, unsigned len)
{
    TCGOpcode opc = (type == TCG_TYPE_I32
                     ? INDEX_op_sextract_i32
                     : INDEX_op_sextract_i64);
    tcg_out_op_rrbb(s, opc, rd, rs, pos, len);
}

static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
{
    tcg_out_sextract(s, type, rd, rs, 0, 8);
}

static void tcg_out_ext8u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_extract(s, TCG_TYPE_REG, rd, rs, 0, 8);
}

static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
{
    tcg_out_sextract(s, type, rd, rs, 0, 16);
}

static void tcg_out_ext16u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_extract(s, TCG_TYPE_REG, rd, rs, 0, 16);
}

static void tcg_out_ext32s(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
    tcg_out_sextract(s, TCG_TYPE_I64, rd, rs, 0, 32);
}

static void tcg_out_ext32u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
    tcg_out_extract(s, TCG_TYPE_I64, rd, rs, 0, 32);
}

static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_ext32s(s, rd, rs);
}

static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_ext32u(s, rd, rs);
}

static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
    tcg_out_mov(s, TCG_TYPE_I32, rd, rs);
}

static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
{
    return false;
}

static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                             tcg_target_long imm)
{
    /* This function is only used for passing structs by reference. */
    g_assert_not_reached();
}

static void tcg_out_call(TCGContext *s, const tcg_insn_unit *func,
                         const TCGHelperInfo *info)
{
    ffi_cif *cif = info->cif;
    tcg_insn_unit insn = 0;
    uint8_t which;

    if (cif->rtype == &ffi_type_void) {
        which = 0;
    } else {
        tcg_debug_assert(cif->rtype->size == 4 ||
                         cif->rtype->size == 8 ||
                         cif->rtype->size == 16);
        which = ctz32(cif->rtype->size) - 1;
    }
    new_pool_l2(s, 20, s->code_ptr, 0, (uintptr_t)func, (uintptr_t)cif);
    insn = deposit32(insn, 0, 8, INDEX_op_call);
    insn = deposit32(insn, 8, 4, which);
    tcg_out32(s, insn);
}

#if TCG_TARGET_REG_BITS == 64
# define CASE_32_64(x) \
    case glue(glue(INDEX_op_, x), _i64): \
    case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x) \
    case glue(glue(INDEX_op_, x), _i64):
#else
# define CASE_32_64(x) \
    case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x)
#endif

static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg)
{
    tcg_out_op_p(s, INDEX_op_exit_tb, (void *)arg);
}

static void tcg_out_goto_tb(TCGContext *s, int which)
{
    /* Indirect jump method. */
    tcg_out_op_p(s, INDEX_op_goto_tb, (void *)get_jmp_target_addr(s, which));
    set_jmp_reset_offset(s, which);
}

void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
                              uintptr_t jmp_rx, uintptr_t jmp_rw)
{
    /* Always indirect, nothing to do. */
}
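
/*
 * goto_tb is always indirect on this backend: the insn's 20-bit
 * displacement points at the jump-target address slot returned by
 * get_jmp_target_addr(), not at the destination TB itself.  Retargeting
 * a chained TB therefore just rewrites that slot in memory, which is
 * why tb_target_set_jmp_target() above has nothing to do.
 */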

static void tgen_add(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_add, a0, a1, a2);
}

static const TCGOutOpBinary outop_add = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_add,
};

static void tgen_and(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_and, a0, a1, a2);
}

static const TCGOutOpBinary outop_and = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_and,
};

static void tgen_andc(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_andc, a0, a1, a2);
}

static const TCGOutOpBinary outop_andc = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_andc,
};

static void tgen_clz(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    TCGOpcode opc = (type == TCG_TYPE_I32
                     ? INDEX_op_tci_clz32
                     : INDEX_op_clz);
    tcg_out_op_rrr(s, opc, a0, a1, a2);
}

static const TCGOutOpBinary outop_clz = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_clz,
};

static void tgen_ctz(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    TCGOpcode opc = (type == TCG_TYPE_I32
                     ? INDEX_op_tci_ctz32
                     : INDEX_op_ctz);
    tcg_out_op_rrr(s, opc, a0, a1, a2);
}

static const TCGOutOpBinary outop_ctz = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_ctz,
};

static void tgen_divs(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    TCGOpcode opc = (type == TCG_TYPE_I32
                     ? INDEX_op_tci_divs32
                     : INDEX_op_divs);
    tcg_out_op_rrr(s, opc, a0, a1, a2);
}

static const TCGOutOpBinary outop_divs = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_divs,
};

static const TCGOutOpDivRem outop_divs2 = {
    .base.static_constraint = C_NotImplemented,
};

static void tgen_divu(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    TCGOpcode opc = (type == TCG_TYPE_I32
                     ? INDEX_op_tci_divu32
                     : INDEX_op_divu);
    tcg_out_op_rrr(s, opc, a0, a1, a2);
}

static const TCGOutOpBinary outop_divu = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_divu,
};

static const TCGOutOpDivRem outop_divu2 = {
    .base.static_constraint = C_NotImplemented,
};

static void tgen_eqv(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_eqv, a0, a1, a2);
}

static const TCGOutOpBinary outop_eqv = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_eqv,
};

static void tgen_mul(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_mul, a0, a1, a2);
}

static const TCGOutOpBinary outop_mul = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_mul,
};

static TCGConstraintSetIndex cset_mul2(TCGType type, unsigned flags)
{
    return type == TCG_TYPE_REG ? C_O2_I2(r, r, r, r) : C_NotImplemented;
}
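
/*
 * mul2 is advertised only at host register width: cset_mul2() above
 * returns C_NotImplemented for TCG_TYPE_I32 on a 64-bit host, in which
 * case the generic tcg code is expected to synthesize the double-word
 * multiply from other operations instead.
 */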

static void tgen_muls2(TCGContext *s, TCGType type,
                       TCGReg a0, TCGReg a1, TCGReg a2, TCGReg a3)
{
    tcg_out_op_rrrr(s, INDEX_op_muls2, a0, a1, a2, a3);
}

static const TCGOutOpMul2 outop_muls2 = {
    .base.static_constraint = C_Dynamic,
    .base.dynamic_constraint = cset_mul2,
    .out_rrrr = tgen_muls2,
};

static const TCGOutOpBinary outop_mulsh = {
    .base.static_constraint = C_NotImplemented,
};

static void tgen_mulu2(TCGContext *s, TCGType type,
                       TCGReg a0, TCGReg a1, TCGReg a2, TCGReg a3)
{
    tcg_out_op_rrrr(s, INDEX_op_mulu2, a0, a1, a2, a3);
}

static const TCGOutOpMul2 outop_mulu2 = {
    .base.static_constraint = C_Dynamic,
    .base.dynamic_constraint = cset_mul2,
    .out_rrrr = tgen_mulu2,
};

static const TCGOutOpBinary outop_muluh = {
    .base.static_constraint = C_NotImplemented,
};

static void tgen_nand(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_nand, a0, a1, a2);
}

static const TCGOutOpBinary outop_nand = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_nand,
};

static void tgen_nor(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_nor, a0, a1, a2);
}

static const TCGOutOpBinary outop_nor = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_nor,
};

static void tgen_or(TCGContext *s, TCGType type,
                    TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_or, a0, a1, a2);
}

static const TCGOutOpBinary outop_or = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_or,
};

static void tgen_orc(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_orc, a0, a1, a2);
}

static const TCGOutOpBinary outop_orc = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_orc,
};

static void tgen_rems(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    TCGOpcode opc = (type == TCG_TYPE_I32
                     ? INDEX_op_tci_rems32
                     : INDEX_op_rems);
    tcg_out_op_rrr(s, opc, a0, a1, a2);
}

static const TCGOutOpBinary outop_rems = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_rems,
};

static void tgen_remu(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    TCGOpcode opc = (type == TCG_TYPE_I32
                     ? INDEX_op_tci_remu32
                     : INDEX_op_remu);
    tcg_out_op_rrr(s, opc, a0, a1, a2);
}

static const TCGOutOpBinary outop_remu = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_remu,
};

static void tgen_rotl(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    TCGOpcode opc = (type == TCG_TYPE_I32
                     ? INDEX_op_tci_rotl32
                     : INDEX_op_rotl);
    tcg_out_op_rrr(s, opc, a0, a1, a2);
}

static const TCGOutOpBinary outop_rotl = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_rotl,
};

static void tgen_rotr(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    TCGOpcode opc = (type == TCG_TYPE_I32
                     ? INDEX_op_tci_rotr32
                     : INDEX_op_rotr);
    tcg_out_op_rrr(s, opc, a0, a1, a2);
}

static const TCGOutOpBinary outop_rotr = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_rotr,
};
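
/*
 * Note the asymmetry in the 32-bit handling of shifts and rotates on a
 * 64-bit host: the rotates above use dedicated tci_rotl32/tci_rotr32
 * opcodes, while sar/shr below instead sign- or zero-extend the input
 * into TCG_REG_TMP so that the interpreter's full-register shift
 * produces the correct 32-bit result.  Plain shl needs neither, since
 * the low 32 bits of a wider left shift are already correct.
 */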

static void tgen_sar(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    if (type < TCG_TYPE_REG) {
        tcg_out_ext32s(s, TCG_REG_TMP, a1);
        a1 = TCG_REG_TMP;
    }
    tcg_out_op_rrr(s, INDEX_op_sar, a0, a1, a2);
}

static const TCGOutOpBinary outop_sar = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_sar,
};

static void tgen_shl(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_shl, a0, a1, a2);
}

static const TCGOutOpBinary outop_shl = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_shl,
};

static void tgen_shr(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    if (type < TCG_TYPE_REG) {
        tcg_out_ext32u(s, TCG_REG_TMP, a1);
        a1 = TCG_REG_TMP;
    }
    tcg_out_op_rrr(s, INDEX_op_shr, a0, a1, a2);
}

static const TCGOutOpBinary outop_shr = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_shr,
};

static void tgen_sub(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_sub, a0, a1, a2);
}

static const TCGOutOpSubtract outop_sub = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_sub,
};

static void tgen_xor(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_xor, a0, a1, a2);
}

static const TCGOutOpBinary outop_xor = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_xor,
};

static void tgen_ctpop(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
    tcg_out_op_rr(s, INDEX_op_ctpop, a0, a1);
}

static TCGConstraintSetIndex cset_ctpop(TCGType type, unsigned flags)
{
    return type == TCG_TYPE_REG ? C_O1_I1(r, r) : C_NotImplemented;
}

static const TCGOutOpUnary outop_ctpop = {
    .base.static_constraint = C_Dynamic,
    .base.dynamic_constraint = cset_ctpop,
    .out_rr = tgen_ctpop,
};

static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
    tcg_out_op_rr(s, INDEX_op_neg, a0, a1);
}

static const TCGOutOpUnary outop_neg = {
    .base.static_constraint = C_O1_I1(r, r),
    .out_rr = tgen_neg,
};

static void tgen_not(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
    tcg_out_op_rr(s, INDEX_op_not, a0, a1);
}

static const TCGOutOpUnary outop_not = {
    .base.static_constraint = C_O1_I1(r, r),
    .out_rr = tgen_not,
};
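
/*
 * setcond below produces 0/1; negsetcond reuses it and negates the
 * result to obtain the 0/-1 form.  E.g. a negsetcond with TCG_COND_EQ
 * leaves dest = -1 when arg1 == arg2, and 0 otherwise.
 */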

static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond,
                         TCGReg dest, TCGReg arg1, TCGReg arg2)
{
    TCGOpcode opc = (type == TCG_TYPE_I32
                     ? INDEX_op_tci_setcond32
                     : INDEX_op_setcond);
    tcg_out_op_rrrc(s, opc, dest, arg1, arg2, cond);
}

static const TCGOutOpSetcond outop_setcond = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_setcond,
};

static void tgen_negsetcond(TCGContext *s, TCGType type, TCGCond cond,
                            TCGReg dest, TCGReg arg1, TCGReg arg2)
{
    tgen_setcond(s, type, cond, dest, arg1, arg2);
    tgen_neg(s, type, dest, dest);
}

static const TCGOutOpSetcond outop_negsetcond = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_negsetcond,
};

static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS])
{
    int width;

    switch (opc) {
    case INDEX_op_goto_ptr:
        tcg_out_op_r(s, opc, args[0]);
        break;

    case INDEX_op_br:
        tcg_out_op_l(s, opc, arg_label(args[0]));
        break;

    CASE_32_64(movcond)
    case INDEX_op_setcond2_i32:
        tcg_out_op_rrrrrc(s, opc, args[0], args[1], args[2],
                          args[3], args[4], args[5]);
        break;

    CASE_32_64(ld8u)
    CASE_32_64(ld8s)
    CASE_32_64(ld16u)
    CASE_32_64(ld16s)
    case INDEX_op_ld_i32:
    CASE_64(ld32u)
    CASE_64(ld32s)
    CASE_64(ld)
    CASE_32_64(st8)
    CASE_32_64(st16)
    case INDEX_op_st_i32:
    CASE_64(st32)
    CASE_64(st)
        tcg_out_ldst(s, opc, args[0], args[1], args[2]);
        break;

    CASE_32_64(deposit)
        tcg_out_op_rrrbb(s, opc, args[0], args[1], args[2], args[3], args[4]);
        break;

    CASE_32_64(extract)  /* Optional (TCG_TARGET_HAS_extract_*). */
    CASE_32_64(sextract) /* Optional (TCG_TARGET_HAS_sextract_*). */
        tcg_out_op_rrbb(s, opc, args[0], args[1], args[2], args[3]);
        break;

    CASE_32_64(brcond)
        tgen_setcond(s, type, args[2], TCG_REG_TMP, args[0], args[1]);
        tcg_out_op_rl(s, opc, TCG_REG_TMP, arg_label(args[3]));
        break;

    case INDEX_op_bswap32_i32: /* Optional (TCG_TARGET_HAS_bswap32_i32). */
    case INDEX_op_bswap64_i64: /* Optional (TCG_TARGET_HAS_bswap64_i64). */
        tcg_out_op_rr(s, opc, args[0], args[1]);
        break;

    case INDEX_op_bswap16_i32: /* Optional (TCG_TARGET_HAS_bswap16_i32). */
    case INDEX_op_bswap16_i64: /* Optional (TCG_TARGET_HAS_bswap16_i64). */
        width = 16;
        goto do_bswap;
    case INDEX_op_bswap32_i64: /* Optional (TCG_TARGET_HAS_bswap32_i64). */
        width = 32;
    do_bswap:
        /* The base tci bswaps zero-extend, and ignore high bits. */
        tcg_out_op_rr(s, opc, args[0], args[1]);
        if (args[2] & TCG_BSWAP_OS) {
            tcg_out_sextract(s, TCG_TYPE_REG, args[0], args[0], 0, width);
        }
        break;
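
    /*
     * Worked example for the bswap flags above: bswap16_i64 with
     * TCG_BSWAP_OS first emits the plain tci bswap16, leaving the
     * swapped value zero-extended from bit 15, then re-signs it with
     * an explicit sextract of the low 16 bits.
     */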

    CASE_32_64(add2)
    CASE_32_64(sub2)
        tcg_out_op_rrrrrr(s, opc, args[0], args[1], args[2],
                          args[3], args[4], args[5]);
        break;

#if TCG_TARGET_REG_BITS == 32
    case INDEX_op_brcond2_i32:
        tcg_out_op_rrrrrc(s, INDEX_op_setcond2_i32, TCG_REG_TMP,
                          args[0], args[1], args[2], args[3], args[4]);
        tcg_out_op_rl(s, INDEX_op_brcond_i32, TCG_REG_TMP, arg_label(args[5]));
        break;
#endif

    case INDEX_op_qemu_ld_i64:
    case INDEX_op_qemu_st_i64:
        if (TCG_TARGET_REG_BITS == 32) {
            tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP, args[3]);
            tcg_out_op_rrrr(s, opc, args[0], args[1], args[2], TCG_REG_TMP);
            break;
        }
        /* fall through */
    case INDEX_op_qemu_ld_i32:
    case INDEX_op_qemu_st_i32:
        if (TCG_TARGET_REG_BITS == 64 && s->addr_type == TCG_TYPE_I32) {
            tcg_out_ext32u(s, TCG_REG_TMP, args[1]);
            tcg_out_op_rrm(s, opc, args[0], TCG_REG_TMP, args[2]);
        } else {
            tcg_out_op_rrm(s, opc, args[0], args[1], args[2]);
        }
        break;

    case INDEX_op_mb:
        tcg_out_op_v(s, opc);
        break;

    case INDEX_op_call:         /* Always emitted via tcg_out_call. */
    case INDEX_op_exit_tb:      /* Always emitted via tcg_out_exit_tb. */
    case INDEX_op_goto_tb:      /* Always emitted via tcg_out_goto_tb. */
    case INDEX_op_ext_i32_i64:  /* Always emitted via tcg_reg_alloc_op. */
    case INDEX_op_extu_i32_i64:
    case INDEX_op_extrl_i64_i32:
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_st(TCGContext *s, TCGType type, TCGReg val, TCGReg base,
                       intptr_t offset)
{
    switch (type) {
    case TCG_TYPE_I32:
        tcg_out_ldst(s, INDEX_op_st_i32, val, base, offset);
        break;
#if TCG_TARGET_REG_BITS == 64
    case TCG_TYPE_I64:
        tcg_out_ldst(s, INDEX_op_st_i64, val, base, offset);
        break;
#endif
    default:
        g_assert_not_reached();
    }
}

static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                               TCGReg base, intptr_t ofs)
{
    return false;
}

/* Test if a constant matches the constraint. */
static bool tcg_target_const_match(int64_t val, int ct,
                                   TCGType type, TCGCond cond, int vece)
{
    return ct & TCG_CT_CONST;
}

static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
    memset(p, 0, sizeof(*p) * count);
}

static void tcg_target_init(TCGContext *s)
{
    /* The current code uses uint8_t for tcg operations. */
    tcg_debug_assert(tcg_op_defs_max <= UINT8_MAX);

    /* Registers available for 32 bit operations. */
    tcg_target_available_regs[TCG_TYPE_I32] = BIT(TCG_TARGET_NB_REGS) - 1;
    /* Registers available for 64 bit operations. */
    tcg_target_available_regs[TCG_TYPE_I64] = BIT(TCG_TARGET_NB_REGS) - 1;
    /*
     * The interpreter "registers" are in the local stack frame and
     * cannot be clobbered by the called helper functions.  However,
     * the interpreter assumes a 128-bit return value and assigns to
     * the return value registers.
     */
    tcg_target_call_clobber_regs =
        MAKE_64BIT_MASK(TCG_REG_R0, 128 / TCG_TARGET_REG_BITS);
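
    /*
     * Concretely, 128 / TCG_TARGET_REG_BITS is 2 on a 64-bit host and
     * 4 on a 32-bit host, so r0-r1 (resp. r0-r3) are marked as call
     * clobbered to cover that 128-bit return-value assignment.
     */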
1134 */ 1135 tcg_target_call_clobber_regs = 1136 MAKE_64BIT_MASK(TCG_REG_R0, 128 / TCG_TARGET_REG_BITS); 1137 1138 s->reserved_regs = 0; 1139 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP); 1140 tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK); 1141 1142 /* The call arguments come first, followed by the temp storage. */ 1143 tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE, 1144 TCG_STATIC_FRAME_SIZE); 1145} 1146 1147/* Generate global QEMU prologue and epilogue code. */ 1148static inline void tcg_target_qemu_prologue(TCGContext *s) 1149{ 1150} 1151 1152static void tcg_out_tb_start(TCGContext *s) 1153{ 1154 /* nothing to do */ 1155} 1156 1157bool tcg_target_has_memory_bswap(MemOp memop) 1158{ 1159 return true; 1160} 1161 1162static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) 1163{ 1164 g_assert_not_reached(); 1165} 1166 1167static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) 1168{ 1169 g_assert_not_reached(); 1170} 1171