/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2009, 2011 Stefan Weil
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* Used for function call generation. */
#define TCG_TARGET_CALL_STACK_OFFSET    0
#define TCG_TARGET_STACK_ALIGN          8
#if TCG_TARGET_REG_BITS == 32
# define TCG_TARGET_CALL_ARG_I32        TCG_CALL_ARG_EVEN
# define TCG_TARGET_CALL_ARG_I64        TCG_CALL_ARG_EVEN
# define TCG_TARGET_CALL_ARG_I128       TCG_CALL_ARG_EVEN
#else
# define TCG_TARGET_CALL_ARG_I32        TCG_CALL_ARG_NORMAL
# define TCG_TARGET_CALL_ARG_I64        TCG_CALL_ARG_NORMAL
# define TCG_TARGET_CALL_ARG_I128       TCG_CALL_ARG_NORMAL
#endif
#define TCG_TARGET_CALL_RET_I128        TCG_CALL_RET_NORMAL
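
/*
 * Rough sketch of the call layout (derived from the macros above): with
 * TCG_TARGET_STACK_ALIGN at 8, TCG_CALL_ARG_EVEN makes every argument on
 * a 32-bit host start at an even 4-byte slot, i.e. 8-byte aligned in the
 * call area.  For example, an (i32, i64) helper call would occupy slots
 * {0, pad, 2, 3} rather than {0, 1, 2}.  On 64-bit hosts one slot is
 * already 8 bytes wide, so TCG_CALL_ARG_NORMAL suffices.
 */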

static TCGConstraintSetIndex
tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
{
    switch (op) {
    case INDEX_op_goto_ptr:
        return C_O0_I1(r);

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_not_i32:
    case INDEX_op_not_i64:
    case INDEX_op_neg_i32:
    case INDEX_op_neg_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap16_i64:
    case INDEX_op_bswap32_i32:
    case INDEX_op_bswap32_i64:
    case INDEX_op_bswap64_i64:
    case INDEX_op_extract_i32:
    case INDEX_op_extract_i64:
    case INDEX_op_sextract_i32:
    case INDEX_op_sextract_i64:
    case INDEX_op_ctpop_i32:
    case INDEX_op_ctpop_i64:
        return C_O1_I1(r, r);

    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
        return C_O0_I2(r, r);

    case INDEX_op_div_i32:
    case INDEX_op_div_i64:
    case INDEX_op_divu_i32:
    case INDEX_op_divu_i64:
    case INDEX_op_rem_i32:
    case INDEX_op_rem_i64:
    case INDEX_op_remu_i32:
    case INDEX_op_remu_i64:
    case INDEX_op_mul_i32:
    case INDEX_op_mul_i64:
    case INDEX_op_shl_i32:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i32:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i32:
    case INDEX_op_sar_i64:
    case INDEX_op_rotl_i32:
    case INDEX_op_rotl_i64:
    case INDEX_op_rotr_i32:
    case INDEX_op_rotr_i64:
    case INDEX_op_setcond_i32:
    case INDEX_op_setcond_i64:
    case INDEX_op_deposit_i32:
    case INDEX_op_deposit_i64:
    case INDEX_op_clz_i32:
    case INDEX_op_clz_i64:
    case INDEX_op_ctz_i32:
    case INDEX_op_ctz_i64:
        return C_O1_I2(r, r, r);

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        return C_O0_I2(r, r);

    case INDEX_op_add2_i32:
    case INDEX_op_add2_i64:
    case INDEX_op_sub2_i32:
    case INDEX_op_sub2_i64:
        return C_O2_I4(r, r, r, r, r, r);

#if TCG_TARGET_REG_BITS == 32
    case INDEX_op_brcond2_i32:
        return C_O0_I4(r, r, r, r);
#endif

    case INDEX_op_mulu2_i32:
    case INDEX_op_mulu2_i64:
    case INDEX_op_muls2_i32:
    case INDEX_op_muls2_i64:
        return C_O2_I2(r, r, r, r);

    case INDEX_op_movcond_i32:
    case INDEX_op_movcond_i64:
    case INDEX_op_setcond2_i32:
        return C_O1_I4(r, r, r, r, r);

    case INDEX_op_qemu_ld_i32:
        return C_O1_I1(r, r);
    case INDEX_op_qemu_ld_i64:
        return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O2_I1(r, r, r);
    case INDEX_op_qemu_st_i32:
        return C_O0_I2(r, r);
    case INDEX_op_qemu_st_i64:
        return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r) : C_O0_I3(r, r, r);

    default:
        return C_NotImplemented;
    }
}

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R12,
    TCG_REG_R13,
    TCG_REG_R14,
    TCG_REG_R15,
    /* Either 2 or 4 of these are call clobbered, so use them last. */
    TCG_REG_R3,
    TCG_REG_R2,
    TCG_REG_R1,
    TCG_REG_R0,
};

/* No call arguments via registers. All will be stored on the "stack". */
static const int tcg_target_call_iarg_regs[] = { };

static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
{
    tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
    tcg_debug_assert(slot >= 0 && slot < 128 / TCG_TARGET_REG_BITS);
    return TCG_REG_R0 + slot;
}

#ifdef CONFIG_DEBUG_TCG
static const char *const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "r00",
    "r01",
    "r02",
    "r03",
    "r04",
    "r05",
    "r06",
    "r07",
    "r08",
    "r09",
    "r10",
    "r11",
    "r12",
    "r13",
    "r14",
    "r15",
};
#endif

static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    intptr_t diff = value - (intptr_t)(code_ptr + 1);

    tcg_debug_assert(addend == 0);
    tcg_debug_assert(type == 20);

    if (diff == sextract32(diff, 0, type)) {
        tcg_patch32(code_ptr, deposit32(*code_ptr, 32 - type, type, diff));
        return true;
    }
    return false;
}

static void stack_bounds_check(TCGReg base, intptr_t offset)
{
    if (base == TCG_REG_CALL_STACK) {
        tcg_debug_assert(offset >= 0);
        tcg_debug_assert(offset < (TCG_STATIC_CALL_ARGS_SIZE +
                                   TCG_STATIC_FRAME_SIZE));
    }
}
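
/*
 * Sketch of the instruction word, as implied by the emitters below:
 * each TCI instruction is a single 32-bit word.
 *
 *   bits  0..7   opcode (hence the tcg_op_defs_max <= UINT8_MAX assert
 *                in tcg_target_init)
 *   bits  8..11  r0, the first operand register
 *   bits 12..31  further 4-bit register fields, a condition code,
 *                6-bit bitfield pos/len pairs, a 16-bit immediate or
 *                memop index starting at bit 16, or a 20-bit signed
 *                immediate/displacement starting at bit 12
 *
 * The 20-bit fields at bit 12 are what the "type == 20" relocation in
 * patch_reloc() above patches (32 - 20 == 12).
 */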

static void tcg_out_op_l(TCGContext *s, TCGOpcode op, TCGLabel *l0)
{
    tcg_insn_unit insn = 0;

    tcg_out_reloc(s, s->code_ptr, 20, l0, 0);
    insn = deposit32(insn, 0, 8, op);
    tcg_out32(s, insn);
}

static void tcg_out_op_p(TCGContext *s, TCGOpcode op, void *p0)
{
    tcg_insn_unit insn = 0;
    intptr_t diff;

    /* Special case for exit_tb: map null -> 0. */
    if (p0 == NULL) {
        diff = 0;
    } else {
        diff = p0 - (void *)(s->code_ptr + 1);
        tcg_debug_assert(diff != 0);
        if (diff != sextract32(diff, 0, 20)) {
            tcg_raise_tb_overflow(s);
        }
    }
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 12, 20, diff);
    tcg_out32(s, insn);
}

static void tcg_out_op_r(TCGContext *s, TCGOpcode op, TCGReg r0)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    tcg_out32(s, insn);
}

static void tcg_out_op_v(TCGContext *s, TCGOpcode op)
{
    tcg_out32(s, (uint8_t)op);
}

static void tcg_out_op_ri(TCGContext *s, TCGOpcode op, TCGReg r0, int32_t i1)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(i1 == sextract32(i1, 0, 20));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 20, i1);
    tcg_out32(s, insn);
}

static void tcg_out_op_rl(TCGContext *s, TCGOpcode op, TCGReg r0, TCGLabel *l1)
{
    tcg_insn_unit insn = 0;

    tcg_out_reloc(s, s->code_ptr, 20, l1, 0);
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    tcg_out32(s, insn);
}

static void tcg_out_op_rr(TCGContext *s, TCGOpcode op, TCGReg r0, TCGReg r1)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrm(TCGContext *s, TCGOpcode op,
                           TCGReg r0, TCGReg r1, TCGArg m2)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(m2 == extract32(m2, 0, 16));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 16, m2);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrr(TCGContext *s, TCGOpcode op,
                           TCGReg r0, TCGReg r1, TCGReg r2)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrs(TCGContext *s, TCGOpcode op,
                           TCGReg r0, TCGReg r1, intptr_t i2)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(i2 == sextract32(i2, 0, 16));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 16, i2);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrbb(TCGContext *s, TCGOpcode op, TCGReg r0,
                            TCGReg r1, uint8_t b2, uint8_t b3)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(b2 == extract32(b2, 0, 6));
    tcg_debug_assert(b3 == extract32(b3, 0, 6));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 6, b2);
    insn = deposit32(insn, 22, 6, b3);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrrc(TCGContext *s, TCGOpcode op,
                            TCGReg r0, TCGReg r1, TCGReg r2, TCGCond c3)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 4, c3);
    tcg_out32(s, insn);
}
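
/*
 * Note on the *bb encodings: bitfield position and length each get a
 * 6-bit immediate, enough for positions 0..63 on a 64-bit host; the
 * asserts in tcg_out_op_rrbb() above and tcg_out_op_rrrbb() below
 * enforce that range.
 */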

static void tcg_out_op_rrrbb(TCGContext *s, TCGOpcode op, TCGReg r0,
                             TCGReg r1, TCGReg r2, uint8_t b3, uint8_t b4)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(b3 == extract32(b3, 0, 6));
    tcg_debug_assert(b4 == extract32(b4, 0, 6));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 6, b3);
    insn = deposit32(insn, 26, 6, b4);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrrr(TCGContext *s, TCGOpcode op,
                            TCGReg r0, TCGReg r1, TCGReg r2, TCGReg r3)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 4, r3);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrrrrc(TCGContext *s, TCGOpcode op,
                              TCGReg r0, TCGReg r1, TCGReg r2,
                              TCGReg r3, TCGReg r4, TCGCond c5)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 4, r3);
    insn = deposit32(insn, 24, 4, r4);
    insn = deposit32(insn, 28, 4, c5);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrrrrr(TCGContext *s, TCGOpcode op,
                              TCGReg r0, TCGReg r1, TCGReg r2,
                              TCGReg r3, TCGReg r4, TCGReg r5)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 4, r3);
    insn = deposit32(insn, 24, 4, r4);
    insn = deposit32(insn, 28, 4, r5);
    tcg_out32(s, insn);
}

static void tcg_out_ldst(TCGContext *s, TCGOpcode op, TCGReg val,
                         TCGReg base, intptr_t offset)
{
    stack_bounds_check(base, offset);
    if (offset != sextract32(offset, 0, 16)) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, offset);
        tcg_out_op_rrr(s, INDEX_op_add, TCG_REG_TMP, TCG_REG_TMP, base);
        base = TCG_REG_TMP;
        offset = 0;
    }
    tcg_out_op_rrs(s, op, val, base, offset);
}

static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg val, TCGReg base,
                       intptr_t offset)
{
    switch (type) {
    case TCG_TYPE_I32:
        tcg_out_ldst(s, INDEX_op_ld_i32, val, base, offset);
        break;
#if TCG_TARGET_REG_BITS == 64
    case TCG_TYPE_I64:
        tcg_out_ldst(s, INDEX_op_ld_i64, val, base, offset);
        break;
#endif
    default:
        g_assert_not_reached();
    }
}

static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    tcg_out_op_rr(s, INDEX_op_mov, ret, arg);
    return true;
}

static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg)
{
    switch (type) {
    case TCG_TYPE_I32:
#if TCG_TARGET_REG_BITS == 64
        arg = (int32_t)arg;
        /* fall through */
    case TCG_TYPE_I64:
#endif
        break;
    default:
        g_assert_not_reached();
    }

    if (arg == sextract32(arg, 0, 20)) {
        tcg_out_op_ri(s, INDEX_op_tci_movi, ret, arg);
    } else {
        tcg_insn_unit insn = 0;

        new_pool_label(s, arg, 20, s->code_ptr, 0);
        insn = deposit32(insn, 0, 8, INDEX_op_tci_movl);
        insn = deposit32(insn, 8, 4, ret);
        tcg_out32(s, insn);
    }
}
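
/*
 * To summarize tcg_out_movi(): immediates that fit in 20 signed bits
 * (-0x80000 .. 0x7ffff) are encoded inline with tci_movi; anything wider
 * is placed in the constant pool and loaded with tci_movl via the same
 * 20-bit pc-relative relocation used for branches.
 */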

static void tcg_out_extract(TCGContext *s, TCGType type, TCGReg rd,
                            TCGReg rs, unsigned pos, unsigned len)
{
    TCGOpcode opc = type == TCG_TYPE_I32 ?
                    INDEX_op_extract_i32 :
                    INDEX_op_extract_i64;
    tcg_out_op_rrbb(s, opc, rd, rs, pos, len);
}

static void tcg_out_sextract(TCGContext *s, TCGType type, TCGReg rd,
                             TCGReg rs, unsigned pos, unsigned len)
{
    TCGOpcode opc = type == TCG_TYPE_I32 ?
                    INDEX_op_sextract_i32 :
                    INDEX_op_sextract_i64;
    tcg_out_op_rrbb(s, opc, rd, rs, pos, len);
}

static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
{
    tcg_out_sextract(s, type, rd, rs, 0, 8);
}

static void tcg_out_ext8u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_extract(s, TCG_TYPE_REG, rd, rs, 0, 8);
}

static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
{
    tcg_out_sextract(s, type, rd, rs, 0, 16);
}

static void tcg_out_ext16u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_extract(s, TCG_TYPE_REG, rd, rs, 0, 16);
}

static void tcg_out_ext32s(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
    tcg_out_sextract(s, TCG_TYPE_I64, rd, rs, 0, 32);
}

static void tcg_out_ext32u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
    tcg_out_extract(s, TCG_TYPE_I64, rd, rs, 0, 32);
}

static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_ext32s(s, rd, rs);
}

static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_ext32u(s, rd, rs);
}

static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
    tcg_out_mov(s, TCG_TYPE_I32, rd, rs);
}

static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
{
    return false;
}

static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                             tcg_target_long imm)
{
    /* This function is only used for passing structs by reference. */
    g_assert_not_reached();
}

static void tcg_out_call(TCGContext *s, const tcg_insn_unit *func,
                         const TCGHelperInfo *info)
{
    ffi_cif *cif = info->cif;
    tcg_insn_unit insn = 0;
    uint8_t which;

    if (cif->rtype == &ffi_type_void) {
        which = 0;
    } else {
        tcg_debug_assert(cif->rtype->size == 4 ||
                         cif->rtype->size == 8 ||
                         cif->rtype->size == 16);
        which = ctz32(cif->rtype->size) - 1;
    }
    new_pool_l2(s, 20, s->code_ptr, 0, (uintptr_t)func, (uintptr_t)cif);
    insn = deposit32(insn, 0, 8, INDEX_op_call);
    insn = deposit32(insn, 8, 4, which);
    tcg_out32(s, insn);
}

#if TCG_TARGET_REG_BITS == 64
# define CASE_32_64(x)                          \
        case glue(glue(INDEX_op_, x), _i64):    \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x)                             \
        case glue(glue(INDEX_op_, x), _i64):
#else
# define CASE_32_64(x)                          \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x)
#endif

static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg)
{
    tcg_out_op_p(s, INDEX_op_exit_tb, (void *)arg);
}

static void tcg_out_goto_tb(TCGContext *s, int which)
{
    /* indirect jump method. */
    tcg_out_op_p(s, INDEX_op_goto_tb, (void *)get_jmp_target_addr(s, which));
    set_jmp_reset_offset(s, which);
}
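
/*
 * Since goto_tb always indirects through the per-TB jump-target slot
 * (get_jmp_target_addr() above), retargeting a chained TB only rewrites
 * that slot; the emitted instruction never changes, which is why
 * tb_target_set_jmp_target() below has nothing to do.
 */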

void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
                              uintptr_t jmp_rx, uintptr_t jmp_rw)
{
    /* Always indirect, nothing to do */
}

static void tgen_add(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_add, a0, a1, a2);
}

static const TCGOutOpBinary outop_add = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_add,
};

static void tgen_and(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_and, a0, a1, a2);
}

static const TCGOutOpBinary outop_and = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_and,
};

static void tgen_andc(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_andc, a0, a1, a2);
}

static const TCGOutOpBinary outop_andc = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_andc,
};

static void tgen_eqv(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_eqv, a0, a1, a2);
}

static const TCGOutOpBinary outop_eqv = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_eqv,
};

static void tgen_nand(TCGContext *s, TCGType type,
                      TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_nand, a0, a1, a2);
}

static const TCGOutOpBinary outop_nand = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_nand,
};

static void tgen_nor(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_nor, a0, a1, a2);
}

static const TCGOutOpBinary outop_nor = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_nor,
};

static void tgen_or(TCGContext *s, TCGType type,
                    TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_or, a0, a1, a2);
}

static const TCGOutOpBinary outop_or = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_or,
};

static void tgen_orc(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_orc, a0, a1, a2);
}

static const TCGOutOpBinary outop_orc = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_orc,
};

static void tgen_sub(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_sub, a0, a1, a2);
}

static const TCGOutOpSubtract outop_sub = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_sub,
};

static void tgen_xor(TCGContext *s, TCGType type,
                     TCGReg a0, TCGReg a1, TCGReg a2)
{
    tcg_out_op_rrr(s, INDEX_op_xor, a0, a1, a2);
}

static const TCGOutOpBinary outop_xor = {
    .base.static_constraint = C_O1_I2(r, r, r),
    .out_rrr = tgen_xor,
};
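
/*
 * Opcodes without a dedicated TCGOutOp* entry above funnel through
 * tcg_out_op() below, which for the most part just selects the
 * fixed-format encoder (tcg_out_op_rr, _rrr, _rrrc, ...) matching the
 * operand count.
 */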

static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS])
{
    int width;

    switch (opc) {
    case INDEX_op_goto_ptr:
        tcg_out_op_r(s, opc, args[0]);
        break;

    case INDEX_op_br:
        tcg_out_op_l(s, opc, arg_label(args[0]));
        break;

    CASE_32_64(setcond)
        tcg_out_op_rrrc(s, opc, args[0], args[1], args[2], args[3]);
        break;

    CASE_32_64(movcond)
    case INDEX_op_setcond2_i32:
        tcg_out_op_rrrrrc(s, opc, args[0], args[1], args[2],
                          args[3], args[4], args[5]);
        break;

    CASE_32_64(ld8u)
    CASE_32_64(ld8s)
    CASE_32_64(ld16u)
    CASE_32_64(ld16s)
    case INDEX_op_ld_i32:
    CASE_64(ld32u)
    CASE_64(ld32s)
    CASE_64(ld)
    CASE_32_64(st8)
    CASE_32_64(st16)
    case INDEX_op_st_i32:
    CASE_64(st32)
    CASE_64(st)
        tcg_out_ldst(s, opc, args[0], args[1], args[2]);
        break;

    CASE_32_64(mul)
    CASE_32_64(shl)
    CASE_32_64(shr)
    CASE_32_64(sar)
    CASE_32_64(rotl)     /* Optional (TCG_TARGET_HAS_rot_*). */
    CASE_32_64(rotr)     /* Optional (TCG_TARGET_HAS_rot_*). */
    CASE_32_64(div)      /* Optional (TCG_TARGET_HAS_div_*). */
    CASE_32_64(divu)     /* Optional (TCG_TARGET_HAS_div_*). */
    CASE_32_64(rem)      /* Optional (TCG_TARGET_HAS_div_*). */
    CASE_32_64(remu)     /* Optional (TCG_TARGET_HAS_div_*). */
    CASE_32_64(clz)      /* Optional (TCG_TARGET_HAS_clz_*). */
    CASE_32_64(ctz)      /* Optional (TCG_TARGET_HAS_ctz_*). */
        tcg_out_op_rrr(s, opc, args[0], args[1], args[2]);
        break;

    CASE_32_64(deposit)
        tcg_out_op_rrrbb(s, opc, args[0], args[1], args[2], args[3], args[4]);
        break;

    CASE_32_64(extract)  /* Optional (TCG_TARGET_HAS_extract_*). */
    CASE_32_64(sextract) /* Optional (TCG_TARGET_HAS_sextract_*). */
        tcg_out_op_rrbb(s, opc, args[0], args[1], args[2], args[3]);
        break;

    CASE_32_64(brcond)
        tcg_out_op_rrrc(s, (opc == INDEX_op_brcond_i32
                            ? INDEX_op_setcond_i32 : INDEX_op_setcond_i64),
                        TCG_REG_TMP, args[0], args[1], args[2]);
        tcg_out_op_rl(s, opc, TCG_REG_TMP, arg_label(args[3]));
        break;

    CASE_32_64(neg)      /* Optional (TCG_TARGET_HAS_neg_*). */
    CASE_32_64(not)      /* Optional (TCG_TARGET_HAS_not_*). */
    CASE_32_64(ctpop)    /* Optional (TCG_TARGET_HAS_ctpop_*). */
    case INDEX_op_bswap32_i32: /* Optional (TCG_TARGET_HAS_bswap32_i32). */
    case INDEX_op_bswap64_i64: /* Optional (TCG_TARGET_HAS_bswap64_i64). */
        tcg_out_op_rr(s, opc, args[0], args[1]);
        break;
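
    /*
     * For the 16/32-bit bswaps below, args[2] carries TCG_BSWAP_* flags.
     * The base operation already zero-extends and ignores high input
     * bits, so TCG_BSWAP_IZ/TCG_BSWAP_OZ come for free and only
     * TCG_BSWAP_OS requires an extra sign-extension step.
     */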
    case INDEX_op_bswap16_i32: /* Optional (TCG_TARGET_HAS_bswap16_i32). */
    case INDEX_op_bswap16_i64: /* Optional (TCG_TARGET_HAS_bswap16_i64). */
        width = 16;
        goto do_bswap;
    case INDEX_op_bswap32_i64: /* Optional (TCG_TARGET_HAS_bswap32_i64). */
        width = 32;
    do_bswap:
        /* The base tci bswaps zero-extend, and ignore high bits. */
        tcg_out_op_rr(s, opc, args[0], args[1]);
        if (args[2] & TCG_BSWAP_OS) {
            tcg_out_sextract(s, TCG_TYPE_REG, args[0], args[0], 0, width);
        }
        break;

    CASE_32_64(add2)
    CASE_32_64(sub2)
        tcg_out_op_rrrrrr(s, opc, args[0], args[1], args[2],
                          args[3], args[4], args[5]);
        break;

#if TCG_TARGET_REG_BITS == 32
    case INDEX_op_brcond2_i32:
        tcg_out_op_rrrrrc(s, INDEX_op_setcond2_i32, TCG_REG_TMP,
                          args[0], args[1], args[2], args[3], args[4]);
        tcg_out_op_rl(s, INDEX_op_brcond_i32, TCG_REG_TMP, arg_label(args[5]));
        break;
#endif

    CASE_32_64(mulu2)
    CASE_32_64(muls2)
        tcg_out_op_rrrr(s, opc, args[0], args[1], args[2], args[3]);
        break;

    case INDEX_op_qemu_ld_i64:
    case INDEX_op_qemu_st_i64:
        if (TCG_TARGET_REG_BITS == 32) {
            tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP, args[3]);
            tcg_out_op_rrrr(s, opc, args[0], args[1], args[2], TCG_REG_TMP);
            break;
        }
        /* fall through */
    case INDEX_op_qemu_ld_i32:
    case INDEX_op_qemu_st_i32:
        if (TCG_TARGET_REG_BITS == 64 && s->addr_type == TCG_TYPE_I32) {
            tcg_out_ext32u(s, TCG_REG_TMP, args[1]);
            tcg_out_op_rrm(s, opc, args[0], TCG_REG_TMP, args[2]);
        } else {
            tcg_out_op_rrm(s, opc, args[0], args[1], args[2]);
        }
        break;

    case INDEX_op_mb:
        tcg_out_op_v(s, opc);
        break;

    case INDEX_op_call:        /* Always emitted via tcg_out_call. */
    case INDEX_op_exit_tb:     /* Always emitted via tcg_out_exit_tb. */
    case INDEX_op_goto_tb:     /* Always emitted via tcg_out_goto_tb. */
    case INDEX_op_ext_i32_i64: /* Always emitted via tcg_reg_alloc_op. */
    case INDEX_op_extu_i32_i64:
    case INDEX_op_extrl_i64_i32:
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_st(TCGContext *s, TCGType type, TCGReg val, TCGReg base,
                       intptr_t offset)
{
    switch (type) {
    case TCG_TYPE_I32:
        tcg_out_ldst(s, INDEX_op_st_i32, val, base, offset);
        break;
#if TCG_TARGET_REG_BITS == 64
    case TCG_TYPE_I64:
        tcg_out_ldst(s, INDEX_op_st_i64, val, base, offset);
        break;
#endif
    default:
        g_assert_not_reached();
    }
}

static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                               TCGReg base, intptr_t ofs)
{
    return false;
}

/* Test if a constant matches the constraint. */
static bool tcg_target_const_match(int64_t val, int ct,
                                   TCGType type, TCGCond cond, int vece)
{
    return ct & TCG_CT_CONST;
}

static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
    memset(p, 0, sizeof(*p) * count);
}
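
/*
 * Worked example for the clobber mask below: 128 / TCG_TARGET_REG_BITS
 * is 2 on a 64-bit host (r00-r01) and 4 on a 32-bit host (r00-r03),
 * matching the "either 2 or 4 are call clobbered" comment in
 * tcg_target_reg_alloc_order above.
 */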

static void tcg_target_init(TCGContext *s)
{
    /* The current code uses uint8_t for tcg operations. */
    tcg_debug_assert(tcg_op_defs_max <= UINT8_MAX);

    /* Registers available for 32 bit operations. */
    tcg_target_available_regs[TCG_TYPE_I32] = BIT(TCG_TARGET_NB_REGS) - 1;
    /* Registers available for 64 bit operations. */
    tcg_target_available_regs[TCG_TYPE_I64] = BIT(TCG_TARGET_NB_REGS) - 1;
    /*
     * The interpreter "registers" are in the local stack frame and
     * cannot be clobbered by the called helper functions.  However,
     * the interpreter assumes a 128-bit return value and assigns to
     * the return value registers.
     */
    tcg_target_call_clobber_regs =
        MAKE_64BIT_MASK(TCG_REG_R0, 128 / TCG_TARGET_REG_BITS);

    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);

    /* The call arguments come first, followed by the temp storage. */
    tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE,
                  TCG_STATIC_FRAME_SIZE);
}

/* Generate global QEMU prologue and epilogue code. */
static inline void tcg_target_qemu_prologue(TCGContext *s)
{
}

static void tcg_out_tb_start(TCGContext *s)
{
    /* nothing to do */
}

bool tcg_target_has_memory_bswap(MemOp memop)
{
    return true;
}

static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    g_assert_not_reached();
}

static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    g_assert_not_reached();
}