/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2009, 2011 Stefan Weil
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* Used for function call generation.
*/ 26#define TCG_TARGET_CALL_STACK_OFFSET 0 27#define TCG_TARGET_STACK_ALIGN 8 28#if TCG_TARGET_REG_BITS == 32 29# define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_EVEN 30# define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_EVEN 31# define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_EVEN 32#else 33# define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_NORMAL 34# define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_NORMAL 35# define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_NORMAL 36#endif 37#define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_NORMAL 38 39static TCGConstraintSetIndex 40tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags) 41{ 42 switch (op) { 43 case INDEX_op_goto_ptr: 44 return C_O0_I1(r); 45 46 case INDEX_op_ld8u_i32: 47 case INDEX_op_ld8s_i32: 48 case INDEX_op_ld16u_i32: 49 case INDEX_op_ld16s_i32: 50 case INDEX_op_ld_i32: 51 case INDEX_op_ld8u_i64: 52 case INDEX_op_ld8s_i64: 53 case INDEX_op_ld16u_i64: 54 case INDEX_op_ld16s_i64: 55 case INDEX_op_ld32u_i64: 56 case INDEX_op_ld32s_i64: 57 case INDEX_op_ld_i64: 58 case INDEX_op_ext_i32_i64: 59 case INDEX_op_extu_i32_i64: 60 case INDEX_op_bswap16_i32: 61 case INDEX_op_bswap16_i64: 62 case INDEX_op_bswap32_i32: 63 case INDEX_op_bswap32_i64: 64 case INDEX_op_bswap64_i64: 65 case INDEX_op_extract_i32: 66 case INDEX_op_extract_i64: 67 case INDEX_op_sextract_i32: 68 case INDEX_op_sextract_i64: 69 return C_O1_I1(r, r); 70 71 case INDEX_op_st8_i32: 72 case INDEX_op_st16_i32: 73 case INDEX_op_st_i32: 74 case INDEX_op_st8_i64: 75 case INDEX_op_st16_i64: 76 case INDEX_op_st32_i64: 77 case INDEX_op_st_i64: 78 return C_O0_I2(r, r); 79 80 case INDEX_op_deposit_i32: 81 case INDEX_op_deposit_i64: 82 return C_O1_I2(r, r, r); 83 84 case INDEX_op_add2_i32: 85 case INDEX_op_add2_i64: 86 case INDEX_op_sub2_i32: 87 case INDEX_op_sub2_i64: 88 return C_O2_I4(r, r, r, r, r, r); 89 90#if TCG_TARGET_REG_BITS == 32 91 case INDEX_op_brcond2_i32: 92 return C_O0_I4(r, r, r, r); 93#endif 94 95 case INDEX_op_setcond2_i32: 96 return C_O1_I4(r, r, r, r, r); 97 98 case 
INDEX_op_qemu_ld_i32: 99 return C_O1_I1(r, r); 100 case INDEX_op_qemu_ld_i64: 101 return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O2_I1(r, r, r); 102 case INDEX_op_qemu_st_i32: 103 return C_O0_I2(r, r); 104 case INDEX_op_qemu_st_i64: 105 return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r) : C_O0_I3(r, r, r); 106 107 default: 108 return C_NotImplemented; 109 } 110} 111 112static const int tcg_target_reg_alloc_order[] = { 113 TCG_REG_R4, 114 TCG_REG_R5, 115 TCG_REG_R6, 116 TCG_REG_R7, 117 TCG_REG_R8, 118 TCG_REG_R9, 119 TCG_REG_R10, 120 TCG_REG_R11, 121 TCG_REG_R12, 122 TCG_REG_R13, 123 TCG_REG_R14, 124 TCG_REG_R15, 125 /* Either 2 or 4 of these are call clobbered, so use them last. */ 126 TCG_REG_R3, 127 TCG_REG_R2, 128 TCG_REG_R1, 129 TCG_REG_R0, 130}; 131 132/* No call arguments via registers. All will be stored on the "stack". */ 133static const int tcg_target_call_iarg_regs[] = { }; 134 135static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot) 136{ 137 tcg_debug_assert(kind == TCG_CALL_RET_NORMAL); 138 tcg_debug_assert(slot >= 0 && slot < 128 / TCG_TARGET_REG_BITS); 139 return TCG_REG_R0 + slot; 140} 141 142#ifdef CONFIG_DEBUG_TCG 143static const char *const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { 144 "r00", 145 "r01", 146 "r02", 147 "r03", 148 "r04", 149 "r05", 150 "r06", 151 "r07", 152 "r08", 153 "r09", 154 "r10", 155 "r11", 156 "r12", 157 "r13", 158 "r14", 159 "r15", 160}; 161#endif 162 163static bool patch_reloc(tcg_insn_unit *code_ptr, int type, 164 intptr_t value, intptr_t addend) 165{ 166 intptr_t diff = value - (intptr_t)(code_ptr + 1); 167 168 tcg_debug_assert(addend == 0); 169 tcg_debug_assert(type == 20); 170 171 if (diff == sextract32(diff, 0, type)) { 172 tcg_patch32(code_ptr, deposit32(*code_ptr, 32 - type, type, diff)); 173 return true; 174 } 175 return false; 176} 177 178static void stack_bounds_check(TCGReg base, intptr_t offset) 179{ 180 if (base == TCG_REG_CALL_STACK) { 181 tcg_debug_assert(offset >= 0); 182 
tcg_debug_assert(offset < (TCG_STATIC_CALL_ARGS_SIZE + 183 TCG_STATIC_FRAME_SIZE)); 184 } 185} 186 187static void tcg_out_op_l(TCGContext *s, TCGOpcode op, TCGLabel *l0) 188{ 189 tcg_insn_unit insn = 0; 190 191 tcg_out_reloc(s, s->code_ptr, 20, l0, 0); 192 insn = deposit32(insn, 0, 8, op); 193 tcg_out32(s, insn); 194} 195 196static void tcg_out_op_p(TCGContext *s, TCGOpcode op, void *p0) 197{ 198 tcg_insn_unit insn = 0; 199 intptr_t diff; 200 201 /* Special case for exit_tb: map null -> 0. */ 202 if (p0 == NULL) { 203 diff = 0; 204 } else { 205 diff = p0 - (void *)(s->code_ptr + 1); 206 tcg_debug_assert(diff != 0); 207 if (diff != sextract32(diff, 0, 20)) { 208 tcg_raise_tb_overflow(s); 209 } 210 } 211 insn = deposit32(insn, 0, 8, op); 212 insn = deposit32(insn, 12, 20, diff); 213 tcg_out32(s, insn); 214} 215 216static void tcg_out_op_r(TCGContext *s, TCGOpcode op, TCGReg r0) 217{ 218 tcg_insn_unit insn = 0; 219 220 insn = deposit32(insn, 0, 8, op); 221 insn = deposit32(insn, 8, 4, r0); 222 tcg_out32(s, insn); 223} 224 225static void tcg_out_op_v(TCGContext *s, TCGOpcode op) 226{ 227 tcg_out32(s, (uint8_t)op); 228} 229 230static void tcg_out_op_ri(TCGContext *s, TCGOpcode op, TCGReg r0, int32_t i1) 231{ 232 tcg_insn_unit insn = 0; 233 234 tcg_debug_assert(i1 == sextract32(i1, 0, 20)); 235 insn = deposit32(insn, 0, 8, op); 236 insn = deposit32(insn, 8, 4, r0); 237 insn = deposit32(insn, 12, 20, i1); 238 tcg_out32(s, insn); 239} 240 241static void tcg_out_op_rl(TCGContext *s, TCGOpcode op, TCGReg r0, TCGLabel *l1) 242{ 243 tcg_insn_unit insn = 0; 244 245 tcg_out_reloc(s, s->code_ptr, 20, l1, 0); 246 insn = deposit32(insn, 0, 8, op); 247 insn = deposit32(insn, 8, 4, r0); 248 tcg_out32(s, insn); 249} 250 251static void tcg_out_op_rr(TCGContext *s, TCGOpcode op, TCGReg r0, TCGReg r1) 252{ 253 tcg_insn_unit insn = 0; 254 255 insn = deposit32(insn, 0, 8, op); 256 insn = deposit32(insn, 8, 4, r0); 257 insn = deposit32(insn, 12, 4, r1); 258 tcg_out32(s, insn); 259} 260 
261static void tcg_out_op_rrm(TCGContext *s, TCGOpcode op, 262 TCGReg r0, TCGReg r1, TCGArg m2) 263{ 264 tcg_insn_unit insn = 0; 265 266 tcg_debug_assert(m2 == extract32(m2, 0, 16)); 267 insn = deposit32(insn, 0, 8, op); 268 insn = deposit32(insn, 8, 4, r0); 269 insn = deposit32(insn, 12, 4, r1); 270 insn = deposit32(insn, 16, 16, m2); 271 tcg_out32(s, insn); 272} 273 274static void tcg_out_op_rrr(TCGContext *s, TCGOpcode op, 275 TCGReg r0, TCGReg r1, TCGReg r2) 276{ 277 tcg_insn_unit insn = 0; 278 279 insn = deposit32(insn, 0, 8, op); 280 insn = deposit32(insn, 8, 4, r0); 281 insn = deposit32(insn, 12, 4, r1); 282 insn = deposit32(insn, 16, 4, r2); 283 tcg_out32(s, insn); 284} 285 286static void tcg_out_op_rrs(TCGContext *s, TCGOpcode op, 287 TCGReg r0, TCGReg r1, intptr_t i2) 288{ 289 tcg_insn_unit insn = 0; 290 291 tcg_debug_assert(i2 == sextract32(i2, 0, 16)); 292 insn = deposit32(insn, 0, 8, op); 293 insn = deposit32(insn, 8, 4, r0); 294 insn = deposit32(insn, 12, 4, r1); 295 insn = deposit32(insn, 16, 16, i2); 296 tcg_out32(s, insn); 297} 298 299static void tcg_out_op_rrbb(TCGContext *s, TCGOpcode op, TCGReg r0, 300 TCGReg r1, uint8_t b2, uint8_t b3) 301{ 302 tcg_insn_unit insn = 0; 303 304 tcg_debug_assert(b2 == extract32(b2, 0, 6)); 305 tcg_debug_assert(b3 == extract32(b3, 0, 6)); 306 insn = deposit32(insn, 0, 8, op); 307 insn = deposit32(insn, 8, 4, r0); 308 insn = deposit32(insn, 12, 4, r1); 309 insn = deposit32(insn, 16, 6, b2); 310 insn = deposit32(insn, 22, 6, b3); 311 tcg_out32(s, insn); 312} 313 314static void tcg_out_op_rrrc(TCGContext *s, TCGOpcode op, 315 TCGReg r0, TCGReg r1, TCGReg r2, TCGCond c3) 316{ 317 tcg_insn_unit insn = 0; 318 319 insn = deposit32(insn, 0, 8, op); 320 insn = deposit32(insn, 8, 4, r0); 321 insn = deposit32(insn, 12, 4, r1); 322 insn = deposit32(insn, 16, 4, r2); 323 insn = deposit32(insn, 20, 4, c3); 324 tcg_out32(s, insn); 325} 326 327static void tcg_out_op_rrrbb(TCGContext *s, TCGOpcode op, TCGReg r0, 328 TCGReg r1, 
TCGReg r2, uint8_t b3, uint8_t b4) 329{ 330 tcg_insn_unit insn = 0; 331 332 tcg_debug_assert(b3 == extract32(b3, 0, 6)); 333 tcg_debug_assert(b4 == extract32(b4, 0, 6)); 334 insn = deposit32(insn, 0, 8, op); 335 insn = deposit32(insn, 8, 4, r0); 336 insn = deposit32(insn, 12, 4, r1); 337 insn = deposit32(insn, 16, 4, r2); 338 insn = deposit32(insn, 20, 6, b3); 339 insn = deposit32(insn, 26, 6, b4); 340 tcg_out32(s, insn); 341} 342 343static void tcg_out_op_rrrr(TCGContext *s, TCGOpcode op, 344 TCGReg r0, TCGReg r1, TCGReg r2, TCGReg r3) 345{ 346 tcg_insn_unit insn = 0; 347 348 insn = deposit32(insn, 0, 8, op); 349 insn = deposit32(insn, 8, 4, r0); 350 insn = deposit32(insn, 12, 4, r1); 351 insn = deposit32(insn, 16, 4, r2); 352 insn = deposit32(insn, 20, 4, r3); 353 tcg_out32(s, insn); 354} 355 356static void tcg_out_op_rrrrrc(TCGContext *s, TCGOpcode op, 357 TCGReg r0, TCGReg r1, TCGReg r2, 358 TCGReg r3, TCGReg r4, TCGCond c5) 359{ 360 tcg_insn_unit insn = 0; 361 362 insn = deposit32(insn, 0, 8, op); 363 insn = deposit32(insn, 8, 4, r0); 364 insn = deposit32(insn, 12, 4, r1); 365 insn = deposit32(insn, 16, 4, r2); 366 insn = deposit32(insn, 20, 4, r3); 367 insn = deposit32(insn, 24, 4, r4); 368 insn = deposit32(insn, 28, 4, c5); 369 tcg_out32(s, insn); 370} 371 372static void tcg_out_op_rrrrrr(TCGContext *s, TCGOpcode op, 373 TCGReg r0, TCGReg r1, TCGReg r2, 374 TCGReg r3, TCGReg r4, TCGReg r5) 375{ 376 tcg_insn_unit insn = 0; 377 378 insn = deposit32(insn, 0, 8, op); 379 insn = deposit32(insn, 8, 4, r0); 380 insn = deposit32(insn, 12, 4, r1); 381 insn = deposit32(insn, 16, 4, r2); 382 insn = deposit32(insn, 20, 4, r3); 383 insn = deposit32(insn, 24, 4, r4); 384 insn = deposit32(insn, 28, 4, r5); 385 tcg_out32(s, insn); 386} 387 388static void tcg_out_ldst(TCGContext *s, TCGOpcode op, TCGReg val, 389 TCGReg base, intptr_t offset) 390{ 391 stack_bounds_check(base, offset); 392 if (offset != sextract32(offset, 0, 16)) { 393 tcg_out_movi(s, TCG_TYPE_PTR, 
TCG_REG_TMP, offset); 394 tcg_out_op_rrr(s, INDEX_op_add, TCG_REG_TMP, TCG_REG_TMP, base); 395 base = TCG_REG_TMP; 396 offset = 0; 397 } 398 tcg_out_op_rrs(s, op, val, base, offset); 399} 400 401static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg val, TCGReg base, 402 intptr_t offset) 403{ 404 switch (type) { 405 case TCG_TYPE_I32: 406 tcg_out_ldst(s, INDEX_op_ld_i32, val, base, offset); 407 break; 408#if TCG_TARGET_REG_BITS == 64 409 case TCG_TYPE_I64: 410 tcg_out_ldst(s, INDEX_op_ld_i64, val, base, offset); 411 break; 412#endif 413 default: 414 g_assert_not_reached(); 415 } 416} 417 418static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg) 419{ 420 tcg_out_op_rr(s, INDEX_op_mov, ret, arg); 421 return true; 422} 423 424static void tcg_out_movi(TCGContext *s, TCGType type, 425 TCGReg ret, tcg_target_long arg) 426{ 427 switch (type) { 428 case TCG_TYPE_I32: 429#if TCG_TARGET_REG_BITS == 64 430 arg = (int32_t)arg; 431 /* fall through */ 432 case TCG_TYPE_I64: 433#endif 434 break; 435 default: 436 g_assert_not_reached(); 437 } 438 439 if (arg == sextract32(arg, 0, 20)) { 440 tcg_out_op_ri(s, INDEX_op_tci_movi, ret, arg); 441 } else { 442 tcg_insn_unit insn = 0; 443 444 new_pool_label(s, arg, 20, s->code_ptr, 0); 445 insn = deposit32(insn, 0, 8, INDEX_op_tci_movl); 446 insn = deposit32(insn, 8, 4, ret); 447 tcg_out32(s, insn); 448 } 449} 450 451static void tcg_out_extract(TCGContext *s, TCGType type, TCGReg rd, 452 TCGReg rs, unsigned pos, unsigned len) 453{ 454 TCGOpcode opc = type == TCG_TYPE_I32 ? 455 INDEX_op_extract_i32 : 456 INDEX_op_extract_i64; 457 tcg_out_op_rrbb(s, opc, rd, rs, pos, len); 458} 459 460static void tcg_out_sextract(TCGContext *s, TCGType type, TCGReg rd, 461 TCGReg rs, unsigned pos, unsigned len) 462{ 463 TCGOpcode opc = type == TCG_TYPE_I32 ? 
464 INDEX_op_sextract_i32 : 465 INDEX_op_sextract_i64; 466 tcg_out_op_rrbb(s, opc, rd, rs, pos, len); 467} 468 469static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs) 470{ 471 tcg_out_sextract(s, type, rd, rs, 0, 8); 472} 473 474static void tcg_out_ext8u(TCGContext *s, TCGReg rd, TCGReg rs) 475{ 476 tcg_out_extract(s, TCG_TYPE_REG, rd, rs, 0, 8); 477} 478 479static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs) 480{ 481 tcg_out_sextract(s, type, rd, rs, 0, 16); 482} 483 484static void tcg_out_ext16u(TCGContext *s, TCGReg rd, TCGReg rs) 485{ 486 tcg_out_extract(s, TCG_TYPE_REG, rd, rs, 0, 16); 487} 488 489static void tcg_out_ext32s(TCGContext *s, TCGReg rd, TCGReg rs) 490{ 491 tcg_debug_assert(TCG_TARGET_REG_BITS == 64); 492 tcg_out_sextract(s, TCG_TYPE_I64, rd, rs, 0, 32); 493} 494 495static void tcg_out_ext32u(TCGContext *s, TCGReg rd, TCGReg rs) 496{ 497 tcg_debug_assert(TCG_TARGET_REG_BITS == 64); 498 tcg_out_extract(s, TCG_TYPE_I64, rd, rs, 0, 32); 499} 500 501static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs) 502{ 503 tcg_out_ext32s(s, rd, rs); 504} 505 506static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs) 507{ 508 tcg_out_ext32u(s, rd, rs); 509} 510 511static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rs) 512{ 513 tcg_debug_assert(TCG_TARGET_REG_BITS == 64); 514 tcg_out_mov(s, TCG_TYPE_I32, rd, rs); 515} 516 517static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2) 518{ 519 return false; 520} 521 522static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs, 523 tcg_target_long imm) 524{ 525 /* This function is only used for passing structs by reference. 
*/ 526 g_assert_not_reached(); 527} 528 529static void tcg_out_call(TCGContext *s, const tcg_insn_unit *func, 530 const TCGHelperInfo *info) 531{ 532 ffi_cif *cif = info->cif; 533 tcg_insn_unit insn = 0; 534 uint8_t which; 535 536 if (cif->rtype == &ffi_type_void) { 537 which = 0; 538 } else { 539 tcg_debug_assert(cif->rtype->size == 4 || 540 cif->rtype->size == 8 || 541 cif->rtype->size == 16); 542 which = ctz32(cif->rtype->size) - 1; 543 } 544 new_pool_l2(s, 20, s->code_ptr, 0, (uintptr_t)func, (uintptr_t)cif); 545 insn = deposit32(insn, 0, 8, INDEX_op_call); 546 insn = deposit32(insn, 8, 4, which); 547 tcg_out32(s, insn); 548} 549 550#if TCG_TARGET_REG_BITS == 64 551# define CASE_32_64(x) \ 552 case glue(glue(INDEX_op_, x), _i64): \ 553 case glue(glue(INDEX_op_, x), _i32): 554# define CASE_64(x) \ 555 case glue(glue(INDEX_op_, x), _i64): 556#else 557# define CASE_32_64(x) \ 558 case glue(glue(INDEX_op_, x), _i32): 559# define CASE_64(x) 560#endif 561 562static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg) 563{ 564 tcg_out_op_p(s, INDEX_op_exit_tb, (void *)arg); 565} 566 567static void tcg_out_goto_tb(TCGContext *s, int which) 568{ 569 /* indirect jump method. 
*/ 570 tcg_out_op_p(s, INDEX_op_goto_tb, (void *)get_jmp_target_addr(s, which)); 571 set_jmp_reset_offset(s, which); 572} 573 574void tb_target_set_jmp_target(const TranslationBlock *tb, int n, 575 uintptr_t jmp_rx, uintptr_t jmp_rw) 576{ 577 /* Always indirect, nothing to do */ 578} 579 580static void tgen_add(TCGContext *s, TCGType type, 581 TCGReg a0, TCGReg a1, TCGReg a2) 582{ 583 tcg_out_op_rrr(s, INDEX_op_add, a0, a1, a2); 584} 585 586static const TCGOutOpBinary outop_add = { 587 .base.static_constraint = C_O1_I2(r, r, r), 588 .out_rrr = tgen_add, 589}; 590 591static void tgen_and(TCGContext *s, TCGType type, 592 TCGReg a0, TCGReg a1, TCGReg a2) 593{ 594 tcg_out_op_rrr(s, INDEX_op_and, a0, a1, a2); 595} 596 597static const TCGOutOpBinary outop_and = { 598 .base.static_constraint = C_O1_I2(r, r, r), 599 .out_rrr = tgen_and, 600}; 601 602static void tgen_andc(TCGContext *s, TCGType type, 603 TCGReg a0, TCGReg a1, TCGReg a2) 604{ 605 tcg_out_op_rrr(s, INDEX_op_andc, a0, a1, a2); 606} 607 608static const TCGOutOpBinary outop_andc = { 609 .base.static_constraint = C_O1_I2(r, r, r), 610 .out_rrr = tgen_andc, 611}; 612 613static void tgen_clz(TCGContext *s, TCGType type, 614 TCGReg a0, TCGReg a1, TCGReg a2) 615{ 616 TCGOpcode opc = (type == TCG_TYPE_I32 617 ? INDEX_op_tci_clz32 618 : INDEX_op_clz); 619 tcg_out_op_rrr(s, opc, a0, a1, a2); 620} 621 622static const TCGOutOpBinary outop_clz = { 623 .base.static_constraint = C_O1_I2(r, r, r), 624 .out_rrr = tgen_clz, 625}; 626 627static void tgen_ctz(TCGContext *s, TCGType type, 628 TCGReg a0, TCGReg a1, TCGReg a2) 629{ 630 TCGOpcode opc = (type == TCG_TYPE_I32 631 ? INDEX_op_tci_ctz32 632 : INDEX_op_ctz); 633 tcg_out_op_rrr(s, opc, a0, a1, a2); 634} 635 636static const TCGOutOpBinary outop_ctz = { 637 .base.static_constraint = C_O1_I2(r, r, r), 638 .out_rrr = tgen_ctz, 639}; 640 641static void tgen_divs(TCGContext *s, TCGType type, 642 TCGReg a0, TCGReg a1, TCGReg a2) 643{ 644 TCGOpcode opc = (type == TCG_TYPE_I32 645 ? 
INDEX_op_tci_divs32 646 : INDEX_op_divs); 647 tcg_out_op_rrr(s, opc, a0, a1, a2); 648} 649 650static const TCGOutOpBinary outop_divs = { 651 .base.static_constraint = C_O1_I2(r, r, r), 652 .out_rrr = tgen_divs, 653}; 654 655static const TCGOutOpDivRem outop_divs2 = { 656 .base.static_constraint = C_NotImplemented, 657}; 658 659static void tgen_divu(TCGContext *s, TCGType type, 660 TCGReg a0, TCGReg a1, TCGReg a2) 661{ 662 TCGOpcode opc = (type == TCG_TYPE_I32 663 ? INDEX_op_tci_divu32 664 : INDEX_op_divu); 665 tcg_out_op_rrr(s, opc, a0, a1, a2); 666} 667 668static const TCGOutOpBinary outop_divu = { 669 .base.static_constraint = C_O1_I2(r, r, r), 670 .out_rrr = tgen_divu, 671}; 672 673static const TCGOutOpDivRem outop_divu2 = { 674 .base.static_constraint = C_NotImplemented, 675}; 676 677static void tgen_eqv(TCGContext *s, TCGType type, 678 TCGReg a0, TCGReg a1, TCGReg a2) 679{ 680 tcg_out_op_rrr(s, INDEX_op_eqv, a0, a1, a2); 681} 682 683static const TCGOutOpBinary outop_eqv = { 684 .base.static_constraint = C_O1_I2(r, r, r), 685 .out_rrr = tgen_eqv, 686}; 687 688static void tgen_mul(TCGContext *s, TCGType type, 689 TCGReg a0, TCGReg a1, TCGReg a2) 690{ 691 tcg_out_op_rrr(s, INDEX_op_mul, a0, a1, a2); 692} 693 694static const TCGOutOpBinary outop_mul = { 695 .base.static_constraint = C_O1_I2(r, r, r), 696 .out_rrr = tgen_mul, 697}; 698 699static TCGConstraintSetIndex cset_mul2(TCGType type, unsigned flags) 700{ 701 return type == TCG_TYPE_REG ? 
C_O2_I2(r, r, r, r) : C_NotImplemented; 702} 703 704static void tgen_muls2(TCGContext *s, TCGType type, 705 TCGReg a0, TCGReg a1, TCGReg a2, TCGReg a3) 706{ 707 tcg_out_op_rrrr(s, INDEX_op_muls2, a0, a1, a2, a3); 708} 709 710static const TCGOutOpMul2 outop_muls2 = { 711 .base.static_constraint = C_Dynamic, 712 .base.dynamic_constraint = cset_mul2, 713 .out_rrrr = tgen_muls2, 714}; 715 716static const TCGOutOpBinary outop_mulsh = { 717 .base.static_constraint = C_NotImplemented, 718}; 719 720static void tgen_mulu2(TCGContext *s, TCGType type, 721 TCGReg a0, TCGReg a1, TCGReg a2, TCGReg a3) 722{ 723 tcg_out_op_rrrr(s, INDEX_op_mulu2, a0, a1, a2, a3); 724} 725 726static const TCGOutOpMul2 outop_mulu2 = { 727 .base.static_constraint = C_Dynamic, 728 .base.dynamic_constraint = cset_mul2, 729 .out_rrrr = tgen_mulu2, 730}; 731 732static const TCGOutOpBinary outop_muluh = { 733 .base.static_constraint = C_NotImplemented, 734}; 735 736static void tgen_nand(TCGContext *s, TCGType type, 737 TCGReg a0, TCGReg a1, TCGReg a2) 738{ 739 tcg_out_op_rrr(s, INDEX_op_nand, a0, a1, a2); 740} 741 742static const TCGOutOpBinary outop_nand = { 743 .base.static_constraint = C_O1_I2(r, r, r), 744 .out_rrr = tgen_nand, 745}; 746 747static void tgen_nor(TCGContext *s, TCGType type, 748 TCGReg a0, TCGReg a1, TCGReg a2) 749{ 750 tcg_out_op_rrr(s, INDEX_op_nor, a0, a1, a2); 751} 752 753static const TCGOutOpBinary outop_nor = { 754 .base.static_constraint = C_O1_I2(r, r, r), 755 .out_rrr = tgen_nor, 756}; 757 758static void tgen_or(TCGContext *s, TCGType type, 759 TCGReg a0, TCGReg a1, TCGReg a2) 760{ 761 tcg_out_op_rrr(s, INDEX_op_or, a0, a1, a2); 762} 763 764static const TCGOutOpBinary outop_or = { 765 .base.static_constraint = C_O1_I2(r, r, r), 766 .out_rrr = tgen_or, 767}; 768 769static void tgen_orc(TCGContext *s, TCGType type, 770 TCGReg a0, TCGReg a1, TCGReg a2) 771{ 772 tcg_out_op_rrr(s, INDEX_op_orc, a0, a1, a2); 773} 774 775static const TCGOutOpBinary outop_orc = { 776 
.base.static_constraint = C_O1_I2(r, r, r), 777 .out_rrr = tgen_orc, 778}; 779 780static void tgen_rems(TCGContext *s, TCGType type, 781 TCGReg a0, TCGReg a1, TCGReg a2) 782{ 783 TCGOpcode opc = (type == TCG_TYPE_I32 784 ? INDEX_op_tci_rems32 785 : INDEX_op_rems); 786 tcg_out_op_rrr(s, opc, a0, a1, a2); 787} 788 789static const TCGOutOpBinary outop_rems = { 790 .base.static_constraint = C_O1_I2(r, r, r), 791 .out_rrr = tgen_rems, 792}; 793 794static void tgen_remu(TCGContext *s, TCGType type, 795 TCGReg a0, TCGReg a1, TCGReg a2) 796{ 797 TCGOpcode opc = (type == TCG_TYPE_I32 798 ? INDEX_op_tci_remu32 799 : INDEX_op_remu); 800 tcg_out_op_rrr(s, opc, a0, a1, a2); 801} 802 803static const TCGOutOpBinary outop_remu = { 804 .base.static_constraint = C_O1_I2(r, r, r), 805 .out_rrr = tgen_remu, 806}; 807 808static void tgen_rotl(TCGContext *s, TCGType type, 809 TCGReg a0, TCGReg a1, TCGReg a2) 810{ 811 TCGOpcode opc = (type == TCG_TYPE_I32 812 ? INDEX_op_tci_rotl32 813 : INDEX_op_rotl); 814 tcg_out_op_rrr(s, opc, a0, a1, a2); 815} 816 817static const TCGOutOpBinary outop_rotl = { 818 .base.static_constraint = C_O1_I2(r, r, r), 819 .out_rrr = tgen_rotl, 820}; 821 822static void tgen_rotr(TCGContext *s, TCGType type, 823 TCGReg a0, TCGReg a1, TCGReg a2) 824{ 825 TCGOpcode opc = (type == TCG_TYPE_I32 826 ? 
INDEX_op_tci_rotr32 827 : INDEX_op_rotr); 828 tcg_out_op_rrr(s, opc, a0, a1, a2); 829} 830 831static const TCGOutOpBinary outop_rotr = { 832 .base.static_constraint = C_O1_I2(r, r, r), 833 .out_rrr = tgen_rotr, 834}; 835 836static void tgen_sar(TCGContext *s, TCGType type, 837 TCGReg a0, TCGReg a1, TCGReg a2) 838{ 839 if (type < TCG_TYPE_REG) { 840 tcg_out_ext32s(s, TCG_REG_TMP, a1); 841 a1 = TCG_REG_TMP; 842 } 843 tcg_out_op_rrr(s, INDEX_op_sar, a0, a1, a2); 844} 845 846static const TCGOutOpBinary outop_sar = { 847 .base.static_constraint = C_O1_I2(r, r, r), 848 .out_rrr = tgen_sar, 849}; 850 851static void tgen_shl(TCGContext *s, TCGType type, 852 TCGReg a0, TCGReg a1, TCGReg a2) 853{ 854 tcg_out_op_rrr(s, INDEX_op_shl, a0, a1, a2); 855} 856 857static const TCGOutOpBinary outop_shl = { 858 .base.static_constraint = C_O1_I2(r, r, r), 859 .out_rrr = tgen_shl, 860}; 861 862static void tgen_shr(TCGContext *s, TCGType type, 863 TCGReg a0, TCGReg a1, TCGReg a2) 864{ 865 if (type < TCG_TYPE_REG) { 866 tcg_out_ext32u(s, TCG_REG_TMP, a1); 867 a1 = TCG_REG_TMP; 868 } 869 tcg_out_op_rrr(s, INDEX_op_shr, a0, a1, a2); 870} 871 872static const TCGOutOpBinary outop_shr = { 873 .base.static_constraint = C_O1_I2(r, r, r), 874 .out_rrr = tgen_shr, 875}; 876 877static void tgen_sub(TCGContext *s, TCGType type, 878 TCGReg a0, TCGReg a1, TCGReg a2) 879{ 880 tcg_out_op_rrr(s, INDEX_op_sub, a0, a1, a2); 881} 882 883static const TCGOutOpSubtract outop_sub = { 884 .base.static_constraint = C_O1_I2(r, r, r), 885 .out_rrr = tgen_sub, 886}; 887 888static void tgen_xor(TCGContext *s, TCGType type, 889 TCGReg a0, TCGReg a1, TCGReg a2) 890{ 891 tcg_out_op_rrr(s, INDEX_op_xor, a0, a1, a2); 892} 893 894static const TCGOutOpBinary outop_xor = { 895 .base.static_constraint = C_O1_I2(r, r, r), 896 .out_rrr = tgen_xor, 897}; 898 899static void tgen_ctpop(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1) 900{ 901 tcg_out_op_rr(s, INDEX_op_ctpop, a0, a1); 902} 903 904static TCGConstraintSetIndex 
cset_ctpop(TCGType type, unsigned flags) 905{ 906 return type == TCG_TYPE_REG ? C_O1_I1(r, r) : C_NotImplemented; 907} 908 909static const TCGOutOpUnary outop_ctpop = { 910 .base.static_constraint = C_Dynamic, 911 .base.dynamic_constraint = cset_ctpop, 912 .out_rr = tgen_ctpop, 913}; 914 915static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1) 916{ 917 tcg_out_op_rr(s, INDEX_op_neg, a0, a1); 918} 919 920static const TCGOutOpUnary outop_neg = { 921 .base.static_constraint = C_O1_I1(r, r), 922 .out_rr = tgen_neg, 923}; 924 925static void tgen_not(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1) 926{ 927 tcg_out_op_rr(s, INDEX_op_not, a0, a1); 928} 929 930static const TCGOutOpUnary outop_not = { 931 .base.static_constraint = C_O1_I1(r, r), 932 .out_rr = tgen_not, 933}; 934 935static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond, 936 TCGReg dest, TCGReg arg1, TCGReg arg2) 937{ 938 TCGOpcode opc = (type == TCG_TYPE_I32 939 ? INDEX_op_tci_setcond32 940 : INDEX_op_setcond); 941 tcg_out_op_rrrc(s, opc, dest, arg1, arg2, cond); 942} 943 944static const TCGOutOpSetcond outop_setcond = { 945 .base.static_constraint = C_O1_I2(r, r, r), 946 .out_rrr = tgen_setcond, 947}; 948 949static void tgen_negsetcond(TCGContext *s, TCGType type, TCGCond cond, 950 TCGReg dest, TCGReg arg1, TCGReg arg2) 951{ 952 tgen_setcond(s, type, cond, dest, arg1, arg2); 953 tgen_neg(s, type, dest, dest); 954} 955 956static const TCGOutOpSetcond outop_negsetcond = { 957 .base.static_constraint = C_O1_I2(r, r, r), 958 .out_rrr = tgen_negsetcond, 959}; 960 961static void tgen_brcond(TCGContext *s, TCGType type, TCGCond cond, 962 TCGReg arg0, TCGReg arg1, TCGLabel *l) 963{ 964 tgen_setcond(s, type, cond, TCG_REG_TMP, arg0, arg1); 965 tcg_out_op_rl(s, INDEX_op_brcond, TCG_REG_TMP, l); 966} 967 968static const TCGOutOpBrcond outop_brcond = { 969 .base.static_constraint = C_O0_I2(r, r), 970 .out_rr = tgen_brcond, 971}; 972 973static void tgen_movcond(TCGContext *s, TCGType 
type, TCGCond cond, 974 TCGReg ret, TCGReg c1, TCGArg c2, bool const_c2, 975 TCGArg vt, bool const_vt, TCGArg vf, bool consf_vf) 976{ 977 TCGOpcode opc = (type == TCG_TYPE_I32 978 ? INDEX_op_tci_movcond32 979 : INDEX_op_movcond); 980 tcg_out_op_rrrrrc(s, opc, ret, c1, c2, vt, vf, cond); 981} 982 983static const TCGOutOpMovcond outop_movcond = { 984 .base.static_constraint = C_O1_I4(r, r, r, r, r), 985 .out = tgen_movcond, 986}; 987 988static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type, 989 const TCGArg args[TCG_MAX_OP_ARGS], 990 const int const_args[TCG_MAX_OP_ARGS]) 991{ 992 int width; 993 994 switch (opc) { 995 case INDEX_op_goto_ptr: 996 tcg_out_op_r(s, opc, args[0]); 997 break; 998 999 case INDEX_op_br: 1000 tcg_out_op_l(s, opc, arg_label(args[0])); 1001 break; 1002 1003 case INDEX_op_setcond2_i32: 1004 tcg_out_op_rrrrrc(s, opc, args[0], args[1], args[2], 1005 args[3], args[4], args[5]); 1006 break; 1007 1008 CASE_32_64(ld8u) 1009 CASE_32_64(ld8s) 1010 CASE_32_64(ld16u) 1011 CASE_32_64(ld16s) 1012 case INDEX_op_ld_i32: 1013 CASE_64(ld32u) 1014 CASE_64(ld32s) 1015 CASE_64(ld) 1016 CASE_32_64(st8) 1017 CASE_32_64(st16) 1018 case INDEX_op_st_i32: 1019 CASE_64(st32) 1020 CASE_64(st) 1021 tcg_out_ldst(s, opc, args[0], args[1], args[2]); 1022 break; 1023 1024 CASE_32_64(deposit) 1025 tcg_out_op_rrrbb(s, opc, args[0], args[1], args[2], args[3], args[4]); 1026 break; 1027 1028 CASE_32_64(extract) /* Optional (TCG_TARGET_HAS_extract_*). */ 1029 CASE_32_64(sextract) /* Optional (TCG_TARGET_HAS_sextract_*). */ 1030 tcg_out_op_rrbb(s, opc, args[0], args[1], args[2], args[3]); 1031 break; 1032 1033 case INDEX_op_bswap32_i32: /* Optional (TCG_TARGET_HAS_bswap32_i32). */ 1034 case INDEX_op_bswap64_i64: /* Optional (TCG_TARGET_HAS_bswap64_i64). */ 1035 tcg_out_op_rr(s, opc, args[0], args[1]); 1036 break; 1037 1038 case INDEX_op_bswap16_i32: /* Optional (TCG_TARGET_HAS_bswap16_i32). */ 1039 case INDEX_op_bswap16_i64: /* Optional (TCG_TARGET_HAS_bswap16_i64). 
*/ 1040 width = 16; 1041 goto do_bswap; 1042 case INDEX_op_bswap32_i64: /* Optional (TCG_TARGET_HAS_bswap32_i64). */ 1043 width = 32; 1044 do_bswap: 1045 /* The base tci bswaps zero-extend, and ignore high bits. */ 1046 tcg_out_op_rr(s, opc, args[0], args[1]); 1047 if (args[2] & TCG_BSWAP_OS) { 1048 tcg_out_sextract(s, TCG_TYPE_REG, args[0], args[0], 0, width); 1049 } 1050 break; 1051 1052 CASE_32_64(add2) 1053 CASE_32_64(sub2) 1054 tcg_out_op_rrrrrr(s, opc, args[0], args[1], args[2], 1055 args[3], args[4], args[5]); 1056 break; 1057 1058#if TCG_TARGET_REG_BITS == 32 1059 case INDEX_op_brcond2_i32: 1060 tcg_out_op_rrrrrc(s, INDEX_op_setcond2_i32, TCG_REG_TMP, 1061 args[0], args[1], args[2], args[3], args[4]); 1062 tcg_out_op_rl(s, INDEX_op_brcond, TCG_REG_TMP, arg_label(args[5])); 1063 break; 1064#endif 1065 1066 case INDEX_op_qemu_ld_i64: 1067 case INDEX_op_qemu_st_i64: 1068 if (TCG_TARGET_REG_BITS == 32) { 1069 tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP, args[3]); 1070 tcg_out_op_rrrr(s, opc, args[0], args[1], args[2], TCG_REG_TMP); 1071 break; 1072 } 1073 /* fall through */ 1074 case INDEX_op_qemu_ld_i32: 1075 case INDEX_op_qemu_st_i32: 1076 if (TCG_TARGET_REG_BITS == 64 && s->addr_type == TCG_TYPE_I32) { 1077 tcg_out_ext32u(s, TCG_REG_TMP, args[1]); 1078 tcg_out_op_rrm(s, opc, args[0], TCG_REG_TMP, args[2]); 1079 } else { 1080 tcg_out_op_rrm(s, opc, args[0], args[1], args[2]); 1081 } 1082 break; 1083 1084 case INDEX_op_mb: 1085 tcg_out_op_v(s, opc); 1086 break; 1087 1088 case INDEX_op_call: /* Always emitted via tcg_out_call. */ 1089 case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */ 1090 case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */ 1091 case INDEX_op_ext_i32_i64: /* Always emitted via tcg_reg_alloc_op. 
*/ 1092 case INDEX_op_extu_i32_i64: 1093 case INDEX_op_extrl_i64_i32: 1094 default: 1095 g_assert_not_reached(); 1096 } 1097} 1098 1099static void tcg_out_st(TCGContext *s, TCGType type, TCGReg val, TCGReg base, 1100 intptr_t offset) 1101{ 1102 switch (type) { 1103 case TCG_TYPE_I32: 1104 tcg_out_ldst(s, INDEX_op_st_i32, val, base, offset); 1105 break; 1106#if TCG_TARGET_REG_BITS == 64 1107 case TCG_TYPE_I64: 1108 tcg_out_ldst(s, INDEX_op_st_i64, val, base, offset); 1109 break; 1110#endif 1111 default: 1112 g_assert_not_reached(); 1113 } 1114} 1115 1116static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val, 1117 TCGReg base, intptr_t ofs) 1118{ 1119 return false; 1120} 1121 1122/* Test if a constant matches the constraint. */ 1123static bool tcg_target_const_match(int64_t val, int ct, 1124 TCGType type, TCGCond cond, int vece) 1125{ 1126 return ct & TCG_CT_CONST; 1127} 1128 1129static void tcg_out_nop_fill(tcg_insn_unit *p, int count) 1130{ 1131 memset(p, 0, sizeof(*p) * count); 1132} 1133 1134static void tcg_target_init(TCGContext *s) 1135{ 1136 /* The current code uses uint8_t for tcg operations. */ 1137 tcg_debug_assert(tcg_op_defs_max <= UINT8_MAX); 1138 1139 /* Registers available for 32 bit operations. */ 1140 tcg_target_available_regs[TCG_TYPE_I32] = BIT(TCG_TARGET_NB_REGS) - 1; 1141 /* Registers available for 64 bit operations. */ 1142 tcg_target_available_regs[TCG_TYPE_I64] = BIT(TCG_TARGET_NB_REGS) - 1; 1143 /* 1144 * The interpreter "registers" are in the local stack frame and 1145 * cannot be clobbered by the called helper functions. However, 1146 * the interpreter assumes a 128-bit return value and assigns to 1147 * the return value registers. 
1148 */ 1149 tcg_target_call_clobber_regs = 1150 MAKE_64BIT_MASK(TCG_REG_R0, 128 / TCG_TARGET_REG_BITS); 1151 1152 s->reserved_regs = 0; 1153 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP); 1154 tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK); 1155 1156 /* The call arguments come first, followed by the temp storage. */ 1157 tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE, 1158 TCG_STATIC_FRAME_SIZE); 1159} 1160 1161/* Generate global QEMU prologue and epilogue code. */ 1162static inline void tcg_target_qemu_prologue(TCGContext *s) 1163{ 1164} 1165 1166static void tcg_out_tb_start(TCGContext *s) 1167{ 1168 /* nothing to do */ 1169} 1170 1171bool tcg_target_has_memory_bswap(MemOp memop) 1172{ 1173 return true; 1174} 1175 1176static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) 1177{ 1178 g_assert_not_reached(); 1179} 1180 1181static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) 1182{ 1183 g_assert_not_reached(); 1184} 1185