/*
 * Tiny Code Interpreter for QEMU
 *
 * Copyright (c) 2009, 2011, 2016 Stefan Weil
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "exec/cpu_ldst.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-ldst.h"
#include "qemu/compiler.h"
#include <ffi.h>


/*
 * Enable TCI assertions only when debugging TCG (and without NDEBUG defined).
 * Without assertions, the interpreter runs much faster.
 */
#if defined(CONFIG_DEBUG_TCG)
# define tci_assert(cond) assert(cond)
#else
# define tci_assert(cond) ((void)(cond))
#endif

__thread uintptr_t tci_tb_ptr;

static void tci_write_reg64(tcg_target_ulong *regs, uint32_t high_index,
                            uint32_t low_index, uint64_t value)
{
    regs[low_index] = (uint32_t)value;
    regs[high_index] = value >> 32;
}

/* Create a 64 bit value from two 32 bit values. */
static uint64_t tci_uint64(uint32_t high, uint32_t low)
{
    return ((uint64_t)high << 32) + low;
}

/*
 * Load sets of arguments all at once.  The naming convention is:
 *   tci_args_<arguments>
 * where arguments is a sequence of
 *
 *   b = immediate (bit position)
 *   c = condition (TCGCond)
 *   i = immediate (uint32_t)
 *   I = immediate (tcg_target_ulong)
 *   l = label or pointer
 *   m = immediate (MemOpIdx)
 *   n = immediate (call return length)
 *   r = register
 *   s = signed ldst offset
 */
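
/*
 * Each TCI instruction is a single 32-bit word: the TCGOpcode occupies
 * bits [7:0] and the operands are packed into the remaining 24 bits,
 * registers in 4-bit fields starting at bit 8.  Operands that cannot
 * fit (wide immediates, labels) are reached through a sign-extended
 * 20-bit offset relative to the current tb_ptr.
 */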

static void tci_args_l(uint32_t insn, const void *tb_ptr, void **l0)
{
    int diff = sextract32(insn, 12, 20);
    *l0 = diff ? (void *)tb_ptr + diff : NULL;
}

static void tci_args_r(uint32_t insn, TCGReg *r0)
{
    *r0 = extract32(insn, 8, 4);
}

static void tci_args_nl(uint32_t insn, const void *tb_ptr,
                        uint8_t *n0, void **l1)
{
    *n0 = extract32(insn, 8, 4);
    *l1 = sextract32(insn, 12, 20) + (void *)tb_ptr;
}

static void tci_args_rl(uint32_t insn, const void *tb_ptr,
                        TCGReg *r0, void **l1)
{
    *r0 = extract32(insn, 8, 4);
    *l1 = sextract32(insn, 12, 20) + (void *)tb_ptr;
}

static void tci_args_rr(uint32_t insn, TCGReg *r0, TCGReg *r1)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
}

static void tci_args_ri(uint32_t insn, TCGReg *r0, tcg_target_ulong *i1)
{
    *r0 = extract32(insn, 8, 4);
    *i1 = sextract32(insn, 12, 20);
}

static void tci_args_rrm(uint32_t insn, TCGReg *r0,
                         TCGReg *r1, MemOpIdx *m2)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *m2 = extract32(insn, 20, 12);
}

static void tci_args_rrr(uint32_t insn, TCGReg *r0, TCGReg *r1, TCGReg *r2)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
}

static void tci_args_rrs(uint32_t insn, TCGReg *r0, TCGReg *r1, int32_t *i2)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *i2 = sextract32(insn, 16, 16);
}

static void tci_args_rrbb(uint32_t insn, TCGReg *r0, TCGReg *r1,
                          uint8_t *i2, uint8_t *i3)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *i2 = extract32(insn, 16, 6);
    *i3 = extract32(insn, 22, 6);
}

static void tci_args_rrrc(uint32_t insn,
                          TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGCond *c3)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *c3 = extract32(insn, 20, 4);
}

static void tci_args_rrrm(uint32_t insn,
                          TCGReg *r0, TCGReg *r1, TCGReg *r2, MemOpIdx *m3)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *m3 = extract32(insn, 20, 12);
}

static void tci_args_rrrbb(uint32_t insn, TCGReg *r0, TCGReg *r1,
                           TCGReg *r2, uint8_t *i3, uint8_t *i4)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *i3 = extract32(insn, 20, 6);
    *i4 = extract32(insn, 26, 6);
}

static void tci_args_rrrrr(uint32_t insn, TCGReg *r0, TCGReg *r1,
                           TCGReg *r2, TCGReg *r3, TCGReg *r4)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *r3 = extract32(insn, 20, 4);
    *r4 = extract32(insn, 24, 4);
}

static void tci_args_rrrr(uint32_t insn,
                          TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGReg *r3)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *r3 = extract32(insn, 20, 4);
}

static void tci_args_rrrrrc(uint32_t insn, TCGReg *r0, TCGReg *r1,
                            TCGReg *r2, TCGReg *r3, TCGReg *r4, TCGCond *c5)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *r3 = extract32(insn, 20, 4);
    *r4 = extract32(insn, 24, 4);
    *c5 = extract32(insn, 28, 4);
}

static void tci_args_rrrrrr(uint32_t insn, TCGReg *r0, TCGReg *r1,
                            TCGReg *r2, TCGReg *r3, TCGReg *r4, TCGReg *r5)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *r3 = extract32(insn, 20, 4);
    *r4 = extract32(insn, 24, 4);
    *r5 = extract32(insn, 28, 4);
}
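
/*
 * Evaluate a TCGCond comparison.  The unsigned arguments are
 * reinterpreted as signed for the signed condition codes;
 * TCG_COND_NEVER and TCG_COND_ALWAYS are not expected here and
 * trip the assertion.
 */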

static bool tci_compare32(uint32_t u0, uint32_t u1, TCGCond condition)
{
    bool result = false;
    int32_t i0 = u0;
    int32_t i1 = u1;
    switch (condition) {
    case TCG_COND_EQ:
        result = (u0 == u1);
        break;
    case TCG_COND_NE:
        result = (u0 != u1);
        break;
    case TCG_COND_LT:
        result = (i0 < i1);
        break;
    case TCG_COND_GE:
        result = (i0 >= i1);
        break;
    case TCG_COND_LE:
        result = (i0 <= i1);
        break;
    case TCG_COND_GT:
        result = (i0 > i1);
        break;
    case TCG_COND_LTU:
        result = (u0 < u1);
        break;
    case TCG_COND_GEU:
        result = (u0 >= u1);
        break;
    case TCG_COND_LEU:
        result = (u0 <= u1);
        break;
    case TCG_COND_GTU:
        result = (u0 > u1);
        break;
    default:
        g_assert_not_reached();
    }
    return result;
}

static bool tci_compare64(uint64_t u0, uint64_t u1, TCGCond condition)
{
    bool result = false;
    int64_t i0 = u0;
    int64_t i1 = u1;
    switch (condition) {
    case TCG_COND_EQ:
        result = (u0 == u1);
        break;
    case TCG_COND_NE:
        result = (u0 != u1);
        break;
    case TCG_COND_LT:
        result = (i0 < i1);
        break;
    case TCG_COND_GE:
        result = (i0 >= i1);
        break;
    case TCG_COND_LE:
        result = (i0 <= i1);
        break;
    case TCG_COND_GT:
        result = (i0 > i1);
        break;
    case TCG_COND_LTU:
        result = (u0 < u1);
        break;
    case TCG_COND_GEU:
        result = (u0 >= u1);
        break;
    case TCG_COND_LEU:
        result = (u0 <= u1);
        break;
    case TCG_COND_GTU:
        result = (u0 > u1);
        break;
    default:
        g_assert_not_reached();
    }
    return result;
}
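
/*
 * Guest memory accesses.  With CONFIG_SOFTMMU these defer to the
 * out-of-line MMU helpers, passing the bytecode address as the return
 * address used for unwinding.  In user-only mode the guest address is
 * translated with g2h() and dereferenced directly, with an explicit
 * alignment check in place of the hardware's.
 */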

static uint64_t tci_qemu_ld(CPUArchState *env, target_ulong taddr,
                            MemOpIdx oi, const void *tb_ptr)
{
    MemOp mop = get_memop(oi);
    uintptr_t ra = (uintptr_t)tb_ptr;

#ifdef CONFIG_SOFTMMU
    switch (mop & (MO_BSWAP | MO_SSIZE)) {
    case MO_UB:
        return helper_ret_ldub_mmu(env, taddr, oi, ra);
    case MO_SB:
        return helper_ret_ldsb_mmu(env, taddr, oi, ra);
    case MO_LEUW:
        return helper_le_lduw_mmu(env, taddr, oi, ra);
    case MO_LESW:
        return helper_le_ldsw_mmu(env, taddr, oi, ra);
    case MO_LEUL:
        return helper_le_ldul_mmu(env, taddr, oi, ra);
    case MO_LESL:
        return helper_le_ldsl_mmu(env, taddr, oi, ra);
    case MO_LEUQ:
        return helper_le_ldq_mmu(env, taddr, oi, ra);
    case MO_BEUW:
        return helper_be_lduw_mmu(env, taddr, oi, ra);
    case MO_BESW:
        return helper_be_ldsw_mmu(env, taddr, oi, ra);
    case MO_BEUL:
        return helper_be_ldul_mmu(env, taddr, oi, ra);
    case MO_BESL:
        return helper_be_ldsl_mmu(env, taddr, oi, ra);
    case MO_BEUQ:
        return helper_be_ldq_mmu(env, taddr, oi, ra);
    default:
        g_assert_not_reached();
    }
#else
    void *haddr = g2h(env_cpu(env), taddr);
    unsigned a_mask = (1u << get_alignment_bits(mop)) - 1;
    uint64_t ret;

    set_helper_retaddr(ra);
    if (taddr & a_mask) {
        helper_unaligned_ld(env, taddr);
    }
    switch (mop & (MO_BSWAP | MO_SSIZE)) {
    case MO_UB:
        ret = ldub_p(haddr);
        break;
    case MO_SB:
        ret = ldsb_p(haddr);
        break;
    case MO_LEUW:
        ret = lduw_le_p(haddr);
        break;
    case MO_LESW:
        ret = ldsw_le_p(haddr);
        break;
    case MO_LEUL:
        ret = (uint32_t)ldl_le_p(haddr);
        break;
    case MO_LESL:
        ret = (int32_t)ldl_le_p(haddr);
        break;
    case MO_LEUQ:
        ret = ldq_le_p(haddr);
        break;
    case MO_BEUW:
        ret = lduw_be_p(haddr);
        break;
    case MO_BESW:
        ret = ldsw_be_p(haddr);
        break;
    case MO_BEUL:
        ret = (uint32_t)ldl_be_p(haddr);
        break;
    case MO_BESL:
        ret = (int32_t)ldl_be_p(haddr);
        break;
    case MO_BEUQ:
        ret = ldq_be_p(haddr);
        break;
    default:
        g_assert_not_reached();
    }
    clear_helper_retaddr();
    return ret;
#endif
}

static void tci_qemu_st(CPUArchState *env, target_ulong taddr, uint64_t val,
                        MemOpIdx oi, const void *tb_ptr)
{
    MemOp mop = get_memop(oi);
    uintptr_t ra = (uintptr_t)tb_ptr;

#ifdef CONFIG_SOFTMMU
    switch (mop & (MO_BSWAP | MO_SIZE)) {
    case MO_UB:
        helper_ret_stb_mmu(env, taddr, val, oi, ra);
        break;
    case MO_LEUW:
        helper_le_stw_mmu(env, taddr, val, oi, ra);
        break;
    case MO_LEUL:
        helper_le_stl_mmu(env, taddr, val, oi, ra);
        break;
    case MO_LEUQ:
        helper_le_stq_mmu(env, taddr, val, oi, ra);
        break;
    case MO_BEUW:
        helper_be_stw_mmu(env, taddr, val, oi, ra);
        break;
    case MO_BEUL:
        helper_be_stl_mmu(env, taddr, val, oi, ra);
        break;
    case MO_BEUQ:
        helper_be_stq_mmu(env, taddr, val, oi, ra);
        break;
    default:
        g_assert_not_reached();
    }
#else
    void *haddr = g2h(env_cpu(env), taddr);
    unsigned a_mask = (1u << get_alignment_bits(mop)) - 1;

    set_helper_retaddr(ra);
    if (taddr & a_mask) {
        helper_unaligned_st(env, taddr);
    }
    switch (mop & (MO_BSWAP | MO_SIZE)) {
    case MO_UB:
        stb_p(haddr, val);
        break;
    case MO_LEUW:
        stw_le_p(haddr, val);
        break;
    case MO_LEUL:
        stl_le_p(haddr, val);
        break;
    case MO_LEUQ:
        stq_le_p(haddr, val);
        break;
    case MO_BEUW:
        stw_be_p(haddr, val);
        break;
    case MO_BEUL:
        stl_be_p(haddr, val);
        break;
    case MO_BEUQ:
        stq_be_p(haddr, val);
        break;
    default:
        g_assert_not_reached();
    }
    clear_helper_retaddr();
#endif
}

#if TCG_TARGET_REG_BITS == 64
# define CASE_32_64(x) \
        case glue(glue(INDEX_op_, x), _i64): \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x) \
        case glue(glue(INDEX_op_, x), _i64):
#else
# define CASE_32_64(x) \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x)
#endif
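
/*
 * For example, on a 64-bit host CASE_32_64(add) expands to
 *     case INDEX_op_add_i64:
 *     case INDEX_op_add_i32:
 * so a single interpreter arm serves both operand widths, while
 * CASE_64 expands to nothing on 32-bit hosts.
 */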

/* Interpret pseudo code in tb. */
/*
 * Disable CFI checks.
 * One possible operation in the pseudo code is a call to binary code.
 * Therefore, disable CFI checks in the interpreter function.
 */
uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
                                            const void *v_tb_ptr)
{
    const uint32_t *tb_ptr = v_tb_ptr;
    tcg_target_ulong regs[TCG_TARGET_NB_REGS];
    uint64_t stack[(TCG_STATIC_CALL_ARGS_SIZE + TCG_STATIC_FRAME_SIZE)
                   / sizeof(uint64_t)];
    void *call_slots[TCG_STATIC_CALL_ARGS_SIZE / sizeof(uint64_t)];

    regs[TCG_AREG0] = (tcg_target_ulong)env;
    regs[TCG_REG_CALL_STACK] = (uintptr_t)stack;
    /* Other call_slots entries initialized at first use (see below). */
    call_slots[0] = NULL;
    tci_assert(tb_ptr);

    for (;;) {
        uint32_t insn;
        TCGOpcode opc;
        TCGReg r0, r1, r2, r3, r4, r5;
        tcg_target_ulong t1;
        TCGCond condition;
        target_ulong taddr;
        uint8_t pos, len;
        uint32_t tmp32;
        uint64_t tmp64;
        uint64_t T1, T2;
        MemOpIdx oi;
        int32_t ofs;
        void *ptr;

        insn = *tb_ptr++;
        opc = extract32(insn, 0, 8);

        switch (opc) {
        case INDEX_op_call:
            /*
             * Set up the ffi_avalue array once, delayed until now
             * because many TBs do not make any calls.  In tcg_gen_callN,
             * we arranged for every real argument to be "left-aligned"
             * in each 64-bit slot.
             */
            if (unlikely(call_slots[0] == NULL)) {
                for (int i = 0; i < ARRAY_SIZE(call_slots); ++i) {
                    call_slots[i] = &stack[i];
                }
            }

            tci_args_nl(insn, tb_ptr, &len, &ptr);

            /* Helper functions may need to access the "return address". */
            tci_tb_ptr = (uintptr_t)tb_ptr;

            {
                void **pptr = ptr;
                ffi_call(pptr[1], pptr[0], stack, call_slots);
            }

            /* Any result winds up "left-aligned" in the stack[0] slot. */
            switch (len) {
            case 0: /* void */
                break;
            case 1: /* uint32_t */
                /*
                 * Note that libffi has an odd special case in that it will
                 * always widen an integral result to ffi_arg.
                 */
                if (sizeof(ffi_arg) == 4) {
                    regs[TCG_REG_R0] = *(uint32_t *)stack;
                    break;
                }
                /* fall through */
            case 2: /* uint64_t */
                if (TCG_TARGET_REG_BITS == 32) {
                    tci_write_reg64(regs, TCG_REG_R1, TCG_REG_R0, stack[0]);
                } else {
                    regs[TCG_REG_R0] = stack[0];
                }
                break;
            default:
                g_assert_not_reached();
            }
            break;

        case INDEX_op_br:
            tci_args_l(insn, tb_ptr, &ptr);
            tb_ptr = ptr;
            continue;
        case INDEX_op_setcond_i32:
            tci_args_rrrc(insn, &r0, &r1, &r2, &condition);
            regs[r0] = tci_compare32(regs[r1], regs[r2], condition);
            break;
        case INDEX_op_movcond_i32:
            tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition);
            tmp32 = tci_compare32(regs[r1], regs[r2], condition);
            regs[r0] = regs[tmp32 ? r3 : r4];
            break;
#if TCG_TARGET_REG_BITS == 32
        case INDEX_op_setcond2_i32:
            tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition);
            T1 = tci_uint64(regs[r2], regs[r1]);
            T2 = tci_uint64(regs[r4], regs[r3]);
            regs[r0] = tci_compare64(T1, T2, condition);
            break;
#elif TCG_TARGET_REG_BITS == 64
        case INDEX_op_setcond_i64:
            tci_args_rrrc(insn, &r0, &r1, &r2, &condition);
            regs[r0] = tci_compare64(regs[r1], regs[r2], condition);
            break;
        case INDEX_op_movcond_i64:
            tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition);
            tmp32 = tci_compare64(regs[r1], regs[r2], condition);
            regs[r0] = regs[tmp32 ? r3 : r4];
            break;
#endif
        CASE_32_64(mov)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = regs[r1];
            break;
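
        /*
         * tci_movi carries only the 20-bit sign-extended immediate that
         * fits in the instruction word; wider constants are emitted by
         * the TCI backend as tci_movl, which reads a full
         * tcg_target_ulong stored in the TB and located through a
         * tb_ptr-relative offset.
         */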
        case INDEX_op_tci_movi:
            tci_args_ri(insn, &r0, &t1);
            regs[r0] = t1;
            break;
        case INDEX_op_tci_movl:
            tci_args_rl(insn, tb_ptr, &r0, &ptr);
            regs[r0] = *(tcg_target_ulong *)ptr;
            break;

            /* Load/store operations (mixed 32/64 bit). */

        CASE_32_64(ld8u)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint8_t *)ptr;
            break;
        CASE_32_64(ld8s)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(int8_t *)ptr;
            break;
        CASE_32_64(ld16u)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint16_t *)ptr;
            break;
        CASE_32_64(ld16s)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(int16_t *)ptr;
            break;
        case INDEX_op_ld_i32:
        CASE_64(ld32u)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint32_t *)ptr;
            break;
        CASE_32_64(st8)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint8_t *)ptr = regs[r0];
            break;
        CASE_32_64(st16)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint16_t *)ptr = regs[r0];
            break;
        case INDEX_op_st_i32:
        CASE_64(st32)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint32_t *)ptr = regs[r0];
            break;

            /* Arithmetic operations (mixed 32/64 bit). */

        CASE_32_64(add)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] + regs[r2];
            break;
        CASE_32_64(sub)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] - regs[r2];
            break;
        CASE_32_64(mul)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] * regs[r2];
            break;
        CASE_32_64(and)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] & regs[r2];
            break;
        CASE_32_64(or)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] | regs[r2];
            break;
        CASE_32_64(xor)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] ^ regs[r2];
            break;
#if TCG_TARGET_HAS_andc_i32 || TCG_TARGET_HAS_andc_i64
        CASE_32_64(andc)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] & ~regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_orc_i32 || TCG_TARGET_HAS_orc_i64
        CASE_32_64(orc)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] | ~regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_eqv_i32 || TCG_TARGET_HAS_eqv_i64
        CASE_32_64(eqv)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ~(regs[r1] ^ regs[r2]);
            break;
#endif
#if TCG_TARGET_HAS_nand_i32 || TCG_TARGET_HAS_nand_i64
        CASE_32_64(nand)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ~(regs[r1] & regs[r2]);
            break;
#endif
#if TCG_TARGET_HAS_nor_i32 || TCG_TARGET_HAS_nor_i64
        CASE_32_64(nor)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ~(regs[r1] | regs[r2]);
            break;
#endif

            /* Arithmetic operations (32 bit). */

        case INDEX_op_div_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int32_t)regs[r1] / (int32_t)regs[r2];
            break;
        case INDEX_op_divu_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] / (uint32_t)regs[r2];
            break;
        case INDEX_op_rem_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int32_t)regs[r1] % (int32_t)regs[r2];
            break;
        case INDEX_op_remu_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] % (uint32_t)regs[r2];
            break;
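
        /*
         * For clz/ctz the third operand supplies the result when the
         * input is zero, implementing TCG's defined semantics for a
         * zero input directly.
         */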
#if TCG_TARGET_HAS_clz_i32
        case INDEX_op_clz_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            tmp32 = regs[r1];
            regs[r0] = tmp32 ? clz32(tmp32) : regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_ctz_i32
        case INDEX_op_ctz_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            tmp32 = regs[r1];
            regs[r0] = tmp32 ? ctz32(tmp32) : regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_ctpop_i32
        case INDEX_op_ctpop_i32:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = ctpop32(regs[r1]);
            break;
#endif

            /* Shift/rotate operations (32 bit). */

        case INDEX_op_shl_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] << (regs[r2] & 31);
            break;
        case INDEX_op_shr_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] >> (regs[r2] & 31);
            break;
        case INDEX_op_sar_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int32_t)regs[r1] >> (regs[r2] & 31);
            break;
#if TCG_TARGET_HAS_rot_i32
        case INDEX_op_rotl_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = rol32(regs[r1], regs[r2] & 31);
            break;
        case INDEX_op_rotr_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ror32(regs[r1], regs[r2] & 31);
            break;
#endif
#if TCG_TARGET_HAS_deposit_i32
        case INDEX_op_deposit_i32:
            tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
            regs[r0] = deposit32(regs[r1], pos, len, regs[r2]);
            break;
#endif
#if TCG_TARGET_HAS_extract_i32
        case INDEX_op_extract_i32:
            tci_args_rrbb(insn, &r0, &r1, &pos, &len);
            regs[r0] = extract32(regs[r1], pos, len);
            break;
#endif
#if TCG_TARGET_HAS_sextract_i32
        case INDEX_op_sextract_i32:
            tci_args_rrbb(insn, &r0, &r1, &pos, &len);
            regs[r0] = sextract32(regs[r1], pos, len);
            break;
#endif
        case INDEX_op_brcond_i32:
            tci_args_rl(insn, tb_ptr, &r0, &ptr);
            if ((uint32_t)regs[r0]) {
                tb_ptr = ptr;
            }
            break;
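
        /*
         * The double-word ops below operate on 32-bit register pairs:
         * tci_uint64() assembles each input pair and tci_write_reg64()
         * scatters the 64-bit result back into low and high halves.
         */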
#if TCG_TARGET_REG_BITS == 32 || TCG_TARGET_HAS_add2_i32
        case INDEX_op_add2_i32:
            tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
            T1 = tci_uint64(regs[r3], regs[r2]);
            T2 = tci_uint64(regs[r5], regs[r4]);
            tci_write_reg64(regs, r1, r0, T1 + T2);
            break;
#endif
#if TCG_TARGET_REG_BITS == 32 || TCG_TARGET_HAS_sub2_i32
        case INDEX_op_sub2_i32:
            tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
            T1 = tci_uint64(regs[r3], regs[r2]);
            T2 = tci_uint64(regs[r5], regs[r4]);
            tci_write_reg64(regs, r1, r0, T1 - T2);
            break;
#endif
#if TCG_TARGET_HAS_mulu2_i32
        case INDEX_op_mulu2_i32:
            tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
            tmp64 = (uint64_t)(uint32_t)regs[r2] * (uint32_t)regs[r3];
            tci_write_reg64(regs, r1, r0, tmp64);
            break;
#endif
#if TCG_TARGET_HAS_muls2_i32
        case INDEX_op_muls2_i32:
            tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
            tmp64 = (int64_t)(int32_t)regs[r2] * (int32_t)regs[r3];
            tci_write_reg64(regs, r1, r0, tmp64);
            break;
#endif
#if TCG_TARGET_HAS_ext8s_i32 || TCG_TARGET_HAS_ext8s_i64
        CASE_32_64(ext8s)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (int8_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_ext16s_i32 || TCG_TARGET_HAS_ext16s_i64 || \
    TCG_TARGET_HAS_bswap16_i32 || TCG_TARGET_HAS_bswap16_i64
        CASE_32_64(ext16s)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (int16_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_ext8u_i32 || TCG_TARGET_HAS_ext8u_i64
        CASE_32_64(ext8u)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (uint8_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_ext16u_i32 || TCG_TARGET_HAS_ext16u_i64
        CASE_32_64(ext16u)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (uint16_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_bswap16_i32 || TCG_TARGET_HAS_bswap16_i64
        CASE_32_64(bswap16)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = bswap16(regs[r1]);
            break;
#endif
#if TCG_TARGET_HAS_bswap32_i32 || TCG_TARGET_HAS_bswap32_i64
        CASE_32_64(bswap32)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = bswap32(regs[r1]);
            break;
#endif
#if TCG_TARGET_HAS_not_i32 || TCG_TARGET_HAS_not_i64
        CASE_32_64(not)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = ~regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_neg_i32 || TCG_TARGET_HAS_neg_i64
        CASE_32_64(neg)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = -regs[r1];
            break;
#endif
#if TCG_TARGET_REG_BITS == 64
            /* Load/store operations (64 bit). */

        case INDEX_op_ld32s_i64:
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(int32_t *)ptr;
            break;
        case INDEX_op_ld_i64:
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint64_t *)ptr;
            break;
        case INDEX_op_st_i64:
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint64_t *)ptr = regs[r0];
            break;

            /* Arithmetic operations (64 bit). */

        case INDEX_op_div_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int64_t)regs[r1] / (int64_t)regs[r2];
            break;
        case INDEX_op_divu_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint64_t)regs[r1] / (uint64_t)regs[r2];
            break;
        case INDEX_op_rem_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int64_t)regs[r1] % (int64_t)regs[r2];
            break;
        case INDEX_op_remu_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint64_t)regs[r1] % (uint64_t)regs[r2];
            break;
#if TCG_TARGET_HAS_clz_i64
        case INDEX_op_clz_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] ? clz64(regs[r1]) : regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_ctz_i64
        case INDEX_op_ctz_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] ? ctz64(regs[r1]) : regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_ctpop_i64
        case INDEX_op_ctpop_i64:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = ctpop64(regs[r1]);
            break;
#endif
#if TCG_TARGET_HAS_mulu2_i64
        case INDEX_op_mulu2_i64:
            tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
            mulu64(&regs[r0], &regs[r1], regs[r2], regs[r3]);
            break;
#endif
#if TCG_TARGET_HAS_muls2_i64
        case INDEX_op_muls2_i64:
            tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
            muls64(&regs[r0], &regs[r1], regs[r2], regs[r3]);
            break;
#endif
#if TCG_TARGET_HAS_add2_i64
        case INDEX_op_add2_i64:
            tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
            T1 = regs[r2] + regs[r4];
            T2 = regs[r3] + regs[r5] + (T1 < regs[r2]);
            regs[r0] = T1;
            regs[r1] = T2;
            break;
#endif
#if TCG_TARGET_HAS_sub2_i64
        case INDEX_op_sub2_i64:
            tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
            T1 = regs[r2] - regs[r4];
            T2 = regs[r3] - regs[r5] - (regs[r2] < regs[r4]);
            regs[r0] = T1;
            regs[r1] = T2;
            break;
#endif

            /* Shift/rotate operations (64 bit). */

        case INDEX_op_shl_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] << (regs[r2] & 63);
            break;
        case INDEX_op_shr_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] >> (regs[r2] & 63);
            break;
        case INDEX_op_sar_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int64_t)regs[r1] >> (regs[r2] & 63);
            break;
#if TCG_TARGET_HAS_rot_i64
        case INDEX_op_rotl_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = rol64(regs[r1], regs[r2] & 63);
            break;
        case INDEX_op_rotr_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ror64(regs[r1], regs[r2] & 63);
            break;
#endif
#if TCG_TARGET_HAS_deposit_i64
        case INDEX_op_deposit_i64:
            tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
            regs[r0] = deposit64(regs[r1], pos, len, regs[r2]);
            break;
#endif
#if TCG_TARGET_HAS_extract_i64
        case INDEX_op_extract_i64:
            tci_args_rrbb(insn, &r0, &r1, &pos, &len);
            regs[r0] = extract64(regs[r1], pos, len);
            break;
#endif
#if TCG_TARGET_HAS_sextract_i64
        case INDEX_op_sextract_i64:
            tci_args_rrbb(insn, &r0, &r1, &pos, &len);
            regs[r0] = sextract64(regs[r1], pos, len);
            break;
#endif
        case INDEX_op_brcond_i64:
            tci_args_rl(insn, tb_ptr, &r0, &ptr);
            if (regs[r0]) {
                tb_ptr = ptr;
            }
            break;
        case INDEX_op_ext32s_i64:
        case INDEX_op_ext_i32_i64:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (int32_t)regs[r1];
            break;
        case INDEX_op_ext32u_i64:
        case INDEX_op_extu_i32_i64:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (uint32_t)regs[r1];
            break;
#if TCG_TARGET_HAS_bswap64_i64
        case INDEX_op_bswap64_i64:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = bswap64(regs[r1]);
            break;
#endif
#endif /* TCG_TARGET_REG_BITS == 64 */

            /* QEMU specific operations. */

        case INDEX_op_exit_tb:
            tci_args_l(insn, tb_ptr, &ptr);
            return (uintptr_t)ptr;

        case INDEX_op_goto_tb:
            tci_args_l(insn, tb_ptr, &ptr);
            tb_ptr = *(void **)ptr;
            break;

        case INDEX_op_goto_ptr:
            tci_args_r(insn, &r0);
            ptr = (void *)regs[r0];
            if (!ptr) {
                return 0;
            }
            tb_ptr = ptr;
            break;
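
        /*
         * Guest load/store operand layout varies with the build: the
         * guest address occupies two registers when TARGET_LONG_BITS
         * exceeds the host register width, and a 64-bit value likewise
         * occupies a register pair on 32-bit hosts.  In the widest
         * case the MemOpIdx no longer fits in the instruction word and
         * is taken from a register (regs[r4]) instead.
         */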
        case INDEX_op_qemu_ld_i32:
            if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                taddr = regs[r1];
            } else {
                tci_args_rrrm(insn, &r0, &r1, &r2, &oi);
                taddr = tci_uint64(regs[r2], regs[r1]);
            }
            tmp32 = tci_qemu_ld(env, taddr, oi, tb_ptr);
            regs[r0] = tmp32;
            break;

        case INDEX_op_qemu_ld_i64:
            if (TCG_TARGET_REG_BITS == 64) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                taddr = regs[r1];
            } else if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) {
                tci_args_rrrm(insn, &r0, &r1, &r2, &oi);
                taddr = regs[r2];
            } else {
                tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4);
                taddr = tci_uint64(regs[r3], regs[r2]);
                oi = regs[r4];
            }
            tmp64 = tci_qemu_ld(env, taddr, oi, tb_ptr);
            if (TCG_TARGET_REG_BITS == 32) {
                tci_write_reg64(regs, r1, r0, tmp64);
            } else {
                regs[r0] = tmp64;
            }
            break;

        case INDEX_op_qemu_st_i32:
            if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                taddr = regs[r1];
            } else {
                tci_args_rrrm(insn, &r0, &r1, &r2, &oi);
                taddr = tci_uint64(regs[r2], regs[r1]);
            }
            tmp32 = regs[r0];
            tci_qemu_st(env, taddr, tmp32, oi, tb_ptr);
            break;

        case INDEX_op_qemu_st_i64:
            if (TCG_TARGET_REG_BITS == 64) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                taddr = regs[r1];
                tmp64 = regs[r0];
            } else {
                if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) {
                    tci_args_rrrm(insn, &r0, &r1, &r2, &oi);
                    taddr = regs[r2];
                } else {
                    tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4);
                    taddr = tci_uint64(regs[r3], regs[r2]);
                    oi = regs[r4];
                }
                tmp64 = tci_uint64(regs[r1], regs[r0]);
            }
            tci_qemu_st(env, taddr, tmp64, oi, tb_ptr);
            break;

        case INDEX_op_mb:
            /* Ensure ordering for all kinds */
            smp_mb();
            break;
        default:
            g_assert_not_reached();
        }
    }
}

/*
 * Disassembler that matches the interpreter
 */

static const char *str_r(TCGReg r)
{
    static const char regs[TCG_TARGET_NB_REGS][4] = {
        "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
        "r8", "r9", "r10", "r11", "r12", "r13", "env", "sp"
    };

    QEMU_BUILD_BUG_ON(TCG_AREG0 != TCG_REG_R14);
    QEMU_BUILD_BUG_ON(TCG_REG_CALL_STACK != TCG_REG_R15);

    assert((unsigned)r < TCG_TARGET_NB_REGS);
    return regs[r];
}

static const char *str_c(TCGCond c)
{
    static const char cond[16][8] = {
        [TCG_COND_NEVER] = "never",
        [TCG_COND_ALWAYS] = "always",
        [TCG_COND_EQ] = "eq",
        [TCG_COND_NE] = "ne",
        [TCG_COND_LT] = "lt",
        [TCG_COND_GE] = "ge",
        [TCG_COND_LE] = "le",
        [TCG_COND_GT] = "gt",
        [TCG_COND_LTU] = "ltu",
        [TCG_COND_GEU] = "geu",
        [TCG_COND_LEU] = "leu",
        [TCG_COND_GTU] = "gtu",
    };

    assert((unsigned)c < ARRAY_SIZE(cond));
    assert(cond[c][0] != 0);
    return cond[c];
}

/* Disassemble TCI bytecode. */
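/*
 * Every TCI instruction is exactly one 32-bit word; out-of-line
 * operands are shown via their resolved pointers, so the disassembler
 * always advances by sizeof(insn) bytes.
 */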
int print_insn_tci(bfd_vma addr, disassemble_info *info)
{
    const uint32_t *tb_ptr = (const void *)(uintptr_t)addr;
    const TCGOpDef *def;
    const char *op_name;
    uint32_t insn;
    TCGOpcode op;
    TCGReg r0, r1, r2, r3, r4, r5;
    tcg_target_ulong i1;
    int32_t s2;
    TCGCond c;
    MemOpIdx oi;
    uint8_t pos, len;
    void *ptr;

    /* TCI is always the host, so we don't need to load indirect. */
    insn = *tb_ptr++;

    info->fprintf_func(info->stream, "%08x ", insn);

    op = extract32(insn, 0, 8);
    def = &tcg_op_defs[op];
    op_name = def->name;

    switch (op) {
    case INDEX_op_br:
    case INDEX_op_exit_tb:
    case INDEX_op_goto_tb:
        tci_args_l(insn, tb_ptr, &ptr);
        info->fprintf_func(info->stream, "%-12s %p", op_name, ptr);
        break;

    case INDEX_op_goto_ptr:
        tci_args_r(insn, &r0);
        info->fprintf_func(info->stream, "%-12s %s", op_name, str_r(r0));
        break;

    case INDEX_op_call:
        tci_args_nl(insn, tb_ptr, &len, &ptr);
        info->fprintf_func(info->stream, "%-12s %d, %p", op_name, len, ptr);
        break;

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        tci_args_rl(insn, tb_ptr, &r0, &ptr);
        info->fprintf_func(info->stream, "%-12s %s, 0, ne, %p",
                           op_name, str_r(r0), ptr);
        break;

    case INDEX_op_setcond_i32:
    case INDEX_op_setcond_i64:
        tci_args_rrrc(insn, &r0, &r1, &r2, &c);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s",
                           op_name, str_r(r0), str_r(r1), str_r(r2), str_c(c));
        break;

    case INDEX_op_tci_movi:
        tci_args_ri(insn, &r0, &i1);
        info->fprintf_func(info->stream, "%-12s %s, 0x%" TCG_PRIlx,
                           op_name, str_r(r0), i1);
        break;

    case INDEX_op_tci_movl:
        tci_args_rl(insn, tb_ptr, &r0, &ptr);
        info->fprintf_func(info->stream, "%-12s %s, %p",
                           op_name, str_r(r0), ptr);
        break;

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i32:
    case INDEX_op_ld_i64:
    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i32:
    case INDEX_op_st_i64:
        tci_args_rrs(insn, &r0, &r1, &s2);
        info->fprintf_func(info->stream, "%-12s %s, %s, %d",
                           op_name, str_r(r0), str_r(r1), s2);
        break;

    case INDEX_op_mov_i32:
    case INDEX_op_mov_i64:
    case INDEX_op_ext8s_i32:
    case INDEX_op_ext8s_i64:
    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
    case INDEX_op_ext16u_i32:
    case INDEX_op_ext32s_i64:
    case INDEX_op_ext32u_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap16_i64:
    case INDEX_op_bswap32_i32:
    case INDEX_op_bswap32_i64:
    case INDEX_op_bswap64_i64:
    case INDEX_op_not_i32:
    case INDEX_op_not_i64:
    case INDEX_op_neg_i32:
    case INDEX_op_neg_i64:
    case INDEX_op_ctpop_i32:
    case INDEX_op_ctpop_i64:
        tci_args_rr(insn, &r0, &r1);
        info->fprintf_func(info->stream, "%-12s %s, %s",
                           op_name, str_r(r0), str_r(r1));
        break;

    case INDEX_op_add_i32:
    case INDEX_op_add_i64:
    case INDEX_op_sub_i32:
    case INDEX_op_sub_i64:
    case INDEX_op_mul_i32:
    case INDEX_op_mul_i64:
    case INDEX_op_and_i32:
    case INDEX_op_and_i64:
    case INDEX_op_or_i32:
    case INDEX_op_or_i64:
    case INDEX_op_xor_i32:
    case INDEX_op_xor_i64:
    case INDEX_op_andc_i32:
    case INDEX_op_andc_i64:
    case INDEX_op_orc_i32:
    case INDEX_op_orc_i64:
    case INDEX_op_eqv_i32:
    case INDEX_op_eqv_i64:
    case INDEX_op_nand_i32:
    case INDEX_op_nand_i64:
    case INDEX_op_nor_i32:
    case INDEX_op_nor_i64:
    case INDEX_op_div_i32:
    case INDEX_op_div_i64:
    case INDEX_op_rem_i32:
    case INDEX_op_rem_i64:
    case INDEX_op_divu_i32:
    case INDEX_op_divu_i64:
    case INDEX_op_remu_i32:
    case INDEX_op_remu_i64:
    case INDEX_op_shl_i32:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i32:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i32:
    case INDEX_op_sar_i64:
    case INDEX_op_rotl_i32:
    case INDEX_op_rotl_i64:
    case INDEX_op_rotr_i32:
    case INDEX_op_rotr_i64:
    case INDEX_op_clz_i32:
    case INDEX_op_clz_i64:
    case INDEX_op_ctz_i32:
    case INDEX_op_ctz_i64:
        tci_args_rrr(insn, &r0, &r1, &r2);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s",
                           op_name, str_r(r0), str_r(r1), str_r(r2));
        break;

    case INDEX_op_deposit_i32:
    case INDEX_op_deposit_i64:
        tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %d, %d",
                           op_name, str_r(r0), str_r(r1), str_r(r2), pos, len);
        break;

    case INDEX_op_extract_i32:
    case INDEX_op_extract_i64:
    case INDEX_op_sextract_i32:
    case INDEX_op_sextract_i64:
        tci_args_rrbb(insn, &r0, &r1, &pos, &len);
        info->fprintf_func(info->stream, "%-12s %s, %s, %d, %d",
                           op_name, str_r(r0), str_r(r1), pos, len);
        break;

    case INDEX_op_movcond_i32:
    case INDEX_op_movcond_i64:
    case INDEX_op_setcond2_i32:
        tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &c);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s, %s",
                           op_name, str_r(r0), str_r(r1), str_r(r2),
                           str_r(r3), str_r(r4), str_c(c));
        break;

    case INDEX_op_mulu2_i32:
    case INDEX_op_mulu2_i64:
    case INDEX_op_muls2_i32:
    case INDEX_op_muls2_i64:
        tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s",
                           op_name, str_r(r0), str_r(r1),
                           str_r(r2), str_r(r3));
        break;

    case INDEX_op_add2_i32:
    case INDEX_op_add2_i64:
    case INDEX_op_sub2_i32:
    case INDEX_op_sub2_i64:
        tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s, %s",
                           op_name, str_r(r0), str_r(r1), str_r(r2),
                           str_r(r3), str_r(r4), str_r(r5));
        break;
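
    /*
     * The qemu_ld/st operand count mirrors the interpreter's decoding:
     * with two or three registers the MemOpIdx is printed as the
     * trailing immediate, while the widest form keeps it in a register
     * (cf. the interpreter's use of regs[r4]) and prints only
     * register names.
     */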
    case INDEX_op_qemu_ld_i64:
    case INDEX_op_qemu_st_i64:
        len = DIV_ROUND_UP(64, TCG_TARGET_REG_BITS);
        goto do_qemu_ldst;
    case INDEX_op_qemu_ld_i32:
    case INDEX_op_qemu_st_i32:
        len = 1;
    do_qemu_ldst:
        len += DIV_ROUND_UP(TARGET_LONG_BITS, TCG_TARGET_REG_BITS);
        switch (len) {
        case 2:
            tci_args_rrm(insn, &r0, &r1, &oi);
            info->fprintf_func(info->stream, "%-12s %s, %s, %x",
                               op_name, str_r(r0), str_r(r1), oi);
            break;
        case 3:
            tci_args_rrrm(insn, &r0, &r1, &r2, &oi);
            info->fprintf_func(info->stream, "%-12s %s, %s, %s, %x",
                               op_name, str_r(r0), str_r(r1), str_r(r2), oi);
            break;
        case 4:
            tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4);
            info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s",
                               op_name, str_r(r0), str_r(r1),
                               str_r(r2), str_r(r3), str_r(r4));
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case 0:
        /* tcg_out_nop_fill uses zeros */
        if (insn == 0) {
            info->fprintf_func(info->stream, "align");
            break;
        }
        /* fall through */

    default:
        info->fprintf_func(info->stream, "illegal opcode %d", op);
        break;
    }

    return sizeof(insn);
}