/*
 * Tiny Code Interpreter for QEMU
 *
 * Copyright (c) 2009, 2011, 2016 Stefan Weil
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "exec/cpu_ldst.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-ldst.h"
#include <ffi.h>


/*
 * Enable TCI assertions only when debugging TCG (and without NDEBUG defined).
 * Without assertions, the interpreter runs much faster.
 */
#if defined(CONFIG_DEBUG_TCG)
# define tci_assert(cond) assert(cond)
#else
# define tci_assert(cond) ((void)(cond))
#endif

__thread uintptr_t tci_tb_ptr;

static void tci_write_reg64(tcg_target_ulong *regs, uint32_t high_index,
                            uint32_t low_index, uint64_t value)
{
    regs[low_index] = (uint32_t)value;
    regs[high_index] = value >> 32;
}

/* Create a 64 bit value from two 32 bit values. */
static uint64_t tci_uint64(uint32_t high, uint32_t low)
{
    return ((uint64_t)high << 32) + low;
}

/*
 * Load sets of arguments all at once.  The naming convention is:
 *   tci_args_<arguments>
 * where arguments is a sequence of
 *
 *   b = immediate (bit position)
 *   c = condition (TCGCond)
 *   i = immediate (uint32_t)
 *   I = immediate (tcg_target_ulong)
 *   l = label or pointer
 *   m = immediate (MemOpIdx)
 *   n = immediate (call return length)
 *   r = register
 *   s = signed ldst offset
 */

static void tci_args_l(uint32_t insn, const void *tb_ptr, void **l0)
{
    int diff = sextract32(insn, 12, 20);

    *l0 = diff ? (void *)tb_ptr + diff : NULL;
}
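
/*
 * For illustration (hypothetical values, not a real TCGOpcode): every
 * TCI instruction is one 32-bit word with the opcode in bits [7:0] and
 * operands packed above it.  A word 0x00032160 would decode through
 * tci_args_rrr() as opcode 0x60 with r0 = 1, r1 = 2, r2 = 3.
 */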

static void tci_args_r(uint32_t insn, TCGReg *r0)
{
    *r0 = extract32(insn, 8, 4);
}

static void tci_args_nl(uint32_t insn, const void *tb_ptr,
                        uint8_t *n0, void **l1)
{
    *n0 = extract32(insn, 8, 4);
    *l1 = sextract32(insn, 12, 20) + (void *)tb_ptr;
}

static void tci_args_rl(uint32_t insn, const void *tb_ptr,
                        TCGReg *r0, void **l1)
{
    *r0 = extract32(insn, 8, 4);
    *l1 = sextract32(insn, 12, 20) + (void *)tb_ptr;
}

static void tci_args_rr(uint32_t insn, TCGReg *r0, TCGReg *r1)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
}

static void tci_args_ri(uint32_t insn, TCGReg *r0, tcg_target_ulong *i1)
{
    *r0 = extract32(insn, 8, 4);
    *i1 = sextract32(insn, 12, 20);
}

static void tci_args_rrm(uint32_t insn, TCGReg *r0,
                         TCGReg *r1, MemOpIdx *m2)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *m2 = extract32(insn, 20, 12);
}

static void tci_args_rrr(uint32_t insn, TCGReg *r0, TCGReg *r1, TCGReg *r2)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
}

static void tci_args_rrs(uint32_t insn, TCGReg *r0, TCGReg *r1, int32_t *i2)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *i2 = sextract32(insn, 16, 16);
}

static void tci_args_rrbb(uint32_t insn, TCGReg *r0, TCGReg *r1,
                          uint8_t *i2, uint8_t *i3)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *i2 = extract32(insn, 16, 6);
    *i3 = extract32(insn, 22, 6);
}

static void tci_args_rrrc(uint32_t insn,
                          TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGCond *c3)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *c3 = extract32(insn, 20, 4);
}

static void tci_args_rrrm(uint32_t insn,
                          TCGReg *r0, TCGReg *r1, TCGReg *r2, MemOpIdx *m3)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *m3 = extract32(insn, 20, 12);
}

static void tci_args_rrrbb(uint32_t insn, TCGReg *r0, TCGReg *r1,
                           TCGReg *r2, uint8_t *i3, uint8_t *i4)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *i3 = extract32(insn, 20, 6);
    *i4 = extract32(insn, 26, 6);
}

static void tci_args_rrrrr(uint32_t insn, TCGReg *r0, TCGReg *r1,
                           TCGReg *r2, TCGReg *r3, TCGReg *r4)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *r3 = extract32(insn, 20, 4);
    *r4 = extract32(insn, 24, 4);
}

static void tci_args_rrrr(uint32_t insn,
                          TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGReg *r3)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *r3 = extract32(insn, 20, 4);
}

static void tci_args_rrrrrc(uint32_t insn, TCGReg *r0, TCGReg *r1,
                            TCGReg *r2, TCGReg *r3, TCGReg *r4, TCGCond *c5)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *r3 = extract32(insn, 20, 4);
    *r4 = extract32(insn, 24, 4);
    *c5 = extract32(insn, 28, 4);
}

static void tci_args_rrrrrr(uint32_t insn, TCGReg *r0, TCGReg *r1,
                            TCGReg *r2, TCGReg *r3, TCGReg *r4, TCGReg *r5)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *r3 = extract32(insn, 20, 4);
    *r4 = extract32(insn, 24, 4);
    *r5 = extract32(insn, 28, 4);
}
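
/*
 * The comparison helpers below evaluate a TCGCond against two register
 * values.  Signed predicates reinterpret the same bits as int32_t/int64_t:
 * e.g. tci_compare32(0xffffffff, 0, TCG_COND_LT) is true (-1 < 0), while
 * TCG_COND_LTU on the same operands is false.
 */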

static bool tci_compare32(uint32_t u0, uint32_t u1, TCGCond condition)
{
    bool result = false;
    int32_t i0 = u0;
    int32_t i1 = u1;
    switch (condition) {
    case TCG_COND_EQ:
        result = (u0 == u1);
        break;
    case TCG_COND_NE:
        result = (u0 != u1);
        break;
    case TCG_COND_LT:
        result = (i0 < i1);
        break;
    case TCG_COND_GE:
        result = (i0 >= i1);
        break;
    case TCG_COND_LE:
        result = (i0 <= i1);
        break;
    case TCG_COND_GT:
        result = (i0 > i1);
        break;
    case TCG_COND_LTU:
        result = (u0 < u1);
        break;
    case TCG_COND_GEU:
        result = (u0 >= u1);
        break;
    case TCG_COND_LEU:
        result = (u0 <= u1);
        break;
    case TCG_COND_GTU:
        result = (u0 > u1);
        break;
    default:
        g_assert_not_reached();
    }
    return result;
}

static bool tci_compare64(uint64_t u0, uint64_t u1, TCGCond condition)
{
    bool result = false;
    int64_t i0 = u0;
    int64_t i1 = u1;
    switch (condition) {
    case TCG_COND_EQ:
        result = (u0 == u1);
        break;
    case TCG_COND_NE:
        result = (u0 != u1);
        break;
    case TCG_COND_LT:
        result = (i0 < i1);
        break;
    case TCG_COND_GE:
        result = (i0 >= i1);
        break;
    case TCG_COND_LE:
        result = (i0 <= i1);
        break;
    case TCG_COND_GT:
        result = (i0 > i1);
        break;
    case TCG_COND_LTU:
        result = (u0 < u1);
        break;
    case TCG_COND_GEU:
        result = (u0 >= u1);
        break;
    case TCG_COND_LEU:
        result = (u0 <= u1);
        break;
    case TCG_COND_GTU:
        result = (u0 > u1);
        break;
    default:
        g_assert_not_reached();
    }
    return result;
}
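
/*
 * Guest memory accesses take one of two paths: with CONFIG_SOFTMMU the
 * access goes through the out-of-line TLB helpers (helper_*_mmu), which
 * handle TLB refill and byte order; in user-mode emulation the guest
 * address maps directly to host memory via g2h(), with only an alignment
 * check performed here.
 */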

static uint64_t tci_qemu_ld(CPUArchState *env, target_ulong taddr,
                            MemOpIdx oi, const void *tb_ptr)
{
    MemOp mop = get_memop(oi);
    uintptr_t ra = (uintptr_t)tb_ptr;

#ifdef CONFIG_SOFTMMU
    switch (mop & (MO_BSWAP | MO_SSIZE)) {
    case MO_UB:
        return helper_ret_ldub_mmu(env, taddr, oi, ra);
    case MO_SB:
        return helper_ret_ldsb_mmu(env, taddr, oi, ra);
    case MO_LEUW:
        return helper_le_lduw_mmu(env, taddr, oi, ra);
    case MO_LESW:
        return helper_le_ldsw_mmu(env, taddr, oi, ra);
    case MO_LEUL:
        return helper_le_ldul_mmu(env, taddr, oi, ra);
    case MO_LESL:
        return helper_le_ldsl_mmu(env, taddr, oi, ra);
    case MO_LEUQ:
        return helper_le_ldq_mmu(env, taddr, oi, ra);
    case MO_BEUW:
        return helper_be_lduw_mmu(env, taddr, oi, ra);
    case MO_BESW:
        return helper_be_ldsw_mmu(env, taddr, oi, ra);
    case MO_BEUL:
        return helper_be_ldul_mmu(env, taddr, oi, ra);
    case MO_BESL:
        return helper_be_ldsl_mmu(env, taddr, oi, ra);
    case MO_BEUQ:
        return helper_be_ldq_mmu(env, taddr, oi, ra);
    default:
        g_assert_not_reached();
    }
#else
    void *haddr = g2h(env_cpu(env), taddr);
    unsigned a_mask = (1u << get_alignment_bits(mop)) - 1;
    uint64_t ret;

    set_helper_retaddr(ra);
    if (taddr & a_mask) {
        helper_unaligned_ld(env, taddr);
    }
    switch (mop & (MO_BSWAP | MO_SSIZE)) {
    case MO_UB:
        ret = ldub_p(haddr);
        break;
    case MO_SB:
        ret = ldsb_p(haddr);
        break;
    case MO_LEUW:
        ret = lduw_le_p(haddr);
        break;
    case MO_LESW:
        ret = ldsw_le_p(haddr);
        break;
    case MO_LEUL:
        ret = (uint32_t)ldl_le_p(haddr);
        break;
    case MO_LESL:
        ret = (int32_t)ldl_le_p(haddr);
        break;
    case MO_LEUQ:
        ret = ldq_le_p(haddr);
        break;
    case MO_BEUW:
        ret = lduw_be_p(haddr);
        break;
    case MO_BESW:
        ret = ldsw_be_p(haddr);
        break;
    case MO_BEUL:
        ret = (uint32_t)ldl_be_p(haddr);
        break;
    case MO_BESL:
        ret = (int32_t)ldl_be_p(haddr);
        break;
    case MO_BEUQ:
        ret = ldq_be_p(haddr);
        break;
    default:
        g_assert_not_reached();
    }
    clear_helper_retaddr();
    return ret;
#endif
}

static void tci_qemu_st(CPUArchState *env, target_ulong taddr, uint64_t val,
                        MemOpIdx oi, const void *tb_ptr)
{
    MemOp mop = get_memop(oi);
    uintptr_t ra = (uintptr_t)tb_ptr;

#ifdef CONFIG_SOFTMMU
    switch (mop & (MO_BSWAP | MO_SIZE)) {
    case MO_UB:
        helper_ret_stb_mmu(env, taddr, val, oi, ra);
        break;
    case MO_LEUW:
        helper_le_stw_mmu(env, taddr, val, oi, ra);
        break;
    case MO_LEUL:
        helper_le_stl_mmu(env, taddr, val, oi, ra);
        break;
    case MO_LEUQ:
        helper_le_stq_mmu(env, taddr, val, oi, ra);
        break;
    case MO_BEUW:
        helper_be_stw_mmu(env, taddr, val, oi, ra);
        break;
    case MO_BEUL:
        helper_be_stl_mmu(env, taddr, val, oi, ra);
        break;
    case MO_BEUQ:
        helper_be_stq_mmu(env, taddr, val, oi, ra);
        break;
    default:
        g_assert_not_reached();
    }
#else
    void *haddr = g2h(env_cpu(env), taddr);
    unsigned a_mask = (1u << get_alignment_bits(mop)) - 1;

    set_helper_retaddr(ra);
    if (taddr & a_mask) {
        helper_unaligned_st(env, taddr);
    }
    switch (mop & (MO_BSWAP | MO_SIZE)) {
    case MO_UB:
        stb_p(haddr, val);
        break;
    case MO_LEUW:
        stw_le_p(haddr, val);
        break;
    case MO_LEUL:
        stl_le_p(haddr, val);
        break;
    case MO_LEUQ:
        stq_le_p(haddr, val);
        break;
    case MO_BEUW:
        stw_be_p(haddr, val);
        break;
    case MO_BEUL:
        stl_be_p(haddr, val);
        break;
    case MO_BEUQ:
        stq_be_p(haddr, val);
        break;
    default:
        g_assert_not_reached();
    }
    clear_helper_retaddr();
#endif
}

#if TCG_TARGET_REG_BITS == 64
# define CASE_32_64(x)                          \
        case glue(glue(INDEX_op_, x), _i64):    \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x)                             \
        case glue(glue(INDEX_op_, x), _i64):
#else
# define CASE_32_64(x)                          \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x)
#endif
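
/*
 * For example, on a 64-bit host CASE_32_64(add) expands to
 *     case INDEX_op_add_i64:
 *     case INDEX_op_add_i32:
 * so a single interpreter arm serves both operation widths.
 */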

/* Interpret pseudo code in tb. */
/*
 * Disable CFI checks.
 * One possible operation in the pseudo code is a call to binary code.
 * Therefore, disable CFI checks in the interpreter function.
 */
uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
                                            const void *v_tb_ptr)
{
    const uint32_t *tb_ptr = v_tb_ptr;
    tcg_target_ulong regs[TCG_TARGET_NB_REGS];
    uint64_t stack[(TCG_STATIC_CALL_ARGS_SIZE + TCG_STATIC_FRAME_SIZE)
                   / sizeof(uint64_t)];

    regs[TCG_AREG0] = (tcg_target_ulong)env;
    regs[TCG_REG_CALL_STACK] = (uintptr_t)stack;
    tci_assert(tb_ptr);

    for (;;) {
        uint32_t insn;
        TCGOpcode opc;
        TCGReg r0, r1, r2, r3, r4, r5;
        tcg_target_ulong t1;
        TCGCond condition;
        target_ulong taddr;
        uint8_t pos, len;
        uint32_t tmp32;
        uint64_t tmp64;
        uint64_t T1, T2;
        MemOpIdx oi;
        int32_t ofs;
        void *ptr;

        insn = *tb_ptr++;
        opc = extract32(insn, 0, 8);

        switch (opc) {
        case INDEX_op_call:
            {
                void *call_slots[MAX_CALL_IARGS];
                ffi_cif *cif;
                void *func;
                unsigned i, s, n;

                tci_args_nl(insn, tb_ptr, &len, &ptr);
                func = ((void **)ptr)[0];
                cif = ((void **)ptr)[1];

                n = cif->nargs;
                for (i = s = 0; i < n; ++i) {
                    ffi_type *t = cif->arg_types[i];
                    call_slots[i] = &stack[s];
                    s += DIV_ROUND_UP(t->size, 8);
                }

                /* Helper functions may need to access the "return address" */
                tci_tb_ptr = (uintptr_t)tb_ptr;
                ffi_call(cif, func, stack, call_slots);
            }

            switch (len) {
            case 0: /* void */
                break;
            case 1: /* uint32_t */
                /*
                 * The result winds up "left-aligned" in the stack[0] slot.
                 * Note that libffi has an odd special case in that it will
                 * always widen an integral result to ffi_arg.
                 */
                if (sizeof(ffi_arg) == 8) {
                    regs[TCG_REG_R0] = (uint32_t)stack[0];
                } else {
                    regs[TCG_REG_R0] = *(uint32_t *)stack;
                }
                break;
            case 2: /* uint64_t */
                /*
                 * For TCG_TARGET_REG_BITS == 32, the register pair
                 * must stay in host memory order.
                 */
                memcpy(&regs[TCG_REG_R0], stack, 8);
                break;
            case 3: /* Int128 */
                memcpy(&regs[TCG_REG_R0], stack, 16);
                break;
            default:
                g_assert_not_reached();
            }
            break;

        case INDEX_op_br:
            tci_args_l(insn, tb_ptr, &ptr);
            tb_ptr = ptr;
            continue;
        case INDEX_op_setcond_i32:
            tci_args_rrrc(insn, &r0, &r1, &r2, &condition);
            regs[r0] = tci_compare32(regs[r1], regs[r2], condition);
            break;
        case INDEX_op_movcond_i32:
            tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition);
            tmp32 = tci_compare32(regs[r1], regs[r2], condition);
            regs[r0] = regs[tmp32 ? r3 : r4];
            break;
#if TCG_TARGET_REG_BITS == 32
        case INDEX_op_setcond2_i32:
            tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition);
            T1 = tci_uint64(regs[r2], regs[r1]);
            T2 = tci_uint64(regs[r4], regs[r3]);
            regs[r0] = tci_compare64(T1, T2, condition);
            break;
#elif TCG_TARGET_REG_BITS == 64
        case INDEX_op_setcond_i64:
            tci_args_rrrc(insn, &r0, &r1, &r2, &condition);
            regs[r0] = tci_compare64(regs[r1], regs[r2], condition);
            break;
        case INDEX_op_movcond_i64:
            tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition);
            tmp32 = tci_compare64(regs[r1], regs[r2], condition);
            regs[r0] = regs[tmp32 ? r3 : r4];
            break;
#endif
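
        /*
         * Note: with TCG_TARGET_REG_BITS == 32, 64-bit values live in
         * pairs of 32-bit registers.  The first register of a pair holds
         * the low word; tci_uint64() and tci_write_reg64() assemble and
         * split such pairs.
         */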
        CASE_32_64(mov)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = regs[r1];
            break;
        case INDEX_op_tci_movi:
            tci_args_ri(insn, &r0, &t1);
            regs[r0] = t1;
            break;
        case INDEX_op_tci_movl:
            tci_args_rl(insn, tb_ptr, &r0, &ptr);
            regs[r0] = *(tcg_target_ulong *)ptr;
            break;

        /* Load/store operations (32 bit). */

        CASE_32_64(ld8u)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint8_t *)ptr;
            break;
        CASE_32_64(ld8s)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(int8_t *)ptr;
            break;
        CASE_32_64(ld16u)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint16_t *)ptr;
            break;
        CASE_32_64(ld16s)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(int16_t *)ptr;
            break;
        case INDEX_op_ld_i32:
        CASE_64(ld32u)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint32_t *)ptr;
            break;
        CASE_32_64(st8)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint8_t *)ptr = regs[r0];
            break;
        CASE_32_64(st16)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint16_t *)ptr = regs[r0];
            break;
        case INDEX_op_st_i32:
        CASE_64(st32)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint32_t *)ptr = regs[r0];
            break;

        /* Arithmetic operations (mixed 32/64 bit). */

        CASE_32_64(add)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] + regs[r2];
            break;
        CASE_32_64(sub)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] - regs[r2];
            break;
        CASE_32_64(mul)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] * regs[r2];
            break;
        CASE_32_64(and)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] & regs[r2];
            break;
        CASE_32_64(or)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] | regs[r2];
            break;
        CASE_32_64(xor)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] ^ regs[r2];
            break;
#if TCG_TARGET_HAS_andc_i32 || TCG_TARGET_HAS_andc_i64
        CASE_32_64(andc)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] & ~regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_orc_i32 || TCG_TARGET_HAS_orc_i64
        CASE_32_64(orc)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] | ~regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_eqv_i32 || TCG_TARGET_HAS_eqv_i64
        CASE_32_64(eqv)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ~(regs[r1] ^ regs[r2]);
            break;
#endif
#if TCG_TARGET_HAS_nand_i32 || TCG_TARGET_HAS_nand_i64
        CASE_32_64(nand)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ~(regs[r1] & regs[r2]);
            break;
#endif
#if TCG_TARGET_HAS_nor_i32 || TCG_TARGET_HAS_nor_i64
        CASE_32_64(nor)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ~(regs[r1] | regs[r2]);
            break;
#endif

        /* Arithmetic operations (32 bit). */
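
        /*
         * In the division and remainder cases below, signedness is chosen
         * purely by casting: regs[] holds unsigned values, so e.g.
         * (int32_t)regs[r1] reinterprets the low 32 bits as signed.
         */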

        case INDEX_op_div_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int32_t)regs[r1] / (int32_t)regs[r2];
            break;
        case INDEX_op_divu_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] / (uint32_t)regs[r2];
            break;
        case INDEX_op_rem_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int32_t)regs[r1] % (int32_t)regs[r2];
            break;
        case INDEX_op_remu_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] % (uint32_t)regs[r2];
            break;
#if TCG_TARGET_HAS_clz_i32
        case INDEX_op_clz_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            tmp32 = regs[r1];
            regs[r0] = tmp32 ? clz32(tmp32) : regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_ctz_i32
        case INDEX_op_ctz_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            tmp32 = regs[r1];
            regs[r0] = tmp32 ? ctz32(tmp32) : regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_ctpop_i32
        case INDEX_op_ctpop_i32:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = ctpop32(regs[r1]);
            break;
#endif

        /* Shift/rotate operations (32 bit). */

        case INDEX_op_shl_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] << (regs[r2] & 31);
            break;
        case INDEX_op_shr_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] >> (regs[r2] & 31);
            break;
        case INDEX_op_sar_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int32_t)regs[r1] >> (regs[r2] & 31);
            break;
#if TCG_TARGET_HAS_rot_i32
        case INDEX_op_rotl_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = rol32(regs[r1], regs[r2] & 31);
            break;
        case INDEX_op_rotr_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ror32(regs[r1], regs[r2] & 31);
            break;
#endif
#if TCG_TARGET_HAS_deposit_i32
        case INDEX_op_deposit_i32:
            tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
            regs[r0] = deposit32(regs[r1], pos, len, regs[r2]);
            break;
#endif
#if TCG_TARGET_HAS_extract_i32
        case INDEX_op_extract_i32:
            tci_args_rrbb(insn, &r0, &r1, &pos, &len);
            regs[r0] = extract32(regs[r1], pos, len);
            break;
#endif
#if TCG_TARGET_HAS_sextract_i32
        case INDEX_op_sextract_i32:
            tci_args_rrbb(insn, &r0, &r1, &pos, &len);
            regs[r0] = sextract32(regs[r1], pos, len);
            break;
#endif
        case INDEX_op_brcond_i32:
            tci_args_rl(insn, tb_ptr, &r0, &ptr);
            if ((uint32_t)regs[r0]) {
                tb_ptr = ptr;
            }
            break;
#if TCG_TARGET_REG_BITS == 32 || TCG_TARGET_HAS_add2_i32
        case INDEX_op_add2_i32:
            tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
            T1 = tci_uint64(regs[r3], regs[r2]);
            T2 = tci_uint64(regs[r5], regs[r4]);
            tci_write_reg64(regs, r1, r0, T1 + T2);
            break;
#endif
#if TCG_TARGET_REG_BITS == 32 || TCG_TARGET_HAS_sub2_i32
        case INDEX_op_sub2_i32:
            tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
            T1 = tci_uint64(regs[r3], regs[r2]);
            T2 = tci_uint64(regs[r5], regs[r4]);
            tci_write_reg64(regs, r1, r0, T1 - T2);
            break;
#endif
#if TCG_TARGET_HAS_mulu2_i32
        case INDEX_op_mulu2_i32:
            tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
            tmp64 = (uint64_t)(uint32_t)regs[r2] * (uint32_t)regs[r3];
            tci_write_reg64(regs, r1, r0, tmp64);
            break;
#endif
#if TCG_TARGET_HAS_muls2_i32
        case INDEX_op_muls2_i32:
            tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
            tmp64 = (int64_t)(int32_t)regs[r2] * (int32_t)regs[r3];
            tci_write_reg64(regs, r1, r0, tmp64);
            break;
#endif
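
        /*
         * The extension ops below work by value conversion: cast to the
         * narrow type, then let the assignment back into the full-width
         * register perform the zero or sign extension.
         */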
#if TCG_TARGET_HAS_ext8s_i32 || TCG_TARGET_HAS_ext8s_i64
        CASE_32_64(ext8s)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (int8_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_ext16s_i32 || TCG_TARGET_HAS_ext16s_i64 || \
    TCG_TARGET_HAS_bswap16_i32 || TCG_TARGET_HAS_bswap16_i64
        CASE_32_64(ext16s)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (int16_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_ext8u_i32 || TCG_TARGET_HAS_ext8u_i64
        CASE_32_64(ext8u)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (uint8_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_ext16u_i32 || TCG_TARGET_HAS_ext16u_i64
        CASE_32_64(ext16u)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (uint16_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_bswap16_i32 || TCG_TARGET_HAS_bswap16_i64
        CASE_32_64(bswap16)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = bswap16(regs[r1]);
            break;
#endif
#if TCG_TARGET_HAS_bswap32_i32 || TCG_TARGET_HAS_bswap32_i64
        CASE_32_64(bswap32)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = bswap32(regs[r1]);
            break;
#endif
#if TCG_TARGET_HAS_not_i32 || TCG_TARGET_HAS_not_i64
        CASE_32_64(not)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = ~regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_neg_i32 || TCG_TARGET_HAS_neg_i64
        CASE_32_64(neg)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = -regs[r1];
            break;
#endif
#if TCG_TARGET_REG_BITS == 64
        /* Load/store operations (64 bit). */

        case INDEX_op_ld32s_i64:
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(int32_t *)ptr;
            break;
        case INDEX_op_ld_i64:
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint64_t *)ptr;
            break;
        case INDEX_op_st_i64:
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint64_t *)ptr = regs[r0];
            break;

        /* Arithmetic operations (64 bit). */

        case INDEX_op_div_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int64_t)regs[r1] / (int64_t)regs[r2];
            break;
        case INDEX_op_divu_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint64_t)regs[r1] / (uint64_t)regs[r2];
            break;
        case INDEX_op_rem_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int64_t)regs[r1] % (int64_t)regs[r2];
            break;
        case INDEX_op_remu_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint64_t)regs[r1] % (uint64_t)regs[r2];
            break;
#if TCG_TARGET_HAS_clz_i64
        case INDEX_op_clz_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] ? clz64(regs[r1]) : regs[r2];
            break;
#endif
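
        /*
         * clz/ctz take a third operand supplying the result for a zero
         * input, per the TCG semantics: e.g. clz of 0 yields regs[r2]
         * rather than an undefined bit count.
         */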
#if TCG_TARGET_HAS_ctz_i64
        case INDEX_op_ctz_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] ? ctz64(regs[r1]) : regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_ctpop_i64
        case INDEX_op_ctpop_i64:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = ctpop64(regs[r1]);
            break;
#endif
#if TCG_TARGET_HAS_mulu2_i64
        case INDEX_op_mulu2_i64:
            tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
            mulu64(&regs[r0], &regs[r1], regs[r2], regs[r3]);
            break;
#endif
#if TCG_TARGET_HAS_muls2_i64
        case INDEX_op_muls2_i64:
            tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
            muls64(&regs[r0], &regs[r1], regs[r2], regs[r3]);
            break;
#endif
#if TCG_TARGET_HAS_add2_i64
        case INDEX_op_add2_i64:
            tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
            T1 = regs[r2] + regs[r4];
            T2 = regs[r3] + regs[r5] + (T1 < regs[r2]);
            regs[r0] = T1;
            regs[r1] = T2;
            break;
#endif
#if TCG_TARGET_HAS_sub2_i64
        case INDEX_op_sub2_i64:
            tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
            T1 = regs[r2] - regs[r4];
            T2 = regs[r3] - regs[r5] - (regs[r2] < regs[r4]);
            regs[r0] = T1;
            regs[r1] = T2;
            break;
#endif

        /* Shift/rotate operations (64 bit). */

        case INDEX_op_shl_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] << (regs[r2] & 63);
            break;
        case INDEX_op_shr_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] >> (regs[r2] & 63);
            break;
        case INDEX_op_sar_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int64_t)regs[r1] >> (regs[r2] & 63);
            break;
#if TCG_TARGET_HAS_rot_i64
        case INDEX_op_rotl_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = rol64(regs[r1], regs[r2] & 63);
            break;
        case INDEX_op_rotr_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ror64(regs[r1], regs[r2] & 63);
            break;
#endif
#if TCG_TARGET_HAS_deposit_i64
        case INDEX_op_deposit_i64:
            tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
            regs[r0] = deposit64(regs[r1], pos, len, regs[r2]);
            break;
#endif
#if TCG_TARGET_HAS_extract_i64
        case INDEX_op_extract_i64:
            tci_args_rrbb(insn, &r0, &r1, &pos, &len);
            regs[r0] = extract64(regs[r1], pos, len);
            break;
#endif
#if TCG_TARGET_HAS_sextract_i64
        case INDEX_op_sextract_i64:
            tci_args_rrbb(insn, &r0, &r1, &pos, &len);
            regs[r0] = sextract64(regs[r1], pos, len);
            break;
#endif
        case INDEX_op_brcond_i64:
            tci_args_rl(insn, tb_ptr, &r0, &ptr);
            if (regs[r0]) {
                tb_ptr = ptr;
            }
            break;
        case INDEX_op_ext32s_i64:
        case INDEX_op_ext_i32_i64:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (int32_t)regs[r1];
            break;
        case INDEX_op_ext32u_i64:
        case INDEX_op_extu_i32_i64:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (uint32_t)regs[r1];
            break;
#if TCG_TARGET_HAS_bswap64_i64
        case INDEX_op_bswap64_i64:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = bswap64(regs[r1]);
            break;
#endif
#endif /* TCG_TARGET_REG_BITS == 64 */

        /* QEMU specific operations. */
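
        /*
         * exit_tb returns its pointer argument to the caller of
         * tcg_qemu_tb_exec(); goto_tb reloads tb_ptr through a pointer
         * slot (patched when translation blocks are chained); goto_ptr
         * jumps to a computed address, or returns 0 when there is no
         * next block.
         */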

        case INDEX_op_exit_tb:
            tci_args_l(insn, tb_ptr, &ptr);
            return (uintptr_t)ptr;

        case INDEX_op_goto_tb:
            tci_args_l(insn, tb_ptr, &ptr);
            tb_ptr = *(void **)ptr;
            break;

        case INDEX_op_goto_ptr:
            tci_args_r(insn, &r0);
            ptr = (void *)regs[r0];
            if (!ptr) {
                return 0;
            }
            tb_ptr = ptr;
            break;

        case INDEX_op_qemu_ld_i32:
            if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                taddr = regs[r1];
            } else {
                tci_args_rrrm(insn, &r0, &r1, &r2, &oi);
                taddr = tci_uint64(regs[r2], regs[r1]);
            }
            tmp32 = tci_qemu_ld(env, taddr, oi, tb_ptr);
            regs[r0] = tmp32;
            break;

        case INDEX_op_qemu_ld_i64:
            if (TCG_TARGET_REG_BITS == 64) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                taddr = regs[r1];
            } else if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) {
                tci_args_rrrm(insn, &r0, &r1, &r2, &oi);
                taddr = regs[r2];
            } else {
                tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4);
                taddr = tci_uint64(regs[r3], regs[r2]);
                oi = regs[r4];
            }
            tmp64 = tci_qemu_ld(env, taddr, oi, tb_ptr);
            if (TCG_TARGET_REG_BITS == 32) {
                tci_write_reg64(regs, r1, r0, tmp64);
            } else {
                regs[r0] = tmp64;
            }
            break;

        case INDEX_op_qemu_st_i32:
            if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                taddr = regs[r1];
            } else {
                tci_args_rrrm(insn, &r0, &r1, &r2, &oi);
                taddr = tci_uint64(regs[r2], regs[r1]);
            }
            tmp32 = regs[r0];
            tci_qemu_st(env, taddr, tmp32, oi, tb_ptr);
            break;

        case INDEX_op_qemu_st_i64:
            if (TCG_TARGET_REG_BITS == 64) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                taddr = regs[r1];
                tmp64 = regs[r0];
            } else {
                if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) {
                    tci_args_rrrm(insn, &r0, &r1, &r2, &oi);
                    taddr = regs[r2];
                } else {
                    tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4);
                    taddr = tci_uint64(regs[r3], regs[r2]);
                    oi = regs[r4];
                }
                tmp64 = tci_uint64(regs[r1], regs[r0]);
            }
            tci_qemu_st(env, taddr, tmp64, oi, tb_ptr);
            break;

        case INDEX_op_mb:
            /* Ensure ordering for all kinds */
            smp_mb();
            break;
        default:
            g_assert_not_reached();
        }
    }
}

/*
 * Disassembler that matches the interpreter
 */

static const char *str_r(TCGReg r)
{
    static const char regs[TCG_TARGET_NB_REGS][4] = {
        "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
        "r8", "r9", "r10", "r11", "r12", "r13", "env", "sp"
    };

    QEMU_BUILD_BUG_ON(TCG_AREG0 != TCG_REG_R14);
    QEMU_BUILD_BUG_ON(TCG_REG_CALL_STACK != TCG_REG_R15);

    assert((unsigned)r < TCG_TARGET_NB_REGS);
    return regs[r];
}

static const char *str_c(TCGCond c)
{
    static const char cond[16][8] = {
        [TCG_COND_NEVER] = "never",
        [TCG_COND_ALWAYS] = "always",
        [TCG_COND_EQ] = "eq",
        [TCG_COND_NE] = "ne",
        [TCG_COND_LT] = "lt",
        [TCG_COND_GE] = "ge",
        [TCG_COND_LE] = "le",
        [TCG_COND_GT] = "gt",
        [TCG_COND_LTU] = "ltu",
        [TCG_COND_GEU] = "geu",
        [TCG_COND_LEU] = "leu",
        [TCG_COND_GTU] = "gtu",
    };

    assert((unsigned)c < ARRAY_SIZE(cond));
    assert(cond[c][0] != 0);
    return cond[c];
}

/* Disassemble TCI bytecode. */
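/*
 * Every TCI instruction is exactly one 32-bit word; out-of-line operands
 * such as labels and call descriptors are addressed pc-relative from the
 * insn word, which is why this function always advances by sizeof(insn).
 */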
int print_insn_tci(bfd_vma addr, disassemble_info *info)
{
    const uint32_t *tb_ptr = (const void *)(uintptr_t)addr;
    const TCGOpDef *def;
    const char *op_name;
    uint32_t insn;
    TCGOpcode op;
    TCGReg r0, r1, r2, r3, r4, r5;
    tcg_target_ulong i1;
    int32_t s2;
    TCGCond c;
    MemOpIdx oi;
    uint8_t pos, len;
    void *ptr;

    /* TCI is always the host, so we don't need to load indirect. */
    insn = *tb_ptr++;

    info->fprintf_func(info->stream, "%08x ", insn);

    op = extract32(insn, 0, 8);
    def = &tcg_op_defs[op];
    op_name = def->name;

    switch (op) {
    case INDEX_op_br:
    case INDEX_op_exit_tb:
    case INDEX_op_goto_tb:
        tci_args_l(insn, tb_ptr, &ptr);
        info->fprintf_func(info->stream, "%-12s %p", op_name, ptr);
        break;

    case INDEX_op_goto_ptr:
        tci_args_r(insn, &r0);
        info->fprintf_func(info->stream, "%-12s %s", op_name, str_r(r0));
        break;

    case INDEX_op_call:
        tci_args_nl(insn, tb_ptr, &len, &ptr);
        info->fprintf_func(info->stream, "%-12s %d, %p", op_name, len, ptr);
        break;
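
    /*
     * A TCI brcond only ever tests a register against zero with "ne";
     * the condition itself is materialized into that register by a
     * preceding setcond, hence the fixed "0, ne" in the output below.
     */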
    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        tci_args_rl(insn, tb_ptr, &r0, &ptr);
        info->fprintf_func(info->stream, "%-12s %s, 0, ne, %p",
                           op_name, str_r(r0), ptr);
        break;

    case INDEX_op_setcond_i32:
    case INDEX_op_setcond_i64:
        tci_args_rrrc(insn, &r0, &r1, &r2, &c);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s",
                           op_name, str_r(r0), str_r(r1), str_r(r2), str_c(c));
        break;

    case INDEX_op_tci_movi:
        tci_args_ri(insn, &r0, &i1);
        info->fprintf_func(info->stream, "%-12s %s, 0x%" TCG_PRIlx,
                           op_name, str_r(r0), i1);
        break;

    case INDEX_op_tci_movl:
        tci_args_rl(insn, tb_ptr, &r0, &ptr);
        info->fprintf_func(info->stream, "%-12s %s, %p",
                           op_name, str_r(r0), ptr);
        break;

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i32:
    case INDEX_op_ld_i64:
    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i32:
    case INDEX_op_st_i64:
        tci_args_rrs(insn, &r0, &r1, &s2);
        info->fprintf_func(info->stream, "%-12s %s, %s, %d",
                           op_name, str_r(r0), str_r(r1), s2);
        break;

    case INDEX_op_mov_i32:
    case INDEX_op_mov_i64:
    case INDEX_op_ext8s_i32:
    case INDEX_op_ext8s_i64:
    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
    case INDEX_op_ext16u_i32:
    case INDEX_op_ext32s_i64:
    case INDEX_op_ext32u_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap16_i64:
    case INDEX_op_bswap32_i32:
    case INDEX_op_bswap32_i64:
    case INDEX_op_bswap64_i64:
    case INDEX_op_not_i32:
    case INDEX_op_not_i64:
    case INDEX_op_neg_i32:
    case INDEX_op_neg_i64:
    case INDEX_op_ctpop_i32:
    case INDEX_op_ctpop_i64:
        tci_args_rr(insn, &r0, &r1);
        info->fprintf_func(info->stream, "%-12s %s, %s",
                           op_name, str_r(r0), str_r(r1));
        break;

    case INDEX_op_add_i32:
    case INDEX_op_add_i64:
    case INDEX_op_sub_i32:
    case INDEX_op_sub_i64:
    case INDEX_op_mul_i32:
    case INDEX_op_mul_i64:
    case INDEX_op_and_i32:
    case INDEX_op_and_i64:
    case INDEX_op_or_i32:
    case INDEX_op_or_i64:
    case INDEX_op_xor_i32:
    case INDEX_op_xor_i64:
    case INDEX_op_andc_i32:
    case INDEX_op_andc_i64:
    case INDEX_op_orc_i32:
    case INDEX_op_orc_i64:
    case INDEX_op_eqv_i32:
    case INDEX_op_eqv_i64:
    case INDEX_op_nand_i32:
    case INDEX_op_nand_i64:
    case INDEX_op_nor_i32:
    case INDEX_op_nor_i64:
    case INDEX_op_div_i32:
    case INDEX_op_div_i64:
    case INDEX_op_rem_i32:
    case INDEX_op_rem_i64:
    case INDEX_op_divu_i32:
    case INDEX_op_divu_i64:
    case INDEX_op_remu_i32:
    case INDEX_op_remu_i64:
    case INDEX_op_shl_i32:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i32:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i32:
    case INDEX_op_sar_i64:
    case INDEX_op_rotl_i32:
    case INDEX_op_rotl_i64:
    case INDEX_op_rotr_i32:
    case INDEX_op_rotr_i64:
    case INDEX_op_clz_i32:
    case INDEX_op_clz_i64:
    case INDEX_op_ctz_i32:
    case INDEX_op_ctz_i64:
        tci_args_rrr(insn, &r0, &r1, &r2);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s",
                           op_name, str_r(r0), str_r(r1), str_r(r2));
        break;

    case INDEX_op_deposit_i32:
    case INDEX_op_deposit_i64:
        tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %d, %d",
                           op_name, str_r(r0), str_r(r1), str_r(r2), pos, len);
        break;

    case INDEX_op_extract_i32:
    case INDEX_op_extract_i64:
    case INDEX_op_sextract_i32:
    case INDEX_op_sextract_i64:
        tci_args_rrbb(insn, &r0, &r1, &pos, &len);
        info->fprintf_func(info->stream, "%-12s %s,%s,%d,%d",
                           op_name, str_r(r0), str_r(r1), pos, len);
        break;

    case INDEX_op_movcond_i32:
    case INDEX_op_movcond_i64:
    case INDEX_op_setcond2_i32:
        tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &c);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s, %s",
                           op_name, str_r(r0), str_r(r1), str_r(r2),
                           str_r(r3), str_r(r4), str_c(c));
        break;

    case INDEX_op_mulu2_i32:
    case INDEX_op_mulu2_i64:
    case INDEX_op_muls2_i32:
    case INDEX_op_muls2_i64:
        tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s",
                           op_name, str_r(r0), str_r(r1),
                           str_r(r2), str_r(r3));
        break;

    case INDEX_op_add2_i32:
    case INDEX_op_add2_i64:
    case INDEX_op_sub2_i32:
    case INDEX_op_sub2_i64:
        tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s, %s",
                           op_name, str_r(r0), str_r(r1), str_r(r2),
                           str_r(r3), str_r(r4), str_r(r5));
        break;
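
    /*
     * For the qemu_ld/st ops, the operand count depends on how many
     * host registers the value (1 or 2) and the guest address (1 or 2)
     * occupy; compute that first, then pick the matching decoder.
     */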
    case INDEX_op_qemu_ld_i64:
    case INDEX_op_qemu_st_i64:
        len = DIV_ROUND_UP(64, TCG_TARGET_REG_BITS);
        goto do_qemu_ldst;
    case INDEX_op_qemu_ld_i32:
    case INDEX_op_qemu_st_i32:
        len = 1;
    do_qemu_ldst:
        len += DIV_ROUND_UP(TARGET_LONG_BITS, TCG_TARGET_REG_BITS);
        switch (len) {
        case 2:
            tci_args_rrm(insn, &r0, &r1, &oi);
            info->fprintf_func(info->stream, "%-12s %s, %s, %x",
                               op_name, str_r(r0), str_r(r1), oi);
            break;
        case 3:
            tci_args_rrrm(insn, &r0, &r1, &r2, &oi);
            info->fprintf_func(info->stream, "%-12s %s, %s, %s, %x",
                               op_name, str_r(r0), str_r(r1), str_r(r2), oi);
            break;
        case 4:
            tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4);
            info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s",
                               op_name, str_r(r0), str_r(r1),
                               str_r(r2), str_r(r3), str_r(r4));
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case 0:
        /* tcg_out_nop_fill uses zeros */
        if (insn == 0) {
            info->fprintf_func(info->stream, "align");
            break;
        }
        /* fall through */

    default:
        info->fprintf_func(info->stream, "illegal opcode %d", op);
        break;
    }

    return sizeof(insn);
}