/*
 * Tiny Code Interpreter for QEMU
 *
 * Copyright (c) 2009, 2011, 2016 Stefan Weil
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "exec/cpu_ldst.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-ldst.h"
#include <ffi.h>


/*
 * Enable TCI assertions only when debugging TCG (and without NDEBUG defined).
 * Without assertions, the interpreter runs much faster.
 */
#if defined(CONFIG_DEBUG_TCG)
# define tci_assert(cond) assert(cond)
#else
# define tci_assert(cond) ((void)(cond))
#endif

__thread uintptr_t tci_tb_ptr;

static void tci_write_reg64(tcg_target_ulong *regs, uint32_t high_index,
                            uint32_t low_index, uint64_t value)
{
    regs[low_index] = (uint32_t)value;
    regs[high_index] = value >> 32;
}

/* Create a 64 bit value from two 32 bit values. */
static uint64_t tci_uint64(uint32_t high, uint32_t low)
{
    return ((uint64_t)high << 32) + low;
}

/*
 * Load sets of arguments all at once.  The naming convention is:
 *   tci_args_<arguments>
 * where arguments is a sequence of
 *
 *   b = immediate (bit position)
 *   c = condition (TCGCond)
 *   i = immediate (uint32_t)
 *   I = immediate (tcg_target_ulong)
 *   l = label or pointer
 *   m = immediate (MemOpIdx)
 *   n = immediate (call return length)
 *   r = register
 *   s = signed ldst offset
 */

static void tci_args_l(uint32_t insn, const void *tb_ptr, void **l0)
{
    int diff = sextract32(insn, 12, 20);
    *l0 = diff ? (void *)tb_ptr + diff : NULL;
}
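/*
 * Illustration (not part of the build): the TCI backend packs a plain
 * three-register op such as add_i32 roughly as
 *
 *     insn = INDEX_op_add_i32 | (r0 << 8) | (r1 << 12) | (r2 << 16);
 *
 * i.e. an 8-bit opcode followed by 4-bit register fields, with larger
 * immediates in the high bits.  The tci_args_* helpers below simply
 * undo that packing with extract32()/sextract32() at the matching
 * bit positions.
 */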
static void tci_args_r(uint32_t insn, TCGReg *r0)
{
    *r0 = extract32(insn, 8, 4);
}

static void tci_args_nl(uint32_t insn, const void *tb_ptr,
                        uint8_t *n0, void **l1)
{
    *n0 = extract32(insn, 8, 4);
    *l1 = sextract32(insn, 12, 20) + (void *)tb_ptr;
}

static void tci_args_rl(uint32_t insn, const void *tb_ptr,
                        TCGReg *r0, void **l1)
{
    *r0 = extract32(insn, 8, 4);
    *l1 = sextract32(insn, 12, 20) + (void *)tb_ptr;
}

static void tci_args_rr(uint32_t insn, TCGReg *r0, TCGReg *r1)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
}

static void tci_args_ri(uint32_t insn, TCGReg *r0, tcg_target_ulong *i1)
{
    *r0 = extract32(insn, 8, 4);
    *i1 = sextract32(insn, 12, 20);
}

static void tci_args_rrm(uint32_t insn, TCGReg *r0,
                         TCGReg *r1, MemOpIdx *m2)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *m2 = extract32(insn, 20, 12);
}

static void tci_args_rrr(uint32_t insn, TCGReg *r0, TCGReg *r1, TCGReg *r2)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
}

static void tci_args_rrs(uint32_t insn, TCGReg *r0, TCGReg *r1, int32_t *i2)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *i2 = sextract32(insn, 16, 16);
}

static void tci_args_rrbb(uint32_t insn, TCGReg *r0, TCGReg *r1,
                          uint8_t *i2, uint8_t *i3)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *i2 = extract32(insn, 16, 6);
    *i3 = extract32(insn, 22, 6);
}

static void tci_args_rrrc(uint32_t insn,
                          TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGCond *c3)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *c3 = extract32(insn, 20, 4);
}

static void tci_args_rrrm(uint32_t insn,
                          TCGReg *r0, TCGReg *r1, TCGReg *r2, MemOpIdx *m3)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *m3 = extract32(insn, 20, 12);
}

static void tci_args_rrrbb(uint32_t insn, TCGReg *r0, TCGReg *r1,
                           TCGReg *r2, uint8_t *i3, uint8_t *i4)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *i3 = extract32(insn, 20, 6);
    *i4 = extract32(insn, 26, 6);
}

static void tci_args_rrrrr(uint32_t insn, TCGReg *r0, TCGReg *r1,
                           TCGReg *r2, TCGReg *r3, TCGReg *r4)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *r3 = extract32(insn, 20, 4);
    *r4 = extract32(insn, 24, 4);
}

static void tci_args_rrrr(uint32_t insn,
                          TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGReg *r3)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *r3 = extract32(insn, 20, 4);
}

static void tci_args_rrrrrc(uint32_t insn, TCGReg *r0, TCGReg *r1,
                            TCGReg *r2, TCGReg *r3, TCGReg *r4, TCGCond *c5)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *r3 = extract32(insn, 20, 4);
    *r4 = extract32(insn, 24, 4);
    *c5 = extract32(insn, 28, 4);
}

static void tci_args_rrrrrr(uint32_t insn, TCGReg *r0, TCGReg *r1,
                            TCGReg *r2, TCGReg *r3, TCGReg *r4, TCGReg *r5)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *r3 = extract32(insn, 20, 4);
    *r4 = extract32(insn, 24, 4);
    *r5 = extract32(insn, 28, 4);
}
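/*
 * Example (illustrative): for u0 = 0xffffffff and u1 = 1, TCG_COND_LTU
 * is false (u0 is the larger unsigned value) while TCG_COND_LT is true
 * (i0 reads as -1).  The casts to int32_t/int64_t below are what select
 * the signed interpretation for the signed conditions.
 */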
static bool tci_compare32(uint32_t u0, uint32_t u1, TCGCond condition)
{
    bool result = false;
    int32_t i0 = u0;
    int32_t i1 = u1;
    switch (condition) {
    case TCG_COND_EQ:
        result = (u0 == u1);
        break;
    case TCG_COND_NE:
        result = (u0 != u1);
        break;
    case TCG_COND_LT:
        result = (i0 < i1);
        break;
    case TCG_COND_GE:
        result = (i0 >= i1);
        break;
    case TCG_COND_LE:
        result = (i0 <= i1);
        break;
    case TCG_COND_GT:
        result = (i0 > i1);
        break;
    case TCG_COND_LTU:
        result = (u0 < u1);
        break;
    case TCG_COND_GEU:
        result = (u0 >= u1);
        break;
    case TCG_COND_LEU:
        result = (u0 <= u1);
        break;
    case TCG_COND_GTU:
        result = (u0 > u1);
        break;
    default:
        g_assert_not_reached();
    }
    return result;
}

static bool tci_compare64(uint64_t u0, uint64_t u1, TCGCond condition)
{
    bool result = false;
    int64_t i0 = u0;
    int64_t i1 = u1;
    switch (condition) {
    case TCG_COND_EQ:
        result = (u0 == u1);
        break;
    case TCG_COND_NE:
        result = (u0 != u1);
        break;
    case TCG_COND_LT:
        result = (i0 < i1);
        break;
    case TCG_COND_GE:
        result = (i0 >= i1);
        break;
    case TCG_COND_LE:
        result = (i0 <= i1);
        break;
    case TCG_COND_GT:
        result = (i0 > i1);
        break;
    case TCG_COND_LTU:
        result = (u0 < u1);
        break;
    case TCG_COND_GEU:
        result = (u0 >= u1);
        break;
    case TCG_COND_LEU:
        result = (u0 <= u1);
        break;
    case TCG_COND_GTU:
        result = (u0 > u1);
        break;
    default:
        g_assert_not_reached();
    }
    return result;
}

static uint64_t tci_qemu_ld(CPUArchState *env, target_ulong taddr,
                            MemOpIdx oi, const void *tb_ptr)
{
    MemOp mop = get_memop(oi);
    uintptr_t ra = (uintptr_t)tb_ptr;

#ifdef CONFIG_SOFTMMU
    switch (mop & MO_SSIZE) {
    case MO_UB:
        return helper_ldub_mmu(env, taddr, oi, ra);
    case MO_SB:
        return helper_ldsb_mmu(env, taddr, oi, ra);
    case MO_UW:
        return helper_lduw_mmu(env, taddr, oi, ra);
    case MO_SW:
        return helper_ldsw_mmu(env, taddr, oi, ra);
    case MO_UL:
        return helper_ldul_mmu(env, taddr, oi, ra);
    case MO_SL:
        return helper_ldsl_mmu(env, taddr, oi, ra);
    case MO_UQ:
        return helper_ldq_mmu(env, taddr, oi, ra);
    default:
        g_assert_not_reached();
    }
#else
    void *haddr = g2h(env_cpu(env), taddr);
    unsigned a_mask = (1u << get_alignment_bits(mop)) - 1;
    uint64_t ret;

    set_helper_retaddr(ra);
    if (taddr & a_mask) {
        helper_unaligned_ld(env, taddr);
    }
    switch (mop & (MO_BSWAP | MO_SSIZE)) {
    case MO_UB:
        ret = ldub_p(haddr);
        break;
    case MO_SB:
        ret = ldsb_p(haddr);
        break;
    case MO_LEUW:
        ret = lduw_le_p(haddr);
        break;
    case MO_LESW:
        ret = ldsw_le_p(haddr);
        break;
    case MO_LEUL:
        ret = (uint32_t)ldl_le_p(haddr);
        break;
    case MO_LESL:
        ret = (int32_t)ldl_le_p(haddr);
        break;
    case MO_LEUQ:
        ret = ldq_le_p(haddr);
        break;
    case MO_BEUW:
        ret = lduw_be_p(haddr);
        break;
    case MO_BESW:
        ret = ldsw_be_p(haddr);
        break;
    case MO_BEUL:
        ret = (uint32_t)ldl_be_p(haddr);
        break;
    case MO_BESL:
        ret = (int32_t)ldl_be_p(haddr);
        break;
    case MO_BEUQ:
        ret = ldq_be_p(haddr);
        break;
    default:
        g_assert_not_reached();
    }
    clear_helper_retaddr();
    return ret;
#endif
}
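/*
 * Note (informational): a MemOpIdx packs the MemOp (size, sign and
 * endianness flags) together with the softmmu mmu_idx, roughly
 *
 *     MemOp mop = get_memop(oi);      // size/sign/byte-swap
 *     unsigned idx = get_mmuidx(oi);  // consumed by the softmmu helpers
 *
 * Only the MemOp half is needed directly here; the helpers receive the
 * whole oi and extract the mmu_idx themselves.
 */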
static void tci_qemu_st(CPUArchState *env, target_ulong taddr, uint64_t val,
                        MemOpIdx oi, const void *tb_ptr)
{
    MemOp mop = get_memop(oi);
    uintptr_t ra = (uintptr_t)tb_ptr;

#ifdef CONFIG_SOFTMMU
    switch (mop & MO_SIZE) {
    case MO_UB:
        helper_stb_mmu(env, taddr, val, oi, ra);
        break;
    case MO_UW:
        helper_stw_mmu(env, taddr, val, oi, ra);
        break;
    case MO_UL:
        helper_stl_mmu(env, taddr, val, oi, ra);
        break;
    case MO_UQ:
        helper_stq_mmu(env, taddr, val, oi, ra);
        break;
    default:
        g_assert_not_reached();
    }
#else
    void *haddr = g2h(env_cpu(env), taddr);
    unsigned a_mask = (1u << get_alignment_bits(mop)) - 1;

    set_helper_retaddr(ra);
    if (taddr & a_mask) {
        helper_unaligned_st(env, taddr);
    }
    switch (mop & (MO_BSWAP | MO_SIZE)) {
    case MO_UB:
        stb_p(haddr, val);
        break;
    case MO_LEUW:
        stw_le_p(haddr, val);
        break;
    case MO_LEUL:
        stl_le_p(haddr, val);
        break;
    case MO_LEUQ:
        stq_le_p(haddr, val);
        break;
    case MO_BEUW:
        stw_be_p(haddr, val);
        break;
    case MO_BEUL:
        stl_be_p(haddr, val);
        break;
    case MO_BEUQ:
        stq_be_p(haddr, val);
        break;
    default:
        g_assert_not_reached();
    }
    clear_helper_retaddr();
#endif
}

#if TCG_TARGET_REG_BITS == 64
# define CASE_32_64(x)                          \
        case glue(glue(INDEX_op_, x), _i64):    \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x)                             \
        case glue(glue(INDEX_op_, x), _i64):
#else
# define CASE_32_64(x)                          \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x)
#endif
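/*
 * Example (illustrative): on a 64-bit host, CASE_32_64(add) expands to
 *
 *     case INDEX_op_add_i64:
 *     case INDEX_op_add_i32:
 *
 * so one interpreter arm serves both operand widths; on a 32-bit host
 * only the _i32 label is emitted and CASE_64() expands to nothing.
 */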
/* Interpret pseudo code in tb. */
/*
 * Disable CFI checks.
 * One possible operation in the pseudo code is a call to binary code.
 * Therefore, disable CFI checks in the interpreter function
 */
uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
                                            const void *v_tb_ptr)
{
    const uint32_t *tb_ptr = v_tb_ptr;
    tcg_target_ulong regs[TCG_TARGET_NB_REGS];
    uint64_t stack[(TCG_STATIC_CALL_ARGS_SIZE + TCG_STATIC_FRAME_SIZE)
                   / sizeof(uint64_t)];

    regs[TCG_AREG0] = (tcg_target_ulong)env;
    regs[TCG_REG_CALL_STACK] = (uintptr_t)stack;
    tci_assert(tb_ptr);

    for (;;) {
        uint32_t insn;
        TCGOpcode opc;
        TCGReg r0, r1, r2, r3, r4, r5;
        tcg_target_ulong t1;
        TCGCond condition;
        target_ulong taddr;
        uint8_t pos, len;
        uint32_t tmp32;
        uint64_t tmp64;
        uint64_t T1, T2;
        MemOpIdx oi;
        int32_t ofs;
        void *ptr;

        insn = *tb_ptr++;
        opc = extract32(insn, 0, 8);

        switch (opc) {
        case INDEX_op_call:
            {
                void *call_slots[MAX_CALL_IARGS];
                ffi_cif *cif;
                void *func;
                unsigned i, s, n;

                tci_args_nl(insn, tb_ptr, &len, &ptr);
                func = ((void **)ptr)[0];
                cif = ((void **)ptr)[1];

                n = cif->nargs;
                for (i = s = 0; i < n; ++i) {
                    ffi_type *t = cif->arg_types[i];
                    call_slots[i] = &stack[s];
                    s += DIV_ROUND_UP(t->size, 8);
                }

                /* Helper functions may need to access the "return address" */
                tci_tb_ptr = (uintptr_t)tb_ptr;
                ffi_call(cif, func, stack, call_slots);
            }

            switch (len) {
            case 0: /* void */
                break;
            case 1: /* uint32_t */
                /*
                 * The result winds up "left-aligned" in the stack[0] slot.
                 * Note that libffi has an odd special case in that it will
                 * always widen an integral result to ffi_arg.
                 */
                if (sizeof(ffi_arg) == 8) {
                    regs[TCG_REG_R0] = (uint32_t)stack[0];
                } else {
                    regs[TCG_REG_R0] = *(uint32_t *)stack;
                }
                break;
            case 2: /* uint64_t */
                /*
                 * For TCG_TARGET_REG_BITS == 32, the register pair
                 * must stay in host memory order.
                 */
                memcpy(&regs[TCG_REG_R0], stack, 8);
                break;
            case 3: /* Int128 */
                memcpy(&regs[TCG_REG_R0], stack, 16);
                break;
            default:
                g_assert_not_reached();
            }
            break;

        case INDEX_op_br:
            tci_args_l(insn, tb_ptr, &ptr);
            tb_ptr = ptr;
            continue;
        case INDEX_op_setcond_i32:
            tci_args_rrrc(insn, &r0, &r1, &r2, &condition);
            regs[r0] = tci_compare32(regs[r1], regs[r2], condition);
            break;
        case INDEX_op_movcond_i32:
            tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition);
            tmp32 = tci_compare32(regs[r1], regs[r2], condition);
            regs[r0] = regs[tmp32 ? r3 : r4];
            break;
#if TCG_TARGET_REG_BITS == 32
        case INDEX_op_setcond2_i32:
            tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition);
            T1 = tci_uint64(regs[r2], regs[r1]);
            T2 = tci_uint64(regs[r4], regs[r3]);
            regs[r0] = tci_compare64(T1, T2, condition);
            break;
#elif TCG_TARGET_REG_BITS == 64
        case INDEX_op_setcond_i64:
            tci_args_rrrc(insn, &r0, &r1, &r2, &condition);
            regs[r0] = tci_compare64(regs[r1], regs[r2], condition);
            break;
        case INDEX_op_movcond_i64:
            tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition);
            tmp32 = tci_compare64(regs[r1], regs[r2], condition);
            regs[r0] = regs[tmp32 ? r3 : r4];
            break;
#endif
        CASE_32_64(mov)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = regs[r1];
            break;
        case INDEX_op_tci_movi:
            tci_args_ri(insn, &r0, &t1);
            regs[r0] = t1;
            break;
        case INDEX_op_tci_movl:
            tci_args_rl(insn, tb_ptr, &r0, &ptr);
            regs[r0] = *(tcg_target_ulong *)ptr;
            break;

            /* Load/store operations (32 bit). */

        CASE_32_64(ld8u)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint8_t *)ptr;
            break;
        CASE_32_64(ld8s)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(int8_t *)ptr;
            break;
        CASE_32_64(ld16u)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint16_t *)ptr;
            break;
        CASE_32_64(ld16s)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(int16_t *)ptr;
            break;
        case INDEX_op_ld_i32:
        CASE_64(ld32u)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint32_t *)ptr;
            break;
        CASE_32_64(st8)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint8_t *)ptr = regs[r0];
            break;
        CASE_32_64(st16)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint16_t *)ptr = regs[r0];
            break;
        case INDEX_op_st_i32:
        CASE_64(st32)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint32_t *)ptr = regs[r0];
            break;
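        /*
         * Illustration: the ld/st cases above are host-address accesses
         * (env fields, stack slots), e.g. ld_i32 is just
         *
         *     regs[r0] = *(uint32_t *)(regs[r1] + ofs);
         *
         * with no guest address translation involved; guest memory is
         * reached through the qemu_ld/qemu_st opcodes further down.
         */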
            /* Arithmetic operations (mixed 32/64 bit). */

        CASE_32_64(add)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] + regs[r2];
            break;
        CASE_32_64(sub)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] - regs[r2];
            break;
        CASE_32_64(mul)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] * regs[r2];
            break;
        CASE_32_64(and)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] & regs[r2];
            break;
        CASE_32_64(or)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] | regs[r2];
            break;
        CASE_32_64(xor)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] ^ regs[r2];
            break;
#if TCG_TARGET_HAS_andc_i32 || TCG_TARGET_HAS_andc_i64
        CASE_32_64(andc)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] & ~regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_orc_i32 || TCG_TARGET_HAS_orc_i64
        CASE_32_64(orc)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] | ~regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_eqv_i32 || TCG_TARGET_HAS_eqv_i64
        CASE_32_64(eqv)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ~(regs[r1] ^ regs[r2]);
            break;
#endif
#if TCG_TARGET_HAS_nand_i32 || TCG_TARGET_HAS_nand_i64
        CASE_32_64(nand)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ~(regs[r1] & regs[r2]);
            break;
#endif
#if TCG_TARGET_HAS_nor_i32 || TCG_TARGET_HAS_nor_i64
        CASE_32_64(nor)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ~(regs[r1] | regs[r2]);
            break;
#endif

            /* Arithmetic operations (32 bit). */

        case INDEX_op_div_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int32_t)regs[r1] / (int32_t)regs[r2];
            break;
        case INDEX_op_divu_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] / (uint32_t)regs[r2];
            break;
        case INDEX_op_rem_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int32_t)regs[r1] % (int32_t)regs[r2];
            break;
        case INDEX_op_remu_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] % (uint32_t)regs[r2];
            break;
#if TCG_TARGET_HAS_clz_i32
        case INDEX_op_clz_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            tmp32 = regs[r1];
            regs[r0] = tmp32 ? clz32(tmp32) : regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_ctz_i32
        case INDEX_op_ctz_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            tmp32 = regs[r1];
            regs[r0] = tmp32 ? ctz32(tmp32) : regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_ctpop_i32
        case INDEX_op_ctpop_i32:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = ctpop32(regs[r1]);
            break;
#endif
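        /*
         * Note: clz/ctz carry a third operand that supplies the result
         * for a zero input, where a hardware bit scan is undefined:
         *
         *     clz_i32 r0, r1, r2   =>   r0 = r1 ? clz32(r1) : r2
         *
         * which matches the TCG semantics without a separate branch.
         */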
            /* Shift/rotate operations (32 bit). */

        case INDEX_op_shl_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] << (regs[r2] & 31);
            break;
        case INDEX_op_shr_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] >> (regs[r2] & 31);
            break;
        case INDEX_op_sar_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int32_t)regs[r1] >> (regs[r2] & 31);
            break;
#if TCG_TARGET_HAS_rot_i32
        case INDEX_op_rotl_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = rol32(regs[r1], regs[r2] & 31);
            break;
        case INDEX_op_rotr_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ror32(regs[r1], regs[r2] & 31);
            break;
#endif
#if TCG_TARGET_HAS_deposit_i32
        case INDEX_op_deposit_i32:
            tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
            regs[r0] = deposit32(regs[r1], pos, len, regs[r2]);
            break;
#endif
#if TCG_TARGET_HAS_extract_i32
        case INDEX_op_extract_i32:
            tci_args_rrbb(insn, &r0, &r1, &pos, &len);
            regs[r0] = extract32(regs[r1], pos, len);
            break;
#endif
#if TCG_TARGET_HAS_sextract_i32
        case INDEX_op_sextract_i32:
            tci_args_rrbb(insn, &r0, &r1, &pos, &len);
            regs[r0] = sextract32(regs[r1], pos, len);
            break;
#endif
        case INDEX_op_brcond_i32:
            tci_args_rl(insn, tb_ptr, &r0, &ptr);
            if ((uint32_t)regs[r0]) {
                tb_ptr = ptr;
            }
            break;
#if TCG_TARGET_REG_BITS == 32 || TCG_TARGET_HAS_add2_i32
        case INDEX_op_add2_i32:
            tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
            T1 = tci_uint64(regs[r3], regs[r2]);
            T2 = tci_uint64(regs[r5], regs[r4]);
            tci_write_reg64(regs, r1, r0, T1 + T2);
            break;
#endif
#if TCG_TARGET_REG_BITS == 32 || TCG_TARGET_HAS_sub2_i32
        case INDEX_op_sub2_i32:
            tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
            T1 = tci_uint64(regs[r3], regs[r2]);
            T2 = tci_uint64(regs[r5], regs[r4]);
            tci_write_reg64(regs, r1, r0, T1 - T2);
            break;
#endif
#if TCG_TARGET_HAS_mulu2_i32
        case INDEX_op_mulu2_i32:
            tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
            tmp64 = (uint64_t)(uint32_t)regs[r2] * (uint32_t)regs[r3];
            tci_write_reg64(regs, r1, r0, tmp64);
            break;
#endif
#if TCG_TARGET_HAS_muls2_i32
        case INDEX_op_muls2_i32:
            tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
            tmp64 = (int64_t)(int32_t)regs[r2] * (int32_t)regs[r3];
            tci_write_reg64(regs, r1, r0, tmp64);
            break;
#endif
#if TCG_TARGET_HAS_ext8s_i32 || TCG_TARGET_HAS_ext8s_i64
        CASE_32_64(ext8s)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (int8_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_ext16s_i32 || TCG_TARGET_HAS_ext16s_i64 || \
    TCG_TARGET_HAS_bswap16_i32 || TCG_TARGET_HAS_bswap16_i64
        CASE_32_64(ext16s)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (int16_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_ext8u_i32 || TCG_TARGET_HAS_ext8u_i64
        CASE_32_64(ext8u)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (uint8_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_ext16u_i32 || TCG_TARGET_HAS_ext16u_i64
        CASE_32_64(ext16u)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (uint16_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_bswap16_i32 || TCG_TARGET_HAS_bswap16_i64
        CASE_32_64(bswap16)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = bswap16(regs[r1]);
            break;
#endif
#if TCG_TARGET_HAS_bswap32_i32 || TCG_TARGET_HAS_bswap32_i64
        CASE_32_64(bswap32)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = bswap32(regs[r1]);
            break;
#endif
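        /*
         * Illustration: on a 32-bit host the add2/sub2/mul*2 cases
         * above model 64-bit arithmetic on register pairs, e.g.
         * add2_i32 computes
         *
         *     (r1:r0) = (r3:r2) + (r5:r4)
         *
         * using tci_uint64() to join the halves and tci_write_reg64()
         * to split the 64-bit result back into low/high registers.
         */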
#if TCG_TARGET_HAS_not_i32 || TCG_TARGET_HAS_not_i64
        CASE_32_64(not)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = ~regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_neg_i32 || TCG_TARGET_HAS_neg_i64
        CASE_32_64(neg)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = -regs[r1];
            break;
#endif
#if TCG_TARGET_REG_BITS == 64
            /* Load/store operations (64 bit). */

        case INDEX_op_ld32s_i64:
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(int32_t *)ptr;
            break;
        case INDEX_op_ld_i64:
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint64_t *)ptr;
            break;
        case INDEX_op_st_i64:
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint64_t *)ptr = regs[r0];
            break;

            /* Arithmetic operations (64 bit). */

        case INDEX_op_div_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int64_t)regs[r1] / (int64_t)regs[r2];
            break;
        case INDEX_op_divu_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint64_t)regs[r1] / (uint64_t)regs[r2];
            break;
        case INDEX_op_rem_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int64_t)regs[r1] % (int64_t)regs[r2];
            break;
        case INDEX_op_remu_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint64_t)regs[r1] % (uint64_t)regs[r2];
            break;
#if TCG_TARGET_HAS_clz_i64
        case INDEX_op_clz_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] ? clz64(regs[r1]) : regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_ctz_i64
        case INDEX_op_ctz_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] ? ctz64(regs[r1]) : regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_ctpop_i64
        case INDEX_op_ctpop_i64:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = ctpop64(regs[r1]);
            break;
#endif
#if TCG_TARGET_HAS_mulu2_i64
        case INDEX_op_mulu2_i64:
            tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
            mulu64(&regs[r0], &regs[r1], regs[r2], regs[r3]);
            break;
#endif
#if TCG_TARGET_HAS_muls2_i64
        case INDEX_op_muls2_i64:
            tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
            muls64(&regs[r0], &regs[r1], regs[r2], regs[r3]);
            break;
#endif
#if TCG_TARGET_HAS_add2_i64
        case INDEX_op_add2_i64:
            tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
            T1 = regs[r2] + regs[r4];
            T2 = regs[r3] + regs[r5] + (T1 < regs[r2]);
            regs[r0] = T1;
            regs[r1] = T2;
            break;
#endif
#if TCG_TARGET_HAS_sub2_i64
        case INDEX_op_sub2_i64:
            tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
            T1 = regs[r2] - regs[r4];
            T2 = regs[r3] - regs[r5] - (regs[r2] < regs[r4]);
            regs[r0] = T1;
            regs[r1] = T2;
            break;
#endif
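        /*
         * Note: the add2_i64/sub2_i64 cases above recover the carry and
         * borrow without a wider type: after T1 = a_lo + b_lo, unsigned
         * overflow occurred iff T1 < a_lo, so (T1 < regs[r2]) is exactly
         * the carry into the high half; likewise (regs[r2] < regs[r4])
         * is the borrow.
         */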
            /* Shift/rotate operations (64 bit). */

        case INDEX_op_shl_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] << (regs[r2] & 63);
            break;
        case INDEX_op_shr_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] >> (regs[r2] & 63);
            break;
        case INDEX_op_sar_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int64_t)regs[r1] >> (regs[r2] & 63);
            break;
#if TCG_TARGET_HAS_rot_i64
        case INDEX_op_rotl_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = rol64(regs[r1], regs[r2] & 63);
            break;
        case INDEX_op_rotr_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ror64(regs[r1], regs[r2] & 63);
            break;
#endif
#if TCG_TARGET_HAS_deposit_i64
        case INDEX_op_deposit_i64:
            tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
            regs[r0] = deposit64(regs[r1], pos, len, regs[r2]);
            break;
#endif
#if TCG_TARGET_HAS_extract_i64
        case INDEX_op_extract_i64:
            tci_args_rrbb(insn, &r0, &r1, &pos, &len);
            regs[r0] = extract64(regs[r1], pos, len);
            break;
#endif
#if TCG_TARGET_HAS_sextract_i64
        case INDEX_op_sextract_i64:
            tci_args_rrbb(insn, &r0, &r1, &pos, &len);
            regs[r0] = sextract64(regs[r1], pos, len);
            break;
#endif
        case INDEX_op_brcond_i64:
            tci_args_rl(insn, tb_ptr, &r0, &ptr);
            if (regs[r0]) {
                tb_ptr = ptr;
            }
            break;
        case INDEX_op_ext32s_i64:
        case INDEX_op_ext_i32_i64:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (int32_t)regs[r1];
            break;
        case INDEX_op_ext32u_i64:
        case INDEX_op_extu_i32_i64:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (uint32_t)regs[r1];
            break;
#if TCG_TARGET_HAS_bswap64_i64
        case INDEX_op_bswap64_i64:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = bswap64(regs[r1]);
            break;
#endif
#endif /* TCG_TARGET_REG_BITS == 64 */
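        /*
         * Note (assumes the TCI backend's lowering): brcond reaches the
         * interpreter already split into a setcond into a scratch
         * register followed by this branch-if-nonzero, so only regs[r0]
         * is tested here; the disassembler below prints it as
         * "r, 0, ne, label" for the same reason.
         */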
            /* QEMU specific operations. */

        case INDEX_op_exit_tb:
            tci_args_l(insn, tb_ptr, &ptr);
            return (uintptr_t)ptr;

        case INDEX_op_goto_tb:
            tci_args_l(insn, tb_ptr, &ptr);
            tb_ptr = *(void **)ptr;
            break;

        case INDEX_op_goto_ptr:
            tci_args_r(insn, &r0);
            ptr = (void *)regs[r0];
            if (!ptr) {
                return 0;
            }
            tb_ptr = ptr;
            break;

        case INDEX_op_qemu_ld_i32:
            if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                taddr = regs[r1];
            } else {
                tci_args_rrrm(insn, &r0, &r1, &r2, &oi);
                taddr = tci_uint64(regs[r2], regs[r1]);
            }
            tmp32 = tci_qemu_ld(env, taddr, oi, tb_ptr);
            regs[r0] = tmp32;
            break;

        case INDEX_op_qemu_ld_i64:
            if (TCG_TARGET_REG_BITS == 64) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                taddr = regs[r1];
            } else if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) {
                tci_args_rrrm(insn, &r0, &r1, &r2, &oi);
                taddr = regs[r2];
            } else {
                tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4);
                taddr = tci_uint64(regs[r3], regs[r2]);
                oi = regs[r4];
            }
            tmp64 = tci_qemu_ld(env, taddr, oi, tb_ptr);
            if (TCG_TARGET_REG_BITS == 32) {
                tci_write_reg64(regs, r1, r0, tmp64);
            } else {
                regs[r0] = tmp64;
            }
            break;

        case INDEX_op_qemu_st_i32:
            if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                taddr = regs[r1];
            } else {
                tci_args_rrrm(insn, &r0, &r1, &r2, &oi);
                taddr = tci_uint64(regs[r2], regs[r1]);
            }
            tmp32 = regs[r0];
            tci_qemu_st(env, taddr, tmp32, oi, tb_ptr);
            break;

        case INDEX_op_qemu_st_i64:
            if (TCG_TARGET_REG_BITS == 64) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                taddr = regs[r1];
                tmp64 = regs[r0];
            } else {
                if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) {
                    tci_args_rrrm(insn, &r0, &r1, &r2, &oi);
                    taddr = regs[r2];
                } else {
                    tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4);
                    taddr = tci_uint64(regs[r3], regs[r2]);
                    oi = regs[r4];
                }
                tmp64 = tci_uint64(regs[r1], regs[r0]);
            }
            tci_qemu_st(env, taddr, tmp64, oi, tb_ptr);
            break;

        case INDEX_op_mb:
            /* Ensure ordering for all kinds */
            smp_mb();
            break;
        default:
            g_assert_not_reached();
        }
    }
}

/*
 * Disassembler that matches the interpreter
 */

static const char *str_r(TCGReg r)
{
    static const char regs[TCG_TARGET_NB_REGS][4] = {
        "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
        "r8", "r9", "r10", "r11", "r12", "r13", "env", "sp"
    };

    QEMU_BUILD_BUG_ON(TCG_AREG0 != TCG_REG_R14);
    QEMU_BUILD_BUG_ON(TCG_REG_CALL_STACK != TCG_REG_R15);

    assert((unsigned)r < TCG_TARGET_NB_REGS);
    return regs[r];
}

static const char *str_c(TCGCond c)
{
    static const char cond[16][8] = {
        [TCG_COND_NEVER] = "never",
        [TCG_COND_ALWAYS] = "always",
        [TCG_COND_EQ] = "eq",
        [TCG_COND_NE] = "ne",
        [TCG_COND_LT] = "lt",
        [TCG_COND_GE] = "ge",
        [TCG_COND_LE] = "le",
        [TCG_COND_GT] = "gt",
        [TCG_COND_LTU] = "ltu",
        [TCG_COND_GEU] = "geu",
        [TCG_COND_LEU] = "leu",
        [TCG_COND_GTU] = "gtu",
    };

    assert((unsigned)c < ARRAY_SIZE(cond));
    assert(cond[c][0] != 0);
    return cond[c];
}
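/*
 * Example (illustrative) of the output format produced below: each line
 * shows the raw 32-bit instruction word in hex ("%08x"), then the op
 * name padded to twelve columns, then the decoded operands, e.g.
 *
 *     xxxxxxxx add_i32      r4, r1, r2
 */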
/* Disassemble TCI bytecode. */
int print_insn_tci(bfd_vma addr, disassemble_info *info)
{
    const uint32_t *tb_ptr = (const void *)(uintptr_t)addr;
    const TCGOpDef *def;
    const char *op_name;
    uint32_t insn;
    TCGOpcode op;
    TCGReg r0, r1, r2, r3, r4, r5;
    tcg_target_ulong i1;
    int32_t s2;
    TCGCond c;
    MemOpIdx oi;
    uint8_t pos, len;
    void *ptr;

    /* TCI is always the host, so we don't need to load indirect. */
    insn = *tb_ptr++;

    info->fprintf_func(info->stream, "%08x ", insn);

    op = extract32(insn, 0, 8);
    def = &tcg_op_defs[op];
    op_name = def->name;

    switch (op) {
    case INDEX_op_br:
    case INDEX_op_exit_tb:
    case INDEX_op_goto_tb:
        tci_args_l(insn, tb_ptr, &ptr);
        info->fprintf_func(info->stream, "%-12s %p", op_name, ptr);
        break;

    case INDEX_op_goto_ptr:
        tci_args_r(insn, &r0);
        info->fprintf_func(info->stream, "%-12s %s", op_name, str_r(r0));
        break;

    case INDEX_op_call:
        tci_args_nl(insn, tb_ptr, &len, &ptr);
        info->fprintf_func(info->stream, "%-12s %d, %p", op_name, len, ptr);
        break;

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        tci_args_rl(insn, tb_ptr, &r0, &ptr);
        info->fprintf_func(info->stream, "%-12s %s, 0, ne, %p",
                           op_name, str_r(r0), ptr);
        break;

    case INDEX_op_setcond_i32:
    case INDEX_op_setcond_i64:
        tci_args_rrrc(insn, &r0, &r1, &r2, &c);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s",
                           op_name, str_r(r0), str_r(r1), str_r(r2), str_c(c));
        break;

    case INDEX_op_tci_movi:
        tci_args_ri(insn, &r0, &i1);
        info->fprintf_func(info->stream, "%-12s %s, 0x%" TCG_PRIlx,
                           op_name, str_r(r0), i1);
        break;

    case INDEX_op_tci_movl:
        tci_args_rl(insn, tb_ptr, &r0, &ptr);
        info->fprintf_func(info->stream, "%-12s %s, %p",
                           op_name, str_r(r0), ptr);
        break;

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i32:
    case INDEX_op_ld_i64:
    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i32:
    case INDEX_op_st_i64:
        tci_args_rrs(insn, &r0, &r1, &s2);
        info->fprintf_func(info->stream, "%-12s %s, %s, %d",
                           op_name, str_r(r0), str_r(r1), s2);
        break;

    case INDEX_op_mov_i32:
    case INDEX_op_mov_i64:
    case INDEX_op_ext8s_i32:
    case INDEX_op_ext8s_i64:
    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
    case INDEX_op_ext16u_i32:
    case INDEX_op_ext32s_i64:
    case INDEX_op_ext32u_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap16_i64:
    case INDEX_op_bswap32_i32:
    case INDEX_op_bswap32_i64:
    case INDEX_op_bswap64_i64:
    case INDEX_op_not_i32:
    case INDEX_op_not_i64:
    case INDEX_op_neg_i32:
    case INDEX_op_neg_i64:
    case INDEX_op_ctpop_i32:
    case INDEX_op_ctpop_i64:
        tci_args_rr(insn, &r0, &r1);
        info->fprintf_func(info->stream, "%-12s %s, %s",
                           op_name, str_r(r0), str_r(r1));
        break;
    case INDEX_op_add_i32:
    case INDEX_op_add_i64:
    case INDEX_op_sub_i32:
    case INDEX_op_sub_i64:
    case INDEX_op_mul_i32:
    case INDEX_op_mul_i64:
    case INDEX_op_and_i32:
    case INDEX_op_and_i64:
    case INDEX_op_or_i32:
    case INDEX_op_or_i64:
    case INDEX_op_xor_i32:
    case INDEX_op_xor_i64:
    case INDEX_op_andc_i32:
    case INDEX_op_andc_i64:
    case INDEX_op_orc_i32:
    case INDEX_op_orc_i64:
    case INDEX_op_eqv_i32:
    case INDEX_op_eqv_i64:
    case INDEX_op_nand_i32:
    case INDEX_op_nand_i64:
    case INDEX_op_nor_i32:
    case INDEX_op_nor_i64:
    case INDEX_op_div_i32:
    case INDEX_op_div_i64:
    case INDEX_op_rem_i32:
    case INDEX_op_rem_i64:
    case INDEX_op_divu_i32:
    case INDEX_op_divu_i64:
    case INDEX_op_remu_i32:
    case INDEX_op_remu_i64:
    case INDEX_op_shl_i32:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i32:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i32:
    case INDEX_op_sar_i64:
    case INDEX_op_rotl_i32:
    case INDEX_op_rotl_i64:
    case INDEX_op_rotr_i32:
    case INDEX_op_rotr_i64:
    case INDEX_op_clz_i32:
    case INDEX_op_clz_i64:
    case INDEX_op_ctz_i32:
    case INDEX_op_ctz_i64:
        tci_args_rrr(insn, &r0, &r1, &r2);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s",
                           op_name, str_r(r0), str_r(r1), str_r(r2));
        break;

    case INDEX_op_deposit_i32:
    case INDEX_op_deposit_i64:
        tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %d, %d",
                           op_name, str_r(r0), str_r(r1), str_r(r2), pos, len);
        break;

    case INDEX_op_extract_i32:
    case INDEX_op_extract_i64:
    case INDEX_op_sextract_i32:
    case INDEX_op_sextract_i64:
        tci_args_rrbb(insn, &r0, &r1, &pos, &len);
        info->fprintf_func(info->stream, "%-12s %s,%s,%d,%d",
                           op_name, str_r(r0), str_r(r1), pos, len);
        break;

    case INDEX_op_movcond_i32:
    case INDEX_op_movcond_i64:
    case INDEX_op_setcond2_i32:
        tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &c);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s, %s",
                           op_name, str_r(r0), str_r(r1), str_r(r2),
                           str_r(r3), str_r(r4), str_c(c));
        break;

    case INDEX_op_mulu2_i32:
    case INDEX_op_mulu2_i64:
    case INDEX_op_muls2_i32:
    case INDEX_op_muls2_i64:
        tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s",
                           op_name, str_r(r0), str_r(r1),
                           str_r(r2), str_r(r3));
        break;

    case INDEX_op_add2_i32:
    case INDEX_op_add2_i64:
    case INDEX_op_sub2_i32:
    case INDEX_op_sub2_i64:
        tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s, %s",
                           op_name, str_r(r0), str_r(r1), str_r(r2),
                           str_r(r3), str_r(r4), str_r(r5));
        break;
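    /*
     * The qemu_ld/qemu_st cases below reconstruct the operand count
     * from the build configuration: one slot per value word (two when a
     * 64-bit value needs a 32-bit register pair) plus the words needed
     * for the guest address, mirroring how the interpreter chose its
     * unpacking helper.  E.g. a 32-bit host with a 64-bit guest gives
     * len == 4 for qemu_ld_i64.
     */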
    case INDEX_op_qemu_ld_i64:
    case INDEX_op_qemu_st_i64:
        len = DIV_ROUND_UP(64, TCG_TARGET_REG_BITS);
        goto do_qemu_ldst;
    case INDEX_op_qemu_ld_i32:
    case INDEX_op_qemu_st_i32:
        len = 1;
    do_qemu_ldst:
        len += DIV_ROUND_UP(TARGET_LONG_BITS, TCG_TARGET_REG_BITS);
        switch (len) {
        case 2:
            tci_args_rrm(insn, &r0, &r1, &oi);
            info->fprintf_func(info->stream, "%-12s %s, %s, %x",
                               op_name, str_r(r0), str_r(r1), oi);
            break;
        case 3:
            tci_args_rrrm(insn, &r0, &r1, &r2, &oi);
            info->fprintf_func(info->stream, "%-12s %s, %s, %s, %x",
                               op_name, str_r(r0), str_r(r1), str_r(r2), oi);
            break;
        case 4:
            tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4);
            info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s",
                               op_name, str_r(r0), str_r(r1),
                               str_r(r2), str_r(r3), str_r(r4));
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case 0:
        /* tcg_out_nop_fill uses zeros */
        if (insn == 0) {
            info->fprintf_func(info->stream, "align");
            break;
        }
        /* fall through */

    default:
        info->fprintf_func(info->stream, "illegal opcode %d", op);
        break;
    }

    return sizeof(insn);
}