/*
 * Tiny Code Interpreter for QEMU
 *
 * Copyright (c) 2009, 2011, 2016 Stefan Weil
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "tcg/tcg.h"    /* MAX_OPC_PARAM_IARGS */
#include "exec/cpu_ldst.h"
#include "tcg/tcg-op.h"
#include "qemu/compiler.h"
#include <ffi.h>


/*
 * Enable TCI assertions only when debugging TCG (and without NDEBUG defined).
 * Without assertions, the interpreter runs much faster.
 */
#if defined(CONFIG_DEBUG_TCG)
# define tci_assert(cond) assert(cond)
#else
# define tci_assert(cond) ((void)(cond))
#endif

__thread uintptr_t tci_tb_ptr;

static void tci_write_reg64(tcg_target_ulong *regs, uint32_t high_index,
                            uint32_t low_index, uint64_t value)
{
    regs[low_index] = (uint32_t)value;
    regs[high_index] = value >> 32;
}

/* Create a 64 bit value from two 32 bit values. */
static uint64_t tci_uint64(uint32_t high, uint32_t low)
{
    return ((uint64_t)high << 32) + low;
}

/*
 * Load sets of arguments all at once.  The naming convention is:
 *   tci_args_<arguments>
 * where arguments is a sequence of
 *
 *   b = immediate (bit position)
 *   c = condition (TCGCond)
 *   i = immediate (uint32_t)
 *   I = immediate (tcg_target_ulong)
 *   l = label or pointer
 *   m = immediate (TCGMemOpIdx)
 *   n = immediate (call return length)
 *   r = register
 *   s = signed ldst offset
 */

static void tci_args_l(uint32_t insn, const void *tb_ptr, void **l0)
{
    int diff = sextract32(insn, 12, 20);

    *l0 = diff ? (void *)tb_ptr + diff : NULL;
}
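
/*
 * A sketch of the label encoding above, for illustration only: the
 * target is a signed 20-bit *byte* displacement held in bits [12,32)
 * of the instruction word.  It is applied to tb_ptr as passed in here,
 * which the interpreter has already advanced past the current word, so
 * e.g. diff == 8 lands two 32-bit bytecode words beyond that point.
 * A displacement of 0 is treated as "no target" and decodes to NULL.
 */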

static void tci_args_r(uint32_t insn, TCGReg *r0)
{
    *r0 = extract32(insn, 8, 4);
}

static void tci_args_nl(uint32_t insn, const void *tb_ptr,
                        uint8_t *n0, void **l1)
{
    *n0 = extract32(insn, 8, 4);
    *l1 = sextract32(insn, 12, 20) + (void *)tb_ptr;
}

static void tci_args_rl(uint32_t insn, const void *tb_ptr,
                        TCGReg *r0, void **l1)
{
    *r0 = extract32(insn, 8, 4);
    *l1 = sextract32(insn, 12, 20) + (void *)tb_ptr;
}

static void tci_args_rr(uint32_t insn, TCGReg *r0, TCGReg *r1)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
}

static void tci_args_ri(uint32_t insn, TCGReg *r0, tcg_target_ulong *i1)
{
    *r0 = extract32(insn, 8, 4);
    *i1 = sextract32(insn, 12, 20);
}

static void tci_args_rrm(uint32_t insn, TCGReg *r0,
                         TCGReg *r1, TCGMemOpIdx *m2)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *m2 = extract32(insn, 20, 12);
}

static void tci_args_rrr(uint32_t insn, TCGReg *r0, TCGReg *r1, TCGReg *r2)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
}

static void tci_args_rrs(uint32_t insn, TCGReg *r0, TCGReg *r1, int32_t *i2)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *i2 = sextract32(insn, 16, 16);
}

static void tci_args_rrbb(uint32_t insn, TCGReg *r0, TCGReg *r1,
                          uint8_t *i2, uint8_t *i3)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *i2 = extract32(insn, 16, 6);
    *i3 = extract32(insn, 22, 6);
}

static void tci_args_rrrc(uint32_t insn,
                          TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGCond *c3)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *c3 = extract32(insn, 20, 4);
}

static void tci_args_rrrm(uint32_t insn,
                          TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGMemOpIdx *m3)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *m3 = extract32(insn, 20, 12);
}

static void tci_args_rrrbb(uint32_t insn, TCGReg *r0, TCGReg *r1,
                           TCGReg *r2, uint8_t *i3, uint8_t *i4)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *i3 = extract32(insn, 20, 6);
    *i4 = extract32(insn, 26, 6);
}

static void tci_args_rrrrr(uint32_t insn, TCGReg *r0, TCGReg *r1,
                           TCGReg *r2, TCGReg *r3, TCGReg *r4)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *r3 = extract32(insn, 20, 4);
    *r4 = extract32(insn, 24, 4);
}

static void tci_args_rrrr(uint32_t insn,
                          TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGReg *r3)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *r3 = extract32(insn, 20, 4);
}

static void tci_args_rrrrrc(uint32_t insn, TCGReg *r0, TCGReg *r1,
                            TCGReg *r2, TCGReg *r3, TCGReg *r4, TCGCond *c5)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *r3 = extract32(insn, 20, 4);
    *r4 = extract32(insn, 24, 4);
    *c5 = extract32(insn, 28, 4);
}

static void tci_args_rrrrrr(uint32_t insn, TCGReg *r0, TCGReg *r1,
                            TCGReg *r2, TCGReg *r3, TCGReg *r4, TCGReg *r5)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *r3 = extract32(insn, 20, 4);
    *r4 = extract32(insn, 24, 4);
    *r5 = extract32(insn, 28, 4);
}
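
/*
 * Worked example (hypothetical values, not taken from real bytecode):
 * with opcode byte OP, packing r0 = 1, r1 = 2, r2 = 3 gives
 *
 *   insn = (3 << 16) | (2 << 12) | (1 << 8) | OP
 *
 * and tci_args_rrr() above recovers exactly those register numbers.
 */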

static bool tci_compare32(uint32_t u0, uint32_t u1, TCGCond condition)
{
    bool result = false;
    int32_t i0 = u0;
    int32_t i1 = u1;

    switch (condition) {
    case TCG_COND_EQ:
        result = (u0 == u1);
        break;
    case TCG_COND_NE:
        result = (u0 != u1);
        break;
    case TCG_COND_LT:
        result = (i0 < i1);
        break;
    case TCG_COND_GE:
        result = (i0 >= i1);
        break;
    case TCG_COND_LE:
        result = (i0 <= i1);
        break;
    case TCG_COND_GT:
        result = (i0 > i1);
        break;
    case TCG_COND_LTU:
        result = (u0 < u1);
        break;
    case TCG_COND_GEU:
        result = (u0 >= u1);
        break;
    case TCG_COND_LEU:
        result = (u0 <= u1);
        break;
    case TCG_COND_GTU:
        result = (u0 > u1);
        break;
    default:
        g_assert_not_reached();
    }
    return result;
}

static bool tci_compare64(uint64_t u0, uint64_t u1, TCGCond condition)
{
    bool result = false;
    int64_t i0 = u0;
    int64_t i1 = u1;

    switch (condition) {
    case TCG_COND_EQ:
        result = (u0 == u1);
        break;
    case TCG_COND_NE:
        result = (u0 != u1);
        break;
    case TCG_COND_LT:
        result = (i0 < i1);
        break;
    case TCG_COND_GE:
        result = (i0 >= i1);
        break;
    case TCG_COND_LE:
        result = (i0 <= i1);
        break;
    case TCG_COND_GT:
        result = (i0 > i1);
        break;
    case TCG_COND_LTU:
        result = (u0 < u1);
        break;
    case TCG_COND_GEU:
        result = (u0 >= u1);
        break;
    case TCG_COND_LEU:
        result = (u0 <= u1);
        break;
    case TCG_COND_GTU:
        result = (u0 > u1);
        break;
    default:
        g_assert_not_reached();
    }
    return result;
}
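
/*
 * Example of the signed/unsigned split above, for illustration: with
 * u0 = 1 and u1 = 0xffffffff, TCG_COND_LT is false (as int32_t this
 * compares 1 < -1), while TCG_COND_LTU is true (1 < 4294967295).
 */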

static uint64_t tci_qemu_ld(CPUArchState *env, target_ulong taddr,
                            TCGMemOpIdx oi, const void *tb_ptr)
{
    MemOp mop = get_memop(oi) & (MO_BSWAP | MO_SSIZE);
    uintptr_t ra = (uintptr_t)tb_ptr;

#ifdef CONFIG_SOFTMMU
    switch (mop) {
    case MO_UB:
        return helper_ret_ldub_mmu(env, taddr, oi, ra);
    case MO_SB:
        return helper_ret_ldsb_mmu(env, taddr, oi, ra);
    case MO_LEUW:
        return helper_le_lduw_mmu(env, taddr, oi, ra);
    case MO_LESW:
        return helper_le_ldsw_mmu(env, taddr, oi, ra);
    case MO_LEUL:
        return helper_le_ldul_mmu(env, taddr, oi, ra);
    case MO_LESL:
        return helper_le_ldsl_mmu(env, taddr, oi, ra);
    case MO_LEQ:
        return helper_le_ldq_mmu(env, taddr, oi, ra);
    case MO_BEUW:
        return helper_be_lduw_mmu(env, taddr, oi, ra);
    case MO_BESW:
        return helper_be_ldsw_mmu(env, taddr, oi, ra);
    case MO_BEUL:
        return helper_be_ldul_mmu(env, taddr, oi, ra);
    case MO_BESL:
        return helper_be_ldsl_mmu(env, taddr, oi, ra);
    case MO_BEQ:
        return helper_be_ldq_mmu(env, taddr, oi, ra);
    default:
        g_assert_not_reached();
    }
#else
    void *haddr = g2h(env_cpu(env), taddr);
    uint64_t ret;

    set_helper_retaddr(ra);
    switch (mop) {
    case MO_UB:
        ret = ldub_p(haddr);
        break;
    case MO_SB:
        ret = ldsb_p(haddr);
        break;
    case MO_LEUW:
        ret = lduw_le_p(haddr);
        break;
    case MO_LESW:
        ret = ldsw_le_p(haddr);
        break;
    case MO_LEUL:
        ret = (uint32_t)ldl_le_p(haddr);
        break;
    case MO_LESL:
        ret = (int32_t)ldl_le_p(haddr);
        break;
    case MO_LEQ:
        ret = ldq_le_p(haddr);
        break;
    case MO_BEUW:
        ret = lduw_be_p(haddr);
        break;
    case MO_BESW:
        ret = ldsw_be_p(haddr);
        break;
    case MO_BEUL:
        ret = (uint32_t)ldl_be_p(haddr);
        break;
    case MO_BESL:
        ret = (int32_t)ldl_be_p(haddr);
        break;
    case MO_BEQ:
        ret = ldq_be_p(haddr);
        break;
    default:
        g_assert_not_reached();
    }
    clear_helper_retaddr();
    return ret;
#endif
}

static void tci_qemu_st(CPUArchState *env, target_ulong taddr, uint64_t val,
                        TCGMemOpIdx oi, const void *tb_ptr)
{
    MemOp mop = get_memop(oi) & (MO_BSWAP | MO_SSIZE);
    uintptr_t ra = (uintptr_t)tb_ptr;

#ifdef CONFIG_SOFTMMU
    switch (mop) {
    case MO_UB:
        helper_ret_stb_mmu(env, taddr, val, oi, ra);
        break;
    case MO_LEUW:
        helper_le_stw_mmu(env, taddr, val, oi, ra);
        break;
    case MO_LEUL:
        helper_le_stl_mmu(env, taddr, val, oi, ra);
        break;
    case MO_LEQ:
        helper_le_stq_mmu(env, taddr, val, oi, ra);
        break;
    case MO_BEUW:
        helper_be_stw_mmu(env, taddr, val, oi, ra);
        break;
    case MO_BEUL:
        helper_be_stl_mmu(env, taddr, val, oi, ra);
        break;
    case MO_BEQ:
        helper_be_stq_mmu(env, taddr, val, oi, ra);
        break;
    default:
        g_assert_not_reached();
    }
#else
    void *haddr = g2h(env_cpu(env), taddr);

    set_helper_retaddr(ra);
    switch (mop) {
    case MO_UB:
        stb_p(haddr, val);
        break;
    case MO_LEUW:
        stw_le_p(haddr, val);
        break;
    case MO_LEUL:
        stl_le_p(haddr, val);
        break;
    case MO_LEQ:
        stq_le_p(haddr, val);
        break;
    case MO_BEUW:
        stw_be_p(haddr, val);
        break;
    case MO_BEUL:
        stl_be_p(haddr, val);
        break;
    case MO_BEQ:
        stq_be_p(haddr, val);
        break;
    default:
        g_assert_not_reached();
    }
    clear_helper_retaddr();
#endif
}
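
/*
 * Background note: a TCGMemOpIdx packs a MemOp (access size, signedness
 * and endianness, e.g. MO_LEUW is a little-endian unsigned 16-bit access)
 * together with an MMU index; get_memop() above recovers the MemOp half,
 * and the MO_BSWAP | MO_SSIZE mask keeps just the bits dispatched on.
 */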

#if TCG_TARGET_REG_BITS == 64
# define CASE_32_64(x)                          \
        case glue(glue(INDEX_op_, x), _i64):    \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x)                             \
        case glue(glue(INDEX_op_, x), _i64):
#else
# define CASE_32_64(x)                          \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x)
#endif

/* Interpret pseudo code in tb. */
/*
 * Disable CFI checks.
 * One possible operation in the pseudo code is a call to binary code.
 * Therefore, disable CFI checks in the interpreter function.
 */
uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
                                            const void *v_tb_ptr)
{
    const uint32_t *tb_ptr = v_tb_ptr;
    tcg_target_ulong regs[TCG_TARGET_NB_REGS];
    uint64_t stack[(TCG_STATIC_CALL_ARGS_SIZE + TCG_STATIC_FRAME_SIZE)
                   / sizeof(uint64_t)];
    void *call_slots[TCG_STATIC_CALL_ARGS_SIZE / sizeof(uint64_t)];

    regs[TCG_AREG0] = (tcg_target_ulong)env;
    regs[TCG_REG_CALL_STACK] = (uintptr_t)stack;
    /* Other call_slots entries initialized at first use (see below). */
    call_slots[0] = NULL;
    tci_assert(tb_ptr);

    for (;;) {
        uint32_t insn;
        TCGOpcode opc;
        TCGReg r0, r1, r2, r3, r4, r5;
        tcg_target_ulong t1;
        TCGCond condition;
        target_ulong taddr;
        uint8_t pos, len;
        uint32_t tmp32;
        uint64_t tmp64;
        uint64_t T1, T2;
        TCGMemOpIdx oi;
        int32_t ofs;
        void *ptr;

        insn = *tb_ptr++;
        opc = extract32(insn, 0, 8);

        switch (opc) {
        case INDEX_op_call:
            /*
             * Set up the ffi_avalue array once, delayed until now
             * because many TBs do not make any calls.  In tcg_gen_callN,
             * we arranged for every real argument to be "left-aligned"
             * in each 64-bit slot.
             */
            if (unlikely(call_slots[0] == NULL)) {
                for (int i = 0; i < ARRAY_SIZE(call_slots); ++i) {
                    call_slots[i] = &stack[i];
                }
            }

            tci_args_nl(insn, tb_ptr, &len, &ptr);

            /* Helper functions may need to access the "return address" */
            tci_tb_ptr = (uintptr_t)tb_ptr;

            {
                void **pptr = ptr;
                ffi_call(pptr[1], pptr[0], stack, call_slots);
            }

            /* Any result winds up "left-aligned" in the stack[0] slot. */
            switch (len) {
            case 0: /* void */
                break;
            case 1: /* uint32_t */
                /*
                 * Note that libffi has an odd special case in that it will
                 * always widen an integral result to ffi_arg.
                 */
                if (sizeof(ffi_arg) == 4) {
                    regs[TCG_REG_R0] = *(uint32_t *)stack;
                    break;
                }
                /* fall through */
            case 2: /* uint64_t */
                if (TCG_TARGET_REG_BITS == 32) {
                    tci_write_reg64(regs, TCG_REG_R1, TCG_REG_R0, stack[0]);
                } else {
                    regs[TCG_REG_R0] = stack[0];
                }
                break;
            default:
                g_assert_not_reached();
            }
            break;
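
        /*
         * Aside on the call descriptor (layout inferred from the
         * ffi_call() above and its cif-first prototype, so treat it as
         * an assumption rather than a spec): 'ptr' names a two-pointer
         * tuple with pptr[0] = the helper's native address and
         * pptr[1] = a prebuilt ffi_cif describing its signature,
         * matching ffi_call(cif, fn, rvalue, avalue).
         */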

        case INDEX_op_br:
            tci_args_l(insn, tb_ptr, &ptr);
            tb_ptr = ptr;
            continue;
        case INDEX_op_setcond_i32:
            tci_args_rrrc(insn, &r0, &r1, &r2, &condition);
            regs[r0] = tci_compare32(regs[r1], regs[r2], condition);
            break;
        case INDEX_op_movcond_i32:
            tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition);
            tmp32 = tci_compare32(regs[r1], regs[r2], condition);
            regs[r0] = regs[tmp32 ? r3 : r4];
            break;
#if TCG_TARGET_REG_BITS == 32
        case INDEX_op_setcond2_i32:
            tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition);
            T1 = tci_uint64(regs[r2], regs[r1]);
            T2 = tci_uint64(regs[r4], regs[r3]);
            regs[r0] = tci_compare64(T1, T2, condition);
            break;
#elif TCG_TARGET_REG_BITS == 64
        case INDEX_op_setcond_i64:
            tci_args_rrrc(insn, &r0, &r1, &r2, &condition);
            regs[r0] = tci_compare64(regs[r1], regs[r2], condition);
            break;
        case INDEX_op_movcond_i64:
            tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition);
            tmp32 = tci_compare64(regs[r1], regs[r2], condition);
            regs[r0] = regs[tmp32 ? r3 : r4];
            break;
#endif
        CASE_32_64(mov)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = regs[r1];
            break;
        case INDEX_op_tci_movi:
            tci_args_ri(insn, &r0, &t1);
            regs[r0] = t1;
            break;
        case INDEX_op_tci_movl:
            tci_args_rl(insn, tb_ptr, &r0, &ptr);
            regs[r0] = *(tcg_target_ulong *)ptr;
            break;

        /* Load/store operations (mixed 32/64 bit). */

        CASE_32_64(ld8u)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint8_t *)ptr;
            break;
        CASE_32_64(ld8s)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(int8_t *)ptr;
            break;
        CASE_32_64(ld16u)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint16_t *)ptr;
            break;
        CASE_32_64(ld16s)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(int16_t *)ptr;
            break;
        case INDEX_op_ld_i32:
        CASE_64(ld32u)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint32_t *)ptr;
            break;
        CASE_32_64(st8)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint8_t *)ptr = regs[r0];
            break;
        CASE_32_64(st16)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint16_t *)ptr = regs[r0];
            break;
        case INDEX_op_st_i32:
        CASE_64(st32)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint32_t *)ptr = regs[r0];
            break;

        /* Arithmetic operations (mixed 32/64 bit). */

        CASE_32_64(add)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] + regs[r2];
            break;
        CASE_32_64(sub)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] - regs[r2];
            break;
        CASE_32_64(mul)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] * regs[r2];
            break;
        CASE_32_64(and)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] & regs[r2];
            break;
        CASE_32_64(or)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] | regs[r2];
            break;
        CASE_32_64(xor)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] ^ regs[r2];
            break;
#if TCG_TARGET_HAS_andc_i32 || TCG_TARGET_HAS_andc_i64
        CASE_32_64(andc)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] & ~regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_orc_i32 || TCG_TARGET_HAS_orc_i64
        CASE_32_64(orc)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] | ~regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_eqv_i32 || TCG_TARGET_HAS_eqv_i64
        CASE_32_64(eqv)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ~(regs[r1] ^ regs[r2]);
            break;
#endif
#if TCG_TARGET_HAS_nand_i32 || TCG_TARGET_HAS_nand_i64
        CASE_32_64(nand)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ~(regs[r1] & regs[r2]);
            break;
#endif
#if TCG_TARGET_HAS_nor_i32 || TCG_TARGET_HAS_nor_i64
        CASE_32_64(nor)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ~(regs[r1] | regs[r2]);
            break;
#endif

        /* Arithmetic operations (32 bit). */

        case INDEX_op_div_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int32_t)regs[r1] / (int32_t)regs[r2];
            break;
        case INDEX_op_divu_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] / (uint32_t)regs[r2];
            break;
        case INDEX_op_rem_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int32_t)regs[r1] % (int32_t)regs[r2];
            break;
        case INDEX_op_remu_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] % (uint32_t)regs[r2];
            break;
#if TCG_TARGET_HAS_clz_i32
        case INDEX_op_clz_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            tmp32 = regs[r1];
            regs[r0] = tmp32 ? clz32(tmp32) : regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_ctz_i32
        case INDEX_op_ctz_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            tmp32 = regs[r1];
            regs[r0] = tmp32 ? ctz32(tmp32) : regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_ctpop_i32
        case INDEX_op_ctpop_i32:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = ctpop32(regs[r1]);
            break;
#endif

        /* Shift/rotate operations (32 bit). */

        case INDEX_op_shl_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] << (regs[r2] & 31);
            break;
        case INDEX_op_shr_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] >> (regs[r2] & 31);
            break;
        case INDEX_op_sar_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int32_t)regs[r1] >> (regs[r2] & 31);
            break;
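        /*
         * Note on the "& 31" (and "& 63" below) masks: TCG leaves
         * shifts by a count >= the operand width unspecified, so the
         * interpreter masks the count to keep the host C shift
         * well-defined whatever the guest supplies.
         */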
#if TCG_TARGET_HAS_rot_i32
        case INDEX_op_rotl_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = rol32(regs[r1], regs[r2] & 31);
            break;
        case INDEX_op_rotr_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ror32(regs[r1], regs[r2] & 31);
            break;
#endif
#if TCG_TARGET_HAS_deposit_i32
        case INDEX_op_deposit_i32:
            tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
            regs[r0] = deposit32(regs[r1], pos, len, regs[r2]);
            break;
#endif
#if TCG_TARGET_HAS_extract_i32
        case INDEX_op_extract_i32:
            tci_args_rrbb(insn, &r0, &r1, &pos, &len);
            regs[r0] = extract32(regs[r1], pos, len);
            break;
#endif
#if TCG_TARGET_HAS_sextract_i32
        case INDEX_op_sextract_i32:
            tci_args_rrbb(insn, &r0, &r1, &pos, &len);
            regs[r0] = sextract32(regs[r1], pos, len);
            break;
#endif
        case INDEX_op_brcond_i32:
            tci_args_rl(insn, tb_ptr, &r0, &ptr);
            if ((uint32_t)regs[r0]) {
                tb_ptr = ptr;
            }
            break;
#if TCG_TARGET_REG_BITS == 32 || TCG_TARGET_HAS_add2_i32
        case INDEX_op_add2_i32:
            tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
            T1 = tci_uint64(regs[r3], regs[r2]);
            T2 = tci_uint64(regs[r5], regs[r4]);
            tci_write_reg64(regs, r1, r0, T1 + T2);
            break;
#endif
#if TCG_TARGET_REG_BITS == 32 || TCG_TARGET_HAS_sub2_i32
        case INDEX_op_sub2_i32:
            tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
            T1 = tci_uint64(regs[r3], regs[r2]);
            T2 = tci_uint64(regs[r5], regs[r4]);
            tci_write_reg64(regs, r1, r0, T1 - T2);
            break;
#endif
#if TCG_TARGET_HAS_mulu2_i32
        case INDEX_op_mulu2_i32:
            tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
            tmp64 = (uint64_t)(uint32_t)regs[r2] * (uint32_t)regs[r3];
            tci_write_reg64(regs, r1, r0, tmp64);
            break;
#endif
#if TCG_TARGET_HAS_muls2_i32
        case INDEX_op_muls2_i32:
            tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
            tmp64 = (int64_t)(int32_t)regs[r2] * (int32_t)regs[r3];
            tci_write_reg64(regs, r1, r0, tmp64);
            break;
#endif
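        /*
         * Widening-multiply example, for illustration: with
         * regs[r2] = regs[r3] = 0xffffffff, mulu2 computes
         * 0xfffffffe00000001, which tci_write_reg64() splits as high
         * half 0xfffffffe into r1 and low half 0x00000001 into r0.
         */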
#if TCG_TARGET_HAS_ext8s_i32 || TCG_TARGET_HAS_ext8s_i64
        CASE_32_64(ext8s)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (int8_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_ext16s_i32 || TCG_TARGET_HAS_ext16s_i64
        CASE_32_64(ext16s)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (int16_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_ext8u_i32 || TCG_TARGET_HAS_ext8u_i64
        CASE_32_64(ext8u)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (uint8_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_ext16u_i32 || TCG_TARGET_HAS_ext16u_i64
        CASE_32_64(ext16u)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (uint16_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_bswap16_i32 || TCG_TARGET_HAS_bswap16_i64
        CASE_32_64(bswap16)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = bswap16(regs[r1]);
            break;
#endif
#if TCG_TARGET_HAS_bswap32_i32 || TCG_TARGET_HAS_bswap32_i64
        CASE_32_64(bswap32)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = bswap32(regs[r1]);
            break;
#endif
#if TCG_TARGET_HAS_not_i32 || TCG_TARGET_HAS_not_i64
        CASE_32_64(not)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = ~regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_neg_i32 || TCG_TARGET_HAS_neg_i64
        CASE_32_64(neg)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = -regs[r1];
            break;
#endif
#if TCG_TARGET_REG_BITS == 64
        /* Load/store operations (64 bit). */

        case INDEX_op_ld32s_i64:
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(int32_t *)ptr;
            break;
        case INDEX_op_ld_i64:
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint64_t *)ptr;
            break;
        case INDEX_op_st_i64:
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint64_t *)ptr = regs[r0];
            break;

        /* Arithmetic operations (64 bit). */

        case INDEX_op_div_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int64_t)regs[r1] / (int64_t)regs[r2];
            break;
        case INDEX_op_divu_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint64_t)regs[r1] / (uint64_t)regs[r2];
            break;
        case INDEX_op_rem_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int64_t)regs[r1] % (int64_t)regs[r2];
            break;
        case INDEX_op_remu_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint64_t)regs[r1] % (uint64_t)regs[r2];
            break;
#if TCG_TARGET_HAS_clz_i64
        case INDEX_op_clz_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] ? clz64(regs[r1]) : regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_ctz_i64
        case INDEX_op_ctz_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] ? ctz64(regs[r1]) : regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_ctpop_i64
        case INDEX_op_ctpop_i64:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = ctpop64(regs[r1]);
            break;
#endif
#if TCG_TARGET_HAS_mulu2_i64
        case INDEX_op_mulu2_i64:
            tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
            mulu64(&regs[r0], &regs[r1], regs[r2], regs[r3]);
            break;
#endif
#if TCG_TARGET_HAS_muls2_i64
        case INDEX_op_muls2_i64:
            tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
            muls64(&regs[r0], &regs[r1], regs[r2], regs[r3]);
            break;
#endif
#if TCG_TARGET_HAS_add2_i64
        case INDEX_op_add2_i64:
            tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
            T1 = regs[r2] + regs[r4];
            T2 = regs[r3] + regs[r5] + (T1 < regs[r2]);
            regs[r0] = T1;
            regs[r1] = T2;
            break;
#endif
#if TCG_TARGET_HAS_sub2_i64
        case INDEX_op_sub2_i64:
            tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
            T1 = regs[r2] - regs[r4];
            T2 = regs[r3] - regs[r5] - (regs[r2] < regs[r4]);
            regs[r0] = T1;
            regs[r1] = T2;
            break;
#endif
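        /*
         * Why the add2/sub2 carry logic above works: the low halves are
         * added modulo 2^64, and unsigned overflow occurred iff the
         * truncated sum T1 is smaller than one of its addends, so
         * (T1 < regs[r2]) is exactly the carry into the high half;
         * (regs[r2] < regs[r4]) is the mirrored borrow test for sub2.
         */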

        /* Shift/rotate operations (64 bit). */

        case INDEX_op_shl_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] << (regs[r2] & 63);
            break;
        case INDEX_op_shr_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] >> (regs[r2] & 63);
            break;
        case INDEX_op_sar_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int64_t)regs[r1] >> (regs[r2] & 63);
            break;
#if TCG_TARGET_HAS_rot_i64
        case INDEX_op_rotl_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = rol64(regs[r1], regs[r2] & 63);
            break;
        case INDEX_op_rotr_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ror64(regs[r1], regs[r2] & 63);
            break;
#endif
#if TCG_TARGET_HAS_deposit_i64
        case INDEX_op_deposit_i64:
            tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
            regs[r0] = deposit64(regs[r1], pos, len, regs[r2]);
            break;
#endif
#if TCG_TARGET_HAS_extract_i64
        case INDEX_op_extract_i64:
            tci_args_rrbb(insn, &r0, &r1, &pos, &len);
            regs[r0] = extract64(regs[r1], pos, len);
            break;
#endif
#if TCG_TARGET_HAS_sextract_i64
        case INDEX_op_sextract_i64:
            tci_args_rrbb(insn, &r0, &r1, &pos, &len);
            regs[r0] = sextract64(regs[r1], pos, len);
            break;
#endif
        case INDEX_op_brcond_i64:
            tci_args_rl(insn, tb_ptr, &r0, &ptr);
            if (regs[r0]) {
                tb_ptr = ptr;
            }
            break;
        case INDEX_op_ext32s_i64:
        case INDEX_op_ext_i32_i64:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (int32_t)regs[r1];
            break;
        case INDEX_op_ext32u_i64:
        case INDEX_op_extu_i32_i64:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (uint32_t)regs[r1];
            break;
#if TCG_TARGET_HAS_bswap64_i64
        case INDEX_op_bswap64_i64:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = bswap64(regs[r1]);
            break;
#endif
#endif /* TCG_TARGET_REG_BITS == 64 */

        /* QEMU specific operations. */

        case INDEX_op_exit_tb:
            tci_args_l(insn, tb_ptr, &ptr);
            return (uintptr_t)ptr;

        case INDEX_op_goto_tb:
            tci_args_l(insn, tb_ptr, &ptr);
            tb_ptr = *(void **)ptr;
            break;

        case INDEX_op_goto_ptr:
            tci_args_r(insn, &r0);
            ptr = (void *)regs[r0];
            if (!ptr) {
                return 0;
            }
            tb_ptr = ptr;
            break;
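
        /*
         * Note on the two ops above: goto_tb loads its destination
         * through a pointer rather than embedding it, so the stored
         * target can be repatched when TBs are chained together, and
         * goto_ptr returning 0 hands control back to the main loop
         * when no next TB has been looked up.
         */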

        case INDEX_op_qemu_ld_i32:
            if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                taddr = regs[r1];
            } else {
                tci_args_rrrm(insn, &r0, &r1, &r2, &oi);
                taddr = tci_uint64(regs[r2], regs[r1]);
            }
            tmp32 = tci_qemu_ld(env, taddr, oi, tb_ptr);
            regs[r0] = tmp32;
            break;

        case INDEX_op_qemu_ld_i64:
            if (TCG_TARGET_REG_BITS == 64) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                taddr = regs[r1];
            } else if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) {
                tci_args_rrrm(insn, &r0, &r1, &r2, &oi);
                taddr = regs[r2];
            } else {
                tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4);
                taddr = tci_uint64(regs[r3], regs[r2]);
                oi = regs[r4];
            }
            tmp64 = tci_qemu_ld(env, taddr, oi, tb_ptr);
            if (TCG_TARGET_REG_BITS == 32) {
                tci_write_reg64(regs, r1, r0, tmp64);
            } else {
                regs[r0] = tmp64;
            }
            break;

        case INDEX_op_qemu_st_i32:
            if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                taddr = regs[r1];
            } else {
                tci_args_rrrm(insn, &r0, &r1, &r2, &oi);
                taddr = tci_uint64(regs[r2], regs[r1]);
            }
            tmp32 = regs[r0];
            tci_qemu_st(env, taddr, tmp32, oi, tb_ptr);
            break;

        case INDEX_op_qemu_st_i64:
            if (TCG_TARGET_REG_BITS == 64) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                taddr = regs[r1];
                tmp64 = regs[r0];
            } else {
                if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) {
                    tci_args_rrrm(insn, &r0, &r1, &r2, &oi);
                    taddr = regs[r2];
                } else {
                    tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4);
                    taddr = tci_uint64(regs[r3], regs[r2]);
                    oi = regs[r4];
                }
                tmp64 = tci_uint64(regs[r1], regs[r0]);
            }
            tci_qemu_st(env, taddr, tmp64, oi, tb_ptr);
            break;

        case INDEX_op_mb:
            /* Ensure ordering for all kinds */
            smp_mb();
            break;
        default:
            g_assert_not_reached();
        }
    }
}

/*
 * Disassembler that matches the interpreter
 */

static const char *str_r(TCGReg r)
{
    static const char regs[TCG_TARGET_NB_REGS][4] = {
        "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
        "r8", "r9", "r10", "r11", "r12", "r13", "env", "sp"
    };

    QEMU_BUILD_BUG_ON(TCG_AREG0 != TCG_REG_R14);
    QEMU_BUILD_BUG_ON(TCG_REG_CALL_STACK != TCG_REG_R15);

    assert((unsigned)r < TCG_TARGET_NB_REGS);
    return regs[r];
}

static const char *str_c(TCGCond c)
{
    static const char cond[16][8] = {
        [TCG_COND_NEVER] = "never",
        [TCG_COND_ALWAYS] = "always",
        [TCG_COND_EQ] = "eq",
        [TCG_COND_NE] = "ne",
        [TCG_COND_LT] = "lt",
        [TCG_COND_GE] = "ge",
        [TCG_COND_LE] = "le",
        [TCG_COND_GT] = "gt",
        [TCG_COND_LTU] = "ltu",
        [TCG_COND_GEU] = "geu",
        [TCG_COND_LEU] = "leu",
        [TCG_COND_GTU] = "gtu",
    };

    assert((unsigned)c < ARRAY_SIZE(cond));
    assert(cond[c][0] != 0);
    return cond[c];
}

/* Disassemble TCI bytecode. */
int print_insn_tci(bfd_vma addr, disassemble_info *info)
{
    const uint32_t *tb_ptr = (const void *)(uintptr_t)addr;
    const TCGOpDef *def;
    const char *op_name;
    uint32_t insn;
    TCGOpcode op;
    TCGReg r0, r1, r2, r3, r4, r5;
    tcg_target_ulong i1;
    int32_t s2;
    TCGCond c;
    TCGMemOpIdx oi;
    uint8_t pos, len;
    void *ptr;

    /* TCI is always the host, so we don't need to load indirect. */
    insn = *tb_ptr++;

    info->fprintf_func(info->stream, "%08x ", insn);

    op = extract32(insn, 0, 8);
    def = &tcg_op_defs[op];
    op_name = def->name;

    switch (op) {
    case INDEX_op_br:
    case INDEX_op_exit_tb:
    case INDEX_op_goto_tb:
        tci_args_l(insn, tb_ptr, &ptr);
        info->fprintf_func(info->stream, "%-12s %p", op_name, ptr);
        break;

    case INDEX_op_goto_ptr:
        tci_args_r(insn, &r0);
        info->fprintf_func(info->stream, "%-12s %s", op_name, str_r(r0));
        break;

    case INDEX_op_call:
        tci_args_nl(insn, tb_ptr, &len, &ptr);
        info->fprintf_func(info->stream, "%-12s %d, %p", op_name, len, ptr);
        break;

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        tci_args_rl(insn, tb_ptr, &r0, &ptr);
        info->fprintf_func(info->stream, "%-12s %s, 0, ne, %p",
                           op_name, str_r(r0), ptr);
        break;

    case INDEX_op_setcond_i32:
    case INDEX_op_setcond_i64:
        tci_args_rrrc(insn, &r0, &r1, &r2, &c);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s",
                           op_name, str_r(r0), str_r(r1), str_r(r2), str_c(c));
        break;

    case INDEX_op_tci_movi:
        tci_args_ri(insn, &r0, &i1);
        info->fprintf_func(info->stream, "%-12s %s, 0x%" TCG_PRIlx,
                           op_name, str_r(r0), i1);
        break;

    case INDEX_op_tci_movl:
        tci_args_rl(insn, tb_ptr, &r0, &ptr);
        info->fprintf_func(info->stream, "%-12s %s, %p",
                           op_name, str_r(r0), ptr);
        break;

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i32:
    case INDEX_op_ld_i64:
    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i32:
    case INDEX_op_st_i64:
        tci_args_rrs(insn, &r0, &r1, &s2);
        info->fprintf_func(info->stream, "%-12s %s, %s, %d",
                           op_name, str_r(r0), str_r(r1), s2);
        break;

    case INDEX_op_mov_i32:
    case INDEX_op_mov_i64:
    case INDEX_op_ext8s_i32:
    case INDEX_op_ext8s_i64:
    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
    case INDEX_op_ext16u_i32:
    case INDEX_op_ext16u_i64:
    case INDEX_op_ext32s_i64:
    case INDEX_op_ext32u_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap16_i64:
    case INDEX_op_bswap32_i32:
    case INDEX_op_bswap32_i64:
    case INDEX_op_bswap64_i64:
    case INDEX_op_not_i32:
    case INDEX_op_not_i64:
    case INDEX_op_neg_i32:
    case INDEX_op_neg_i64:
    case INDEX_op_ctpop_i32:
    case INDEX_op_ctpop_i64:
        tci_args_rr(insn, &r0, &r1);
        info->fprintf_func(info->stream, "%-12s %s, %s",
                           op_name, str_r(r0), str_r(r1));
        break;

    case INDEX_op_add_i32:
    case INDEX_op_add_i64:
    case INDEX_op_sub_i32:
    case INDEX_op_sub_i64:
    case INDEX_op_mul_i32:
    case INDEX_op_mul_i64:
    case INDEX_op_and_i32:
    case INDEX_op_and_i64:
    case INDEX_op_or_i32:
    case INDEX_op_or_i64:
    case INDEX_op_xor_i32:
    case INDEX_op_xor_i64:
    case INDEX_op_andc_i32:
    case INDEX_op_andc_i64:
    case INDEX_op_orc_i32:
    case INDEX_op_orc_i64:
    case INDEX_op_eqv_i32:
    case INDEX_op_eqv_i64:
    case INDEX_op_nand_i32:
    case INDEX_op_nand_i64:
    case INDEX_op_nor_i32:
    case INDEX_op_nor_i64:
    case INDEX_op_div_i32:
    case INDEX_op_div_i64:
    case INDEX_op_rem_i32:
    case INDEX_op_rem_i64:
    case INDEX_op_divu_i32:
    case INDEX_op_divu_i64:
    case INDEX_op_remu_i32:
    case INDEX_op_remu_i64:
    case INDEX_op_shl_i32:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i32:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i32:
    case INDEX_op_sar_i64:
    case INDEX_op_rotl_i32:
    case INDEX_op_rotl_i64:
    case INDEX_op_rotr_i32:
    case INDEX_op_rotr_i64:
    case INDEX_op_clz_i32:
    case INDEX_op_clz_i64:
    case INDEX_op_ctz_i32:
    case INDEX_op_ctz_i64:
        tci_args_rrr(insn, &r0, &r1, &r2);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s",
                           op_name, str_r(r0), str_r(r1), str_r(r2));
        break;

    case INDEX_op_deposit_i32:
    case INDEX_op_deposit_i64:
        tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %d, %d",
                           op_name, str_r(r0), str_r(r1), str_r(r2), pos, len);
        break;

    case INDEX_op_extract_i32:
    case INDEX_op_extract_i64:
    case INDEX_op_sextract_i32:
    case INDEX_op_sextract_i64:
        tci_args_rrbb(insn, &r0, &r1, &pos, &len);
        info->fprintf_func(info->stream, "%-12s %s, %s, %d, %d",
                           op_name, str_r(r0), str_r(r1), pos, len);
        break;

    case INDEX_op_movcond_i32:
    case INDEX_op_movcond_i64:
    case INDEX_op_setcond2_i32:
        tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &c);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s, %s",
                           op_name, str_r(r0), str_r(r1), str_r(r2),
                           str_r(r3), str_r(r4), str_c(c));
        break;

    case INDEX_op_mulu2_i32:
    case INDEX_op_mulu2_i64:
    case INDEX_op_muls2_i32:
    case INDEX_op_muls2_i64:
        tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s",
                           op_name, str_r(r0), str_r(r1),
                           str_r(r2), str_r(r3));
        break;

    case INDEX_op_add2_i32:
    case INDEX_op_add2_i64:
    case INDEX_op_sub2_i32:
    case INDEX_op_sub2_i64:
        tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s, %s",
                           op_name, str_r(r0), str_r(r1), str_r(r2),
                           str_r(r3), str_r(r4), str_r(r5));
        break;

    case INDEX_op_qemu_ld_i64:
    case INDEX_op_qemu_st_i64:
        len = DIV_ROUND_UP(64, TCG_TARGET_REG_BITS);
        goto do_qemu_ldst;
    case INDEX_op_qemu_ld_i32:
    case INDEX_op_qemu_st_i32:
        len = 1;
    do_qemu_ldst:
        len += DIV_ROUND_UP(TARGET_LONG_BITS, TCG_TARGET_REG_BITS);
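        /*
         * Worked example of the operand count, for illustration: on a
         * 32-bit host running a 64-bit guest, a qemu_ld_i64 needs
         * len = 2 (value words) + 2 (address words) = 4, which selects
         * the five-register form below; the fifth register carries the
         * TCGMemOpIdx, matching 'oi = regs[r4]' in the interpreter.
         */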
        switch (len) {
        case 2:
            tci_args_rrm(insn, &r0, &r1, &oi);
            info->fprintf_func(info->stream, "%-12s %s, %s, %x",
                               op_name, str_r(r0), str_r(r1), oi);
            break;
        case 3:
            tci_args_rrrm(insn, &r0, &r1, &r2, &oi);
            info->fprintf_func(info->stream, "%-12s %s, %s, %s, %x",
                               op_name, str_r(r0), str_r(r1), str_r(r2), oi);
            break;
        case 4:
            tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4);
            info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s",
                               op_name, str_r(r0), str_r(r1),
                               str_r(r2), str_r(r3), str_r(r4));
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case 0:
        /* tcg_out_nop_fill uses zeros */
        if (insn == 0) {
            info->fprintf_func(info->stream, "align");
            break;
        }
        /* fall through */

    default:
        info->fprintf_func(info->stream, "illegal opcode %d", op);
        break;
    }

    return sizeof(insn);
}