1 /* 2 * Tiny Code Interpreter for QEMU 3 * 4 * Copyright (c) 2009, 2011, 2016 Stefan Weil 5 * 6 * This program is free software: you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License as published by 8 * the Free Software Foundation, either version 2 of the License, or 9 * (at your option) any later version. 10 * 11 * This program is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * GNU General Public License for more details. 15 * 16 * You should have received a copy of the GNU General Public License 17 * along with this program. If not, see <http://www.gnu.org/licenses/>. 18 */ 19 20 #include "qemu/osdep.h" 21 #include "qemu-common.h" 22 #include "tcg/tcg.h" /* MAX_OPC_PARAM_IARGS */ 23 #include "exec/cpu_ldst.h" 24 #include "tcg/tcg-op.h" 25 #include "qemu/compiler.h" 26 #include <ffi.h> 27 28 29 /* 30 * Enable TCI assertions only when debugging TCG (and without NDEBUG defined). 31 * Without assertions, the interpreter runs much faster. 32 */ 33 #if defined(CONFIG_DEBUG_TCG) 34 # define tci_assert(cond) assert(cond) 35 #else 36 # define tci_assert(cond) ((void)(cond)) 37 #endif 38 39 __thread uintptr_t tci_tb_ptr; 40 41 static void tci_write_reg64(tcg_target_ulong *regs, uint32_t high_index, 42 uint32_t low_index, uint64_t value) 43 { 44 regs[low_index] = (uint32_t)value; 45 regs[high_index] = value >> 32; 46 } 47 48 /* Create a 64 bit value from two 32 bit values. */ 49 static uint64_t tci_uint64(uint32_t high, uint32_t low) 50 { 51 return ((uint64_t)high << 32) + low; 52 } 53 54 /* 55 * Load sets of arguments all at once. The naming convention is: 56 * tci_args_<arguments> 57 * where arguments is a sequence of 58 * 59 * b = immediate (bit position) 60 * c = condition (TCGCond) 61 * i = immediate (uint32_t) 62 * I = immediate (tcg_target_ulong) 63 * l = label or pointer 64 * m = immediate (TCGMemOpIdx) 65 * n = immediate (call return length) 66 * r = register 67 * s = signed ldst offset 68 */ 69 70 static void tci_args_l(uint32_t insn, const void *tb_ptr, void **l0) 71 { 72 int diff = sextract32(insn, 12, 20); 73 *l0 = diff ? (void *)tb_ptr + diff : NULL; 74 } 75 76 static void tci_args_r(uint32_t insn, TCGReg *r0) 77 { 78 *r0 = extract32(insn, 8, 4); 79 } 80 81 static void tci_args_nl(uint32_t insn, const void *tb_ptr, 82 uint8_t *n0, void **l1) 83 { 84 *n0 = extract32(insn, 8, 4); 85 *l1 = sextract32(insn, 12, 20) + (void *)tb_ptr; 86 } 87 88 static void tci_args_rl(uint32_t insn, const void *tb_ptr, 89 TCGReg *r0, void **l1) 90 { 91 *r0 = extract32(insn, 8, 4); 92 *l1 = sextract32(insn, 12, 20) + (void *)tb_ptr; 93 } 94 95 static void tci_args_rr(uint32_t insn, TCGReg *r0, TCGReg *r1) 96 { 97 *r0 = extract32(insn, 8, 4); 98 *r1 = extract32(insn, 12, 4); 99 } 100 101 static void tci_args_ri(uint32_t insn, TCGReg *r0, tcg_target_ulong *i1) 102 { 103 *r0 = extract32(insn, 8, 4); 104 *i1 = sextract32(insn, 12, 20); 105 } 106 107 static void tci_args_rrm(uint32_t insn, TCGReg *r0, 108 TCGReg *r1, TCGMemOpIdx *m2) 109 { 110 *r0 = extract32(insn, 8, 4); 111 *r1 = extract32(insn, 12, 4); 112 *m2 = extract32(insn, 20, 12); 113 } 114 115 static void tci_args_rrr(uint32_t insn, TCGReg *r0, TCGReg *r1, TCGReg *r2) 116 { 117 *r0 = extract32(insn, 8, 4); 118 *r1 = extract32(insn, 12, 4); 119 *r2 = extract32(insn, 16, 4); 120 } 121 122 static void tci_args_rrs(uint32_t insn, TCGReg *r0, TCGReg *r1, int32_t *i2) 123 { 124 *r0 = extract32(insn, 8, 4); 125 *r1 = extract32(insn, 12, 4); 126 *i2 = sextract32(insn, 16, 16); 127 } 128 129 static void tci_args_rrbb(uint32_t insn, TCGReg *r0, TCGReg *r1, 130 uint8_t *i2, uint8_t *i3) 131 { 132 *r0 = extract32(insn, 8, 4); 133 *r1 = extract32(insn, 12, 4); 134 *i2 = extract32(insn, 16, 6); 135 *i3 = extract32(insn, 22, 6); 136 } 137 138 static void tci_args_rrrc(uint32_t insn, 139 TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGCond *c3) 140 { 141 *r0 = extract32(insn, 8, 4); 142 *r1 = extract32(insn, 12, 4); 143 *r2 = extract32(insn, 16, 4); 144 *c3 = extract32(insn, 20, 4); 145 } 146 147 static void tci_args_rrrm(uint32_t insn, 148 TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGMemOpIdx *m3) 149 { 150 *r0 = extract32(insn, 8, 4); 151 *r1 = extract32(insn, 12, 4); 152 *r2 = extract32(insn, 16, 4); 153 *m3 = extract32(insn, 20, 12); 154 } 155 156 static void tci_args_rrrbb(uint32_t insn, TCGReg *r0, TCGReg *r1, 157 TCGReg *r2, uint8_t *i3, uint8_t *i4) 158 { 159 *r0 = extract32(insn, 8, 4); 160 *r1 = extract32(insn, 12, 4); 161 *r2 = extract32(insn, 16, 4); 162 *i3 = extract32(insn, 20, 6); 163 *i4 = extract32(insn, 26, 6); 164 } 165 166 static void tci_args_rrrrr(uint32_t insn, TCGReg *r0, TCGReg *r1, 167 TCGReg *r2, TCGReg *r3, TCGReg *r4) 168 { 169 *r0 = extract32(insn, 8, 4); 170 *r1 = extract32(insn, 12, 4); 171 *r2 = extract32(insn, 16, 4); 172 *r3 = extract32(insn, 20, 4); 173 *r4 = extract32(insn, 24, 4); 174 } 175 176 static void tci_args_rrrr(uint32_t insn, 177 TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGReg *r3) 178 { 179 *r0 = extract32(insn, 8, 4); 180 *r1 = extract32(insn, 12, 4); 181 *r2 = extract32(insn, 16, 4); 182 *r3 = extract32(insn, 20, 4); 183 } 184 185 static void tci_args_rrrrrc(uint32_t insn, TCGReg *r0, TCGReg *r1, 186 TCGReg *r2, TCGReg *r3, TCGReg *r4, TCGCond *c5) 187 { 188 *r0 = extract32(insn, 8, 4); 189 *r1 = extract32(insn, 12, 4); 190 *r2 = extract32(insn, 16, 4); 191 *r3 = extract32(insn, 20, 4); 192 *r4 = extract32(insn, 24, 4); 193 *c5 = extract32(insn, 28, 4); 194 } 195 196 static void tci_args_rrrrrr(uint32_t insn, TCGReg *r0, TCGReg *r1, 197 TCGReg *r2, TCGReg *r3, TCGReg *r4, TCGReg *r5) 198 { 199 *r0 = extract32(insn, 8, 4); 200 *r1 = extract32(insn, 12, 4); 201 *r2 = extract32(insn, 16, 4); 202 *r3 = extract32(insn, 20, 4); 203 *r4 = extract32(insn, 24, 4); 204 *r5 = extract32(insn, 28, 4); 205 } 206 207 static bool tci_compare32(uint32_t u0, uint32_t u1, TCGCond condition) 208 { 209 bool result = false; 210 int32_t i0 = u0; 211 int32_t i1 = u1; 212 switch (condition) { 213 case TCG_COND_EQ: 214 result = (u0 == u1); 215 break; 216 case TCG_COND_NE: 217 result = (u0 != u1); 218 break; 219 case TCG_COND_LT: 220 result = (i0 < i1); 221 break; 222 case TCG_COND_GE: 223 result = (i0 >= i1); 224 break; 225 case TCG_COND_LE: 226 result = (i0 <= i1); 227 break; 228 case TCG_COND_GT: 229 result = (i0 > i1); 230 break; 231 case TCG_COND_LTU: 232 result = (u0 < u1); 233 break; 234 case TCG_COND_GEU: 235 result = (u0 >= u1); 236 break; 237 case TCG_COND_LEU: 238 result = (u0 <= u1); 239 break; 240 case TCG_COND_GTU: 241 result = (u0 > u1); 242 break; 243 default: 244 g_assert_not_reached(); 245 } 246 return result; 247 } 248 249 static bool tci_compare64(uint64_t u0, uint64_t u1, TCGCond condition) 250 { 251 bool result = false; 252 int64_t i0 = u0; 253 int64_t i1 = u1; 254 switch (condition) { 255 case TCG_COND_EQ: 256 result = (u0 == u1); 257 break; 258 case TCG_COND_NE: 259 result = (u0 != u1); 260 break; 261 case TCG_COND_LT: 262 result = (i0 < i1); 263 break; 264 case TCG_COND_GE: 265 result = (i0 >= i1); 266 break; 267 case TCG_COND_LE: 268 result = (i0 <= i1); 269 break; 270 case TCG_COND_GT: 271 result = (i0 > i1); 272 break; 273 case TCG_COND_LTU: 274 result = (u0 < u1); 275 break; 276 case TCG_COND_GEU: 277 result = (u0 >= u1); 278 break; 279 case TCG_COND_LEU: 280 result = (u0 <= u1); 281 break; 282 case TCG_COND_GTU: 283 result = (u0 > u1); 284 break; 285 default: 286 g_assert_not_reached(); 287 } 288 return result; 289 } 290 291 static uint64_t tci_qemu_ld(CPUArchState *env, target_ulong taddr, 292 TCGMemOpIdx oi, const void *tb_ptr) 293 { 294 MemOp mop = get_memop(oi) & (MO_BSWAP | MO_SSIZE); 295 uintptr_t ra = (uintptr_t)tb_ptr; 296 297 #ifdef CONFIG_SOFTMMU 298 switch (mop) { 299 case MO_UB: 300 return helper_ret_ldub_mmu(env, taddr, oi, ra); 301 case MO_SB: 302 return helper_ret_ldsb_mmu(env, taddr, oi, ra); 303 case MO_LEUW: 304 return helper_le_lduw_mmu(env, taddr, oi, ra); 305 case MO_LESW: 306 return helper_le_ldsw_mmu(env, taddr, oi, ra); 307 case MO_LEUL: 308 return helper_le_ldul_mmu(env, taddr, oi, ra); 309 case MO_LESL: 310 return helper_le_ldsl_mmu(env, taddr, oi, ra); 311 case MO_LEQ: 312 return helper_le_ldq_mmu(env, taddr, oi, ra); 313 case MO_BEUW: 314 return helper_be_lduw_mmu(env, taddr, oi, ra); 315 case MO_BESW: 316 return helper_be_ldsw_mmu(env, taddr, oi, ra); 317 case MO_BEUL: 318 return helper_be_ldul_mmu(env, taddr, oi, ra); 319 case MO_BESL: 320 return helper_be_ldsl_mmu(env, taddr, oi, ra); 321 case MO_BEQ: 322 return helper_be_ldq_mmu(env, taddr, oi, ra); 323 default: 324 g_assert_not_reached(); 325 } 326 #else 327 void *haddr = g2h(env_cpu(env), taddr); 328 uint64_t ret; 329 330 set_helper_retaddr(ra); 331 switch (mop) { 332 case MO_UB: 333 ret = ldub_p(haddr); 334 break; 335 case MO_SB: 336 ret = ldsb_p(haddr); 337 break; 338 case MO_LEUW: 339 ret = lduw_le_p(haddr); 340 break; 341 case MO_LESW: 342 ret = ldsw_le_p(haddr); 343 break; 344 case MO_LEUL: 345 ret = (uint32_t)ldl_le_p(haddr); 346 break; 347 case MO_LESL: 348 ret = (int32_t)ldl_le_p(haddr); 349 break; 350 case MO_LEQ: 351 ret = ldq_le_p(haddr); 352 break; 353 case MO_BEUW: 354 ret = lduw_be_p(haddr); 355 break; 356 case MO_BESW: 357 ret = ldsw_be_p(haddr); 358 break; 359 case MO_BEUL: 360 ret = (uint32_t)ldl_be_p(haddr); 361 break; 362 case MO_BESL: 363 ret = (int32_t)ldl_be_p(haddr); 364 break; 365 case MO_BEQ: 366 ret = ldq_be_p(haddr); 367 break; 368 default: 369 g_assert_not_reached(); 370 } 371 clear_helper_retaddr(); 372 return ret; 373 #endif 374 } 375 376 static void tci_qemu_st(CPUArchState *env, target_ulong taddr, uint64_t val, 377 TCGMemOpIdx oi, const void *tb_ptr) 378 { 379 MemOp mop = get_memop(oi) & (MO_BSWAP | MO_SSIZE); 380 uintptr_t ra = (uintptr_t)tb_ptr; 381 382 #ifdef CONFIG_SOFTMMU 383 switch (mop) { 384 case MO_UB: 385 helper_ret_stb_mmu(env, taddr, val, oi, ra); 386 break; 387 case MO_LEUW: 388 helper_le_stw_mmu(env, taddr, val, oi, ra); 389 break; 390 case MO_LEUL: 391 helper_le_stl_mmu(env, taddr, val, oi, ra); 392 break; 393 case MO_LEQ: 394 helper_le_stq_mmu(env, taddr, val, oi, ra); 395 break; 396 case MO_BEUW: 397 helper_be_stw_mmu(env, taddr, val, oi, ra); 398 break; 399 case MO_BEUL: 400 helper_be_stl_mmu(env, taddr, val, oi, ra); 401 break; 402 case MO_BEQ: 403 helper_be_stq_mmu(env, taddr, val, oi, ra); 404 break; 405 default: 406 g_assert_not_reached(); 407 } 408 #else 409 void *haddr = g2h(env_cpu(env), taddr); 410 411 set_helper_retaddr(ra); 412 switch (mop) { 413 case MO_UB: 414 stb_p(haddr, val); 415 break; 416 case MO_LEUW: 417 stw_le_p(haddr, val); 418 break; 419 case MO_LEUL: 420 stl_le_p(haddr, val); 421 break; 422 case MO_LEQ: 423 stq_le_p(haddr, val); 424 break; 425 case MO_BEUW: 426 stw_be_p(haddr, val); 427 break; 428 case MO_BEUL: 429 stl_be_p(haddr, val); 430 break; 431 case MO_BEQ: 432 stq_be_p(haddr, val); 433 break; 434 default: 435 g_assert_not_reached(); 436 } 437 clear_helper_retaddr(); 438 #endif 439 } 440 441 #if TCG_TARGET_REG_BITS == 64 442 # define CASE_32_64(x) \ 443 case glue(glue(INDEX_op_, x), _i64): \ 444 case glue(glue(INDEX_op_, x), _i32): 445 # define CASE_64(x) \ 446 case glue(glue(INDEX_op_, x), _i64): 447 #else 448 # define CASE_32_64(x) \ 449 case glue(glue(INDEX_op_, x), _i32): 450 # define CASE_64(x) 451 #endif 452 453 /* Interpret pseudo code in tb. */ 454 /* 455 * Disable CFI checks. 456 * One possible operation in the pseudo code is a call to binary code. 457 * Therefore, disable CFI checks in the interpreter function 458 */ 459 uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, 460 const void *v_tb_ptr) 461 { 462 const uint32_t *tb_ptr = v_tb_ptr; 463 tcg_target_ulong regs[TCG_TARGET_NB_REGS]; 464 uint64_t stack[(TCG_STATIC_CALL_ARGS_SIZE + TCG_STATIC_FRAME_SIZE) 465 / sizeof(uint64_t)]; 466 void *call_slots[TCG_STATIC_CALL_ARGS_SIZE / sizeof(uint64_t)]; 467 468 regs[TCG_AREG0] = (tcg_target_ulong)env; 469 regs[TCG_REG_CALL_STACK] = (uintptr_t)stack; 470 /* Other call_slots entries initialized at first use (see below). */ 471 call_slots[0] = NULL; 472 tci_assert(tb_ptr); 473 474 for (;;) { 475 uint32_t insn; 476 TCGOpcode opc; 477 TCGReg r0, r1, r2, r3, r4, r5; 478 tcg_target_ulong t1; 479 TCGCond condition; 480 target_ulong taddr; 481 uint8_t pos, len; 482 uint32_t tmp32; 483 uint64_t tmp64; 484 uint64_t T1, T2; 485 TCGMemOpIdx oi; 486 int32_t ofs; 487 void *ptr; 488 489 insn = *tb_ptr++; 490 opc = extract32(insn, 0, 8); 491 492 switch (opc) { 493 case INDEX_op_call: 494 /* 495 * Set up the ffi_avalue array once, delayed until now 496 * because many TB's do not make any calls. In tcg_gen_callN, 497 * we arranged for every real argument to be "left-aligned" 498 * in each 64-bit slot. 499 */ 500 if (unlikely(call_slots[0] == NULL)) { 501 for (int i = 0; i < ARRAY_SIZE(call_slots); ++i) { 502 call_slots[i] = &stack[i]; 503 } 504 } 505 506 tci_args_nl(insn, tb_ptr, &len, &ptr); 507 508 /* Helper functions may need to access the "return address" */ 509 tci_tb_ptr = (uintptr_t)tb_ptr; 510 511 { 512 void **pptr = ptr; 513 ffi_call(pptr[1], pptr[0], stack, call_slots); 514 } 515 516 /* Any result winds up "left-aligned" in the stack[0] slot. */ 517 switch (len) { 518 case 0: /* void */ 519 break; 520 case 1: /* uint32_t */ 521 /* 522 * Note that libffi has an odd special case in that it will 523 * always widen an integral result to ffi_arg. 524 */ 525 if (sizeof(ffi_arg) == 4) { 526 regs[TCG_REG_R0] = *(uint32_t *)stack; 527 break; 528 } 529 /* fall through */ 530 case 2: /* uint64_t */ 531 if (TCG_TARGET_REG_BITS == 32) { 532 tci_write_reg64(regs, TCG_REG_R1, TCG_REG_R0, stack[0]); 533 } else { 534 regs[TCG_REG_R0] = stack[0]; 535 } 536 break; 537 default: 538 g_assert_not_reached(); 539 } 540 break; 541 542 case INDEX_op_br: 543 tci_args_l(insn, tb_ptr, &ptr); 544 tb_ptr = ptr; 545 continue; 546 case INDEX_op_setcond_i32: 547 tci_args_rrrc(insn, &r0, &r1, &r2, &condition); 548 regs[r0] = tci_compare32(regs[r1], regs[r2], condition); 549 break; 550 case INDEX_op_movcond_i32: 551 tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition); 552 tmp32 = tci_compare32(regs[r1], regs[r2], condition); 553 regs[r0] = regs[tmp32 ? r3 : r4]; 554 break; 555 #if TCG_TARGET_REG_BITS == 32 556 case INDEX_op_setcond2_i32: 557 tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition); 558 T1 = tci_uint64(regs[r2], regs[r1]); 559 T2 = tci_uint64(regs[r4], regs[r3]); 560 regs[r0] = tci_compare64(T1, T2, condition); 561 break; 562 #elif TCG_TARGET_REG_BITS == 64 563 case INDEX_op_setcond_i64: 564 tci_args_rrrc(insn, &r0, &r1, &r2, &condition); 565 regs[r0] = tci_compare64(regs[r1], regs[r2], condition); 566 break; 567 case INDEX_op_movcond_i64: 568 tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition); 569 tmp32 = tci_compare64(regs[r1], regs[r2], condition); 570 regs[r0] = regs[tmp32 ? r3 : r4]; 571 break; 572 #endif 573 CASE_32_64(mov) 574 tci_args_rr(insn, &r0, &r1); 575 regs[r0] = regs[r1]; 576 break; 577 case INDEX_op_tci_movi: 578 tci_args_ri(insn, &r0, &t1); 579 regs[r0] = t1; 580 break; 581 case INDEX_op_tci_movl: 582 tci_args_rl(insn, tb_ptr, &r0, &ptr); 583 regs[r0] = *(tcg_target_ulong *)ptr; 584 break; 585 586 /* Load/store operations (32 bit). */ 587 588 CASE_32_64(ld8u) 589 tci_args_rrs(insn, &r0, &r1, &ofs); 590 ptr = (void *)(regs[r1] + ofs); 591 regs[r0] = *(uint8_t *)ptr; 592 break; 593 CASE_32_64(ld8s) 594 tci_args_rrs(insn, &r0, &r1, &ofs); 595 ptr = (void *)(regs[r1] + ofs); 596 regs[r0] = *(int8_t *)ptr; 597 break; 598 CASE_32_64(ld16u) 599 tci_args_rrs(insn, &r0, &r1, &ofs); 600 ptr = (void *)(regs[r1] + ofs); 601 regs[r0] = *(uint16_t *)ptr; 602 break; 603 CASE_32_64(ld16s) 604 tci_args_rrs(insn, &r0, &r1, &ofs); 605 ptr = (void *)(regs[r1] + ofs); 606 regs[r0] = *(int16_t *)ptr; 607 break; 608 case INDEX_op_ld_i32: 609 CASE_64(ld32u) 610 tci_args_rrs(insn, &r0, &r1, &ofs); 611 ptr = (void *)(regs[r1] + ofs); 612 regs[r0] = *(uint32_t *)ptr; 613 break; 614 CASE_32_64(st8) 615 tci_args_rrs(insn, &r0, &r1, &ofs); 616 ptr = (void *)(regs[r1] + ofs); 617 *(uint8_t *)ptr = regs[r0]; 618 break; 619 CASE_32_64(st16) 620 tci_args_rrs(insn, &r0, &r1, &ofs); 621 ptr = (void *)(regs[r1] + ofs); 622 *(uint16_t *)ptr = regs[r0]; 623 break; 624 case INDEX_op_st_i32: 625 CASE_64(st32) 626 tci_args_rrs(insn, &r0, &r1, &ofs); 627 ptr = (void *)(regs[r1] + ofs); 628 *(uint32_t *)ptr = regs[r0]; 629 break; 630 631 /* Arithmetic operations (mixed 32/64 bit). */ 632 633 CASE_32_64(add) 634 tci_args_rrr(insn, &r0, &r1, &r2); 635 regs[r0] = regs[r1] + regs[r2]; 636 break; 637 CASE_32_64(sub) 638 tci_args_rrr(insn, &r0, &r1, &r2); 639 regs[r0] = regs[r1] - regs[r2]; 640 break; 641 CASE_32_64(mul) 642 tci_args_rrr(insn, &r0, &r1, &r2); 643 regs[r0] = regs[r1] * regs[r2]; 644 break; 645 CASE_32_64(and) 646 tci_args_rrr(insn, &r0, &r1, &r2); 647 regs[r0] = regs[r1] & regs[r2]; 648 break; 649 CASE_32_64(or) 650 tci_args_rrr(insn, &r0, &r1, &r2); 651 regs[r0] = regs[r1] | regs[r2]; 652 break; 653 CASE_32_64(xor) 654 tci_args_rrr(insn, &r0, &r1, &r2); 655 regs[r0] = regs[r1] ^ regs[r2]; 656 break; 657 #if TCG_TARGET_HAS_andc_i32 || TCG_TARGET_HAS_andc_i64 658 CASE_32_64(andc) 659 tci_args_rrr(insn, &r0, &r1, &r2); 660 regs[r0] = regs[r1] & ~regs[r2]; 661 break; 662 #endif 663 #if TCG_TARGET_HAS_orc_i32 || TCG_TARGET_HAS_orc_i64 664 CASE_32_64(orc) 665 tci_args_rrr(insn, &r0, &r1, &r2); 666 regs[r0] = regs[r1] | ~regs[r2]; 667 break; 668 #endif 669 #if TCG_TARGET_HAS_eqv_i32 || TCG_TARGET_HAS_eqv_i64 670 CASE_32_64(eqv) 671 tci_args_rrr(insn, &r0, &r1, &r2); 672 regs[r0] = ~(regs[r1] ^ regs[r2]); 673 break; 674 #endif 675 #if TCG_TARGET_HAS_nand_i32 || TCG_TARGET_HAS_nand_i64 676 CASE_32_64(nand) 677 tci_args_rrr(insn, &r0, &r1, &r2); 678 regs[r0] = ~(regs[r1] & regs[r2]); 679 break; 680 #endif 681 #if TCG_TARGET_HAS_nor_i32 || TCG_TARGET_HAS_nor_i64 682 CASE_32_64(nor) 683 tci_args_rrr(insn, &r0, &r1, &r2); 684 regs[r0] = ~(regs[r1] | regs[r2]); 685 break; 686 #endif 687 688 /* Arithmetic operations (32 bit). */ 689 690 case INDEX_op_div_i32: 691 tci_args_rrr(insn, &r0, &r1, &r2); 692 regs[r0] = (int32_t)regs[r1] / (int32_t)regs[r2]; 693 break; 694 case INDEX_op_divu_i32: 695 tci_args_rrr(insn, &r0, &r1, &r2); 696 regs[r0] = (uint32_t)regs[r1] / (uint32_t)regs[r2]; 697 break; 698 case INDEX_op_rem_i32: 699 tci_args_rrr(insn, &r0, &r1, &r2); 700 regs[r0] = (int32_t)regs[r1] % (int32_t)regs[r2]; 701 break; 702 case INDEX_op_remu_i32: 703 tci_args_rrr(insn, &r0, &r1, &r2); 704 regs[r0] = (uint32_t)regs[r1] % (uint32_t)regs[r2]; 705 break; 706 #if TCG_TARGET_HAS_clz_i32 707 case INDEX_op_clz_i32: 708 tci_args_rrr(insn, &r0, &r1, &r2); 709 tmp32 = regs[r1]; 710 regs[r0] = tmp32 ? clz32(tmp32) : regs[r2]; 711 break; 712 #endif 713 #if TCG_TARGET_HAS_ctz_i32 714 case INDEX_op_ctz_i32: 715 tci_args_rrr(insn, &r0, &r1, &r2); 716 tmp32 = regs[r1]; 717 regs[r0] = tmp32 ? ctz32(tmp32) : regs[r2]; 718 break; 719 #endif 720 #if TCG_TARGET_HAS_ctpop_i32 721 case INDEX_op_ctpop_i32: 722 tci_args_rr(insn, &r0, &r1); 723 regs[r0] = ctpop32(regs[r1]); 724 break; 725 #endif 726 727 /* Shift/rotate operations (32 bit). */ 728 729 case INDEX_op_shl_i32: 730 tci_args_rrr(insn, &r0, &r1, &r2); 731 regs[r0] = (uint32_t)regs[r1] << (regs[r2] & 31); 732 break; 733 case INDEX_op_shr_i32: 734 tci_args_rrr(insn, &r0, &r1, &r2); 735 regs[r0] = (uint32_t)regs[r1] >> (regs[r2] & 31); 736 break; 737 case INDEX_op_sar_i32: 738 tci_args_rrr(insn, &r0, &r1, &r2); 739 regs[r0] = (int32_t)regs[r1] >> (regs[r2] & 31); 740 break; 741 #if TCG_TARGET_HAS_rot_i32 742 case INDEX_op_rotl_i32: 743 tci_args_rrr(insn, &r0, &r1, &r2); 744 regs[r0] = rol32(regs[r1], regs[r2] & 31); 745 break; 746 case INDEX_op_rotr_i32: 747 tci_args_rrr(insn, &r0, &r1, &r2); 748 regs[r0] = ror32(regs[r1], regs[r2] & 31); 749 break; 750 #endif 751 #if TCG_TARGET_HAS_deposit_i32 752 case INDEX_op_deposit_i32: 753 tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len); 754 regs[r0] = deposit32(regs[r1], pos, len, regs[r2]); 755 break; 756 #endif 757 #if TCG_TARGET_HAS_extract_i32 758 case INDEX_op_extract_i32: 759 tci_args_rrbb(insn, &r0, &r1, &pos, &len); 760 regs[r0] = extract32(regs[r1], pos, len); 761 break; 762 #endif 763 #if TCG_TARGET_HAS_sextract_i32 764 case INDEX_op_sextract_i32: 765 tci_args_rrbb(insn, &r0, &r1, &pos, &len); 766 regs[r0] = sextract32(regs[r1], pos, len); 767 break; 768 #endif 769 case INDEX_op_brcond_i32: 770 tci_args_rl(insn, tb_ptr, &r0, &ptr); 771 if ((uint32_t)regs[r0]) { 772 tb_ptr = ptr; 773 } 774 break; 775 #if TCG_TARGET_REG_BITS == 32 || TCG_TARGET_HAS_add2_i32 776 case INDEX_op_add2_i32: 777 tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5); 778 T1 = tci_uint64(regs[r3], regs[r2]); 779 T2 = tci_uint64(regs[r5], regs[r4]); 780 tci_write_reg64(regs, r1, r0, T1 + T2); 781 break; 782 #endif 783 #if TCG_TARGET_REG_BITS == 32 || TCG_TARGET_HAS_sub2_i32 784 case INDEX_op_sub2_i32: 785 tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5); 786 T1 = tci_uint64(regs[r3], regs[r2]); 787 T2 = tci_uint64(regs[r5], regs[r4]); 788 tci_write_reg64(regs, r1, r0, T1 - T2); 789 break; 790 #endif 791 #if TCG_TARGET_HAS_mulu2_i32 792 case INDEX_op_mulu2_i32: 793 tci_args_rrrr(insn, &r0, &r1, &r2, &r3); 794 tmp64 = (uint64_t)(uint32_t)regs[r2] * (uint32_t)regs[r3]; 795 tci_write_reg64(regs, r1, r0, tmp64); 796 break; 797 #endif 798 #if TCG_TARGET_HAS_muls2_i32 799 case INDEX_op_muls2_i32: 800 tci_args_rrrr(insn, &r0, &r1, &r2, &r3); 801 tmp64 = (int64_t)(int32_t)regs[r2] * (int32_t)regs[r3]; 802 tci_write_reg64(regs, r1, r0, tmp64); 803 break; 804 #endif 805 #if TCG_TARGET_HAS_ext8s_i32 || TCG_TARGET_HAS_ext8s_i64 806 CASE_32_64(ext8s) 807 tci_args_rr(insn, &r0, &r1); 808 regs[r0] = (int8_t)regs[r1]; 809 break; 810 #endif 811 #if TCG_TARGET_HAS_ext16s_i32 || TCG_TARGET_HAS_ext16s_i64 || \ 812 TCG_TARGET_HAS_bswap16_i32 || TCG_TARGET_HAS_bswap16_i64 813 CASE_32_64(ext16s) 814 tci_args_rr(insn, &r0, &r1); 815 regs[r0] = (int16_t)regs[r1]; 816 break; 817 #endif 818 #if TCG_TARGET_HAS_ext8u_i32 || TCG_TARGET_HAS_ext8u_i64 819 CASE_32_64(ext8u) 820 tci_args_rr(insn, &r0, &r1); 821 regs[r0] = (uint8_t)regs[r1]; 822 break; 823 #endif 824 #if TCG_TARGET_HAS_ext16u_i32 || TCG_TARGET_HAS_ext16u_i64 825 CASE_32_64(ext16u) 826 tci_args_rr(insn, &r0, &r1); 827 regs[r0] = (uint16_t)regs[r1]; 828 break; 829 #endif 830 #if TCG_TARGET_HAS_bswap16_i32 || TCG_TARGET_HAS_bswap16_i64 831 CASE_32_64(bswap16) 832 tci_args_rr(insn, &r0, &r1); 833 regs[r0] = bswap16(regs[r1]); 834 break; 835 #endif 836 #if TCG_TARGET_HAS_bswap32_i32 || TCG_TARGET_HAS_bswap32_i64 837 CASE_32_64(bswap32) 838 tci_args_rr(insn, &r0, &r1); 839 regs[r0] = bswap32(regs[r1]); 840 break; 841 #endif 842 #if TCG_TARGET_HAS_not_i32 || TCG_TARGET_HAS_not_i64 843 CASE_32_64(not) 844 tci_args_rr(insn, &r0, &r1); 845 regs[r0] = ~regs[r1]; 846 break; 847 #endif 848 #if TCG_TARGET_HAS_neg_i32 || TCG_TARGET_HAS_neg_i64 849 CASE_32_64(neg) 850 tci_args_rr(insn, &r0, &r1); 851 regs[r0] = -regs[r1]; 852 break; 853 #endif 854 #if TCG_TARGET_REG_BITS == 64 855 /* Load/store operations (64 bit). */ 856 857 case INDEX_op_ld32s_i64: 858 tci_args_rrs(insn, &r0, &r1, &ofs); 859 ptr = (void *)(regs[r1] + ofs); 860 regs[r0] = *(int32_t *)ptr; 861 break; 862 case INDEX_op_ld_i64: 863 tci_args_rrs(insn, &r0, &r1, &ofs); 864 ptr = (void *)(regs[r1] + ofs); 865 regs[r0] = *(uint64_t *)ptr; 866 break; 867 case INDEX_op_st_i64: 868 tci_args_rrs(insn, &r0, &r1, &ofs); 869 ptr = (void *)(regs[r1] + ofs); 870 *(uint64_t *)ptr = regs[r0]; 871 break; 872 873 /* Arithmetic operations (64 bit). */ 874 875 case INDEX_op_div_i64: 876 tci_args_rrr(insn, &r0, &r1, &r2); 877 regs[r0] = (int64_t)regs[r1] / (int64_t)regs[r2]; 878 break; 879 case INDEX_op_divu_i64: 880 tci_args_rrr(insn, &r0, &r1, &r2); 881 regs[r0] = (uint64_t)regs[r1] / (uint64_t)regs[r2]; 882 break; 883 case INDEX_op_rem_i64: 884 tci_args_rrr(insn, &r0, &r1, &r2); 885 regs[r0] = (int64_t)regs[r1] % (int64_t)regs[r2]; 886 break; 887 case INDEX_op_remu_i64: 888 tci_args_rrr(insn, &r0, &r1, &r2); 889 regs[r0] = (uint64_t)regs[r1] % (uint64_t)regs[r2]; 890 break; 891 #if TCG_TARGET_HAS_clz_i64 892 case INDEX_op_clz_i64: 893 tci_args_rrr(insn, &r0, &r1, &r2); 894 regs[r0] = regs[r1] ? clz64(regs[r1]) : regs[r2]; 895 break; 896 #endif 897 #if TCG_TARGET_HAS_ctz_i64 898 case INDEX_op_ctz_i64: 899 tci_args_rrr(insn, &r0, &r1, &r2); 900 regs[r0] = regs[r1] ? ctz64(regs[r1]) : regs[r2]; 901 break; 902 #endif 903 #if TCG_TARGET_HAS_ctpop_i64 904 case INDEX_op_ctpop_i64: 905 tci_args_rr(insn, &r0, &r1); 906 regs[r0] = ctpop64(regs[r1]); 907 break; 908 #endif 909 #if TCG_TARGET_HAS_mulu2_i64 910 case INDEX_op_mulu2_i64: 911 tci_args_rrrr(insn, &r0, &r1, &r2, &r3); 912 mulu64(®s[r0], ®s[r1], regs[r2], regs[r3]); 913 break; 914 #endif 915 #if TCG_TARGET_HAS_muls2_i64 916 case INDEX_op_muls2_i64: 917 tci_args_rrrr(insn, &r0, &r1, &r2, &r3); 918 muls64(®s[r0], ®s[r1], regs[r2], regs[r3]); 919 break; 920 #endif 921 #if TCG_TARGET_HAS_add2_i64 922 case INDEX_op_add2_i64: 923 tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5); 924 T1 = regs[r2] + regs[r4]; 925 T2 = regs[r3] + regs[r5] + (T1 < regs[r2]); 926 regs[r0] = T1; 927 regs[r1] = T2; 928 break; 929 #endif 930 #if TCG_TARGET_HAS_add2_i64 931 case INDEX_op_sub2_i64: 932 tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5); 933 T1 = regs[r2] - regs[r4]; 934 T2 = regs[r3] - regs[r5] - (regs[r2] < regs[r4]); 935 regs[r0] = T1; 936 regs[r1] = T2; 937 break; 938 #endif 939 940 /* Shift/rotate operations (64 bit). */ 941 942 case INDEX_op_shl_i64: 943 tci_args_rrr(insn, &r0, &r1, &r2); 944 regs[r0] = regs[r1] << (regs[r2] & 63); 945 break; 946 case INDEX_op_shr_i64: 947 tci_args_rrr(insn, &r0, &r1, &r2); 948 regs[r0] = regs[r1] >> (regs[r2] & 63); 949 break; 950 case INDEX_op_sar_i64: 951 tci_args_rrr(insn, &r0, &r1, &r2); 952 regs[r0] = (int64_t)regs[r1] >> (regs[r2] & 63); 953 break; 954 #if TCG_TARGET_HAS_rot_i64 955 case INDEX_op_rotl_i64: 956 tci_args_rrr(insn, &r0, &r1, &r2); 957 regs[r0] = rol64(regs[r1], regs[r2] & 63); 958 break; 959 case INDEX_op_rotr_i64: 960 tci_args_rrr(insn, &r0, &r1, &r2); 961 regs[r0] = ror64(regs[r1], regs[r2] & 63); 962 break; 963 #endif 964 #if TCG_TARGET_HAS_deposit_i64 965 case INDEX_op_deposit_i64: 966 tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len); 967 regs[r0] = deposit64(regs[r1], pos, len, regs[r2]); 968 break; 969 #endif 970 #if TCG_TARGET_HAS_extract_i64 971 case INDEX_op_extract_i64: 972 tci_args_rrbb(insn, &r0, &r1, &pos, &len); 973 regs[r0] = extract64(regs[r1], pos, len); 974 break; 975 #endif 976 #if TCG_TARGET_HAS_sextract_i64 977 case INDEX_op_sextract_i64: 978 tci_args_rrbb(insn, &r0, &r1, &pos, &len); 979 regs[r0] = sextract64(regs[r1], pos, len); 980 break; 981 #endif 982 case INDEX_op_brcond_i64: 983 tci_args_rl(insn, tb_ptr, &r0, &ptr); 984 if (regs[r0]) { 985 tb_ptr = ptr; 986 } 987 break; 988 case INDEX_op_ext32s_i64: 989 case INDEX_op_ext_i32_i64: 990 tci_args_rr(insn, &r0, &r1); 991 regs[r0] = (int32_t)regs[r1]; 992 break; 993 case INDEX_op_ext32u_i64: 994 case INDEX_op_extu_i32_i64: 995 tci_args_rr(insn, &r0, &r1); 996 regs[r0] = (uint32_t)regs[r1]; 997 break; 998 #if TCG_TARGET_HAS_bswap64_i64 999 case INDEX_op_bswap64_i64: 1000 tci_args_rr(insn, &r0, &r1); 1001 regs[r0] = bswap64(regs[r1]); 1002 break; 1003 #endif 1004 #endif /* TCG_TARGET_REG_BITS == 64 */ 1005 1006 /* QEMU specific operations. */ 1007 1008 case INDEX_op_exit_tb: 1009 tci_args_l(insn, tb_ptr, &ptr); 1010 return (uintptr_t)ptr; 1011 1012 case INDEX_op_goto_tb: 1013 tci_args_l(insn, tb_ptr, &ptr); 1014 tb_ptr = *(void **)ptr; 1015 break; 1016 1017 case INDEX_op_goto_ptr: 1018 tci_args_r(insn, &r0); 1019 ptr = (void *)regs[r0]; 1020 if (!ptr) { 1021 return 0; 1022 } 1023 tb_ptr = ptr; 1024 break; 1025 1026 case INDEX_op_qemu_ld_i32: 1027 if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) { 1028 tci_args_rrm(insn, &r0, &r1, &oi); 1029 taddr = regs[r1]; 1030 } else { 1031 tci_args_rrrm(insn, &r0, &r1, &r2, &oi); 1032 taddr = tci_uint64(regs[r2], regs[r1]); 1033 } 1034 tmp32 = tci_qemu_ld(env, taddr, oi, tb_ptr); 1035 regs[r0] = tmp32; 1036 break; 1037 1038 case INDEX_op_qemu_ld_i64: 1039 if (TCG_TARGET_REG_BITS == 64) { 1040 tci_args_rrm(insn, &r0, &r1, &oi); 1041 taddr = regs[r1]; 1042 } else if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) { 1043 tci_args_rrrm(insn, &r0, &r1, &r2, &oi); 1044 taddr = regs[r2]; 1045 } else { 1046 tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4); 1047 taddr = tci_uint64(regs[r3], regs[r2]); 1048 oi = regs[r4]; 1049 } 1050 tmp64 = tci_qemu_ld(env, taddr, oi, tb_ptr); 1051 if (TCG_TARGET_REG_BITS == 32) { 1052 tci_write_reg64(regs, r1, r0, tmp64); 1053 } else { 1054 regs[r0] = tmp64; 1055 } 1056 break; 1057 1058 case INDEX_op_qemu_st_i32: 1059 if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) { 1060 tci_args_rrm(insn, &r0, &r1, &oi); 1061 taddr = regs[r1]; 1062 } else { 1063 tci_args_rrrm(insn, &r0, &r1, &r2, &oi); 1064 taddr = tci_uint64(regs[r2], regs[r1]); 1065 } 1066 tmp32 = regs[r0]; 1067 tci_qemu_st(env, taddr, tmp32, oi, tb_ptr); 1068 break; 1069 1070 case INDEX_op_qemu_st_i64: 1071 if (TCG_TARGET_REG_BITS == 64) { 1072 tci_args_rrm(insn, &r0, &r1, &oi); 1073 taddr = regs[r1]; 1074 tmp64 = regs[r0]; 1075 } else { 1076 if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) { 1077 tci_args_rrrm(insn, &r0, &r1, &r2, &oi); 1078 taddr = regs[r2]; 1079 } else { 1080 tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4); 1081 taddr = tci_uint64(regs[r3], regs[r2]); 1082 oi = regs[r4]; 1083 } 1084 tmp64 = tci_uint64(regs[r1], regs[r0]); 1085 } 1086 tci_qemu_st(env, taddr, tmp64, oi, tb_ptr); 1087 break; 1088 1089 case INDEX_op_mb: 1090 /* Ensure ordering for all kinds */ 1091 smp_mb(); 1092 break; 1093 default: 1094 g_assert_not_reached(); 1095 } 1096 } 1097 } 1098 1099 /* 1100 * Disassembler that matches the interpreter 1101 */ 1102 1103 static const char *str_r(TCGReg r) 1104 { 1105 static const char regs[TCG_TARGET_NB_REGS][4] = { 1106 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", 1107 "r8", "r9", "r10", "r11", "r12", "r13", "env", "sp" 1108 }; 1109 1110 QEMU_BUILD_BUG_ON(TCG_AREG0 != TCG_REG_R14); 1111 QEMU_BUILD_BUG_ON(TCG_REG_CALL_STACK != TCG_REG_R15); 1112 1113 assert((unsigned)r < TCG_TARGET_NB_REGS); 1114 return regs[r]; 1115 } 1116 1117 static const char *str_c(TCGCond c) 1118 { 1119 static const char cond[16][8] = { 1120 [TCG_COND_NEVER] = "never", 1121 [TCG_COND_ALWAYS] = "always", 1122 [TCG_COND_EQ] = "eq", 1123 [TCG_COND_NE] = "ne", 1124 [TCG_COND_LT] = "lt", 1125 [TCG_COND_GE] = "ge", 1126 [TCG_COND_LE] = "le", 1127 [TCG_COND_GT] = "gt", 1128 [TCG_COND_LTU] = "ltu", 1129 [TCG_COND_GEU] = "geu", 1130 [TCG_COND_LEU] = "leu", 1131 [TCG_COND_GTU] = "gtu", 1132 }; 1133 1134 assert((unsigned)c < ARRAY_SIZE(cond)); 1135 assert(cond[c][0] != 0); 1136 return cond[c]; 1137 } 1138 1139 /* Disassemble TCI bytecode. */ 1140 int print_insn_tci(bfd_vma addr, disassemble_info *info) 1141 { 1142 const uint32_t *tb_ptr = (const void *)(uintptr_t)addr; 1143 const TCGOpDef *def; 1144 const char *op_name; 1145 uint32_t insn; 1146 TCGOpcode op; 1147 TCGReg r0, r1, r2, r3, r4, r5; 1148 tcg_target_ulong i1; 1149 int32_t s2; 1150 TCGCond c; 1151 TCGMemOpIdx oi; 1152 uint8_t pos, len; 1153 void *ptr; 1154 1155 /* TCI is always the host, so we don't need to load indirect. */ 1156 insn = *tb_ptr++; 1157 1158 info->fprintf_func(info->stream, "%08x ", insn); 1159 1160 op = extract32(insn, 0, 8); 1161 def = &tcg_op_defs[op]; 1162 op_name = def->name; 1163 1164 switch (op) { 1165 case INDEX_op_br: 1166 case INDEX_op_exit_tb: 1167 case INDEX_op_goto_tb: 1168 tci_args_l(insn, tb_ptr, &ptr); 1169 info->fprintf_func(info->stream, "%-12s %p", op_name, ptr); 1170 break; 1171 1172 case INDEX_op_goto_ptr: 1173 tci_args_r(insn, &r0); 1174 info->fprintf_func(info->stream, "%-12s %s", op_name, str_r(r0)); 1175 break; 1176 1177 case INDEX_op_call: 1178 tci_args_nl(insn, tb_ptr, &len, &ptr); 1179 info->fprintf_func(info->stream, "%-12s %d, %p", op_name, len, ptr); 1180 break; 1181 1182 case INDEX_op_brcond_i32: 1183 case INDEX_op_brcond_i64: 1184 tci_args_rl(insn, tb_ptr, &r0, &ptr); 1185 info->fprintf_func(info->stream, "%-12s %s, 0, ne, %p", 1186 op_name, str_r(r0), ptr); 1187 break; 1188 1189 case INDEX_op_setcond_i32: 1190 case INDEX_op_setcond_i64: 1191 tci_args_rrrc(insn, &r0, &r1, &r2, &c); 1192 info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s", 1193 op_name, str_r(r0), str_r(r1), str_r(r2), str_c(c)); 1194 break; 1195 1196 case INDEX_op_tci_movi: 1197 tci_args_ri(insn, &r0, &i1); 1198 info->fprintf_func(info->stream, "%-12s %s, 0x%" TCG_PRIlx, 1199 op_name, str_r(r0), i1); 1200 break; 1201 1202 case INDEX_op_tci_movl: 1203 tci_args_rl(insn, tb_ptr, &r0, &ptr); 1204 info->fprintf_func(info->stream, "%-12s %s, %p", 1205 op_name, str_r(r0), ptr); 1206 break; 1207 1208 case INDEX_op_ld8u_i32: 1209 case INDEX_op_ld8u_i64: 1210 case INDEX_op_ld8s_i32: 1211 case INDEX_op_ld8s_i64: 1212 case INDEX_op_ld16u_i32: 1213 case INDEX_op_ld16u_i64: 1214 case INDEX_op_ld16s_i32: 1215 case INDEX_op_ld16s_i64: 1216 case INDEX_op_ld32u_i64: 1217 case INDEX_op_ld32s_i64: 1218 case INDEX_op_ld_i32: 1219 case INDEX_op_ld_i64: 1220 case INDEX_op_st8_i32: 1221 case INDEX_op_st8_i64: 1222 case INDEX_op_st16_i32: 1223 case INDEX_op_st16_i64: 1224 case INDEX_op_st32_i64: 1225 case INDEX_op_st_i32: 1226 case INDEX_op_st_i64: 1227 tci_args_rrs(insn, &r0, &r1, &s2); 1228 info->fprintf_func(info->stream, "%-12s %s, %s, %d", 1229 op_name, str_r(r0), str_r(r1), s2); 1230 break; 1231 1232 case INDEX_op_mov_i32: 1233 case INDEX_op_mov_i64: 1234 case INDEX_op_ext8s_i32: 1235 case INDEX_op_ext8s_i64: 1236 case INDEX_op_ext8u_i32: 1237 case INDEX_op_ext8u_i64: 1238 case INDEX_op_ext16s_i32: 1239 case INDEX_op_ext16s_i64: 1240 case INDEX_op_ext16u_i32: 1241 case INDEX_op_ext32s_i64: 1242 case INDEX_op_ext32u_i64: 1243 case INDEX_op_ext_i32_i64: 1244 case INDEX_op_extu_i32_i64: 1245 case INDEX_op_bswap16_i32: 1246 case INDEX_op_bswap16_i64: 1247 case INDEX_op_bswap32_i32: 1248 case INDEX_op_bswap32_i64: 1249 case INDEX_op_bswap64_i64: 1250 case INDEX_op_not_i32: 1251 case INDEX_op_not_i64: 1252 case INDEX_op_neg_i32: 1253 case INDEX_op_neg_i64: 1254 case INDEX_op_ctpop_i32: 1255 case INDEX_op_ctpop_i64: 1256 tci_args_rr(insn, &r0, &r1); 1257 info->fprintf_func(info->stream, "%-12s %s, %s", 1258 op_name, str_r(r0), str_r(r1)); 1259 break; 1260 1261 case INDEX_op_add_i32: 1262 case INDEX_op_add_i64: 1263 case INDEX_op_sub_i32: 1264 case INDEX_op_sub_i64: 1265 case INDEX_op_mul_i32: 1266 case INDEX_op_mul_i64: 1267 case INDEX_op_and_i32: 1268 case INDEX_op_and_i64: 1269 case INDEX_op_or_i32: 1270 case INDEX_op_or_i64: 1271 case INDEX_op_xor_i32: 1272 case INDEX_op_xor_i64: 1273 case INDEX_op_andc_i32: 1274 case INDEX_op_andc_i64: 1275 case INDEX_op_orc_i32: 1276 case INDEX_op_orc_i64: 1277 case INDEX_op_eqv_i32: 1278 case INDEX_op_eqv_i64: 1279 case INDEX_op_nand_i32: 1280 case INDEX_op_nand_i64: 1281 case INDEX_op_nor_i32: 1282 case INDEX_op_nor_i64: 1283 case INDEX_op_div_i32: 1284 case INDEX_op_div_i64: 1285 case INDEX_op_rem_i32: 1286 case INDEX_op_rem_i64: 1287 case INDEX_op_divu_i32: 1288 case INDEX_op_divu_i64: 1289 case INDEX_op_remu_i32: 1290 case INDEX_op_remu_i64: 1291 case INDEX_op_shl_i32: 1292 case INDEX_op_shl_i64: 1293 case INDEX_op_shr_i32: 1294 case INDEX_op_shr_i64: 1295 case INDEX_op_sar_i32: 1296 case INDEX_op_sar_i64: 1297 case INDEX_op_rotl_i32: 1298 case INDEX_op_rotl_i64: 1299 case INDEX_op_rotr_i32: 1300 case INDEX_op_rotr_i64: 1301 case INDEX_op_clz_i32: 1302 case INDEX_op_clz_i64: 1303 case INDEX_op_ctz_i32: 1304 case INDEX_op_ctz_i64: 1305 tci_args_rrr(insn, &r0, &r1, &r2); 1306 info->fprintf_func(info->stream, "%-12s %s, %s, %s", 1307 op_name, str_r(r0), str_r(r1), str_r(r2)); 1308 break; 1309 1310 case INDEX_op_deposit_i32: 1311 case INDEX_op_deposit_i64: 1312 tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len); 1313 info->fprintf_func(info->stream, "%-12s %s, %s, %s, %d, %d", 1314 op_name, str_r(r0), str_r(r1), str_r(r2), pos, len); 1315 break; 1316 1317 case INDEX_op_extract_i32: 1318 case INDEX_op_extract_i64: 1319 case INDEX_op_sextract_i32: 1320 case INDEX_op_sextract_i64: 1321 tci_args_rrbb(insn, &r0, &r1, &pos, &len); 1322 info->fprintf_func(info->stream, "%-12s %s,%s,%d,%d", 1323 op_name, str_r(r0), str_r(r1), pos, len); 1324 break; 1325 1326 case INDEX_op_movcond_i32: 1327 case INDEX_op_movcond_i64: 1328 case INDEX_op_setcond2_i32: 1329 tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &c); 1330 info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s, %s", 1331 op_name, str_r(r0), str_r(r1), str_r(r2), 1332 str_r(r3), str_r(r4), str_c(c)); 1333 break; 1334 1335 case INDEX_op_mulu2_i32: 1336 case INDEX_op_mulu2_i64: 1337 case INDEX_op_muls2_i32: 1338 case INDEX_op_muls2_i64: 1339 tci_args_rrrr(insn, &r0, &r1, &r2, &r3); 1340 info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s", 1341 op_name, str_r(r0), str_r(r1), 1342 str_r(r2), str_r(r3)); 1343 break; 1344 1345 case INDEX_op_add2_i32: 1346 case INDEX_op_add2_i64: 1347 case INDEX_op_sub2_i32: 1348 case INDEX_op_sub2_i64: 1349 tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5); 1350 info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s, %s", 1351 op_name, str_r(r0), str_r(r1), str_r(r2), 1352 str_r(r3), str_r(r4), str_r(r5)); 1353 break; 1354 1355 case INDEX_op_qemu_ld_i64: 1356 case INDEX_op_qemu_st_i64: 1357 len = DIV_ROUND_UP(64, TCG_TARGET_REG_BITS); 1358 goto do_qemu_ldst; 1359 case INDEX_op_qemu_ld_i32: 1360 case INDEX_op_qemu_st_i32: 1361 len = 1; 1362 do_qemu_ldst: 1363 len += DIV_ROUND_UP(TARGET_LONG_BITS, TCG_TARGET_REG_BITS); 1364 switch (len) { 1365 case 2: 1366 tci_args_rrm(insn, &r0, &r1, &oi); 1367 info->fprintf_func(info->stream, "%-12s %s, %s, %x", 1368 op_name, str_r(r0), str_r(r1), oi); 1369 break; 1370 case 3: 1371 tci_args_rrrm(insn, &r0, &r1, &r2, &oi); 1372 info->fprintf_func(info->stream, "%-12s %s, %s, %s, %x", 1373 op_name, str_r(r0), str_r(r1), str_r(r2), oi); 1374 break; 1375 case 4: 1376 tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4); 1377 info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s", 1378 op_name, str_r(r0), str_r(r1), 1379 str_r(r2), str_r(r3), str_r(r4)); 1380 break; 1381 default: 1382 g_assert_not_reached(); 1383 } 1384 break; 1385 1386 case 0: 1387 /* tcg_out_nop_fill uses zeros */ 1388 if (insn == 0) { 1389 info->fprintf_func(info->stream, "align"); 1390 break; 1391 } 1392 /* fall through */ 1393 1394 default: 1395 info->fprintf_func(info->stream, "illegal opcode %d", op); 1396 break; 1397 } 1398 1399 return sizeof(insn); 1400 } 1401