/*
 * Tiny Code Interpreter for QEMU
 *
 * Copyright (c) 2009, 2011, 2016 Stefan Weil
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "exec/cpu_ldst.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-ldst.h"
#include <ffi.h>


/*
 * Enable TCI assertions only when debugging TCG (and without NDEBUG defined).
 * Without assertions, the interpreter runs much faster.
 */
#if defined(CONFIG_DEBUG_TCG)
# define tci_assert(cond) assert(cond)
#else
# define tci_assert(cond) ((void)(cond))
#endif

__thread uintptr_t tci_tb_ptr;

static void tci_write_reg64(tcg_target_ulong *regs, uint32_t high_index,
                            uint32_t low_index, uint64_t value)
{
    regs[low_index] = (uint32_t)value;
    regs[high_index] = value >> 32;
}

/* Create a 64 bit value from two 32 bit values. */
static uint64_t tci_uint64(uint32_t high, uint32_t low)
{
    return ((uint64_t)high << 32) + low;
}

/*
 * Load sets of arguments all at once. The naming convention is:
 *   tci_args_<arguments>
 * where arguments is a sequence of
 *
 *   b = immediate (bit position)
 *   c = condition (TCGCond)
 *   i = immediate (uint32_t)
 *   I = immediate (tcg_target_ulong)
 *   l = label or pointer
 *   m = immediate (MemOpIdx)
 *   n = immediate (call return length)
 *   r = register
 *   s = signed ldst offset
 */
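
/*
 * The decoders below unpack a single 32-bit TCI instruction word.  The
 * opcode always occupies bits 0..7 (see tcg_qemu_tb_exec), register
 * operands are 4-bit fields packed from bit 8 upwards, and the remaining
 * high bits carry immediates such as conditions, bit positions, MemOpIdx
 * values or signed displacements.  For example, an "rrr" operation
 * decodes as:
 *
 *   bits  0..7   opcode
 *   bits  8..11  r0 (output register)
 *   bits 12..15  r1 (first input)
 *   bits 16..19  r2 (second input)
 */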

static void tci_args_l(uint32_t insn, const void *tb_ptr, void **l0)
{
    int diff = sextract32(insn, 12, 20);
    *l0 = diff ? (void *)tb_ptr + diff : NULL;
}

static void tci_args_r(uint32_t insn, TCGReg *r0)
{
    *r0 = extract32(insn, 8, 4);
}

static void tci_args_nl(uint32_t insn, const void *tb_ptr,
                        uint8_t *n0, void **l1)
{
    *n0 = extract32(insn, 8, 4);
    *l1 = sextract32(insn, 12, 20) + (void *)tb_ptr;
}

static void tci_args_rl(uint32_t insn, const void *tb_ptr,
                        TCGReg *r0, void **l1)
{
    *r0 = extract32(insn, 8, 4);
    *l1 = sextract32(insn, 12, 20) + (void *)tb_ptr;
}

static void tci_args_rr(uint32_t insn, TCGReg *r0, TCGReg *r1)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
}

static void tci_args_ri(uint32_t insn, TCGReg *r0, tcg_target_ulong *i1)
{
    *r0 = extract32(insn, 8, 4);
    *i1 = sextract32(insn, 12, 20);
}

static void tci_args_rrm(uint32_t insn, TCGReg *r0,
                         TCGReg *r1, MemOpIdx *m2)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *m2 = extract32(insn, 20, 12);
}

static void tci_args_rrr(uint32_t insn, TCGReg *r0, TCGReg *r1, TCGReg *r2)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
}

static void tci_args_rrs(uint32_t insn, TCGReg *r0, TCGReg *r1, int32_t *i2)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *i2 = sextract32(insn, 16, 16);
}

static void tci_args_rrbb(uint32_t insn, TCGReg *r0, TCGReg *r1,
                          uint8_t *i2, uint8_t *i3)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *i2 = extract32(insn, 16, 6);
    *i3 = extract32(insn, 22, 6);
}

static void tci_args_rrrc(uint32_t insn,
                          TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGCond *c3)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *c3 = extract32(insn, 20, 4);
}

static void tci_args_rrrm(uint32_t insn,
                          TCGReg *r0, TCGReg *r1, TCGReg *r2, MemOpIdx *m3)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *m3 = extract32(insn, 20, 12);
}

static void tci_args_rrrbb(uint32_t insn, TCGReg *r0, TCGReg *r1,
                           TCGReg *r2, uint8_t *i3, uint8_t *i4)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *i3 = extract32(insn, 20, 6);
    *i4 = extract32(insn, 26, 6);
}

static void tci_args_rrrrr(uint32_t insn, TCGReg *r0, TCGReg *r1,
                           TCGReg *r2, TCGReg *r3, TCGReg *r4)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *r3 = extract32(insn, 20, 4);
    *r4 = extract32(insn, 24, 4);
}

static void tci_args_rrrr(uint32_t insn,
                          TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGReg *r3)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *r3 = extract32(insn, 20, 4);
}

static void tci_args_rrrrrc(uint32_t insn, TCGReg *r0, TCGReg *r1,
                            TCGReg *r2, TCGReg *r3, TCGReg *r4, TCGCond *c5)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *r3 = extract32(insn, 20, 4);
    *r4 = extract32(insn, 24, 4);
    *c5 = extract32(insn, 28, 4);
}

static void tci_args_rrrrrr(uint32_t insn, TCGReg *r0, TCGReg *r1,
                            TCGReg *r2, TCGReg *r3, TCGReg *r4, TCGReg *r5)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *r3 = extract32(insn, 20, 4);
    *r4 = extract32(insn, 24, 4);
    *r5 = extract32(insn, 28, 4);
}

static bool tci_compare32(uint32_t u0, uint32_t u1, TCGCond condition)
{
    bool result = false;
    int32_t i0 = u0;
    int32_t i1 = u1;
    switch (condition) {
    case TCG_COND_EQ:
        result = (u0 == u1);
        break;
    case TCG_COND_NE:
        result = (u0 != u1);
        break;
    case TCG_COND_LT:
        result = (i0 < i1);
        break;
    case TCG_COND_GE:
        result = (i0 >= i1);
        break;
    case TCG_COND_LE:
        result = (i0 <= i1);
        break;
    case TCG_COND_GT:
        result = (i0 > i1);
        break;
    case TCG_COND_LTU:
        result = (u0 < u1);
        break;
    case TCG_COND_GEU:
        result = (u0 >= u1);
        break;
    case TCG_COND_LEU:
        result = (u0 <= u1);
        break;
    case TCG_COND_GTU:
        result = (u0 > u1);
        break;
    default:
        g_assert_not_reached();
    }
    return result;
}

static bool tci_compare64(uint64_t u0, uint64_t u1, TCGCond condition)
{
    bool result = false;
    int64_t i0 = u0;
    int64_t i1 = u1;
    switch (condition) {
    case TCG_COND_EQ:
        result = (u0 == u1);
        break;
    case TCG_COND_NE:
        result = (u0 != u1);
        break;
    case TCG_COND_LT:
        result = (i0 < i1);
        break;
    case TCG_COND_GE:
        result = (i0 >= i1);
        break;
    case TCG_COND_LE:
        result = (i0 <= i1);
        break;
    case TCG_COND_GT:
        result = (i0 > i1);
        break;
    case TCG_COND_LTU:
        result = (u0 < u1);
        break;
    case TCG_COND_GEU:
        result = (u0 >= u1);
        break;
    case TCG_COND_LEU:
        result = (u0 <= u1);
        break;
    case TCG_COND_GTU:
        result = (u0 > u1);
        break;
    default:
        g_assert_not_reached();
    }
    return result;
}

static uint64_t tci_qemu_ld(CPUArchState *env, target_ulong taddr,
                            MemOpIdx oi, const void *tb_ptr)
{
    MemOp mop = get_memop(oi);
    uintptr_t ra = (uintptr_t)tb_ptr;

    switch (mop & MO_SSIZE) {
    case MO_UB:
        return helper_ldub_mmu(env, taddr, oi, ra);
    case MO_SB:
        return helper_ldsb_mmu(env, taddr, oi, ra);
    case MO_UW:
        return helper_lduw_mmu(env, taddr, oi, ra);
    case MO_SW:
        return helper_ldsw_mmu(env, taddr, oi, ra);
    case MO_UL:
        return helper_ldul_mmu(env, taddr, oi, ra);
    case MO_SL:
        return helper_ldsl_mmu(env, taddr, oi, ra);
    case MO_UQ:
        return helper_ldq_mmu(env, taddr, oi, ra);
    default:
        g_assert_not_reached();
    }
}

static void tci_qemu_st(CPUArchState *env, target_ulong taddr, uint64_t val,
                        MemOpIdx oi, const void *tb_ptr)
{
    MemOp mop = get_memop(oi);
    uintptr_t ra = (uintptr_t)tb_ptr;

    switch (mop & MO_SIZE) {
    case MO_UB:
        helper_stb_mmu(env, taddr, val, oi, ra);
        break;
    case MO_UW:
        helper_stw_mmu(env, taddr, val, oi, ra);
        break;
    case MO_UL:
        helper_stl_mmu(env, taddr, val, oi, ra);
        break;
    case MO_UQ:
        helper_stq_mmu(env, taddr, val, oi, ra);
        break;
    default:
        g_assert_not_reached();
    }
}

#if TCG_TARGET_REG_BITS == 64
# define CASE_32_64(x) \
        case glue(glue(INDEX_op_, x), _i64): \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x) \
        case glue(glue(INDEX_op_, x), _i64):
#else
# define CASE_32_64(x) \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x)
#endif
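
/*
 * CASE_32_64(op) expands to case labels for both the _i32 and _i64 flavour
 * of an opcode on 64-bit hosts, and only to the _i32 label on 32-bit hosts;
 * CASE_64(op) disappears entirely on 32-bit hosts.  This lets a single
 * switch arm in the interpreter below serve both operand widths.
 */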

/* Interpret pseudo code in tb. */
/*
 * Disable CFI checks.
 * One possible operation in the pseudo code is a call to binary code.
 * Therefore, disable CFI checks in the interpreter function.
 */
uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
                                            const void *v_tb_ptr)
{
    const uint32_t *tb_ptr = v_tb_ptr;
    tcg_target_ulong regs[TCG_TARGET_NB_REGS];
    uint64_t stack[(TCG_STATIC_CALL_ARGS_SIZE + TCG_STATIC_FRAME_SIZE)
                   / sizeof(uint64_t)];

    regs[TCG_AREG0] = (tcg_target_ulong)env;
    regs[TCG_REG_CALL_STACK] = (uintptr_t)stack;
    tci_assert(tb_ptr);

    for (;;) {
        uint32_t insn;
        TCGOpcode opc;
        TCGReg r0, r1, r2, r3, r4, r5;
        tcg_target_ulong t1;
        TCGCond condition;
        target_ulong taddr;
        uint8_t pos, len;
        uint32_t tmp32;
        uint64_t tmp64;
        uint64_t T1, T2;
        MemOpIdx oi;
        int32_t ofs;
        void *ptr;

        insn = *tb_ptr++;
        opc = extract32(insn, 0, 8);

        switch (opc) {
        case INDEX_op_call:
            {
                void *call_slots[MAX_CALL_IARGS];
                ffi_cif *cif;
                void *func;
                unsigned i, s, n;

                tci_args_nl(insn, tb_ptr, &len, &ptr);
                func = ((void **)ptr)[0];
                cif = ((void **)ptr)[1];

                n = cif->nargs;
                for (i = s = 0; i < n; ++i) {
                    ffi_type *t = cif->arg_types[i];
                    call_slots[i] = &stack[s];
                    s += DIV_ROUND_UP(t->size, 8);
                }

                /* Helper functions may need to access the "return address" */
                tci_tb_ptr = (uintptr_t)tb_ptr;
                ffi_call(cif, func, stack, call_slots);
            }

            switch (len) {
            case 0: /* void */
                break;
            case 1: /* uint32_t */
                /*
                 * The result winds up "left-aligned" in the stack[0] slot.
                 * Note that libffi has an odd special case in that it will
                 * always widen an integral result to ffi_arg.
                 */
                if (sizeof(ffi_arg) == 8) {
                    regs[TCG_REG_R0] = (uint32_t)stack[0];
                } else {
                    regs[TCG_REG_R0] = *(uint32_t *)stack;
                }
                break;
            case 2: /* uint64_t */
                /*
                 * For TCG_TARGET_REG_BITS == 32, the register pair
                 * must stay in host memory order.
                 */
                memcpy(&regs[TCG_REG_R0], stack, 8);
                break;
            case 3: /* Int128 */
                memcpy(&regs[TCG_REG_R0], stack, 16);
                break;
            default:
                g_assert_not_reached();
            }
            break;

        case INDEX_op_br:
            tci_args_l(insn, tb_ptr, &ptr);
            tb_ptr = ptr;
            continue;
        case INDEX_op_setcond_i32:
            tci_args_rrrc(insn, &r0, &r1, &r2, &condition);
            regs[r0] = tci_compare32(regs[r1], regs[r2], condition);
            break;
        case INDEX_op_movcond_i32:
            tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition);
            tmp32 = tci_compare32(regs[r1], regs[r2], condition);
            regs[r0] = regs[tmp32 ? r3 : r4];
            break;
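        /*
         * 64-bit comparisons: on a 64-bit host, setcond_i64/movcond_i64
         * compare whole registers; on a 32-bit host, setcond2_i32 receives
         * each 64-bit operand as a lo/hi register pair and reassembles it
         * with tci_uint64() before comparing.
         */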
#if TCG_TARGET_REG_BITS == 32
        case INDEX_op_setcond2_i32:
            tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition);
            T1 = tci_uint64(regs[r2], regs[r1]);
            T2 = tci_uint64(regs[r4], regs[r3]);
            regs[r0] = tci_compare64(T1, T2, condition);
            break;
#elif TCG_TARGET_REG_BITS == 64
        case INDEX_op_setcond_i64:
            tci_args_rrrc(insn, &r0, &r1, &r2, &condition);
            regs[r0] = tci_compare64(regs[r1], regs[r2], condition);
            break;
        case INDEX_op_movcond_i64:
            tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition);
            tmp32 = tci_compare64(regs[r1], regs[r2], condition);
            regs[r0] = regs[tmp32 ? r3 : r4];
            break;
#endif
        CASE_32_64(mov)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = regs[r1];
            break;
        case INDEX_op_tci_movi:
            tci_args_ri(insn, &r0, &t1);
            regs[r0] = t1;
            break;
        case INDEX_op_tci_movl:
            tci_args_rl(insn, tb_ptr, &r0, &ptr);
            regs[r0] = *(tcg_target_ulong *)ptr;
            break;

        /* Load/store operations (32 bit). */

        CASE_32_64(ld8u)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint8_t *)ptr;
            break;
        CASE_32_64(ld8s)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(int8_t *)ptr;
            break;
        CASE_32_64(ld16u)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint16_t *)ptr;
            break;
        CASE_32_64(ld16s)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(int16_t *)ptr;
            break;
        case INDEX_op_ld_i32:
        CASE_64(ld32u)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint32_t *)ptr;
            break;
        CASE_32_64(st8)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint8_t *)ptr = regs[r0];
            break;
        CASE_32_64(st16)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint16_t *)ptr = regs[r0];
            break;
        case INDEX_op_st_i32:
        CASE_64(st32)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint32_t *)ptr = regs[r0];
            break;

        /* Arithmetic operations (mixed 32/64 bit). */

        CASE_32_64(add)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] + regs[r2];
            break;
        CASE_32_64(sub)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] - regs[r2];
            break;
        CASE_32_64(mul)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] * regs[r2];
            break;
        CASE_32_64(and)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] & regs[r2];
            break;
        CASE_32_64(or)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] | regs[r2];
            break;
        CASE_32_64(xor)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] ^ regs[r2];
            break;
#if TCG_TARGET_HAS_andc_i32 || TCG_TARGET_HAS_andc_i64
        CASE_32_64(andc)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] & ~regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_orc_i32 || TCG_TARGET_HAS_orc_i64
        CASE_32_64(orc)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] | ~regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_eqv_i32 || TCG_TARGET_HAS_eqv_i64
        CASE_32_64(eqv)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ~(regs[r1] ^ regs[r2]);
            break;
#endif
#if TCG_TARGET_HAS_nand_i32 || TCG_TARGET_HAS_nand_i64
        CASE_32_64(nand)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ~(regs[r1] & regs[r2]);
            break;
#endif
#if TCG_TARGET_HAS_nor_i32 || TCG_TARGET_HAS_nor_i64
        CASE_32_64(nor)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ~(regs[r1] | regs[r2]);
            break;
#endif

        /* Arithmetic operations (32 bit). */

        case INDEX_op_div_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int32_t)regs[r1] / (int32_t)regs[r2];
            break;
        case INDEX_op_divu_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] / (uint32_t)regs[r2];
            break;
        case INDEX_op_rem_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int32_t)regs[r1] % (int32_t)regs[r2];
            break;
        case INDEX_op_remu_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] % (uint32_t)regs[r2];
            break;
#if TCG_TARGET_HAS_clz_i32
        case INDEX_op_clz_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            tmp32 = regs[r1];
            regs[r0] = tmp32 ? clz32(tmp32) : regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_ctz_i32
        case INDEX_op_ctz_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            tmp32 = regs[r1];
            regs[r0] = tmp32 ? ctz32(tmp32) : regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_ctpop_i32
        case INDEX_op_ctpop_i32:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = ctpop32(regs[r1]);
            break;
#endif

        /* Shift/rotate operations (32 bit). */

        case INDEX_op_shl_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] << (regs[r2] & 31);
            break;
        case INDEX_op_shr_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] >> (regs[r2] & 31);
            break;
        case INDEX_op_sar_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int32_t)regs[r1] >> (regs[r2] & 31);
            break;
#if TCG_TARGET_HAS_rot_i32
        case INDEX_op_rotl_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = rol32(regs[r1], regs[r2] & 31);
            break;
        case INDEX_op_rotr_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ror32(regs[r1], regs[r2] & 31);
            break;
#endif
#if TCG_TARGET_HAS_deposit_i32
        case INDEX_op_deposit_i32:
            tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
            regs[r0] = deposit32(regs[r1], pos, len, regs[r2]);
            break;
#endif
#if TCG_TARGET_HAS_extract_i32
        case INDEX_op_extract_i32:
            tci_args_rrbb(insn, &r0, &r1, &pos, &len);
            regs[r0] = extract32(regs[r1], pos, len);
            break;
#endif
#if TCG_TARGET_HAS_sextract_i32
        case INDEX_op_sextract_i32:
            tci_args_rrbb(insn, &r0, &r1, &pos, &len);
            regs[r0] = sextract32(regs[r1], pos, len);
            break;
#endif
        case INDEX_op_brcond_i32:
            tci_args_rl(insn, tb_ptr, &r0, &ptr);
            if ((uint32_t)regs[r0]) {
                tb_ptr = ptr;
            }
            break;
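        /*
         * Double-word arithmetic: add2/sub2 take each 64-bit operand as a
         * lo/hi register pair, while mulu2/muls2 multiply two 32-bit values
         * into a 64-bit product.  The result's low half is written to r0
         * and the high half to r1 via tci_write_reg64().
         */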
#if TCG_TARGET_REG_BITS == 32 || TCG_TARGET_HAS_add2_i32
        case INDEX_op_add2_i32:
            tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
            T1 = tci_uint64(regs[r3], regs[r2]);
            T2 = tci_uint64(regs[r5], regs[r4]);
            tci_write_reg64(regs, r1, r0, T1 + T2);
            break;
#endif
#if TCG_TARGET_REG_BITS == 32 || TCG_TARGET_HAS_sub2_i32
        case INDEX_op_sub2_i32:
            tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
            T1 = tci_uint64(regs[r3], regs[r2]);
            T2 = tci_uint64(regs[r5], regs[r4]);
            tci_write_reg64(regs, r1, r0, T1 - T2);
            break;
#endif
#if TCG_TARGET_HAS_mulu2_i32
        case INDEX_op_mulu2_i32:
            tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
            tmp64 = (uint64_t)(uint32_t)regs[r2] * (uint32_t)regs[r3];
            tci_write_reg64(regs, r1, r0, tmp64);
            break;
#endif
#if TCG_TARGET_HAS_muls2_i32
        case INDEX_op_muls2_i32:
            tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
            tmp64 = (int64_t)(int32_t)regs[r2] * (int32_t)regs[r3];
            tci_write_reg64(regs, r1, r0, tmp64);
            break;
#endif
#if TCG_TARGET_HAS_ext8s_i32 || TCG_TARGET_HAS_ext8s_i64
        CASE_32_64(ext8s)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (int8_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_ext16s_i32 || TCG_TARGET_HAS_ext16s_i64 || \
    TCG_TARGET_HAS_bswap16_i32 || TCG_TARGET_HAS_bswap16_i64
        CASE_32_64(ext16s)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (int16_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_ext8u_i32 || TCG_TARGET_HAS_ext8u_i64
        CASE_32_64(ext8u)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (uint8_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_ext16u_i32 || TCG_TARGET_HAS_ext16u_i64
        CASE_32_64(ext16u)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (uint16_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_bswap16_i32 || TCG_TARGET_HAS_bswap16_i64
        CASE_32_64(bswap16)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = bswap16(regs[r1]);
            break;
#endif
#if TCG_TARGET_HAS_bswap32_i32 || TCG_TARGET_HAS_bswap32_i64
        CASE_32_64(bswap32)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = bswap32(regs[r1]);
            break;
#endif
#if TCG_TARGET_HAS_not_i32 || TCG_TARGET_HAS_not_i64
        CASE_32_64(not)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = ~regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_neg_i32 || TCG_TARGET_HAS_neg_i64
        CASE_32_64(neg)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = -regs[r1];
            break;
#endif
#if TCG_TARGET_REG_BITS == 64
        /* Load/store operations (64 bit). */

        case INDEX_op_ld32s_i64:
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(int32_t *)ptr;
            break;
        case INDEX_op_ld_i64:
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint64_t *)ptr;
            break;
        case INDEX_op_st_i64:
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint64_t *)ptr = regs[r0];
            break;

        /* Arithmetic operations (64 bit). */

        case INDEX_op_div_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int64_t)regs[r1] / (int64_t)regs[r2];
            break;
        case INDEX_op_divu_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint64_t)regs[r1] / (uint64_t)regs[r2];
            break;
        case INDEX_op_rem_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int64_t)regs[r1] % (int64_t)regs[r2];
            break;
        case INDEX_op_remu_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint64_t)regs[r1] % (uint64_t)regs[r2];
            break;
#if TCG_TARGET_HAS_clz_i64
        case INDEX_op_clz_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] ? clz64(regs[r1]) : regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_ctz_i64
        case INDEX_op_ctz_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] ? ctz64(regs[r1]) : regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_ctpop_i64
        case INDEX_op_ctpop_i64:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = ctpop64(regs[r1]);
            break;
#endif
#if TCG_TARGET_HAS_mulu2_i64
        case INDEX_op_mulu2_i64:
            tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
            mulu64(&regs[r0], &regs[r1], regs[r2], regs[r3]);
            break;
#endif
#if TCG_TARGET_HAS_muls2_i64
        case INDEX_op_muls2_i64:
            tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
            muls64(&regs[r0], &regs[r1], regs[r2], regs[r3]);
            break;
#endif
#if TCG_TARGET_HAS_add2_i64
        case INDEX_op_add2_i64:
            tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
            T1 = regs[r2] + regs[r4];
            T2 = regs[r3] + regs[r5] + (T1 < regs[r2]);
            regs[r0] = T1;
            regs[r1] = T2;
            break;
#endif
#if TCG_TARGET_HAS_sub2_i64
        case INDEX_op_sub2_i64:
            tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
            T1 = regs[r2] - regs[r4];
            T2 = regs[r3] - regs[r5] - (regs[r2] < regs[r4]);
            regs[r0] = T1;
            regs[r1] = T2;
            break;
#endif

        /* Shift/rotate operations (64 bit). */

        case INDEX_op_shl_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] << (regs[r2] & 63);
            break;
        case INDEX_op_shr_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] >> (regs[r2] & 63);
            break;
        case INDEX_op_sar_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int64_t)regs[r1] >> (regs[r2] & 63);
            break;
#if TCG_TARGET_HAS_rot_i64
        case INDEX_op_rotl_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = rol64(regs[r1], regs[r2] & 63);
            break;
        case INDEX_op_rotr_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ror64(regs[r1], regs[r2] & 63);
            break;
#endif
#if TCG_TARGET_HAS_deposit_i64
        case INDEX_op_deposit_i64:
            tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
            regs[r0] = deposit64(regs[r1], pos, len, regs[r2]);
            break;
#endif
#if TCG_TARGET_HAS_extract_i64
        case INDEX_op_extract_i64:
            tci_args_rrbb(insn, &r0, &r1, &pos, &len);
            regs[r0] = extract64(regs[r1], pos, len);
            break;
#endif
#if TCG_TARGET_HAS_sextract_i64
        case INDEX_op_sextract_i64:
            tci_args_rrbb(insn, &r0, &r1, &pos, &len);
            regs[r0] = sextract64(regs[r1], pos, len);
            break;
#endif
        case INDEX_op_brcond_i64:
            tci_args_rl(insn, tb_ptr, &r0, &ptr);
            if (regs[r0]) {
                tb_ptr = ptr;
            }
            break;
        case INDEX_op_ext32s_i64:
        case INDEX_op_ext_i32_i64:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (int32_t)regs[r1];
            break;
        case INDEX_op_ext32u_i64:
        case INDEX_op_extu_i32_i64:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (uint32_t)regs[r1];
            break;
#if TCG_TARGET_HAS_bswap64_i64
        case INDEX_op_bswap64_i64:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = bswap64(regs[r1]);
            break;
#endif
#endif /* TCG_TARGET_REG_BITS == 64 */

        /* QEMU specific operations. */

        case INDEX_op_exit_tb:
            tci_args_l(insn, tb_ptr, &ptr);
            return (uintptr_t)ptr;

        case INDEX_op_goto_tb:
            tci_args_l(insn, tb_ptr, &ptr);
            tb_ptr = *(void **)ptr;
            break;

        case INDEX_op_goto_ptr:
            tci_args_r(insn, &r0);
            ptr = (void *)regs[r0];
            if (!ptr) {
                return 0;
            }
            tb_ptr = ptr;
            break;
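
        /*
         * Guest memory accesses.  How the operands were encoded depends on
         * whether the guest address and (for 64-bit data) the value fit in
         * a single host register: when they do not, they are passed as
         * lo/hi register pairs, and the MemOpIdx may itself have to live in
         * a register because it no longer fits in the instruction word.
         */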
        case INDEX_op_qemu_ld_i32:
            if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                taddr = regs[r1];
            } else {
                tci_args_rrrm(insn, &r0, &r1, &r2, &oi);
                taddr = tci_uint64(regs[r2], regs[r1]);
            }
            tmp32 = tci_qemu_ld(env, taddr, oi, tb_ptr);
            regs[r0] = tmp32;
            break;

        case INDEX_op_qemu_ld_i64:
            if (TCG_TARGET_REG_BITS == 64) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                taddr = regs[r1];
            } else if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) {
                tci_args_rrrm(insn, &r0, &r1, &r2, &oi);
                taddr = regs[r2];
            } else {
                tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4);
                taddr = tci_uint64(regs[r3], regs[r2]);
                oi = regs[r4];
            }
            tmp64 = tci_qemu_ld(env, taddr, oi, tb_ptr);
            if (TCG_TARGET_REG_BITS == 32) {
                tci_write_reg64(regs, r1, r0, tmp64);
            } else {
                regs[r0] = tmp64;
            }
            break;

        case INDEX_op_qemu_st_i32:
            if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                taddr = regs[r1];
            } else {
                tci_args_rrrm(insn, &r0, &r1, &r2, &oi);
                taddr = tci_uint64(regs[r2], regs[r1]);
            }
            tmp32 = regs[r0];
            tci_qemu_st(env, taddr, tmp32, oi, tb_ptr);
            break;

        case INDEX_op_qemu_st_i64:
            if (TCG_TARGET_REG_BITS == 64) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                taddr = regs[r1];
                tmp64 = regs[r0];
            } else {
                if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) {
                    tci_args_rrrm(insn, &r0, &r1, &r2, &oi);
                    taddr = regs[r2];
                } else {
                    tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4);
                    taddr = tci_uint64(regs[r3], regs[r2]);
                    oi = regs[r4];
                }
                tmp64 = tci_uint64(regs[r1], regs[r0]);
            }
            tci_qemu_st(env, taddr, tmp64, oi, tb_ptr);
            break;

        case INDEX_op_mb:
            /* Ensure ordering for all kinds */
            smp_mb();
            break;
        default:
            g_assert_not_reached();
        }
    }
}

/*
 * Disassembler that matches the interpreter
 */

static const char *str_r(TCGReg r)
{
    static const char regs[TCG_TARGET_NB_REGS][4] = {
        "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
        "r8", "r9", "r10", "r11", "r12", "r13", "env", "sp"
    };

    QEMU_BUILD_BUG_ON(TCG_AREG0 != TCG_REG_R14);
    QEMU_BUILD_BUG_ON(TCG_REG_CALL_STACK != TCG_REG_R15);

    assert((unsigned)r < TCG_TARGET_NB_REGS);
    return regs[r];
}

static const char *str_c(TCGCond c)
{
    static const char cond[16][8] = {
        [TCG_COND_NEVER] = "never",
        [TCG_COND_ALWAYS] = "always",
        [TCG_COND_EQ] = "eq",
        [TCG_COND_NE] = "ne",
        [TCG_COND_LT] = "lt",
        [TCG_COND_GE] = "ge",
        [TCG_COND_LE] = "le",
        [TCG_COND_GT] = "gt",
        [TCG_COND_LTU] = "ltu",
        [TCG_COND_GEU] = "geu",
        [TCG_COND_LEU] = "leu",
        [TCG_COND_GTU] = "gtu",
    };

    assert((unsigned)c < ARRAY_SIZE(cond));
    assert(cond[c][0] != 0);
    return cond[c];
}

/* Disassemble TCI bytecode. */
int print_insn_tci(bfd_vma addr, disassemble_info *info)
{
    const uint32_t *tb_ptr = (const void *)(uintptr_t)addr;
    const TCGOpDef *def;
    const char *op_name;
    uint32_t insn;
    TCGOpcode op;
    TCGReg r0, r1, r2, r3, r4, r5;
    tcg_target_ulong i1;
    int32_t s2;
    TCGCond c;
    MemOpIdx oi;
    uint8_t pos, len;
    void *ptr;

    /* TCI is always the host, so we don't need to load indirect. */
    insn = *tb_ptr++;

    info->fprintf_func(info->stream, "%08x ", insn);

    op = extract32(insn, 0, 8);
    def = &tcg_op_defs[op];
    op_name = def->name;

    switch (op) {
    case INDEX_op_br:
    case INDEX_op_exit_tb:
    case INDEX_op_goto_tb:
        tci_args_l(insn, tb_ptr, &ptr);
        info->fprintf_func(info->stream, "%-12s %p", op_name, ptr);
        break;

    case INDEX_op_goto_ptr:
        tci_args_r(insn, &r0);
        info->fprintf_func(info->stream, "%-12s %s", op_name, str_r(r0));
        break;

    case INDEX_op_call:
        tci_args_nl(insn, tb_ptr, &len, &ptr);
        info->fprintf_func(info->stream, "%-12s %d, %p", op_name, len, ptr);
        break;

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        tci_args_rl(insn, tb_ptr, &r0, &ptr);
        info->fprintf_func(info->stream, "%-12s %s, 0, ne, %p",
                           op_name, str_r(r0), ptr);
        break;

    case INDEX_op_setcond_i32:
    case INDEX_op_setcond_i64:
        tci_args_rrrc(insn, &r0, &r1, &r2, &c);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s",
                           op_name, str_r(r0), str_r(r1), str_r(r2), str_c(c));
        break;

    case INDEX_op_tci_movi:
        tci_args_ri(insn, &r0, &i1);
        info->fprintf_func(info->stream, "%-12s %s, 0x%" TCG_PRIlx,
                           op_name, str_r(r0), i1);
        break;

    case INDEX_op_tci_movl:
        tci_args_rl(insn, tb_ptr, &r0, &ptr);
        info->fprintf_func(info->stream, "%-12s %s, %p",
                           op_name, str_r(r0), ptr);
        break;

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i32:
    case INDEX_op_ld_i64:
    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i32:
    case INDEX_op_st_i64:
        tci_args_rrs(insn, &r0, &r1, &s2);
        info->fprintf_func(info->stream, "%-12s %s, %s, %d",
                           op_name, str_r(r0), str_r(r1), s2);
        break;

    case INDEX_op_mov_i32:
    case INDEX_op_mov_i64:
    case INDEX_op_ext8s_i32:
    case INDEX_op_ext8s_i64:
    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
    case INDEX_op_ext16u_i32:
    case INDEX_op_ext32s_i64:
    case INDEX_op_ext32u_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap16_i64:
    case INDEX_op_bswap32_i32:
    case INDEX_op_bswap32_i64:
    case INDEX_op_bswap64_i64:
    case INDEX_op_not_i32:
    case INDEX_op_not_i64:
    case INDEX_op_neg_i32:
    case INDEX_op_neg_i64:
    case INDEX_op_ctpop_i32:
    case INDEX_op_ctpop_i64:
        tci_args_rr(insn, &r0, &r1);
        info->fprintf_func(info->stream, "%-12s %s, %s",
                           op_name, str_r(r0), str_r(r1));
        break;

    case INDEX_op_add_i32:
    case INDEX_op_add_i64:
    case INDEX_op_sub_i32:
    case INDEX_op_sub_i64:
    case INDEX_op_mul_i32:
    case INDEX_op_mul_i64:
    case INDEX_op_and_i32:
    case INDEX_op_and_i64:
    case INDEX_op_or_i32:
    case INDEX_op_or_i64:
    case INDEX_op_xor_i32:
    case INDEX_op_xor_i64:
    case INDEX_op_andc_i32:
    case INDEX_op_andc_i64:
    case INDEX_op_orc_i32:
    case INDEX_op_orc_i64:
    case INDEX_op_eqv_i32:
    case INDEX_op_eqv_i64:
    case INDEX_op_nand_i32:
    case INDEX_op_nand_i64:
    case INDEX_op_nor_i32:
    case INDEX_op_nor_i64:
    case INDEX_op_div_i32:
    case INDEX_op_div_i64:
    case INDEX_op_rem_i32:
    case INDEX_op_rem_i64:
    case INDEX_op_divu_i32:
    case INDEX_op_divu_i64:
    case INDEX_op_remu_i32:
    case INDEX_op_remu_i64:
    case INDEX_op_shl_i32:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i32:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i32:
    case INDEX_op_sar_i64:
    case INDEX_op_rotl_i32:
    case INDEX_op_rotl_i64:
    case INDEX_op_rotr_i32:
    case INDEX_op_rotr_i64:
    case INDEX_op_clz_i32:
    case INDEX_op_clz_i64:
    case INDEX_op_ctz_i32:
    case INDEX_op_ctz_i64:
        tci_args_rrr(insn, &r0, &r1, &r2);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s",
                           op_name, str_r(r0), str_r(r1), str_r(r2));
        break;

    case INDEX_op_deposit_i32:
    case INDEX_op_deposit_i64:
        tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %d, %d",
                           op_name, str_r(r0), str_r(r1), str_r(r2), pos, len);
        break;

    case INDEX_op_extract_i32:
    case INDEX_op_extract_i64:
    case INDEX_op_sextract_i32:
    case INDEX_op_sextract_i64:
        tci_args_rrbb(insn, &r0, &r1, &pos, &len);
        info->fprintf_func(info->stream, "%-12s %s,%s,%d,%d",
                           op_name, str_r(r0), str_r(r1), pos, len);
        break;

    case INDEX_op_movcond_i32:
    case INDEX_op_movcond_i64:
    case INDEX_op_setcond2_i32:
        tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &c);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s, %s",
                           op_name, str_r(r0), str_r(r1), str_r(r2),
                           str_r(r3), str_r(r4), str_c(c));
        break;

    case INDEX_op_mulu2_i32:
    case INDEX_op_mulu2_i64:
    case INDEX_op_muls2_i32:
    case INDEX_op_muls2_i64:
        tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s",
                           op_name, str_r(r0), str_r(r1),
                           str_r(r2), str_r(r3));
        break;

    case INDEX_op_add2_i32:
    case INDEX_op_add2_i64:
    case INDEX_op_sub2_i32:
    case INDEX_op_sub2_i64:
        tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s, %s",
                           op_name, str_r(r0), str_r(r1), str_r(r2),
                           str_r(r3), str_r(r4), str_r(r5));
        break;
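
    /*
     * Guest load/store opcodes have a variable number of register operands:
     * "len" counts the registers needed for the data value (two on a 32-bit
     * host for 64-bit data) plus those needed for the guest address, and
     * the total selects the matching argument decoder.
     */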
    case INDEX_op_qemu_ld_i64:
    case INDEX_op_qemu_st_i64:
        len = DIV_ROUND_UP(64, TCG_TARGET_REG_BITS);
        goto do_qemu_ldst;
    case INDEX_op_qemu_ld_i32:
    case INDEX_op_qemu_st_i32:
        len = 1;
    do_qemu_ldst:
        len += DIV_ROUND_UP(TARGET_LONG_BITS, TCG_TARGET_REG_BITS);
        switch (len) {
        case 2:
            tci_args_rrm(insn, &r0, &r1, &oi);
            info->fprintf_func(info->stream, "%-12s %s, %s, %x",
                               op_name, str_r(r0), str_r(r1), oi);
            break;
        case 3:
            tci_args_rrrm(insn, &r0, &r1, &r2, &oi);
            info->fprintf_func(info->stream, "%-12s %s, %s, %s, %x",
                               op_name, str_r(r0), str_r(r1), str_r(r2), oi);
            break;
        case 4:
            tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4);
            info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s",
                               op_name, str_r(r0), str_r(r1),
                               str_r(r2), str_r(r3), str_r(r4));
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case 0:
        /* tcg_out_nop_fill uses zeros */
        if (insn == 0) {
            info->fprintf_func(info->stream, "align");
            break;
        }
        /* fall through */

    default:
        info->fprintf_func(info->stream, "illegal opcode %d", op);
        break;
    }

    return sizeof(insn);
}