1 /* 2 * Tiny Code Interpreter for QEMU 3 * 4 * Copyright (c) 2009, 2011, 2016 Stefan Weil 5 * 6 * This program is free software: you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License as published by 8 * the Free Software Foundation, either version 2 of the License, or 9 * (at your option) any later version. 10 * 11 * This program is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * GNU General Public License for more details. 15 * 16 * You should have received a copy of the GNU General Public License 17 * along with this program. If not, see <http://www.gnu.org/licenses/>. 18 */ 19 20 #include "qemu/osdep.h" 21 #include "tcg/tcg.h" 22 #include "tcg/tcg-ldst.h" 23 #include <ffi.h> 24 25 26 /* 27 * Enable TCI assertions only when debugging TCG (and without NDEBUG defined). 28 * Without assertions, the interpreter runs much faster. 29 */ 30 #if defined(CONFIG_DEBUG_TCG) 31 # define tci_assert(cond) assert(cond) 32 #else 33 # define tci_assert(cond) ((void)(cond)) 34 #endif 35 36 __thread uintptr_t tci_tb_ptr; 37 38 static void tci_write_reg64(tcg_target_ulong *regs, uint32_t high_index, 39 uint32_t low_index, uint64_t value) 40 { 41 regs[low_index] = (uint32_t)value; 42 regs[high_index] = value >> 32; 43 } 44 45 /* Create a 64 bit value from two 32 bit values. */ 46 static uint64_t tci_uint64(uint32_t high, uint32_t low) 47 { 48 return ((uint64_t)high << 32) + low; 49 } 50 51 /* 52 * Load sets of arguments all at once. The naming convention is: 53 * tci_args_<arguments> 54 * where arguments is a sequence of 55 * 56 * b = immediate (bit position) 57 * c = condition (TCGCond) 58 * i = immediate (uint32_t) 59 * I = immediate (tcg_target_ulong) 60 * l = label or pointer 61 * m = immediate (MemOpIdx) 62 * n = immediate (call return length) 63 * r = register 64 * s = signed ldst offset 65 */ 66 67 static void tci_args_l(uint32_t insn, const void *tb_ptr, void **l0) 68 { 69 int diff = sextract32(insn, 12, 20); 70 *l0 = diff ? (void *)tb_ptr + diff : NULL; 71 } 72 73 static void tci_args_r(uint32_t insn, TCGReg *r0) 74 { 75 *r0 = extract32(insn, 8, 4); 76 } 77 78 static void tci_args_nl(uint32_t insn, const void *tb_ptr, 79 uint8_t *n0, void **l1) 80 { 81 *n0 = extract32(insn, 8, 4); 82 *l1 = sextract32(insn, 12, 20) + (void *)tb_ptr; 83 } 84 85 static void tci_args_rl(uint32_t insn, const void *tb_ptr, 86 TCGReg *r0, void **l1) 87 { 88 *r0 = extract32(insn, 8, 4); 89 *l1 = sextract32(insn, 12, 20) + (void *)tb_ptr; 90 } 91 92 static void tci_args_rr(uint32_t insn, TCGReg *r0, TCGReg *r1) 93 { 94 *r0 = extract32(insn, 8, 4); 95 *r1 = extract32(insn, 12, 4); 96 } 97 98 static void tci_args_ri(uint32_t insn, TCGReg *r0, tcg_target_ulong *i1) 99 { 100 *r0 = extract32(insn, 8, 4); 101 *i1 = sextract32(insn, 12, 20); 102 } 103 104 static void tci_args_rrm(uint32_t insn, TCGReg *r0, 105 TCGReg *r1, MemOpIdx *m2) 106 { 107 *r0 = extract32(insn, 8, 4); 108 *r1 = extract32(insn, 12, 4); 109 *m2 = extract32(insn, 16, 16); 110 } 111 112 static void tci_args_rrr(uint32_t insn, TCGReg *r0, TCGReg *r1, TCGReg *r2) 113 { 114 *r0 = extract32(insn, 8, 4); 115 *r1 = extract32(insn, 12, 4); 116 *r2 = extract32(insn, 16, 4); 117 } 118 119 static void tci_args_rrs(uint32_t insn, TCGReg *r0, TCGReg *r1, int32_t *i2) 120 { 121 *r0 = extract32(insn, 8, 4); 122 *r1 = extract32(insn, 12, 4); 123 *i2 = sextract32(insn, 16, 16); 124 } 125 126 static void tci_args_rrbb(uint32_t insn, TCGReg *r0, TCGReg *r1, 127 uint8_t *i2, uint8_t *i3) 128 { 129 *r0 = extract32(insn, 8, 4); 130 *r1 = extract32(insn, 12, 4); 131 *i2 = extract32(insn, 16, 6); 132 *i3 = extract32(insn, 22, 6); 133 } 134 135 static void tci_args_rrrc(uint32_t insn, 136 TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGCond *c3) 137 { 138 *r0 = extract32(insn, 8, 4); 139 *r1 = extract32(insn, 12, 4); 140 *r2 = extract32(insn, 16, 4); 141 *c3 = extract32(insn, 20, 4); 142 } 143 144 static void tci_args_rrrbb(uint32_t insn, TCGReg *r0, TCGReg *r1, 145 TCGReg *r2, uint8_t *i3, uint8_t *i4) 146 { 147 *r0 = extract32(insn, 8, 4); 148 *r1 = extract32(insn, 12, 4); 149 *r2 = extract32(insn, 16, 4); 150 *i3 = extract32(insn, 20, 6); 151 *i4 = extract32(insn, 26, 6); 152 } 153 154 static void tci_args_rrrrr(uint32_t insn, TCGReg *r0, TCGReg *r1, 155 TCGReg *r2, TCGReg *r3, TCGReg *r4) 156 { 157 *r0 = extract32(insn, 8, 4); 158 *r1 = extract32(insn, 12, 4); 159 *r2 = extract32(insn, 16, 4); 160 *r3 = extract32(insn, 20, 4); 161 *r4 = extract32(insn, 24, 4); 162 } 163 164 static void tci_args_rrrr(uint32_t insn, 165 TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGReg *r3) 166 { 167 *r0 = extract32(insn, 8, 4); 168 *r1 = extract32(insn, 12, 4); 169 *r2 = extract32(insn, 16, 4); 170 *r3 = extract32(insn, 20, 4); 171 } 172 173 static void tci_args_rrrrrc(uint32_t insn, TCGReg *r0, TCGReg *r1, 174 TCGReg *r2, TCGReg *r3, TCGReg *r4, TCGCond *c5) 175 { 176 *r0 = extract32(insn, 8, 4); 177 *r1 = extract32(insn, 12, 4); 178 *r2 = extract32(insn, 16, 4); 179 *r3 = extract32(insn, 20, 4); 180 *r4 = extract32(insn, 24, 4); 181 *c5 = extract32(insn, 28, 4); 182 } 183 184 static void tci_args_rrrrrr(uint32_t insn, TCGReg *r0, TCGReg *r1, 185 TCGReg *r2, TCGReg *r3, TCGReg *r4, TCGReg *r5) 186 { 187 *r0 = extract32(insn, 8, 4); 188 *r1 = extract32(insn, 12, 4); 189 *r2 = extract32(insn, 16, 4); 190 *r3 = extract32(insn, 20, 4); 191 *r4 = extract32(insn, 24, 4); 192 *r5 = extract32(insn, 28, 4); 193 } 194 195 static bool tci_compare32(uint32_t u0, uint32_t u1, TCGCond condition) 196 { 197 bool result = false; 198 int32_t i0 = u0; 199 int32_t i1 = u1; 200 switch (condition) { 201 case TCG_COND_EQ: 202 result = (u0 == u1); 203 break; 204 case TCG_COND_NE: 205 result = (u0 != u1); 206 break; 207 case TCG_COND_LT: 208 result = (i0 < i1); 209 break; 210 case TCG_COND_GE: 211 result = (i0 >= i1); 212 break; 213 case TCG_COND_LE: 214 result = (i0 <= i1); 215 break; 216 case TCG_COND_GT: 217 result = (i0 > i1); 218 break; 219 case TCG_COND_LTU: 220 result = (u0 < u1); 221 break; 222 case TCG_COND_GEU: 223 result = (u0 >= u1); 224 break; 225 case TCG_COND_LEU: 226 result = (u0 <= u1); 227 break; 228 case TCG_COND_GTU: 229 result = (u0 > u1); 230 break; 231 default: 232 g_assert_not_reached(); 233 } 234 return result; 235 } 236 237 static bool tci_compare64(uint64_t u0, uint64_t u1, TCGCond condition) 238 { 239 bool result = false; 240 int64_t i0 = u0; 241 int64_t i1 = u1; 242 switch (condition) { 243 case TCG_COND_EQ: 244 result = (u0 == u1); 245 break; 246 case TCG_COND_NE: 247 result = (u0 != u1); 248 break; 249 case TCG_COND_LT: 250 result = (i0 < i1); 251 break; 252 case TCG_COND_GE: 253 result = (i0 >= i1); 254 break; 255 case TCG_COND_LE: 256 result = (i0 <= i1); 257 break; 258 case TCG_COND_GT: 259 result = (i0 > i1); 260 break; 261 case TCG_COND_LTU: 262 result = (u0 < u1); 263 break; 264 case TCG_COND_GEU: 265 result = (u0 >= u1); 266 break; 267 case TCG_COND_LEU: 268 result = (u0 <= u1); 269 break; 270 case TCG_COND_GTU: 271 result = (u0 > u1); 272 break; 273 default: 274 g_assert_not_reached(); 275 } 276 return result; 277 } 278 279 static uint64_t tci_qemu_ld(CPUArchState *env, uint64_t taddr, 280 MemOpIdx oi, const void *tb_ptr) 281 { 282 MemOp mop = get_memop(oi); 283 uintptr_t ra = (uintptr_t)tb_ptr; 284 285 switch (mop & MO_SSIZE) { 286 case MO_UB: 287 return helper_ldub_mmu(env, taddr, oi, ra); 288 case MO_SB: 289 return helper_ldsb_mmu(env, taddr, oi, ra); 290 case MO_UW: 291 return helper_lduw_mmu(env, taddr, oi, ra); 292 case MO_SW: 293 return helper_ldsw_mmu(env, taddr, oi, ra); 294 case MO_UL: 295 return helper_ldul_mmu(env, taddr, oi, ra); 296 case MO_SL: 297 return helper_ldsl_mmu(env, taddr, oi, ra); 298 case MO_UQ: 299 return helper_ldq_mmu(env, taddr, oi, ra); 300 default: 301 g_assert_not_reached(); 302 } 303 } 304 305 static void tci_qemu_st(CPUArchState *env, uint64_t taddr, uint64_t val, 306 MemOpIdx oi, const void *tb_ptr) 307 { 308 MemOp mop = get_memop(oi); 309 uintptr_t ra = (uintptr_t)tb_ptr; 310 311 switch (mop & MO_SIZE) { 312 case MO_UB: 313 helper_stb_mmu(env, taddr, val, oi, ra); 314 break; 315 case MO_UW: 316 helper_stw_mmu(env, taddr, val, oi, ra); 317 break; 318 case MO_UL: 319 helper_stl_mmu(env, taddr, val, oi, ra); 320 break; 321 case MO_UQ: 322 helper_stq_mmu(env, taddr, val, oi, ra); 323 break; 324 default: 325 g_assert_not_reached(); 326 } 327 } 328 329 #if TCG_TARGET_REG_BITS == 64 330 # define CASE_32_64(x) \ 331 case glue(glue(INDEX_op_, x), _i64): \ 332 case glue(glue(INDEX_op_, x), _i32): 333 # define CASE_64(x) \ 334 case glue(glue(INDEX_op_, x), _i64): 335 #else 336 # define CASE_32_64(x) \ 337 case glue(glue(INDEX_op_, x), _i32): 338 # define CASE_64(x) 339 #endif 340 341 /* Interpret pseudo code in tb. */ 342 /* 343 * Disable CFI checks. 344 * One possible operation in the pseudo code is a call to binary code. 345 * Therefore, disable CFI checks in the interpreter function 346 */ 347 uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, 348 const void *v_tb_ptr) 349 { 350 const uint32_t *tb_ptr = v_tb_ptr; 351 tcg_target_ulong regs[TCG_TARGET_NB_REGS]; 352 uint64_t stack[(TCG_STATIC_CALL_ARGS_SIZE + TCG_STATIC_FRAME_SIZE) 353 / sizeof(uint64_t)]; 354 355 regs[TCG_AREG0] = (tcg_target_ulong)env; 356 regs[TCG_REG_CALL_STACK] = (uintptr_t)stack; 357 tci_assert(tb_ptr); 358 359 for (;;) { 360 uint32_t insn; 361 TCGOpcode opc; 362 TCGReg r0, r1, r2, r3, r4, r5; 363 tcg_target_ulong t1; 364 TCGCond condition; 365 uint8_t pos, len; 366 uint32_t tmp32; 367 uint64_t tmp64, taddr; 368 uint64_t T1, T2; 369 MemOpIdx oi; 370 int32_t ofs; 371 void *ptr; 372 373 insn = *tb_ptr++; 374 opc = extract32(insn, 0, 8); 375 376 switch (opc) { 377 case INDEX_op_call: 378 { 379 void *call_slots[MAX_CALL_IARGS]; 380 ffi_cif *cif; 381 void *func; 382 unsigned i, s, n; 383 384 tci_args_nl(insn, tb_ptr, &len, &ptr); 385 func = ((void **)ptr)[0]; 386 cif = ((void **)ptr)[1]; 387 388 n = cif->nargs; 389 for (i = s = 0; i < n; ++i) { 390 ffi_type *t = cif->arg_types[i]; 391 call_slots[i] = &stack[s]; 392 s += DIV_ROUND_UP(t->size, 8); 393 } 394 395 /* Helper functions may need to access the "return address" */ 396 tci_tb_ptr = (uintptr_t)tb_ptr; 397 ffi_call(cif, func, stack, call_slots); 398 } 399 400 switch (len) { 401 case 0: /* void */ 402 break; 403 case 1: /* uint32_t */ 404 /* 405 * The result winds up "left-aligned" in the stack[0] slot. 406 * Note that libffi has an odd special case in that it will 407 * always widen an integral result to ffi_arg. 408 */ 409 if (sizeof(ffi_arg) == 8) { 410 regs[TCG_REG_R0] = (uint32_t)stack[0]; 411 } else { 412 regs[TCG_REG_R0] = *(uint32_t *)stack; 413 } 414 break; 415 case 2: /* uint64_t */ 416 /* 417 * For TCG_TARGET_REG_BITS == 32, the register pair 418 * must stay in host memory order. 419 */ 420 memcpy(®s[TCG_REG_R0], stack, 8); 421 break; 422 case 3: /* Int128 */ 423 memcpy(®s[TCG_REG_R0], stack, 16); 424 break; 425 default: 426 g_assert_not_reached(); 427 } 428 break; 429 430 case INDEX_op_br: 431 tci_args_l(insn, tb_ptr, &ptr); 432 tb_ptr = ptr; 433 continue; 434 case INDEX_op_setcond_i32: 435 tci_args_rrrc(insn, &r0, &r1, &r2, &condition); 436 regs[r0] = tci_compare32(regs[r1], regs[r2], condition); 437 break; 438 case INDEX_op_movcond_i32: 439 tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition); 440 tmp32 = tci_compare32(regs[r1], regs[r2], condition); 441 regs[r0] = regs[tmp32 ? r3 : r4]; 442 break; 443 #if TCG_TARGET_REG_BITS == 32 444 case INDEX_op_setcond2_i32: 445 tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition); 446 T1 = tci_uint64(regs[r2], regs[r1]); 447 T2 = tci_uint64(regs[r4], regs[r3]); 448 regs[r0] = tci_compare64(T1, T2, condition); 449 break; 450 #elif TCG_TARGET_REG_BITS == 64 451 case INDEX_op_setcond_i64: 452 tci_args_rrrc(insn, &r0, &r1, &r2, &condition); 453 regs[r0] = tci_compare64(regs[r1], regs[r2], condition); 454 break; 455 case INDEX_op_movcond_i64: 456 tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition); 457 tmp32 = tci_compare64(regs[r1], regs[r2], condition); 458 regs[r0] = regs[tmp32 ? r3 : r4]; 459 break; 460 #endif 461 CASE_32_64(mov) 462 tci_args_rr(insn, &r0, &r1); 463 regs[r0] = regs[r1]; 464 break; 465 case INDEX_op_tci_movi: 466 tci_args_ri(insn, &r0, &t1); 467 regs[r0] = t1; 468 break; 469 case INDEX_op_tci_movl: 470 tci_args_rl(insn, tb_ptr, &r0, &ptr); 471 regs[r0] = *(tcg_target_ulong *)ptr; 472 break; 473 474 /* Load/store operations (32 bit). */ 475 476 CASE_32_64(ld8u) 477 tci_args_rrs(insn, &r0, &r1, &ofs); 478 ptr = (void *)(regs[r1] + ofs); 479 regs[r0] = *(uint8_t *)ptr; 480 break; 481 CASE_32_64(ld8s) 482 tci_args_rrs(insn, &r0, &r1, &ofs); 483 ptr = (void *)(regs[r1] + ofs); 484 regs[r0] = *(int8_t *)ptr; 485 break; 486 CASE_32_64(ld16u) 487 tci_args_rrs(insn, &r0, &r1, &ofs); 488 ptr = (void *)(regs[r1] + ofs); 489 regs[r0] = *(uint16_t *)ptr; 490 break; 491 CASE_32_64(ld16s) 492 tci_args_rrs(insn, &r0, &r1, &ofs); 493 ptr = (void *)(regs[r1] + ofs); 494 regs[r0] = *(int16_t *)ptr; 495 break; 496 case INDEX_op_ld_i32: 497 CASE_64(ld32u) 498 tci_args_rrs(insn, &r0, &r1, &ofs); 499 ptr = (void *)(regs[r1] + ofs); 500 regs[r0] = *(uint32_t *)ptr; 501 break; 502 CASE_32_64(st8) 503 tci_args_rrs(insn, &r0, &r1, &ofs); 504 ptr = (void *)(regs[r1] + ofs); 505 *(uint8_t *)ptr = regs[r0]; 506 break; 507 CASE_32_64(st16) 508 tci_args_rrs(insn, &r0, &r1, &ofs); 509 ptr = (void *)(regs[r1] + ofs); 510 *(uint16_t *)ptr = regs[r0]; 511 break; 512 case INDEX_op_st_i32: 513 CASE_64(st32) 514 tci_args_rrs(insn, &r0, &r1, &ofs); 515 ptr = (void *)(regs[r1] + ofs); 516 *(uint32_t *)ptr = regs[r0]; 517 break; 518 519 /* Arithmetic operations (mixed 32/64 bit). */ 520 521 CASE_32_64(add) 522 tci_args_rrr(insn, &r0, &r1, &r2); 523 regs[r0] = regs[r1] + regs[r2]; 524 break; 525 CASE_32_64(sub) 526 tci_args_rrr(insn, &r0, &r1, &r2); 527 regs[r0] = regs[r1] - regs[r2]; 528 break; 529 CASE_32_64(mul) 530 tci_args_rrr(insn, &r0, &r1, &r2); 531 regs[r0] = regs[r1] * regs[r2]; 532 break; 533 CASE_32_64(and) 534 tci_args_rrr(insn, &r0, &r1, &r2); 535 regs[r0] = regs[r1] & regs[r2]; 536 break; 537 CASE_32_64(or) 538 tci_args_rrr(insn, &r0, &r1, &r2); 539 regs[r0] = regs[r1] | regs[r2]; 540 break; 541 CASE_32_64(xor) 542 tci_args_rrr(insn, &r0, &r1, &r2); 543 regs[r0] = regs[r1] ^ regs[r2]; 544 break; 545 #if TCG_TARGET_HAS_andc_i32 || TCG_TARGET_HAS_andc_i64 546 CASE_32_64(andc) 547 tci_args_rrr(insn, &r0, &r1, &r2); 548 regs[r0] = regs[r1] & ~regs[r2]; 549 break; 550 #endif 551 #if TCG_TARGET_HAS_orc_i32 || TCG_TARGET_HAS_orc_i64 552 CASE_32_64(orc) 553 tci_args_rrr(insn, &r0, &r1, &r2); 554 regs[r0] = regs[r1] | ~regs[r2]; 555 break; 556 #endif 557 #if TCG_TARGET_HAS_eqv_i32 || TCG_TARGET_HAS_eqv_i64 558 CASE_32_64(eqv) 559 tci_args_rrr(insn, &r0, &r1, &r2); 560 regs[r0] = ~(regs[r1] ^ regs[r2]); 561 break; 562 #endif 563 #if TCG_TARGET_HAS_nand_i32 || TCG_TARGET_HAS_nand_i64 564 CASE_32_64(nand) 565 tci_args_rrr(insn, &r0, &r1, &r2); 566 regs[r0] = ~(regs[r1] & regs[r2]); 567 break; 568 #endif 569 #if TCG_TARGET_HAS_nor_i32 || TCG_TARGET_HAS_nor_i64 570 CASE_32_64(nor) 571 tci_args_rrr(insn, &r0, &r1, &r2); 572 regs[r0] = ~(regs[r1] | regs[r2]); 573 break; 574 #endif 575 576 /* Arithmetic operations (32 bit). */ 577 578 case INDEX_op_div_i32: 579 tci_args_rrr(insn, &r0, &r1, &r2); 580 regs[r0] = (int32_t)regs[r1] / (int32_t)regs[r2]; 581 break; 582 case INDEX_op_divu_i32: 583 tci_args_rrr(insn, &r0, &r1, &r2); 584 regs[r0] = (uint32_t)regs[r1] / (uint32_t)regs[r2]; 585 break; 586 case INDEX_op_rem_i32: 587 tci_args_rrr(insn, &r0, &r1, &r2); 588 regs[r0] = (int32_t)regs[r1] % (int32_t)regs[r2]; 589 break; 590 case INDEX_op_remu_i32: 591 tci_args_rrr(insn, &r0, &r1, &r2); 592 regs[r0] = (uint32_t)regs[r1] % (uint32_t)regs[r2]; 593 break; 594 #if TCG_TARGET_HAS_clz_i32 595 case INDEX_op_clz_i32: 596 tci_args_rrr(insn, &r0, &r1, &r2); 597 tmp32 = regs[r1]; 598 regs[r0] = tmp32 ? clz32(tmp32) : regs[r2]; 599 break; 600 #endif 601 #if TCG_TARGET_HAS_ctz_i32 602 case INDEX_op_ctz_i32: 603 tci_args_rrr(insn, &r0, &r1, &r2); 604 tmp32 = regs[r1]; 605 regs[r0] = tmp32 ? ctz32(tmp32) : regs[r2]; 606 break; 607 #endif 608 #if TCG_TARGET_HAS_ctpop_i32 609 case INDEX_op_ctpop_i32: 610 tci_args_rr(insn, &r0, &r1); 611 regs[r0] = ctpop32(regs[r1]); 612 break; 613 #endif 614 615 /* Shift/rotate operations (32 bit). */ 616 617 case INDEX_op_shl_i32: 618 tci_args_rrr(insn, &r0, &r1, &r2); 619 regs[r0] = (uint32_t)regs[r1] << (regs[r2] & 31); 620 break; 621 case INDEX_op_shr_i32: 622 tci_args_rrr(insn, &r0, &r1, &r2); 623 regs[r0] = (uint32_t)regs[r1] >> (regs[r2] & 31); 624 break; 625 case INDEX_op_sar_i32: 626 tci_args_rrr(insn, &r0, &r1, &r2); 627 regs[r0] = (int32_t)regs[r1] >> (regs[r2] & 31); 628 break; 629 #if TCG_TARGET_HAS_rot_i32 630 case INDEX_op_rotl_i32: 631 tci_args_rrr(insn, &r0, &r1, &r2); 632 regs[r0] = rol32(regs[r1], regs[r2] & 31); 633 break; 634 case INDEX_op_rotr_i32: 635 tci_args_rrr(insn, &r0, &r1, &r2); 636 regs[r0] = ror32(regs[r1], regs[r2] & 31); 637 break; 638 #endif 639 #if TCG_TARGET_HAS_deposit_i32 640 case INDEX_op_deposit_i32: 641 tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len); 642 regs[r0] = deposit32(regs[r1], pos, len, regs[r2]); 643 break; 644 #endif 645 #if TCG_TARGET_HAS_extract_i32 646 case INDEX_op_extract_i32: 647 tci_args_rrbb(insn, &r0, &r1, &pos, &len); 648 regs[r0] = extract32(regs[r1], pos, len); 649 break; 650 #endif 651 #if TCG_TARGET_HAS_sextract_i32 652 case INDEX_op_sextract_i32: 653 tci_args_rrbb(insn, &r0, &r1, &pos, &len); 654 regs[r0] = sextract32(regs[r1], pos, len); 655 break; 656 #endif 657 case INDEX_op_brcond_i32: 658 tci_args_rl(insn, tb_ptr, &r0, &ptr); 659 if ((uint32_t)regs[r0]) { 660 tb_ptr = ptr; 661 } 662 break; 663 #if TCG_TARGET_REG_BITS == 32 || TCG_TARGET_HAS_add2_i32 664 case INDEX_op_add2_i32: 665 tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5); 666 T1 = tci_uint64(regs[r3], regs[r2]); 667 T2 = tci_uint64(regs[r5], regs[r4]); 668 tci_write_reg64(regs, r1, r0, T1 + T2); 669 break; 670 #endif 671 #if TCG_TARGET_REG_BITS == 32 || TCG_TARGET_HAS_sub2_i32 672 case INDEX_op_sub2_i32: 673 tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5); 674 T1 = tci_uint64(regs[r3], regs[r2]); 675 T2 = tci_uint64(regs[r5], regs[r4]); 676 tci_write_reg64(regs, r1, r0, T1 - T2); 677 break; 678 #endif 679 #if TCG_TARGET_HAS_mulu2_i32 680 case INDEX_op_mulu2_i32: 681 tci_args_rrrr(insn, &r0, &r1, &r2, &r3); 682 tmp64 = (uint64_t)(uint32_t)regs[r2] * (uint32_t)regs[r3]; 683 tci_write_reg64(regs, r1, r0, tmp64); 684 break; 685 #endif 686 #if TCG_TARGET_HAS_muls2_i32 687 case INDEX_op_muls2_i32: 688 tci_args_rrrr(insn, &r0, &r1, &r2, &r3); 689 tmp64 = (int64_t)(int32_t)regs[r2] * (int32_t)regs[r3]; 690 tci_write_reg64(regs, r1, r0, tmp64); 691 break; 692 #endif 693 #if TCG_TARGET_HAS_ext8s_i32 || TCG_TARGET_HAS_ext8s_i64 694 CASE_32_64(ext8s) 695 tci_args_rr(insn, &r0, &r1); 696 regs[r0] = (int8_t)regs[r1]; 697 break; 698 #endif 699 #if TCG_TARGET_HAS_ext16s_i32 || TCG_TARGET_HAS_ext16s_i64 || \ 700 TCG_TARGET_HAS_bswap16_i32 || TCG_TARGET_HAS_bswap16_i64 701 CASE_32_64(ext16s) 702 tci_args_rr(insn, &r0, &r1); 703 regs[r0] = (int16_t)regs[r1]; 704 break; 705 #endif 706 #if TCG_TARGET_HAS_ext8u_i32 || TCG_TARGET_HAS_ext8u_i64 707 CASE_32_64(ext8u) 708 tci_args_rr(insn, &r0, &r1); 709 regs[r0] = (uint8_t)regs[r1]; 710 break; 711 #endif 712 #if TCG_TARGET_HAS_ext16u_i32 || TCG_TARGET_HAS_ext16u_i64 713 CASE_32_64(ext16u) 714 tci_args_rr(insn, &r0, &r1); 715 regs[r0] = (uint16_t)regs[r1]; 716 break; 717 #endif 718 #if TCG_TARGET_HAS_bswap16_i32 || TCG_TARGET_HAS_bswap16_i64 719 CASE_32_64(bswap16) 720 tci_args_rr(insn, &r0, &r1); 721 regs[r0] = bswap16(regs[r1]); 722 break; 723 #endif 724 #if TCG_TARGET_HAS_bswap32_i32 || TCG_TARGET_HAS_bswap32_i64 725 CASE_32_64(bswap32) 726 tci_args_rr(insn, &r0, &r1); 727 regs[r0] = bswap32(regs[r1]); 728 break; 729 #endif 730 #if TCG_TARGET_HAS_not_i32 || TCG_TARGET_HAS_not_i64 731 CASE_32_64(not) 732 tci_args_rr(insn, &r0, &r1); 733 regs[r0] = ~regs[r1]; 734 break; 735 #endif 736 CASE_32_64(neg) 737 tci_args_rr(insn, &r0, &r1); 738 regs[r0] = -regs[r1]; 739 break; 740 #if TCG_TARGET_REG_BITS == 64 741 /* Load/store operations (64 bit). */ 742 743 case INDEX_op_ld32s_i64: 744 tci_args_rrs(insn, &r0, &r1, &ofs); 745 ptr = (void *)(regs[r1] + ofs); 746 regs[r0] = *(int32_t *)ptr; 747 break; 748 case INDEX_op_ld_i64: 749 tci_args_rrs(insn, &r0, &r1, &ofs); 750 ptr = (void *)(regs[r1] + ofs); 751 regs[r0] = *(uint64_t *)ptr; 752 break; 753 case INDEX_op_st_i64: 754 tci_args_rrs(insn, &r0, &r1, &ofs); 755 ptr = (void *)(regs[r1] + ofs); 756 *(uint64_t *)ptr = regs[r0]; 757 break; 758 759 /* Arithmetic operations (64 bit). */ 760 761 case INDEX_op_div_i64: 762 tci_args_rrr(insn, &r0, &r1, &r2); 763 regs[r0] = (int64_t)regs[r1] / (int64_t)regs[r2]; 764 break; 765 case INDEX_op_divu_i64: 766 tci_args_rrr(insn, &r0, &r1, &r2); 767 regs[r0] = (uint64_t)regs[r1] / (uint64_t)regs[r2]; 768 break; 769 case INDEX_op_rem_i64: 770 tci_args_rrr(insn, &r0, &r1, &r2); 771 regs[r0] = (int64_t)regs[r1] % (int64_t)regs[r2]; 772 break; 773 case INDEX_op_remu_i64: 774 tci_args_rrr(insn, &r0, &r1, &r2); 775 regs[r0] = (uint64_t)regs[r1] % (uint64_t)regs[r2]; 776 break; 777 #if TCG_TARGET_HAS_clz_i64 778 case INDEX_op_clz_i64: 779 tci_args_rrr(insn, &r0, &r1, &r2); 780 regs[r0] = regs[r1] ? clz64(regs[r1]) : regs[r2]; 781 break; 782 #endif 783 #if TCG_TARGET_HAS_ctz_i64 784 case INDEX_op_ctz_i64: 785 tci_args_rrr(insn, &r0, &r1, &r2); 786 regs[r0] = regs[r1] ? ctz64(regs[r1]) : regs[r2]; 787 break; 788 #endif 789 #if TCG_TARGET_HAS_ctpop_i64 790 case INDEX_op_ctpop_i64: 791 tci_args_rr(insn, &r0, &r1); 792 regs[r0] = ctpop64(regs[r1]); 793 break; 794 #endif 795 #if TCG_TARGET_HAS_mulu2_i64 796 case INDEX_op_mulu2_i64: 797 tci_args_rrrr(insn, &r0, &r1, &r2, &r3); 798 mulu64(®s[r0], ®s[r1], regs[r2], regs[r3]); 799 break; 800 #endif 801 #if TCG_TARGET_HAS_muls2_i64 802 case INDEX_op_muls2_i64: 803 tci_args_rrrr(insn, &r0, &r1, &r2, &r3); 804 muls64(®s[r0], ®s[r1], regs[r2], regs[r3]); 805 break; 806 #endif 807 #if TCG_TARGET_HAS_add2_i64 808 case INDEX_op_add2_i64: 809 tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5); 810 T1 = regs[r2] + regs[r4]; 811 T2 = regs[r3] + regs[r5] + (T1 < regs[r2]); 812 regs[r0] = T1; 813 regs[r1] = T2; 814 break; 815 #endif 816 #if TCG_TARGET_HAS_add2_i64 817 case INDEX_op_sub2_i64: 818 tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5); 819 T1 = regs[r2] - regs[r4]; 820 T2 = regs[r3] - regs[r5] - (regs[r2] < regs[r4]); 821 regs[r0] = T1; 822 regs[r1] = T2; 823 break; 824 #endif 825 826 /* Shift/rotate operations (64 bit). */ 827 828 case INDEX_op_shl_i64: 829 tci_args_rrr(insn, &r0, &r1, &r2); 830 regs[r0] = regs[r1] << (regs[r2] & 63); 831 break; 832 case INDEX_op_shr_i64: 833 tci_args_rrr(insn, &r0, &r1, &r2); 834 regs[r0] = regs[r1] >> (regs[r2] & 63); 835 break; 836 case INDEX_op_sar_i64: 837 tci_args_rrr(insn, &r0, &r1, &r2); 838 regs[r0] = (int64_t)regs[r1] >> (regs[r2] & 63); 839 break; 840 #if TCG_TARGET_HAS_rot_i64 841 case INDEX_op_rotl_i64: 842 tci_args_rrr(insn, &r0, &r1, &r2); 843 regs[r0] = rol64(regs[r1], regs[r2] & 63); 844 break; 845 case INDEX_op_rotr_i64: 846 tci_args_rrr(insn, &r0, &r1, &r2); 847 regs[r0] = ror64(regs[r1], regs[r2] & 63); 848 break; 849 #endif 850 #if TCG_TARGET_HAS_deposit_i64 851 case INDEX_op_deposit_i64: 852 tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len); 853 regs[r0] = deposit64(regs[r1], pos, len, regs[r2]); 854 break; 855 #endif 856 #if TCG_TARGET_HAS_extract_i64 857 case INDEX_op_extract_i64: 858 tci_args_rrbb(insn, &r0, &r1, &pos, &len); 859 regs[r0] = extract64(regs[r1], pos, len); 860 break; 861 #endif 862 #if TCG_TARGET_HAS_sextract_i64 863 case INDEX_op_sextract_i64: 864 tci_args_rrbb(insn, &r0, &r1, &pos, &len); 865 regs[r0] = sextract64(regs[r1], pos, len); 866 break; 867 #endif 868 case INDEX_op_brcond_i64: 869 tci_args_rl(insn, tb_ptr, &r0, &ptr); 870 if (regs[r0]) { 871 tb_ptr = ptr; 872 } 873 break; 874 case INDEX_op_ext32s_i64: 875 case INDEX_op_ext_i32_i64: 876 tci_args_rr(insn, &r0, &r1); 877 regs[r0] = (int32_t)regs[r1]; 878 break; 879 case INDEX_op_ext32u_i64: 880 case INDEX_op_extu_i32_i64: 881 tci_args_rr(insn, &r0, &r1); 882 regs[r0] = (uint32_t)regs[r1]; 883 break; 884 #if TCG_TARGET_HAS_bswap64_i64 885 case INDEX_op_bswap64_i64: 886 tci_args_rr(insn, &r0, &r1); 887 regs[r0] = bswap64(regs[r1]); 888 break; 889 #endif 890 #endif /* TCG_TARGET_REG_BITS == 64 */ 891 892 /* QEMU specific operations. */ 893 894 case INDEX_op_exit_tb: 895 tci_args_l(insn, tb_ptr, &ptr); 896 return (uintptr_t)ptr; 897 898 case INDEX_op_goto_tb: 899 tci_args_l(insn, tb_ptr, &ptr); 900 tb_ptr = *(void **)ptr; 901 break; 902 903 case INDEX_op_goto_ptr: 904 tci_args_r(insn, &r0); 905 ptr = (void *)regs[r0]; 906 if (!ptr) { 907 return 0; 908 } 909 tb_ptr = ptr; 910 break; 911 912 case INDEX_op_qemu_ld_a32_i32: 913 tci_args_rrm(insn, &r0, &r1, &oi); 914 taddr = (uint32_t)regs[r1]; 915 goto do_ld_i32; 916 case INDEX_op_qemu_ld_a64_i32: 917 if (TCG_TARGET_REG_BITS == 64) { 918 tci_args_rrm(insn, &r0, &r1, &oi); 919 taddr = regs[r1]; 920 } else { 921 tci_args_rrrr(insn, &r0, &r1, &r2, &r3); 922 taddr = tci_uint64(regs[r2], regs[r1]); 923 oi = regs[r3]; 924 } 925 do_ld_i32: 926 regs[r0] = tci_qemu_ld(env, taddr, oi, tb_ptr); 927 break; 928 929 case INDEX_op_qemu_ld_a32_i64: 930 if (TCG_TARGET_REG_BITS == 64) { 931 tci_args_rrm(insn, &r0, &r1, &oi); 932 taddr = (uint32_t)regs[r1]; 933 } else { 934 tci_args_rrrr(insn, &r0, &r1, &r2, &r3); 935 taddr = (uint32_t)regs[r2]; 936 oi = regs[r3]; 937 } 938 goto do_ld_i64; 939 case INDEX_op_qemu_ld_a64_i64: 940 if (TCG_TARGET_REG_BITS == 64) { 941 tci_args_rrm(insn, &r0, &r1, &oi); 942 taddr = regs[r1]; 943 } else { 944 tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4); 945 taddr = tci_uint64(regs[r3], regs[r2]); 946 oi = regs[r4]; 947 } 948 do_ld_i64: 949 tmp64 = tci_qemu_ld(env, taddr, oi, tb_ptr); 950 if (TCG_TARGET_REG_BITS == 32) { 951 tci_write_reg64(regs, r1, r0, tmp64); 952 } else { 953 regs[r0] = tmp64; 954 } 955 break; 956 957 case INDEX_op_qemu_st_a32_i32: 958 tci_args_rrm(insn, &r0, &r1, &oi); 959 taddr = (uint32_t)regs[r1]; 960 goto do_st_i32; 961 case INDEX_op_qemu_st_a64_i32: 962 if (TCG_TARGET_REG_BITS == 64) { 963 tci_args_rrm(insn, &r0, &r1, &oi); 964 taddr = regs[r1]; 965 } else { 966 tci_args_rrrr(insn, &r0, &r1, &r2, &r3); 967 taddr = tci_uint64(regs[r2], regs[r1]); 968 oi = regs[r3]; 969 } 970 do_st_i32: 971 tci_qemu_st(env, taddr, regs[r0], oi, tb_ptr); 972 break; 973 974 case INDEX_op_qemu_st_a32_i64: 975 if (TCG_TARGET_REG_BITS == 64) { 976 tci_args_rrm(insn, &r0, &r1, &oi); 977 tmp64 = regs[r0]; 978 taddr = (uint32_t)regs[r1]; 979 } else { 980 tci_args_rrrr(insn, &r0, &r1, &r2, &r3); 981 tmp64 = tci_uint64(regs[r1], regs[r0]); 982 taddr = (uint32_t)regs[r2]; 983 oi = regs[r3]; 984 } 985 goto do_st_i64; 986 case INDEX_op_qemu_st_a64_i64: 987 if (TCG_TARGET_REG_BITS == 64) { 988 tci_args_rrm(insn, &r0, &r1, &oi); 989 tmp64 = regs[r0]; 990 taddr = regs[r1]; 991 } else { 992 tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4); 993 tmp64 = tci_uint64(regs[r1], regs[r0]); 994 taddr = tci_uint64(regs[r3], regs[r2]); 995 oi = regs[r4]; 996 } 997 do_st_i64: 998 tci_qemu_st(env, taddr, tmp64, oi, tb_ptr); 999 break; 1000 1001 case INDEX_op_mb: 1002 /* Ensure ordering for all kinds */ 1003 smp_mb(); 1004 break; 1005 default: 1006 g_assert_not_reached(); 1007 } 1008 } 1009 } 1010 1011 /* 1012 * Disassembler that matches the interpreter 1013 */ 1014 1015 static const char *str_r(TCGReg r) 1016 { 1017 static const char regs[TCG_TARGET_NB_REGS][4] = { 1018 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", 1019 "r8", "r9", "r10", "r11", "r12", "r13", "env", "sp" 1020 }; 1021 1022 QEMU_BUILD_BUG_ON(TCG_AREG0 != TCG_REG_R14); 1023 QEMU_BUILD_BUG_ON(TCG_REG_CALL_STACK != TCG_REG_R15); 1024 1025 assert((unsigned)r < TCG_TARGET_NB_REGS); 1026 return regs[r]; 1027 } 1028 1029 static const char *str_c(TCGCond c) 1030 { 1031 static const char cond[16][8] = { 1032 [TCG_COND_NEVER] = "never", 1033 [TCG_COND_ALWAYS] = "always", 1034 [TCG_COND_EQ] = "eq", 1035 [TCG_COND_NE] = "ne", 1036 [TCG_COND_LT] = "lt", 1037 [TCG_COND_GE] = "ge", 1038 [TCG_COND_LE] = "le", 1039 [TCG_COND_GT] = "gt", 1040 [TCG_COND_LTU] = "ltu", 1041 [TCG_COND_GEU] = "geu", 1042 [TCG_COND_LEU] = "leu", 1043 [TCG_COND_GTU] = "gtu", 1044 }; 1045 1046 assert((unsigned)c < ARRAY_SIZE(cond)); 1047 assert(cond[c][0] != 0); 1048 return cond[c]; 1049 } 1050 1051 /* Disassemble TCI bytecode. */ 1052 int print_insn_tci(bfd_vma addr, disassemble_info *info) 1053 { 1054 const uint32_t *tb_ptr = (const void *)(uintptr_t)addr; 1055 const TCGOpDef *def; 1056 const char *op_name; 1057 uint32_t insn; 1058 TCGOpcode op; 1059 TCGReg r0, r1, r2, r3, r4, r5; 1060 tcg_target_ulong i1; 1061 int32_t s2; 1062 TCGCond c; 1063 MemOpIdx oi; 1064 uint8_t pos, len; 1065 void *ptr; 1066 1067 /* TCI is always the host, so we don't need to load indirect. */ 1068 insn = *tb_ptr++; 1069 1070 info->fprintf_func(info->stream, "%08x ", insn); 1071 1072 op = extract32(insn, 0, 8); 1073 def = &tcg_op_defs[op]; 1074 op_name = def->name; 1075 1076 switch (op) { 1077 case INDEX_op_br: 1078 case INDEX_op_exit_tb: 1079 case INDEX_op_goto_tb: 1080 tci_args_l(insn, tb_ptr, &ptr); 1081 info->fprintf_func(info->stream, "%-12s %p", op_name, ptr); 1082 break; 1083 1084 case INDEX_op_goto_ptr: 1085 tci_args_r(insn, &r0); 1086 info->fprintf_func(info->stream, "%-12s %s", op_name, str_r(r0)); 1087 break; 1088 1089 case INDEX_op_call: 1090 tci_args_nl(insn, tb_ptr, &len, &ptr); 1091 info->fprintf_func(info->stream, "%-12s %d, %p", op_name, len, ptr); 1092 break; 1093 1094 case INDEX_op_brcond_i32: 1095 case INDEX_op_brcond_i64: 1096 tci_args_rl(insn, tb_ptr, &r0, &ptr); 1097 info->fprintf_func(info->stream, "%-12s %s, 0, ne, %p", 1098 op_name, str_r(r0), ptr); 1099 break; 1100 1101 case INDEX_op_setcond_i32: 1102 case INDEX_op_setcond_i64: 1103 tci_args_rrrc(insn, &r0, &r1, &r2, &c); 1104 info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s", 1105 op_name, str_r(r0), str_r(r1), str_r(r2), str_c(c)); 1106 break; 1107 1108 case INDEX_op_tci_movi: 1109 tci_args_ri(insn, &r0, &i1); 1110 info->fprintf_func(info->stream, "%-12s %s, 0x%" TCG_PRIlx, 1111 op_name, str_r(r0), i1); 1112 break; 1113 1114 case INDEX_op_tci_movl: 1115 tci_args_rl(insn, tb_ptr, &r0, &ptr); 1116 info->fprintf_func(info->stream, "%-12s %s, %p", 1117 op_name, str_r(r0), ptr); 1118 break; 1119 1120 case INDEX_op_ld8u_i32: 1121 case INDEX_op_ld8u_i64: 1122 case INDEX_op_ld8s_i32: 1123 case INDEX_op_ld8s_i64: 1124 case INDEX_op_ld16u_i32: 1125 case INDEX_op_ld16u_i64: 1126 case INDEX_op_ld16s_i32: 1127 case INDEX_op_ld16s_i64: 1128 case INDEX_op_ld32u_i64: 1129 case INDEX_op_ld32s_i64: 1130 case INDEX_op_ld_i32: 1131 case INDEX_op_ld_i64: 1132 case INDEX_op_st8_i32: 1133 case INDEX_op_st8_i64: 1134 case INDEX_op_st16_i32: 1135 case INDEX_op_st16_i64: 1136 case INDEX_op_st32_i64: 1137 case INDEX_op_st_i32: 1138 case INDEX_op_st_i64: 1139 tci_args_rrs(insn, &r0, &r1, &s2); 1140 info->fprintf_func(info->stream, "%-12s %s, %s, %d", 1141 op_name, str_r(r0), str_r(r1), s2); 1142 break; 1143 1144 case INDEX_op_mov_i32: 1145 case INDEX_op_mov_i64: 1146 case INDEX_op_ext8s_i32: 1147 case INDEX_op_ext8s_i64: 1148 case INDEX_op_ext8u_i32: 1149 case INDEX_op_ext8u_i64: 1150 case INDEX_op_ext16s_i32: 1151 case INDEX_op_ext16s_i64: 1152 case INDEX_op_ext16u_i32: 1153 case INDEX_op_ext32s_i64: 1154 case INDEX_op_ext32u_i64: 1155 case INDEX_op_ext_i32_i64: 1156 case INDEX_op_extu_i32_i64: 1157 case INDEX_op_bswap16_i32: 1158 case INDEX_op_bswap16_i64: 1159 case INDEX_op_bswap32_i32: 1160 case INDEX_op_bswap32_i64: 1161 case INDEX_op_bswap64_i64: 1162 case INDEX_op_not_i32: 1163 case INDEX_op_not_i64: 1164 case INDEX_op_neg_i32: 1165 case INDEX_op_neg_i64: 1166 case INDEX_op_ctpop_i32: 1167 case INDEX_op_ctpop_i64: 1168 tci_args_rr(insn, &r0, &r1); 1169 info->fprintf_func(info->stream, "%-12s %s, %s", 1170 op_name, str_r(r0), str_r(r1)); 1171 break; 1172 1173 case INDEX_op_add_i32: 1174 case INDEX_op_add_i64: 1175 case INDEX_op_sub_i32: 1176 case INDEX_op_sub_i64: 1177 case INDEX_op_mul_i32: 1178 case INDEX_op_mul_i64: 1179 case INDEX_op_and_i32: 1180 case INDEX_op_and_i64: 1181 case INDEX_op_or_i32: 1182 case INDEX_op_or_i64: 1183 case INDEX_op_xor_i32: 1184 case INDEX_op_xor_i64: 1185 case INDEX_op_andc_i32: 1186 case INDEX_op_andc_i64: 1187 case INDEX_op_orc_i32: 1188 case INDEX_op_orc_i64: 1189 case INDEX_op_eqv_i32: 1190 case INDEX_op_eqv_i64: 1191 case INDEX_op_nand_i32: 1192 case INDEX_op_nand_i64: 1193 case INDEX_op_nor_i32: 1194 case INDEX_op_nor_i64: 1195 case INDEX_op_div_i32: 1196 case INDEX_op_div_i64: 1197 case INDEX_op_rem_i32: 1198 case INDEX_op_rem_i64: 1199 case INDEX_op_divu_i32: 1200 case INDEX_op_divu_i64: 1201 case INDEX_op_remu_i32: 1202 case INDEX_op_remu_i64: 1203 case INDEX_op_shl_i32: 1204 case INDEX_op_shl_i64: 1205 case INDEX_op_shr_i32: 1206 case INDEX_op_shr_i64: 1207 case INDEX_op_sar_i32: 1208 case INDEX_op_sar_i64: 1209 case INDEX_op_rotl_i32: 1210 case INDEX_op_rotl_i64: 1211 case INDEX_op_rotr_i32: 1212 case INDEX_op_rotr_i64: 1213 case INDEX_op_clz_i32: 1214 case INDEX_op_clz_i64: 1215 case INDEX_op_ctz_i32: 1216 case INDEX_op_ctz_i64: 1217 tci_args_rrr(insn, &r0, &r1, &r2); 1218 info->fprintf_func(info->stream, "%-12s %s, %s, %s", 1219 op_name, str_r(r0), str_r(r1), str_r(r2)); 1220 break; 1221 1222 case INDEX_op_deposit_i32: 1223 case INDEX_op_deposit_i64: 1224 tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len); 1225 info->fprintf_func(info->stream, "%-12s %s, %s, %s, %d, %d", 1226 op_name, str_r(r0), str_r(r1), str_r(r2), pos, len); 1227 break; 1228 1229 case INDEX_op_extract_i32: 1230 case INDEX_op_extract_i64: 1231 case INDEX_op_sextract_i32: 1232 case INDEX_op_sextract_i64: 1233 tci_args_rrbb(insn, &r0, &r1, &pos, &len); 1234 info->fprintf_func(info->stream, "%-12s %s,%s,%d,%d", 1235 op_name, str_r(r0), str_r(r1), pos, len); 1236 break; 1237 1238 case INDEX_op_movcond_i32: 1239 case INDEX_op_movcond_i64: 1240 case INDEX_op_setcond2_i32: 1241 tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &c); 1242 info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s, %s", 1243 op_name, str_r(r0), str_r(r1), str_r(r2), 1244 str_r(r3), str_r(r4), str_c(c)); 1245 break; 1246 1247 case INDEX_op_mulu2_i32: 1248 case INDEX_op_mulu2_i64: 1249 case INDEX_op_muls2_i32: 1250 case INDEX_op_muls2_i64: 1251 tci_args_rrrr(insn, &r0, &r1, &r2, &r3); 1252 info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s", 1253 op_name, str_r(r0), str_r(r1), 1254 str_r(r2), str_r(r3)); 1255 break; 1256 1257 case INDEX_op_add2_i32: 1258 case INDEX_op_add2_i64: 1259 case INDEX_op_sub2_i32: 1260 case INDEX_op_sub2_i64: 1261 tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5); 1262 info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s, %s", 1263 op_name, str_r(r0), str_r(r1), str_r(r2), 1264 str_r(r3), str_r(r4), str_r(r5)); 1265 break; 1266 1267 case INDEX_op_qemu_ld_a32_i32: 1268 case INDEX_op_qemu_st_a32_i32: 1269 len = 1 + 1; 1270 goto do_qemu_ldst; 1271 case INDEX_op_qemu_ld_a32_i64: 1272 case INDEX_op_qemu_st_a32_i64: 1273 case INDEX_op_qemu_ld_a64_i32: 1274 case INDEX_op_qemu_st_a64_i32: 1275 len = 1 + DIV_ROUND_UP(64, TCG_TARGET_REG_BITS); 1276 goto do_qemu_ldst; 1277 case INDEX_op_qemu_ld_a64_i64: 1278 case INDEX_op_qemu_st_a64_i64: 1279 len = 2 * DIV_ROUND_UP(64, TCG_TARGET_REG_BITS); 1280 goto do_qemu_ldst; 1281 do_qemu_ldst: 1282 switch (len) { 1283 case 2: 1284 tci_args_rrm(insn, &r0, &r1, &oi); 1285 info->fprintf_func(info->stream, "%-12s %s, %s, %x", 1286 op_name, str_r(r0), str_r(r1), oi); 1287 break; 1288 case 3: 1289 tci_args_rrrr(insn, &r0, &r1, &r2, &r3); 1290 info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s", 1291 op_name, str_r(r0), str_r(r1), 1292 str_r(r2), str_r(r3)); 1293 break; 1294 case 4: 1295 tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4); 1296 info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s", 1297 op_name, str_r(r0), str_r(r1), 1298 str_r(r2), str_r(r3), str_r(r4)); 1299 break; 1300 default: 1301 g_assert_not_reached(); 1302 } 1303 break; 1304 1305 case 0: 1306 /* tcg_out_nop_fill uses zeros */ 1307 if (insn == 0) { 1308 info->fprintf_func(info->stream, "align"); 1309 break; 1310 } 1311 /* fall through */ 1312 1313 default: 1314 info->fprintf_func(info->stream, "illegal opcode %d", op); 1315 break; 1316 } 1317 1318 return sizeof(insn); 1319 } 1320