/*
 * Tiny Code Interpreter for QEMU
 *
 * Copyright (c) 2009, 2011, 2016 Stefan Weil
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "tcg/tcg.h"
#include "tcg/helper-info.h"
#include "tcg/tcg-ldst.h"
#include "disas/dis-asm.h"
#include "tcg-has.h"
#include <ffi.h>


/* Select the 32- or 64-bit variant of these helpers to match the host. */
#define ctpop_tr    glue(ctpop, TCG_TARGET_REG_BITS)
#define deposit_tr  glue(deposit, TCG_TARGET_REG_BITS)
#define extract_tr  glue(extract, TCG_TARGET_REG_BITS)
#define sextract_tr glue(sextract, TCG_TARGET_REG_BITS)

/*
 * Enable TCI assertions only when debugging TCG (and without NDEBUG defined).
 * Without assertions, the interpreter runs much faster.
 */
#if defined(CONFIG_DEBUG_TCG)
# define tci_assert(cond) assert(cond)
#else
# define tci_assert(cond) ((void)(cond))
#endif

/* Address of the current TCI instruction, for helpers that need to unwind. */
__thread uintptr_t tci_tb_ptr;

/* Split a 64-bit value into a pair of 32-bit host registers. */
static void tci_write_reg64(tcg_target_ulong *regs, uint32_t high_index,
                            uint32_t low_index, uint64_t value)
{
    regs[low_index] = (uint32_t)value;
    regs[high_index] = value >> 32;
}

/* Create a 64 bit value from two 32 bit values. */
static uint64_t tci_uint64(uint32_t high, uint32_t low)
{
    return ((uint64_t)high << 32) + low;
}

/*
 * Load sets of arguments all at once.
 * The naming convention is:
 *   tci_args_<arguments>
 * where arguments is a sequence of
 *
 *   b = immediate (bit position)
 *   c = condition (TCGCond)
 *   i = immediate (uint32_t)
 *   I = immediate (tcg_target_ulong)
 *   l = label or pointer
 *   m = immediate (MemOpIdx)
 *   n = immediate (call return length)
 *   r = register
 *   s = signed ldst offset
 */

static void tci_args_l(uint32_t insn, const void *tb_ptr, void **l0)
{
    /* Branch displacement is relative to tb_ptr; 0 encodes a NULL target. */
    int diff = sextract32(insn, 12, 20);

    *l0 = diff ? (void *)tb_ptr + diff : NULL;
}

static void tci_args_r(uint32_t insn, TCGReg *r0)
{
    *r0 = extract32(insn, 8, 4);
}

static void tci_args_nl(uint32_t insn, const void *tb_ptr,
                        uint8_t *n0, void **l1)
{
    *n0 = extract32(insn, 8, 4);
    *l1 = sextract32(insn, 12, 20) + (void *)tb_ptr;
}

static void tci_args_rl(uint32_t insn, const void *tb_ptr,
                        TCGReg *r0, void **l1)
{
    *r0 = extract32(insn, 8, 4);
    *l1 = sextract32(insn, 12, 20) + (void *)tb_ptr;
}

static void tci_args_rr(uint32_t insn, TCGReg *r0, TCGReg *r1)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
}

static void tci_args_ri(uint32_t insn, TCGReg *r0, tcg_target_ulong *i1)
{
    *r0 = extract32(insn, 8, 4);
    *i1 = sextract32(insn, 12, 20);
}

static void tci_args_rrm(uint32_t insn, TCGReg *r0,
                         TCGReg *r1, MemOpIdx *m2)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *m2 = extract32(insn, 16, 16);
}

static void tci_args_rrr(uint32_t insn, TCGReg *r0, TCGReg *r1, TCGReg *r2)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
}

static void tci_args_rrs(uint32_t insn, TCGReg *r0, TCGReg *r1, int32_t *i2)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *i2 = sextract32(insn, 16, 16);
}

static void tci_args_rrbb(uint32_t insn, TCGReg *r0, TCGReg *r1,
                          uint8_t *i2, uint8_t *i3)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *i2 = extract32(insn, 16, 6);
    *i3 = extract32(insn, 22, 6);
}

static void tci_args_rrrc(uint32_t insn,
                          TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGCond *c3)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *c3 = extract32(insn, 20, 4);
}

static void tci_args_rrrbb(uint32_t insn, TCGReg *r0, TCGReg *r1,
                           TCGReg *r2, uint8_t *i3, uint8_t *i4)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *i3 = extract32(insn, 20, 6);
    *i4 = extract32(insn, 26, 6);
}

static void tci_args_rrrr(uint32_t insn,
                          TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGReg *r3)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *r3 = extract32(insn, 20, 4);
}

static void tci_args_rrrrrc(uint32_t insn, TCGReg *r0, TCGReg *r1,
                            TCGReg *r2, TCGReg *r3, TCGReg *r4, TCGCond *c5)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *r3 = extract32(insn, 20, 4);
    *r4 = extract32(insn, 24, 4);
    *c5 = extract32(insn, 28, 4);
}

/* Evaluate a TCG comparison condition on two 32-bit operands. */
static bool tci_compare32(uint32_t u0, uint32_t u1, TCGCond condition)
{
    bool result = false;
    int32_t i0 = u0;
    int32_t i1 = u1;

    switch (condition) {
    case TCG_COND_EQ:
        result = (u0 == u1);
        break;
    case TCG_COND_NE:
        result = (u0 != u1);
        break;
    case TCG_COND_LT:
        result = (i0 < i1);
        break;
    case TCG_COND_GE:
        result = (i0 >= i1);
        break;
    case TCG_COND_LE:
        result = (i0 <= i1);
        break;
    case TCG_COND_GT:
        result = (i0 > i1);
        break;
    case TCG_COND_LTU:
        result = (u0 < u1);
        break;
    case TCG_COND_GEU:
        result = (u0 >= u1);
        break;
    case TCG_COND_LEU:
        result = (u0 <=
                  u1);
        break;
    case TCG_COND_GTU:
        result = (u0 > u1);
        break;
    case TCG_COND_TSTEQ:
        result = (u0 & u1) == 0;
        break;
    case TCG_COND_TSTNE:
        result = (u0 & u1) != 0;
        break;
    default:
        g_assert_not_reached();
    }
    return result;
}

/* Evaluate a TCG comparison condition on two 64-bit operands. */
static bool tci_compare64(uint64_t u0, uint64_t u1, TCGCond condition)
{
    bool result = false;
    int64_t i0 = u0;
    int64_t i1 = u1;

    switch (condition) {
    case TCG_COND_EQ:
        result = (u0 == u1);
        break;
    case TCG_COND_NE:
        result = (u0 != u1);
        break;
    case TCG_COND_LT:
        result = (i0 < i1);
        break;
    case TCG_COND_GE:
        result = (i0 >= i1);
        break;
    case TCG_COND_LE:
        result = (i0 <= i1);
        break;
    case TCG_COND_GT:
        result = (i0 > i1);
        break;
    case TCG_COND_LTU:
        result = (u0 < u1);
        break;
    case TCG_COND_GEU:
        result = (u0 >= u1);
        break;
    case TCG_COND_LEU:
        result = (u0 <= u1);
        break;
    case TCG_COND_GTU:
        result = (u0 > u1);
        break;
    case TCG_COND_TSTEQ:
        result = (u0 & u1) == 0;
        break;
    case TCG_COND_TSTNE:
        result = (u0 & u1) != 0;
        break;
    default:
        g_assert_not_reached();
    }
    return result;
}

/*
 * Dispatch a guest load to the MMU helper selected by the sign/size bits
 * of the MemOp.  tb_ptr doubles as the "return address" for unwinding.
 */
static uint64_t tci_qemu_ld(CPUArchState *env, uint64_t taddr,
                            MemOpIdx oi, const void *tb_ptr)
{
    MemOp mop = get_memop(oi);
    uintptr_t ra = (uintptr_t)tb_ptr;

    switch (mop & MO_SSIZE) {
    case MO_UB:
        return helper_ldub_mmu(env, taddr, oi, ra);
    case MO_SB:
        return helper_ldsb_mmu(env, taddr, oi, ra);
    case MO_UW:
        return helper_lduw_mmu(env, taddr, oi, ra);
    case MO_SW:
        return helper_ldsw_mmu(env, taddr, oi, ra);
    case MO_UL:
        return helper_ldul_mmu(env, taddr, oi, ra);
    case MO_SL:
        return helper_ldsl_mmu(env, taddr, oi, ra);
    case MO_UQ:
        return helper_ldq_mmu(env, taddr, oi, ra);
    default:
        g_assert_not_reached();
    }
}

/* Dispatch a guest store to the MMU helper selected by the MemOp size. */
static void tci_qemu_st(CPUArchState
                        *env, uint64_t taddr, uint64_t val,
                        MemOpIdx oi, const void *tb_ptr)
{
    MemOp mop = get_memop(oi);
    uintptr_t ra = (uintptr_t)tb_ptr;

    switch (mop & MO_SIZE) {
    case MO_UB:
        helper_stb_mmu(env, taddr, val, oi, ra);
        break;
    case MO_UW:
        helper_stw_mmu(env, taddr, val, oi, ra);
        break;
    case MO_UL:
        helper_stl_mmu(env, taddr, val, oi, ra);
        break;
    case MO_UQ:
        helper_stq_mmu(env, taddr, val, oi, ra);
        break;
    default:
        g_assert_not_reached();
    }
}

/*
 * On a 64-bit host, many opcodes exist in both _i32 and _i64 flavors
 * that share the same implementation; CASE_32_64 labels both at once.
 */
#if TCG_TARGET_REG_BITS == 64
# define CASE_32_64(x)                          \
        case glue(glue(INDEX_op_, x), _i64):    \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x)                             \
        case glue(glue(INDEX_op_, x), _i64):
#else
# define CASE_32_64(x)                          \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x)
#endif

/* Interpret pseudo code in tb. */
/*
 * Disable CFI checks.
 * One possible operation in the pseudo code is a call to binary code.
 * Therefore, disable CFI checks in the interpreter function
 */
uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
                                            const void *v_tb_ptr)
{
    const uint32_t *tb_ptr = v_tb_ptr;
    /* Simulated host registers; AREG0 and the stack pointer are preset. */
    tcg_target_ulong regs[TCG_TARGET_NB_REGS];
    uint64_t stack[(TCG_STATIC_CALL_ARGS_SIZE + TCG_STATIC_FRAME_SIZE)
                   / sizeof(uint64_t)];
    /* Carry flag for the addco/addci/subbo/subbi opcode family. */
    bool carry = false;

    regs[TCG_AREG0] = (tcg_target_ulong)env;
    regs[TCG_REG_CALL_STACK] = (uintptr_t)stack;
    tci_assert(tb_ptr);

    for (;;) {
        uint32_t insn;
        TCGOpcode opc;
        TCGReg r0, r1, r2, r3, r4;
        tcg_target_ulong t1;
        TCGCond condition;
        uint8_t pos, len;
        uint32_t tmp32;
        uint64_t tmp64, taddr;
        MemOpIdx oi;
        int32_t ofs;
        void *ptr;

        insn = *tb_ptr++;
        opc = extract32(insn, 0, 8);

        switch (opc) {
        case INDEX_op_call:
            {
                void *call_slots[MAX_CALL_IARGS];
                ffi_cif *cif;
                void *func;
                unsigned i, s, n;

                tci_args_nl(insn, tb_ptr, &len, &ptr);
                func = ((void **)ptr)[0];
                cif = ((void **)ptr)[1];

                /* Lay out libffi argument slots over the static stack. */
                n = cif->nargs;
                for (i = s = 0; i < n; ++i) {
                    ffi_type *t = cif->arg_types[i];
                    call_slots[i] = &stack[s];
                    s += DIV_ROUND_UP(t->size, 8);
                }

                /* Helper functions may need to access the "return address" */
                tci_tb_ptr = (uintptr_t)tb_ptr;
                ffi_call(cif, func, stack, call_slots);
            }

            switch (len) {
            case 0: /* void */
                break;
            case 1: /* uint32_t */
                /*
                 * The result winds up "left-aligned" in the stack[0] slot.
                 * Note that libffi has an odd special case in that it will
                 * always widen an integral result to ffi_arg.
                 */
                if (sizeof(ffi_arg) == 8) {
                    regs[TCG_REG_R0] = (uint32_t)stack[0];
                } else {
                    regs[TCG_REG_R0] = *(uint32_t *)stack;
                }
                break;
            case 2: /* uint64_t */
                /*
                 * For TCG_TARGET_REG_BITS == 32, the register pair
                 * must stay in host memory order.
                 */
                memcpy(&regs[TCG_REG_R0], stack, 8);
                break;
            case 3: /* Int128 */
                memcpy(&regs[TCG_REG_R0], stack, 16);
                break;
            default:
                g_assert_not_reached();
            }
            break;

        case INDEX_op_br:
            tci_args_l(insn, tb_ptr, &ptr);
            tb_ptr = ptr;
            continue;
#if TCG_TARGET_REG_BITS == 32
        case INDEX_op_setcond2_i32:
            /* 64-bit compare built from two 32-bit register pairs. */
            tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition);
            regs[r0] = tci_compare64(tci_uint64(regs[r2], regs[r1]),
                                     tci_uint64(regs[r4], regs[r3]),
                                     condition);
            break;
#elif TCG_TARGET_REG_BITS == 64
        case INDEX_op_setcond:
            tci_args_rrrc(insn, &r0, &r1, &r2, &condition);
            regs[r0] = tci_compare64(regs[r1], regs[r2], condition);
            break;
        case INDEX_op_movcond:
            tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition);
            tmp32 = tci_compare64(regs[r1], regs[r2], condition);
            regs[r0] = regs[tmp32 ? r3 : r4];
            break;
#endif
        case INDEX_op_mov:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = regs[r1];
            break;
        case INDEX_op_tci_movi:
            tci_args_ri(insn, &r0, &t1);
            regs[r0] = t1;
            break;
        case INDEX_op_tci_movl:
            /* Load a full-width immediate stored out of line. */
            tci_args_rl(insn, tb_ptr, &r0, &ptr);
            regs[r0] = *(tcg_target_ulong *)ptr;
            break;
        case INDEX_op_tci_setcarry:
            carry = true;
            break;

            /* Load/store operations (32 bit).
             */

        CASE_32_64(ld8u)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint8_t *)ptr;
            break;
        CASE_32_64(ld8s)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(int8_t *)ptr;
            break;
        CASE_32_64(ld16u)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint16_t *)ptr;
            break;
        CASE_32_64(ld16s)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(int16_t *)ptr;
            break;
        case INDEX_op_ld_i32:
        CASE_64(ld32u)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint32_t *)ptr;
            break;
        CASE_32_64(st8)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint8_t *)ptr = regs[r0];
            break;
        CASE_32_64(st16)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint16_t *)ptr = regs[r0];
            break;
        case INDEX_op_st_i32:
        CASE_64(st32)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint32_t *)ptr = regs[r0];
            break;

            /* Arithmetic operations (mixed 32/64 bit).
             */

        case INDEX_op_add:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] + regs[r2];
            break;
        case INDEX_op_sub:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] - regs[r2];
            break;
        case INDEX_op_mul:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] * regs[r2];
            break;
        case INDEX_op_and:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] & regs[r2];
            break;
        case INDEX_op_or:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] | regs[r2];
            break;
        case INDEX_op_xor:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] ^ regs[r2];
            break;
        case INDEX_op_andc:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] & ~regs[r2];
            break;
        case INDEX_op_orc:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] | ~regs[r2];
            break;
        case INDEX_op_eqv:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ~(regs[r1] ^ regs[r2]);
            break;
        case INDEX_op_nand:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ~(regs[r1] & regs[r2]);
            break;
        case INDEX_op_nor:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ~(regs[r1] | regs[r2]);
            break;
        case INDEX_op_neg:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = -regs[r1];
            break;
        case INDEX_op_not:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = ~regs[r1];
            break;
        case INDEX_op_ctpop:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = ctpop_tr(regs[r1]);
            break;
        case INDEX_op_addco:
            /* Add, producing carry out. */
            tci_args_rrr(insn, &r0, &r1, &r2);
            t1 = regs[r1] + regs[r2];
            carry = t1 < regs[r1];
            regs[r0] = t1;
            break;
        case INDEX_op_addci:
            /* Add, consuming carry in. */
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] + regs[r2] + carry;
            break;
        case INDEX_op_addcio:
            /* Add, consuming carry in and producing carry out. */
            tci_args_rrr(insn, &r0, &r1, &r2);
            if (carry) {
                t1 = regs[r1] + regs[r2] + 1;
                carry = t1 <= regs[r1];
            } else {
                t1 = regs[r1] + regs[r2];
                carry = t1 < regs[r1];
            }
            regs[r0] = t1;
            break;
        case INDEX_op_subbo:
            /* Subtract, producing borrow out. */
            tci_args_rrr(insn, &r0, &r1, &r2);
            carry = regs[r1] < regs[r2];
            regs[r0] = regs[r1] - regs[r2];
            break;
        case INDEX_op_subbi:
            /* Subtract, consuming borrow in. */
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] - regs[r2] - carry;
            break;
        case INDEX_op_subbio:
            /* Subtract, consuming borrow in and producing borrow out. */
            tci_args_rrr(insn, &r0, &r1, &r2);
            if (carry) {
                carry = regs[r1] <= regs[r2];
                regs[r0] = regs[r1] - regs[r2] - 1;
            } else {
                carry = regs[r1] < regs[r2];
                regs[r0] = regs[r1] - regs[r2];
            }
            break;
        case INDEX_op_muls2:
            /* Signed widening multiply; double-width result. */
            tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
#if TCG_TARGET_REG_BITS == 32
            tmp64 = (int64_t)(int32_t)regs[r2] * (int32_t)regs[r3];
            tci_write_reg64(regs, r1, r0, tmp64);
#else
            muls64(&regs[r0], &regs[r1], regs[r2], regs[r3]);
#endif
            break;
        case INDEX_op_mulu2:
            /* Unsigned widening multiply; double-width result. */
            tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
#if TCG_TARGET_REG_BITS == 32
            tmp64 = (uint64_t)(uint32_t)regs[r2] * (uint32_t)regs[r3];
            tci_write_reg64(regs, r1, r0, tmp64);
#else
            mulu64(&regs[r0], &regs[r1], regs[r2], regs[r3]);
#endif
            break;

            /* Arithmetic operations (32 bit). */

        case INDEX_op_tci_divs32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int32_t)regs[r1] / (int32_t)regs[r2];
            break;
        case INDEX_op_tci_divu32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] / (uint32_t)regs[r2];
            break;
        case INDEX_op_tci_rems32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int32_t)regs[r1] % (int32_t)regs[r2];
            break;
        case INDEX_op_tci_remu32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] % (uint32_t)regs[r2];
            break;
        case INDEX_op_tci_clz32:
            /* r2 supplies the result for a zero input. */
            tci_args_rrr(insn, &r0, &r1, &r2);
            tmp32 = regs[r1];
            regs[r0] = tmp32 ? clz32(tmp32) : regs[r2];
            break;
        case INDEX_op_tci_ctz32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            tmp32 = regs[r1];
            regs[r0] = tmp32 ?
                       ctz32(tmp32) : regs[r2];
            break;
        case INDEX_op_tci_setcond32:
            tci_args_rrrc(insn, &r0, &r1, &r2, &condition);
            regs[r0] = tci_compare32(regs[r1], regs[r2], condition);
            break;
        case INDEX_op_tci_movcond32:
            tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition);
            tmp32 = tci_compare32(regs[r1], regs[r2], condition);
            regs[r0] = regs[tmp32 ? r3 : r4];
            break;

            /* Shift/rotate operations. */

        case INDEX_op_shl:
            /* Shift count is reduced modulo the register width. */
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] << (regs[r2] % TCG_TARGET_REG_BITS);
            break;
        case INDEX_op_shr:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] >> (regs[r2] % TCG_TARGET_REG_BITS);
            break;
        case INDEX_op_sar:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ((tcg_target_long)regs[r1]
                        >> (regs[r2] % TCG_TARGET_REG_BITS));
            break;
        case INDEX_op_tci_rotl32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = rol32(regs[r1], regs[r2] & 31);
            break;
        case INDEX_op_tci_rotr32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ror32(regs[r1], regs[r2] & 31);
            break;
        case INDEX_op_deposit:
            tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
            regs[r0] = deposit_tr(regs[r1], pos, len, regs[r2]);
            break;
        case INDEX_op_extract:
            tci_args_rrbb(insn, &r0, &r1, &pos, &len);
            regs[r0] = extract_tr(regs[r1], pos, len);
            break;
        case INDEX_op_sextract:
            tci_args_rrbb(insn, &r0, &r1, &pos, &len);
            regs[r0] = sextract_tr(regs[r1], pos, len);
            break;
        case INDEX_op_brcond:
            /* Branch taken if r0 is nonzero (condition pre-evaluated). */
            tci_args_rl(insn, tb_ptr, &r0, &ptr);
            if (regs[r0]) {
                tb_ptr = ptr;
            }
            break;
        case INDEX_op_bswap16:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = bswap16(regs[r1]);
            break;
        case INDEX_op_bswap32:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = bswap32(regs[r1]);
            break;
#if TCG_TARGET_REG_BITS == 64
            /* Load/store operations (64 bit).
             */

        case INDEX_op_ld32s_i64:
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(int32_t *)ptr;
            break;
        case INDEX_op_ld_i64:
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint64_t *)ptr;
            break;
        case INDEX_op_st_i64:
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint64_t *)ptr = regs[r0];
            break;

            /* Arithmetic operations (64 bit). */

        case INDEX_op_divs:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int64_t)regs[r1] / (int64_t)regs[r2];
            break;
        case INDEX_op_divu:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint64_t)regs[r1] / (uint64_t)regs[r2];
            break;
        case INDEX_op_rems:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int64_t)regs[r1] % (int64_t)regs[r2];
            break;
        case INDEX_op_remu:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint64_t)regs[r1] % (uint64_t)regs[r2];
            break;
        case INDEX_op_clz:
            /* r2 supplies the result for a zero input. */
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] ? clz64(regs[r1]) : regs[r2];
            break;
        case INDEX_op_ctz:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] ? ctz64(regs[r1]) : regs[r2];
            break;

            /* Shift/rotate operations (64 bit). */

        case INDEX_op_rotl:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = rol64(regs[r1], regs[r2] & 63);
            break;
        case INDEX_op_rotr:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ror64(regs[r1], regs[r2] & 63);
            break;
        case INDEX_op_ext_i32_i64:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (int32_t)regs[r1];
            break;
        case INDEX_op_extu_i32_i64:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (uint32_t)regs[r1];
            break;
        case INDEX_op_bswap64:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = bswap64(regs[r1]);
            break;
#endif /* TCG_TARGET_REG_BITS == 64 */

            /* QEMU specific operations.
             */

        case INDEX_op_exit_tb:
            tci_args_l(insn, tb_ptr, &ptr);
            return (uintptr_t)ptr;

        case INDEX_op_goto_tb:
            /* Indirect jump: target is loaded from the jump slot. */
            tci_args_l(insn, tb_ptr, &ptr);
            tb_ptr = *(void **)ptr;
            break;

        case INDEX_op_goto_ptr:
            tci_args_r(insn, &r0);
            ptr = (void *)regs[r0];
            if (!ptr) {
                return 0;
            }
            tb_ptr = ptr;
            break;

        case INDEX_op_qemu_ld_i32:
            tci_args_rrm(insn, &r0, &r1, &oi);
            taddr = regs[r1];
            regs[r0] = tci_qemu_ld(env, taddr, oi, tb_ptr);
            break;

        case INDEX_op_qemu_ld_i64:
            if (TCG_TARGET_REG_BITS == 64) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                taddr = regs[r1];
            } else {
                /* 32-bit host: value in a register pair, oi in a register. */
                tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
                taddr = regs[r2];
                oi = regs[r3];
            }
            tmp64 = tci_qemu_ld(env, taddr, oi, tb_ptr);
            if (TCG_TARGET_REG_BITS == 32) {
                tci_write_reg64(regs, r1, r0, tmp64);
            } else {
                regs[r0] = tmp64;
            }
            break;

        case INDEX_op_qemu_st_i32:
            tci_args_rrm(insn, &r0, &r1, &oi);
            taddr = regs[r1];
            tci_qemu_st(env, taddr, regs[r0], oi, tb_ptr);
            break;

        case INDEX_op_qemu_st_i64:
            if (TCG_TARGET_REG_BITS == 64) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                tmp64 = regs[r0];
                taddr = regs[r1];
            } else {
                tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
                tmp64 = tci_uint64(regs[r1], regs[r0]);
                taddr = regs[r2];
                oi = regs[r3];
            }
            tci_qemu_st(env, taddr, tmp64, oi, tb_ptr);
            break;

        case INDEX_op_mb:
            /* Ensure ordering for all kinds */
            smp_mb();
            break;
        default:
            g_assert_not_reached();
        }
    }
}

/*
 * Disassembler that matches the interpreter
 */

/* Return a printable name for host register r. */
static const char *str_r(TCGReg r)
{
    static const char regs[TCG_TARGET_NB_REGS][4] = {
        "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
        "r8", "r9", "r10", "r11", "r12", "r13", "env", "sp"
    };

    QEMU_BUILD_BUG_ON(TCG_AREG0 != TCG_REG_R14);
    QEMU_BUILD_BUG_ON(TCG_REG_CALL_STACK != TCG_REG_R15);

    assert((unsigned)r < TCG_TARGET_NB_REGS);
    return regs[r];
}

/* Return a printable name for condition code c. */
static const char *str_c(TCGCond c)
{
    static const char cond[16][8] = {
        [TCG_COND_NEVER] = "never",
        [TCG_COND_ALWAYS] = "always",
        [TCG_COND_EQ] = "eq",
        [TCG_COND_NE] = "ne",
        [TCG_COND_LT] = "lt",
        [TCG_COND_GE] = "ge",
        [TCG_COND_LE] = "le",
        [TCG_COND_GT] = "gt",
        [TCG_COND_LTU] = "ltu",
        [TCG_COND_GEU] = "geu",
        [TCG_COND_LEU] = "leu",
        [TCG_COND_GTU] = "gtu",
        [TCG_COND_TSTEQ] = "tsteq",
        [TCG_COND_TSTNE] = "tstne",
    };

    assert((unsigned)c < ARRAY_SIZE(cond));
    assert(cond[c][0] != 0);
    return cond[c];
}

/* Disassemble TCI bytecode. */
int print_insn_tci(bfd_vma addr, disassemble_info *info)
{
    const uint32_t *tb_ptr = (const void *)(uintptr_t)addr;
    const TCGOpDef *def;
    const char *op_name;
    uint32_t insn;
    TCGOpcode op;
    TCGReg r0, r1, r2, r3, r4;
    tcg_target_ulong i1;
    int32_t s2;
    TCGCond c;
    MemOpIdx oi;
    uint8_t pos, len;
    void *ptr;

    /* TCI is always the host, so we don't need to load indirect.
     */
    insn = *tb_ptr++;

    info->fprintf_func(info->stream, "%08x ", insn);

    op = extract32(insn, 0, 8);
    def = &tcg_op_defs[op];
    op_name = def->name;

    switch (op) {
    case INDEX_op_br:
    case INDEX_op_exit_tb:
    case INDEX_op_goto_tb:
        tci_args_l(insn, tb_ptr, &ptr);
        info->fprintf_func(info->stream, "%-12s  %p", op_name, ptr);
        break;

    case INDEX_op_goto_ptr:
        tci_args_r(insn, &r0);
        info->fprintf_func(info->stream, "%-12s  %s", op_name, str_r(r0));
        break;

    case INDEX_op_call:
        tci_args_nl(insn, tb_ptr, &len, &ptr);
        info->fprintf_func(info->stream, "%-12s  %d, %p", op_name, len, ptr);
        break;

    case INDEX_op_brcond:
        tci_args_rl(insn, tb_ptr, &r0, &ptr);
        info->fprintf_func(info->stream, "%-12s  %s, 0, ne, %p",
                           op_name, str_r(r0), ptr);
        break;

    case INDEX_op_setcond:
    case INDEX_op_tci_setcond32:
        tci_args_rrrc(insn, &r0, &r1, &r2, &c);
        info->fprintf_func(info->stream, "%-12s  %s, %s, %s, %s",
                           op_name, str_r(r0), str_r(r1), str_r(r2), str_c(c));
        break;

    case INDEX_op_tci_movi:
        tci_args_ri(insn, &r0, &i1);
        info->fprintf_func(info->stream, "%-12s  %s, 0x%" TCG_PRIlx,
                           op_name, str_r(r0), i1);
        break;

    case INDEX_op_tci_movl:
        tci_args_rl(insn, tb_ptr, &r0, &ptr);
        info->fprintf_func(info->stream, "%-12s  %s, %p",
                           op_name, str_r(r0), ptr);
        break;

    case INDEX_op_tci_setcarry:
        info->fprintf_func(info->stream, "%-12s", op_name);
        break;

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i32:
    case INDEX_op_ld_i64:
    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
    case
         INDEX_op_st32_i64:
    case INDEX_op_st_i32:
    case INDEX_op_st_i64:
        tci_args_rrs(insn, &r0, &r1, &s2);
        info->fprintf_func(info->stream, "%-12s  %s, %s, %d",
                           op_name, str_r(r0), str_r(r1), s2);
        break;

    case INDEX_op_bswap16:
    case INDEX_op_bswap32:
    case INDEX_op_ctpop:
    case INDEX_op_mov:
    case INDEX_op_neg:
    case INDEX_op_not:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_bswap64:
        tci_args_rr(insn, &r0, &r1);
        info->fprintf_func(info->stream, "%-12s  %s, %s",
                           op_name, str_r(r0), str_r(r1));
        break;

    case INDEX_op_add:
    case INDEX_op_addci:
    case INDEX_op_addcio:
    case INDEX_op_addco:
    case INDEX_op_and:
    case INDEX_op_andc:
    case INDEX_op_clz:
    case INDEX_op_ctz:
    case INDEX_op_divs:
    case INDEX_op_divu:
    case INDEX_op_eqv:
    case INDEX_op_mul:
    case INDEX_op_nand:
    case INDEX_op_nor:
    case INDEX_op_or:
    case INDEX_op_orc:
    case INDEX_op_rems:
    case INDEX_op_remu:
    case INDEX_op_rotl:
    case INDEX_op_rotr:
    case INDEX_op_sar:
    case INDEX_op_shl:
    case INDEX_op_shr:
    case INDEX_op_sub:
    case INDEX_op_subbi:
    case INDEX_op_subbio:
    case INDEX_op_subbo:
    case INDEX_op_xor:
    case INDEX_op_tci_ctz32:
    case INDEX_op_tci_clz32:
    case INDEX_op_tci_divs32:
    case INDEX_op_tci_divu32:
    case INDEX_op_tci_rems32:
    case INDEX_op_tci_remu32:
    case INDEX_op_tci_rotl32:
    case INDEX_op_tci_rotr32:
        tci_args_rrr(insn, &r0, &r1, &r2);
        info->fprintf_func(info->stream, "%-12s  %s, %s, %s",
                           op_name, str_r(r0), str_r(r1), str_r(r2));
        break;

    case INDEX_op_deposit:
        tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
        info->fprintf_func(info->stream, "%-12s  %s, %s, %s, %d, %d",
                           op_name, str_r(r0), str_r(r1), str_r(r2), pos, len);
        break;

    case INDEX_op_extract:
    case INDEX_op_sextract:
        tci_args_rrbb(insn, &r0, &r1, &pos, &len);
        info->fprintf_func(info->stream, "%-12s  %s,%s,%d,%d",
                           op_name, str_r(r0), str_r(r1), pos, len);
        break;

    case INDEX_op_tci_movcond32:
    case INDEX_op_movcond:
    case INDEX_op_setcond2_i32:
        tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &c);
        info->fprintf_func(info->stream, "%-12s  %s, %s, %s, %s, %s, %s",
                           op_name, str_r(r0), str_r(r1), str_r(r2),
                           str_r(r3), str_r(r4), str_c(c));
        break;

    case INDEX_op_muls2:
    case INDEX_op_mulu2:
        tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
        info->fprintf_func(info->stream, "%-12s  %s, %s, %s, %s",
                           op_name, str_r(r0), str_r(r1),
                           str_r(r2), str_r(r3));
        break;

    case INDEX_op_qemu_ld_i64:
    case INDEX_op_qemu_st_i64:
        if (TCG_TARGET_REG_BITS == 32) {
            /* 32-bit host: register-pair encoding, oi in a register. */
            tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
            info->fprintf_func(info->stream, "%-12s  %s, %s, %s, %s",
                               op_name, str_r(r0), str_r(r1),
                               str_r(r2), str_r(r3));
            break;
        }
        /* fall through */
    case INDEX_op_qemu_ld_i32:
    case INDEX_op_qemu_st_i32:
        tci_args_rrm(insn, &r0, &r1, &oi);
        info->fprintf_func(info->stream, "%-12s  %s, %s, %x",
                           op_name, str_r(r0), str_r(r1), oi);
        break;

    case 0:
        /* tcg_out_nop_fill uses zeros */
        if (insn == 0) {
            info->fprintf_func(info->stream, "align");
            break;
        }
        /* fall through */

    default:
        info->fprintf_func(info->stream, "illegal opcode %d", op);
        break;
    }

    return sizeof(insn);
}