/*
 * RX translation
 *
 * Copyright (c) 2019 Yoshinori Sato
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/bswap.h"
#include "qemu/qemu-print.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "exec/log.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef HELPER_H


typedef struct DisasContext {
    DisasContextBase base;
    CPURXState *env;
    uint32_t pc;
    uint32_t tb_flags;
} DisasContext;

typedef struct DisasCompare {
    TCGv value;
    TCGv temp;
    TCGCond cond;
} DisasCompare;

const char *rx_crname(uint8_t cr)
{
    static const char *cr_names[] = {
        "psw", "pc", "usp", "fpsw", "", "", "", "",
        "bpsw", "bpc", "isp", "fintv", "intb", "", "", ""
    };
    if (cr >= ARRAY_SIZE(cr_names)) {
        return "illegal";
    }
    return cr_names[cr];
}

/* Target-specific values for dc->base.is_jmp. */
#define DISAS_JUMP    DISAS_TARGET_0
#define DISAS_UPDATE  DISAS_TARGET_1
#define DISAS_EXIT    DISAS_TARGET_2

/* global register indexes */
static TCGv cpu_regs[16];
static TCGv cpu_psw_o, cpu_psw_s, cpu_psw_z, cpu_psw_c;
static TCGv cpu_psw_i, cpu_psw_pm, cpu_psw_u, cpu_psw_ipl;
static TCGv cpu_usp, cpu_fpsw, cpu_bpsw, cpu_bpc, cpu_isp;
static TCGv cpu_fintv, cpu_intb, cpu_pc;
static TCGv_i64 cpu_acc;

#define cpu_sp cpu_regs[0]

/* decoder helper */
static uint32_t decode_load_bytes(DisasContext *ctx, uint32_t insn,
                                  int i, int n)
{
    while (++i <= n) {
        uint8_t b = translator_ldub(ctx->env, &ctx->base, ctx->base.pc_next++);
        insn |= b << (32 - i * 8);
    }
    return insn;
}

static uint32_t li(DisasContext *ctx, int sz)
{
    int32_t tmp, addr;
    CPURXState *env = ctx->env;
    addr = ctx->base.pc_next;

    switch (sz) {
    case 1:
        ctx->base.pc_next += 1;
        return (int8_t)translator_ldub(env, &ctx->base, addr);
    case 2:
        ctx->base.pc_next += 2;
        return (int16_t)translator_lduw(env, &ctx->base, addr);
    case 3:
        ctx->base.pc_next += 3;
        tmp = (int8_t)translator_ldub(env, &ctx->base, addr + 2);
        tmp <<= 16;
        tmp |= translator_lduw(env, &ctx->base, addr);
        return tmp;
    case 0:
        ctx->base.pc_next += 4;
        return translator_ldl(env, &ctx->base, addr);
    default:
        g_assert_not_reached();
    }
    return 0;
}

static int bdsp_s(DisasContext *ctx, int d)
{
    /*
     * 0 -> 8
     * 1 -> 9
     * 2 -> 10
     * 3 -> 3
     * :
     * 7 -> 7
     */
    if (d < 3) {
        d += 8;
    }
    return d;
}
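/*
 * Note (a summary of the helpers above, inferred from the code):
 * decode_load_bytes() appends the bytes following the opcode into the
 * upper bits of the 32-bit insn word consumed by the generated decoder;
 * li() fetches a trailing immediate whose size field encodes 1, 2 or 3
 * sign-extended bytes, with 0 meaning a full 32-bit word; bdsp_s()
 * widens the 3-bit short branch displacement, where encodings 0..2
 * stand for distances 8..10.
 */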
/* Include the auto-generated decoder. */
#include "decode-insns.c.inc"

void rx_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    CPURXState *env = cpu_env(cs);
    int i;
    uint32_t psw;

    psw = rx_cpu_pack_psw(env);
    qemu_fprintf(f, "pc=0x%08x psw=0x%08x\n",
                 env->pc, psw);
    for (i = 0; i < 16; i += 4) {
        qemu_fprintf(f, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
                     i, env->regs[i], i + 1, env->regs[i + 1],
                     i + 2, env->regs[i + 2], i + 3, env->regs[i + 3]);
    }
}

static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    if (translator_use_goto_tb(&dc->base, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_exit_tb(dc->base.tb, n);
    } else {
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_lookup_and_goto_ptr();
    }
    dc->base.is_jmp = DISAS_NORETURN;
}

/* generic load wrapper */
static inline void rx_gen_ld(unsigned int size, TCGv reg, TCGv mem)
{
    tcg_gen_qemu_ld_i32(reg, mem, 0, size | MO_SIGN | MO_TE);
}

/* unsigned load wrapper */
static inline void rx_gen_ldu(unsigned int size, TCGv reg, TCGv mem)
{
    tcg_gen_qemu_ld_i32(reg, mem, 0, size | MO_TE);
}

/* generic store wrapper */
static inline void rx_gen_st(unsigned int size, TCGv reg, TCGv mem)
{
    tcg_gen_qemu_st_i32(reg, mem, 0, size | MO_TE);
}

/* [ri, rb] */
static inline void rx_gen_regindex(DisasContext *ctx, TCGv mem,
                                   int size, int ri, int rb)
{
    tcg_gen_shli_i32(mem, cpu_regs[ri], size);
    tcg_gen_add_i32(mem, mem, cpu_regs[rb]);
}

/* dsp[reg] */
static inline TCGv rx_index_addr(DisasContext *ctx, TCGv mem,
                                 int ld, int size, int reg)
{
    uint32_t dsp;

    switch (ld) {
    case 0:
        return cpu_regs[reg];
    case 1:
        dsp = translator_ldub(ctx->env, &ctx->base, ctx->base.pc_next) << size;
        tcg_gen_addi_i32(mem, cpu_regs[reg], dsp);
        ctx->base.pc_next += 1;
        return mem;
    case 2:
        dsp = translator_lduw(ctx->env, &ctx->base, ctx->base.pc_next) << size;
        tcg_gen_addi_i32(mem, cpu_regs[reg], dsp);
        ctx->base.pc_next += 2;
        return mem;
    default:
        g_assert_not_reached();
    }
}

static inline MemOp mi_to_mop(unsigned mi)
{
    static const MemOp mop[5] = { MO_SB, MO_SW, MO_UL, MO_UW, MO_UB };
    tcg_debug_assert(mi < 5);
    return mop[mi];
}

/* load source operand */
static inline TCGv rx_load_source(DisasContext *ctx, TCGv mem,
                                  int ld, int mi, int rs)
{
    TCGv addr;
    MemOp mop;
    if (ld < 3) {
        mop = mi_to_mop(mi);
        addr = rx_index_addr(ctx, mem, ld, mop & MO_SIZE, rs);
        tcg_gen_qemu_ld_i32(mem, addr, 0, mop | MO_TE);
        return mem;
    } else {
        return cpu_regs[rs];
    }
}

/* Processor mode check */
static int is_privileged(DisasContext *ctx, int is_exception)
{
    if (FIELD_EX32(ctx->tb_flags, PSW, PM)) {
        if (is_exception) {
            gen_helper_raise_privilege_violation(tcg_env);
        }
        return 0;
    } else {
        return 1;
    }
}
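/*
 * Note on the flag representation used below (a sketch inferred from the
 * code, not an authoritative description): the PSW flags are kept lazily.
 * cpu_psw_z and cpu_psw_s hold the last result, so Z is "psw_z == 0" and
 * S is the sign bit of psw_s; cpu_psw_c holds the carry as 0 or 1; and
 * cpu_psw_o keeps the overflow condition in its sign bit (bit 31).
 * psw_cond() below turns one of the 16 RX condition codes into a TCG
 * comparison against these variables.
 */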
/* generate QEMU condition */
static void psw_cond(DisasCompare *dc, uint32_t cond)
{
    tcg_debug_assert(cond < 16);
    switch (cond) {
    case 0: /* z */
        dc->cond = TCG_COND_EQ;
        dc->value = cpu_psw_z;
        break;
    case 1: /* nz */
        dc->cond = TCG_COND_NE;
        dc->value = cpu_psw_z;
        break;
    case 2: /* c */
        dc->cond = TCG_COND_NE;
        dc->value = cpu_psw_c;
        break;
    case 3: /* nc */
        dc->cond = TCG_COND_EQ;
        dc->value = cpu_psw_c;
        break;
    case 4: /* gtu (C & ~Z) == 1 */
    case 5: /* leu (C & ~Z) == 0 */
        tcg_gen_setcondi_i32(TCG_COND_NE, dc->temp, cpu_psw_z, 0);
        tcg_gen_and_i32(dc->temp, dc->temp, cpu_psw_c);
        dc->cond = (cond == 4) ? TCG_COND_NE : TCG_COND_EQ;
        dc->value = dc->temp;
        break;
    case 6: /* pz (S == 0) */
        dc->cond = TCG_COND_GE;
        dc->value = cpu_psw_s;
        break;
    case 7: /* n (S == 1) */
        dc->cond = TCG_COND_LT;
        dc->value = cpu_psw_s;
        break;
    case 8: /* ge (S^O) == 0 */
    case 9: /* lt (S^O) == 1 */
        tcg_gen_xor_i32(dc->temp, cpu_psw_o, cpu_psw_s);
        dc->cond = (cond == 8) ? TCG_COND_GE : TCG_COND_LT;
        dc->value = dc->temp;
        break;
    case 10: /* gt ((S^O)|Z) == 0 */
    case 11: /* le ((S^O)|Z) == 1 */
        tcg_gen_xor_i32(dc->temp, cpu_psw_o, cpu_psw_s);
        tcg_gen_sari_i32(dc->temp, dc->temp, 31);
        tcg_gen_andc_i32(dc->temp, cpu_psw_z, dc->temp);
        dc->cond = (cond == 10) ? TCG_COND_NE : TCG_COND_EQ;
        dc->value = dc->temp;
        break;
    case 12: /* o */
        dc->cond = TCG_COND_LT;
        dc->value = cpu_psw_o;
        break;
    case 13: /* no */
        dc->cond = TCG_COND_GE;
        dc->value = cpu_psw_o;
        break;
    case 14: /* always true */
        dc->cond = TCG_COND_ALWAYS;
        dc->value = dc->temp;
        break;
    case 15: /* always false */
        dc->cond = TCG_COND_NEVER;
        dc->value = dc->temp;
        break;
    }
}

static void move_from_cr(DisasContext *ctx, TCGv ret, int cr, uint32_t pc)
{
    switch (cr) {
    case 0: /* PSW */
        gen_helper_pack_psw(ret, tcg_env);
        break;
    case 1: /* PC */
        tcg_gen_movi_i32(ret, pc);
        break;
    case 2: /* USP */
        if (FIELD_EX32(ctx->tb_flags, PSW, U)) {
            tcg_gen_mov_i32(ret, cpu_sp);
        } else {
            tcg_gen_mov_i32(ret, cpu_usp);
        }
        break;
    case 3: /* FPSW */
        tcg_gen_mov_i32(ret, cpu_fpsw);
        break;
    case 8: /* BPSW */
        tcg_gen_mov_i32(ret, cpu_bpsw);
        break;
    case 9: /* BPC */
        tcg_gen_mov_i32(ret, cpu_bpc);
        break;
    case 10: /* ISP */
        if (FIELD_EX32(ctx->tb_flags, PSW, U)) {
            tcg_gen_mov_i32(ret, cpu_isp);
        } else {
            tcg_gen_mov_i32(ret, cpu_sp);
        }
        break;
    case 11: /* FINTV */
        tcg_gen_mov_i32(ret, cpu_fintv);
        break;
    case 12: /* INTB */
        tcg_gen_mov_i32(ret, cpu_intb);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Unimplemented control register %d", cr);
        /* Unimplemented registers return 0 */
        tcg_gen_movi_i32(ret, 0);
        break;
    }
}
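/*
 * Note (an assumption drawn from the surrounding code): cpu_sp is an alias
 * for R0, and which physical stack pointer it represents depends on the
 * cached PSW.U bit in tb_flags.  move_from_cr() above and move_to_cr()
 * below therefore redirect USP/ISP accesses to either R0 or the shadow
 * cpu_usp/cpu_isp globals, and writes that may change PSW.I or PSW.U end
 * the translation block.
 */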
static void move_to_cr(DisasContext *ctx, TCGv val, int cr)
{
    if (cr >= 8 && !is_privileged(ctx, 0)) {
        /* Some control registers can only be written in privileged mode. */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "disallowed write to control register %s", rx_crname(cr));
        return;
    }
    switch (cr) {
    case 0: /* PSW */
        gen_helper_set_psw(tcg_env, val);
        if (is_privileged(ctx, 0)) {
            /* PSW.{I,U} may be updated here; exit the TB. */
            ctx->base.is_jmp = DISAS_UPDATE;
        }
        break;
    /* case 1: to PC not supported */
    case 2: /* USP */
        if (FIELD_EX32(ctx->tb_flags, PSW, U)) {
            tcg_gen_mov_i32(cpu_sp, val);
        } else {
            tcg_gen_mov_i32(cpu_usp, val);
        }
        break;
    case 3: /* FPSW */
        gen_helper_set_fpsw(tcg_env, val);
        break;
    case 8: /* BPSW */
        tcg_gen_mov_i32(cpu_bpsw, val);
        break;
    case 9: /* BPC */
        tcg_gen_mov_i32(cpu_bpc, val);
        break;
    case 10: /* ISP */
        if (FIELD_EX32(ctx->tb_flags, PSW, U)) {
            tcg_gen_mov_i32(cpu_isp, val);
        } else {
            tcg_gen_mov_i32(cpu_sp, val);
        }
        break;
    case 11: /* FINTV */
        tcg_gen_mov_i32(cpu_fintv, val);
        break;
    case 12: /* INTB */
        tcg_gen_mov_i32(cpu_intb, val);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Unimplemented control register %d", cr);
        break;
    }
}

static void push(TCGv val)
{
    tcg_gen_subi_i32(cpu_sp, cpu_sp, 4);
    rx_gen_st(MO_32, val, cpu_sp);
}

static void pop(TCGv ret)
{
    rx_gen_ld(MO_32, ret, cpu_sp);
    tcg_gen_addi_i32(cpu_sp, cpu_sp, 4);
}

/* mov.<bwl> rs,dsp5[rd] */
static bool trans_MOV_rm(DisasContext *ctx, arg_MOV_rm *a)
{
    TCGv mem;
    mem = tcg_temp_new();
    tcg_gen_addi_i32(mem, cpu_regs[a->rd], a->dsp << a->sz);
    rx_gen_st(a->sz, cpu_regs[a->rs], mem);
    return true;
}

/* mov.<bwl> dsp5[rs],rd */
static bool trans_MOV_mr(DisasContext *ctx, arg_MOV_mr *a)
{
    TCGv mem;
    mem = tcg_temp_new();
    tcg_gen_addi_i32(mem, cpu_regs[a->rs], a->dsp << a->sz);
    rx_gen_ld(a->sz, cpu_regs[a->rd], mem);
    return true;
}

/* mov.l #uimm4,rd */
/* mov.l #uimm8,rd */
/* mov.l #imm,rd */
static bool trans_MOV_ir(DisasContext *ctx, arg_MOV_ir *a)
{
    tcg_gen_movi_i32(cpu_regs[a->rd], a->imm);
    return true;
}

/* mov.<bwl> #uimm8,dsp[rd] */
/* mov.<bwl> #imm, dsp[rd] */
static bool trans_MOV_im(DisasContext *ctx, arg_MOV_im *a)
{
    TCGv imm, mem;
    imm = tcg_constant_i32(a->imm);
    mem = tcg_temp_new();
    tcg_gen_addi_i32(mem, cpu_regs[a->rd], a->dsp << a->sz);
    rx_gen_st(a->sz, imm, mem);
    return true;
}

/* mov.<bwl> [ri,rb],rd */
static bool trans_MOV_ar(DisasContext *ctx, arg_MOV_ar *a)
{
    TCGv mem;
    mem = tcg_temp_new();
    rx_gen_regindex(ctx, mem, a->sz, a->ri, a->rb);
    rx_gen_ld(a->sz, cpu_regs[a->rd], mem);
    return true;
}

/* mov.<bwl> rd,[ri,rb] */
static bool trans_MOV_ra(DisasContext *ctx, arg_MOV_ra *a)
{
    TCGv mem;
    mem = tcg_temp_new();
    rx_gen_regindex(ctx, mem, a->sz, a->ri, a->rb);
    rx_gen_st(a->sz, cpu_regs[a->rs], mem);
    return true;
}

/* mov.<bwl> dsp[rs],dsp[rd] */
/* mov.<bwl> rs,dsp[rd] */
/* mov.<bwl> dsp[rs],rd */
/* mov.<bwl> rs,rd */
static bool trans_MOV_mm(DisasContext *ctx, arg_MOV_mm *a)
{
    TCGv tmp, mem, addr;

    if (a->lds == 3 && a->ldd == 3) {
        /* mov.<bwl> rs,rd */
        tcg_gen_ext_i32(cpu_regs[a->rd], cpu_regs[a->rs], a->sz | MO_SIGN);
        return true;
    }

    mem = tcg_temp_new();
    if (a->lds == 3) {
        /* mov.<bwl> rs,dsp[rd] */
        addr = rx_index_addr(ctx, mem, a->ldd, a->sz, a->rs);
        rx_gen_st(a->sz, cpu_regs[a->rd], addr);
    } else if (a->ldd == 3) {
        /* mov.<bwl> dsp[rs],rd */
        addr = rx_index_addr(ctx, mem, a->lds, a->sz, a->rs);
        rx_gen_ld(a->sz, cpu_regs[a->rd], addr);
    } else {
        /* mov.<bwl> dsp[rs],dsp[rd] */
        tmp = tcg_temp_new();
        addr = rx_index_addr(ctx, mem, a->lds, a->sz, a->rs);
        rx_gen_ld(a->sz, tmp, addr);
        addr = rx_index_addr(ctx, mem, a->ldd, a->sz, a->rd);
        rx_gen_st(a->sz, tmp, addr);
    }
    return true;
}

/* mov.<bwl> rs,[rd+] */
/* mov.<bwl> rs,[-rd] */
static bool trans_MOV_rp(DisasContext *ctx, arg_MOV_rp *a)
{
    TCGv val;
    val = tcg_temp_new();
    tcg_gen_mov_i32(val, cpu_regs[a->rs]);
    if (a->ad == 1) {
        tcg_gen_subi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz);
    }
    rx_gen_st(a->sz, val, cpu_regs[a->rd]);
    if (a->ad == 0) {
        tcg_gen_addi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz);
    }
    return true;
}

/* mov.<bwl> [rd+],rs */
/* mov.<bwl> [-rd],rs */
static bool trans_MOV_pr(DisasContext *ctx, arg_MOV_pr *a)
{
    TCGv val;
    val = tcg_temp_new();
    if (a->ad == 1) {
        tcg_gen_subi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz);
    }
    rx_gen_ld(a->sz, val, cpu_regs[a->rd]);
    if (a->ad == 0) {
        tcg_gen_addi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz);
    }
    tcg_gen_mov_i32(cpu_regs[a->rs], val);
    return true;
}

/* movu.<bw> dsp5[rs],rd */
/* movu.<bw> dsp[rs],rd */
static bool trans_MOVU_mr(DisasContext *ctx, arg_MOVU_mr *a)
{
    TCGv mem;
    mem = tcg_temp_new();
    tcg_gen_addi_i32(mem, cpu_regs[a->rs], a->dsp << a->sz);
    rx_gen_ldu(a->sz, cpu_regs[a->rd], mem);
    return true;
}

/* movu.<bw> rs,rd */
static bool trans_MOVU_rr(DisasContext *ctx, arg_MOVU_rr *a)
{
    tcg_gen_ext_i32(cpu_regs[a->rd], cpu_regs[a->rs], a->sz);
    return true;
}

/* movu.<bw> [ri,rb],rd */
static bool trans_MOVU_ar(DisasContext *ctx, arg_MOVU_ar *a)
{
    TCGv mem;
    mem = tcg_temp_new();
    rx_gen_regindex(ctx, mem, a->sz, a->ri, a->rb);
    rx_gen_ldu(a->sz, cpu_regs[a->rd], mem);
    return true;
}

/* movu.<bw> [rd+],rs */
/* movu.<bw> [-rd],rs */
static bool trans_MOVU_pr(DisasContext *ctx, arg_MOVU_pr *a)
{
    TCGv val;
    val = tcg_temp_new();
    if (a->ad == 1) {
        tcg_gen_subi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz);
    }
    rx_gen_ldu(a->sz, val, cpu_regs[a->rd]);
    if (a->ad == 0) {
        tcg_gen_addi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz);
    }
    tcg_gen_mov_i32(cpu_regs[a->rs], val);
    return true;
}


/* pop rd */
static bool trans_POP(DisasContext *ctx, arg_POP *a)
{
    /* mov.l [r0+], rd */
    arg_MOV_rp mov_a;
    mov_a.rd = 0;
    mov_a.rs = a->rd;
    mov_a.ad = 0;
    mov_a.sz = MO_32;
    trans_MOV_pr(ctx, &mov_a);
    return true;
}

/* popc cr */
static bool trans_POPC(DisasContext *ctx, arg_POPC *a)
{
    TCGv val;
    val = tcg_temp_new();
    pop(val);
    move_to_cr(ctx, val, a->cr);
    return true;
}

/* popm rd-rd2 */
static bool trans_POPM(DisasContext *ctx, arg_POPM *a)
{
    int r;
    if (a->rd == 0 || a->rd >= a->rd2) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Invalid register range r%d-r%d", a->rd, a->rd2);
    }
    r = a->rd;
    while (r <= a->rd2 && r < 16) {
        pop(cpu_regs[r++]);
    }
    return true;
}


/* push.<bwl> rs */
static bool trans_PUSH_r(DisasContext *ctx, arg_PUSH_r *a)
{
    TCGv val;
    val = tcg_temp_new();
    tcg_gen_mov_i32(val, cpu_regs[a->rs]);
    tcg_gen_subi_i32(cpu_sp, cpu_sp, 4);
    rx_gen_st(a->sz, val, cpu_sp);
    return true;
}
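/*
 * Note (inferred from trans_PUSH_r above): push.b/.w still reserve a full
 * 32-bit stack slot; SP is pre-decremented by 4 and only the low "sz"
 * bytes are stored at the new SP.  The source is copied to a temporary
 * first, so pushing SP itself stores its value from before the decrement.
 */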
/* push.<bwl> dsp[rs] */
static bool trans_PUSH_m(DisasContext *ctx, arg_PUSH_m *a)
{
    TCGv mem, val, addr;
    mem = tcg_temp_new();
    val = tcg_temp_new();
    addr = rx_index_addr(ctx, mem, a->ld, a->sz, a->rs);
    rx_gen_ld(a->sz, val, addr);
    tcg_gen_subi_i32(cpu_sp, cpu_sp, 4);
    rx_gen_st(a->sz, val, cpu_sp);
    return true;
}

/* pushc cr */
static bool trans_PUSHC(DisasContext *ctx, arg_PUSHC *a)
{
    TCGv val;
    val = tcg_temp_new();
    move_from_cr(ctx, val, a->cr, ctx->pc);
    push(val);
    return true;
}

/* pushm rs-rs2 */
static bool trans_PUSHM(DisasContext *ctx, arg_PUSHM *a)
{
    int r;

    if (a->rs == 0 || a->rs >= a->rs2) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Invalid register range r%d-r%d", a->rs, a->rs2);
    }
    r = a->rs2;
    while (r >= a->rs && r >= 0) {
        push(cpu_regs[r--]);
    }
    return true;
}

/* xchg rs,rd */
static bool trans_XCHG_rr(DisasContext *ctx, arg_XCHG_rr *a)
{
    TCGv tmp;
    tmp = tcg_temp_new();
    tcg_gen_mov_i32(tmp, cpu_regs[a->rs]);
    tcg_gen_mov_i32(cpu_regs[a->rs], cpu_regs[a->rd]);
    tcg_gen_mov_i32(cpu_regs[a->rd], tmp);
    return true;
}

/* xchg dsp[rs].<mi>,rd */
static bool trans_XCHG_mr(DisasContext *ctx, arg_XCHG_mr *a)
{
    TCGv mem, addr;
    mem = tcg_temp_new();
    switch (a->mi) {
    case 0: /* dsp[rs].b */
    case 1: /* dsp[rs].w */
    case 2: /* dsp[rs].l */
        addr = rx_index_addr(ctx, mem, a->ld, a->mi, a->rs);
        break;
    case 3: /* dsp[rs].uw */
    case 4: /* dsp[rs].ub */
        addr = rx_index_addr(ctx, mem, a->ld, 4 - a->mi, a->rs);
        break;
    default:
        g_assert_not_reached();
    }
    tcg_gen_atomic_xchg_i32(cpu_regs[a->rd], addr, cpu_regs[a->rd],
                            0, mi_to_mop(a->mi));
    return true;
}

static inline void stcond(TCGCond cond, int rd, int imm)
{
    TCGv z;
    TCGv _imm;
    z = tcg_constant_i32(0);
    _imm = tcg_constant_i32(imm);
    tcg_gen_movcond_i32(cond, cpu_regs[rd], cpu_psw_z, z,
                        _imm, cpu_regs[rd]);
}

/* stz #imm,rd */
static bool trans_STZ(DisasContext *ctx, arg_STZ *a)
{
    stcond(TCG_COND_EQ, a->rd, a->imm);
    return true;
}

/* stnz #imm,rd */
static bool trans_STNZ(DisasContext *ctx, arg_STNZ *a)
{
    stcond(TCG_COND_NE, a->rd, a->imm);
    return true;
}

/* sccnd.<bwl> rd */
/* sccnd.<bwl> dsp:[rd] */
static bool trans_SCCnd(DisasContext *ctx, arg_SCCnd *a)
{
    DisasCompare dc;
    TCGv val, mem, addr;
    dc.temp = tcg_temp_new();
    psw_cond(&dc, a->cd);
    if (a->ld < 3) {
        val = tcg_temp_new();
        mem = tcg_temp_new();
        tcg_gen_setcondi_i32(dc.cond, val, dc.value, 0);
        addr = rx_index_addr(ctx, mem, a->ld, a->sz, a->rd);
        rx_gen_st(a->sz, val, addr);
    } else {
        tcg_gen_setcondi_i32(dc.cond, cpu_regs[a->rd], dc.value, 0);
    }
    return true;
}

/* rtsd #imm */
static bool trans_RTSD_i(DisasContext *ctx, arg_RTSD_i *a)
{
    tcg_gen_addi_i32(cpu_sp, cpu_sp, a->imm << 2);
    pop(cpu_pc);
    ctx->base.is_jmp = DISAS_JUMP;
    return true;
}
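/*
 * Note (inferred from trans_RTSD_irr below): for "rtsd #imm, rd-rd2" the
 * immediate counts the whole frame in 32-bit words, including the saved
 * rd..rd2, so "adj" skips only the locals before the registers and the
 * return PC are popped.
 */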
/* rtsd #imm, rd-rd2 */
static bool trans_RTSD_irr(DisasContext *ctx, arg_RTSD_irr *a)
{
    int dst;
    int adj;

    if (a->rd2 >= a->rd) {
        adj = a->imm - (a->rd2 - a->rd + 1);
    } else {
        adj = a->imm - (15 - a->rd + 1);
    }

    tcg_gen_addi_i32(cpu_sp, cpu_sp, adj << 2);
    dst = a->rd;
    while (dst <= a->rd2 && dst < 16) {
        pop(cpu_regs[dst++]);
    }
    pop(cpu_pc);
    ctx->base.is_jmp = DISAS_JUMP;
    return true;
}

typedef void (*op2fn)(TCGv ret, TCGv arg1);
typedef void (*op3fn)(TCGv ret, TCGv arg1, TCGv arg2);

static inline void rx_gen_op_rr(op2fn opr, int dst, int src)
{
    opr(cpu_regs[dst], cpu_regs[src]);
}

static inline void rx_gen_op_rrr(op3fn opr, int dst, int src, int src2)
{
    opr(cpu_regs[dst], cpu_regs[src], cpu_regs[src2]);
}

static inline void rx_gen_op_irr(op3fn opr, int dst, int src, uint32_t src2)
{
    TCGv imm = tcg_constant_i32(src2);
    opr(cpu_regs[dst], cpu_regs[src], imm);
}

static inline void rx_gen_op_mr(op3fn opr, DisasContext *ctx,
                                int dst, int src, int ld, int mi)
{
    TCGv val, mem;
    mem = tcg_temp_new();
    val = rx_load_source(ctx, mem, ld, mi, src);
    opr(cpu_regs[dst], cpu_regs[dst], val);
}

static void rx_and(TCGv ret, TCGv arg1, TCGv arg2)
{
    tcg_gen_and_i32(cpu_psw_s, arg1, arg2);
    tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
    tcg_gen_mov_i32(ret, cpu_psw_s);
}

/* and #uimm:4, rd */
/* and #imm, rd */
static bool trans_AND_ir(DisasContext *ctx, arg_AND_ir *a)
{
    rx_gen_op_irr(rx_and, a->rd, a->rd, a->imm);
    return true;
}

/* and dsp[rs], rd */
/* and rs,rd */
static bool trans_AND_mr(DisasContext *ctx, arg_AND_mr *a)
{
    rx_gen_op_mr(rx_and, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}

/* and rs,rs2,rd */
static bool trans_AND_rrr(DisasContext *ctx, arg_AND_rrr *a)
{
    rx_gen_op_rrr(rx_and, a->rd, a->rs, a->rs2);
    return true;
}

static void rx_or(TCGv ret, TCGv arg1, TCGv arg2)
{
    tcg_gen_or_i32(cpu_psw_s, arg1, arg2);
    tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
    tcg_gen_mov_i32(ret, cpu_psw_s);
}

/* or #uimm:4, rd */
/* or #imm, rd */
static bool trans_OR_ir(DisasContext *ctx, arg_OR_ir *a)
{
    rx_gen_op_irr(rx_or, a->rd, a->rd, a->imm);
    return true;
}

/* or dsp[rs], rd */
/* or rs,rd */
static bool trans_OR_mr(DisasContext *ctx, arg_OR_mr *a)
{
    rx_gen_op_mr(rx_or, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}

/* or rs,rs2,rd */
static bool trans_OR_rrr(DisasContext *ctx, arg_OR_rrr *a)
{
    rx_gen_op_rrr(rx_or, a->rd, a->rs, a->rs2);
    return true;
}

static void rx_xor(TCGv ret, TCGv arg1, TCGv arg2)
{
    tcg_gen_xor_i32(cpu_psw_s, arg1, arg2);
    tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
    tcg_gen_mov_i32(ret, cpu_psw_s);
}

/* xor #imm, rd */
static bool trans_XOR_ir(DisasContext *ctx, arg_XOR_ir *a)
{
    rx_gen_op_irr(rx_xor, a->rd, a->rd, a->imm);
    return true;
}

/* xor dsp[rs], rd */
/* xor rs,rd */
static bool trans_XOR_mr(DisasContext *ctx, arg_XOR_mr *a)
{
    rx_gen_op_mr(rx_xor, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}

static void rx_tst(TCGv ret, TCGv arg1, TCGv arg2)
{
    tcg_gen_and_i32(cpu_psw_s, arg1, arg2);
    tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
}

/* tst #imm, rd */
static bool trans_TST_ir(DisasContext *ctx, arg_TST_ir *a)
{
    rx_gen_op_irr(rx_tst, a->rd, a->rd, a->imm);
    return true;
}

/* tst dsp[rs], rd */
/* tst rs, rd */
static bool trans_TST_mr(DisasContext *ctx, arg_TST_mr *a)
{
    rx_gen_op_mr(rx_tst, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}

static void rx_not(TCGv ret, TCGv arg1)
{
    tcg_gen_not_i32(ret, arg1);
    tcg_gen_mov_i32(cpu_psw_z, ret);
    tcg_gen_mov_i32(cpu_psw_s, ret);
}

/* not rd */
/* not rs, rd */
static bool trans_NOT_rr(DisasContext *ctx, arg_NOT_rr *a)
{
    rx_gen_op_rr(rx_not, a->rd, a->rs);
    return true;
}

static void rx_neg(TCGv ret, TCGv arg1)
{
    tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_psw_o, arg1, 0x80000000);
    tcg_gen_neg_i32(ret, arg1);
    tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_psw_c, ret, 0);
    tcg_gen_mov_i32(cpu_psw_z, ret);
    tcg_gen_mov_i32(cpu_psw_s, ret);
}


/* neg rd */
/* neg rs, rd */
static bool trans_NEG_rr(DisasContext *ctx, arg_NEG_rr *a)
{
    rx_gen_op_rr(rx_neg, a->rd, a->rs);
    return true;
}

/* ret = arg1 + arg2 + psw_c */
static void rx_adc(TCGv ret, TCGv arg1, TCGv arg2)
{
    TCGv z = tcg_constant_i32(0);
    tcg_gen_add2_i32(cpu_psw_s, cpu_psw_c, arg1, z, cpu_psw_c, z);
    tcg_gen_add2_i32(cpu_psw_s, cpu_psw_c, cpu_psw_s, cpu_psw_c, arg2, z);
    tcg_gen_xor_i32(cpu_psw_o, cpu_psw_s, arg1);
    tcg_gen_xor_i32(cpu_psw_z, arg1, arg2);
    tcg_gen_andc_i32(cpu_psw_o, cpu_psw_o, cpu_psw_z);
    tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
    tcg_gen_mov_i32(ret, cpu_psw_s);
}

/* adc #imm, rd */
static bool trans_ADC_ir(DisasContext *ctx, arg_ADC_ir *a)
{
    rx_gen_op_irr(rx_adc, a->rd, a->rd, a->imm);
    return true;
}

/* adc rs, rd */
static bool trans_ADC_rr(DisasContext *ctx, arg_ADC_rr *a)
{
    rx_gen_op_rrr(rx_adc, a->rd, a->rd, a->rs);
    return true;
}

/* adc dsp[rs], rd */
static bool trans_ADC_mr(DisasContext *ctx, arg_ADC_mr *a)
{
    /* Only the long form (mi == 2) is valid. */
    if (a->mi != 2) {
        return false;
    }
    rx_gen_op_mr(rx_adc, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}

/* ret = arg1 + arg2 */
static void rx_add(TCGv ret, TCGv arg1, TCGv arg2)
{
    TCGv z = tcg_constant_i32(0);
    tcg_gen_add2_i32(cpu_psw_s, cpu_psw_c, arg1, z, arg2, z);
    tcg_gen_xor_i32(cpu_psw_o, cpu_psw_s, arg1);
    tcg_gen_xor_i32(cpu_psw_z, arg1, arg2);
    tcg_gen_andc_i32(cpu_psw_o, cpu_psw_o, cpu_psw_z);
    tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
    tcg_gen_mov_i32(ret, cpu_psw_s);
}

/* add #uimm4, rd */
/* add #imm, rs, rd */
static bool trans_ADD_irr(DisasContext *ctx, arg_ADD_irr *a)
{
    rx_gen_op_irr(rx_add, a->rd, a->rs2, a->imm);
    return true;
}

/* add rs, rd */
/* add dsp[rs], rd */
static bool trans_ADD_mr(DisasContext *ctx, arg_ADD_mr *a)
{
    rx_gen_op_mr(rx_add, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}

/* add rs, rs2, rd */
static bool trans_ADD_rrr(DisasContext *ctx, arg_ADD_rrr *a)
{
    rx_gen_op_rrr(rx_add, a->rd, a->rs, a->rs2);
    return true;
}

/* ret = arg1 - arg2 */
static void rx_sub(TCGv ret, TCGv arg1, TCGv arg2)
{
    tcg_gen_sub_i32(cpu_psw_s, arg1, arg2);
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_psw_c, arg1, arg2);
    tcg_gen_xor_i32(cpu_psw_o, cpu_psw_s, arg1);
    tcg_gen_xor_i32(cpu_psw_z, arg1, arg2);
    tcg_gen_and_i32(cpu_psw_o, cpu_psw_o, cpu_psw_z);
    tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
    /* CMP does not need the result; ret may be NULL. */
    if (ret) {
        tcg_gen_mov_i32(ret, cpu_psw_s);
    }
}

static void rx_cmp(TCGv dummy, TCGv arg1, TCGv arg2)
{
    rx_sub(NULL, arg1, arg2);
}

/* ret = arg1 - arg2 - !psw_c */
/* -> ret = arg1 + ~arg2 + psw_c */
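/*
 * Note: the rewrite above follows from two's complement arithmetic:
 * ~arg2 == -arg2 - 1, so arg1 + ~arg2 + psw_c
 *       == arg1 - arg2 - (1 - psw_c) == arg1 - arg2 - !psw_c,
 * which is why rx_sbb() can simply reuse rx_adc() on the inverted operand.
 */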
static void rx_sbb(TCGv ret, TCGv arg1, TCGv arg2)
{
    TCGv temp;
    temp = tcg_temp_new();
    tcg_gen_not_i32(temp, arg2);
    rx_adc(ret, arg1, temp);
}

/* cmp #imm4, rs2 */
/* cmp #imm8, rs2 */
/* cmp #imm, rs2 */
static bool trans_CMP_ir(DisasContext *ctx, arg_CMP_ir *a)
{
    rx_gen_op_irr(rx_cmp, 0, a->rs2, a->imm);
    return true;
}

/* cmp rs, rs2 */
/* cmp dsp[rs], rs2 */
static bool trans_CMP_mr(DisasContext *ctx, arg_CMP_mr *a)
{
    rx_gen_op_mr(rx_cmp, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}

/* sub #imm4, rd */
static bool trans_SUB_ir(DisasContext *ctx, arg_SUB_ir *a)
{
    rx_gen_op_irr(rx_sub, a->rd, a->rd, a->imm);
    return true;
}

/* sub rs, rd */
/* sub dsp[rs], rd */
static bool trans_SUB_mr(DisasContext *ctx, arg_SUB_mr *a)
{
    rx_gen_op_mr(rx_sub, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}

/* sub rs2, rs, rd */
static bool trans_SUB_rrr(DisasContext *ctx, arg_SUB_rrr *a)
{
    rx_gen_op_rrr(rx_sub, a->rd, a->rs2, a->rs);
    return true;
}

/* sbb rs, rd */
static bool trans_SBB_rr(DisasContext *ctx, arg_SBB_rr *a)
{
    rx_gen_op_rrr(rx_sbb, a->rd, a->rd, a->rs);
    return true;
}

/* sbb dsp[rs], rd */
static bool trans_SBB_mr(DisasContext *ctx, arg_SBB_mr *a)
{
    /* Only the long form (mi == 2) is valid. */
    if (a->mi != 2) {
        return false;
    }
    rx_gen_op_mr(rx_sbb, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}

/* abs rd */
/* abs rs, rd */
static bool trans_ABS_rr(DisasContext *ctx, arg_ABS_rr *a)
{
    rx_gen_op_rr(tcg_gen_abs_i32, a->rd, a->rs);
    return true;
}

/* max #imm, rd */
static bool trans_MAX_ir(DisasContext *ctx, arg_MAX_ir *a)
{
    rx_gen_op_irr(tcg_gen_smax_i32, a->rd, a->rd, a->imm);
    return true;
}

/* max rs, rd */
/* max dsp[rs], rd */
static bool trans_MAX_mr(DisasContext *ctx, arg_MAX_mr *a)
{
    rx_gen_op_mr(tcg_gen_smax_i32, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}

/* min #imm, rd */
static bool trans_MIN_ir(DisasContext *ctx, arg_MIN_ir *a)
{
    rx_gen_op_irr(tcg_gen_smin_i32, a->rd, a->rd, a->imm);
    return true;
}

/* min rs, rd */
/* min dsp[rs], rd */
static bool trans_MIN_mr(DisasContext *ctx, arg_MIN_mr *a)
{
    rx_gen_op_mr(tcg_gen_smin_i32, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}

/* mul #uimm4, rd */
/* mul #imm, rd */
static bool trans_MUL_ir(DisasContext *ctx, arg_MUL_ir *a)
{
    rx_gen_op_irr(tcg_gen_mul_i32, a->rd, a->rd, a->imm);
    return true;
}

/* mul rs, rd */
/* mul dsp[rs], rd */
static bool trans_MUL_mr(DisasContext *ctx, arg_MUL_mr *a)
{
    rx_gen_op_mr(tcg_gen_mul_i32, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}

/* mul rs, rs2, rd */
static bool trans_MUL_rrr(DisasContext *ctx, arg_MUL_rrr *a)
{
    rx_gen_op_rrr(tcg_gen_mul_i32, a->rd, a->rs, a->rs2);
    return true;
}

/* emul #imm, rd */
static bool trans_EMUL_ir(DisasContext *ctx, arg_EMUL_ir *a)
{
    TCGv imm = tcg_constant_i32(a->imm);
    if (a->rd > 14) {
        qemu_log_mask(LOG_GUEST_ERROR, "rd too large %d", a->rd);
    }
    tcg_gen_muls2_i32(cpu_regs[a->rd], cpu_regs[(a->rd + 1) & 15],
                      cpu_regs[a->rd], imm);
    return true;
}

/* emul rs, rd */
/* emul dsp[rs], rd */
static bool trans_EMUL_mr(DisasContext *ctx, arg_EMUL_mr *a)
{
    TCGv val, mem;
    if (a->rd > 14) {
        qemu_log_mask(LOG_GUEST_ERROR, "rd too large %d", a->rd);
    }
    mem = tcg_temp_new();
    val = rx_load_source(ctx, mem, a->ld, a->mi, a->rs);
    tcg_gen_muls2_i32(cpu_regs[a->rd], cpu_regs[(a->rd + 1) & 15],
                      cpu_regs[a->rd], val);
    return true;
}

/* emulu #imm, rd */
static bool trans_EMULU_ir(DisasContext *ctx, arg_EMULU_ir *a)
{
    TCGv imm = tcg_constant_i32(a->imm);
    if (a->rd > 14) {
        qemu_log_mask(LOG_GUEST_ERROR, "rd too large %d", a->rd);
    }
    tcg_gen_mulu2_i32(cpu_regs[a->rd], cpu_regs[(a->rd + 1) & 15],
                      cpu_regs[a->rd], imm);
    return true;
}

/* emulu rs, rd */
/* emulu dsp[rs], rd */
static bool trans_EMULU_mr(DisasContext *ctx, arg_EMULU_mr *a)
{
    TCGv val, mem;
    if (a->rd > 14) {
        qemu_log_mask(LOG_GUEST_ERROR, "rd too large %d", a->rd);
    }
    mem = tcg_temp_new();
    val = rx_load_source(ctx, mem, a->ld, a->mi, a->rs);
    tcg_gen_mulu2_i32(cpu_regs[a->rd], cpu_regs[(a->rd + 1) & 15],
                      cpu_regs[a->rd], val);
    return true;
}

static void rx_div(TCGv ret, TCGv arg1, TCGv arg2)
{
    gen_helper_div(ret, tcg_env, arg1, arg2);
}

static void rx_divu(TCGv ret, TCGv arg1, TCGv arg2)
{
    gen_helper_divu(ret, tcg_env, arg1, arg2);
}

/* div #imm, rd */
static bool trans_DIV_ir(DisasContext *ctx, arg_DIV_ir *a)
{
    rx_gen_op_irr(rx_div, a->rd, a->rd, a->imm);
    return true;
}

/* div rs, rd */
/* div dsp[rs], rd */
static bool trans_DIV_mr(DisasContext *ctx, arg_DIV_mr *a)
{
    rx_gen_op_mr(rx_div, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}

/* divu #imm, rd */
static bool trans_DIVU_ir(DisasContext *ctx, arg_DIVU_ir *a)
{
    rx_gen_op_irr(rx_divu, a->rd, a->rd, a->imm);
    return true;
}

/* divu rs, rd */
/* divu dsp[rs], rd */
static bool trans_DIVU_mr(DisasContext *ctx, arg_DIVU_mr *a)
{
    rx_gen_op_mr(rx_divu, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}


/* shll #imm:5, rd */
/* shll #imm:5, rs2, rd */
static bool trans_SHLL_irr(DisasContext *ctx, arg_SHLL_irr *a)
{
    TCGv tmp;
    tmp = tcg_temp_new();
    if (a->imm) {
        tcg_gen_sari_i32(cpu_psw_c, cpu_regs[a->rs2], 32 - a->imm);
        tcg_gen_shli_i32(cpu_regs[a->rd], cpu_regs[a->rs2], a->imm);
        tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_psw_o, cpu_psw_c, 0);
        tcg_gen_setcondi_i32(TCG_COND_EQ, tmp, cpu_psw_c, 0xffffffff);
        tcg_gen_or_i32(cpu_psw_o, cpu_psw_o, tmp);
        tcg_gen_setcondi_i32(TCG_COND_NE, cpu_psw_c, cpu_psw_c, 0);
    } else {
        tcg_gen_mov_i32(cpu_regs[a->rd], cpu_regs[a->rs2]);
        tcg_gen_movi_i32(cpu_psw_c, 0);
        tcg_gen_movi_i32(cpu_psw_o, 0);
    }
    tcg_gen_mov_i32(cpu_psw_z, cpu_regs[a->rd]);
    tcg_gen_mov_i32(cpu_psw_s, cpu_regs[a->rd]);
    return true;
}

/* shll rs, rd */
static bool trans_SHLL_rr(DisasContext *ctx, arg_SHLL_rr *a)
{
    TCGLabel *noshift, *done;
    TCGv count, tmp;

    noshift = gen_new_label();
    done = gen_new_label();
    /* if (cpu_regs[a->rs]) { */
    tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_regs[a->rs], 0, noshift);
    count = tcg_temp_new();
    tmp = tcg_temp_new();
    tcg_gen_andi_i32(tmp, cpu_regs[a->rs], 31);
    tcg_gen_sub_i32(count, tcg_constant_i32(32), tmp);
    tcg_gen_sar_i32(cpu_psw_c, cpu_regs[a->rd], count);
    tcg_gen_shl_i32(cpu_regs[a->rd], cpu_regs[a->rd], tmp);
    tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_psw_o, cpu_psw_c, 0);
    tcg_gen_setcondi_i32(TCG_COND_EQ, tmp, cpu_psw_c, 0xffffffff);
    tcg_gen_or_i32(cpu_psw_o, cpu_psw_o, tmp);
    tcg_gen_setcondi_i32(TCG_COND_NE, cpu_psw_c, cpu_psw_c, 0);
    tcg_gen_br(done);
    /* } else { */
    gen_set_label(noshift);
    tcg_gen_movi_i32(cpu_psw_c, 0);
    tcg_gen_movi_i32(cpu_psw_o, 0);
    /* } */
    gen_set_label(done);
    tcg_gen_mov_i32(cpu_psw_z, cpu_regs[a->rd]);
    tcg_gen_mov_i32(cpu_psw_s, cpu_regs[a->rd]);
    return true;
}

static inline void shiftr_imm(uint32_t rd, uint32_t rs, uint32_t imm,
                              unsigned int alith)
{
    static void (* const gen_sXri[])(TCGv ret, TCGv arg1, int arg2) = {
        tcg_gen_shri_i32, tcg_gen_sari_i32,
    };
    tcg_debug_assert(alith < 2);
    if (imm) {
        gen_sXri[alith](cpu_regs[rd], cpu_regs[rs], imm - 1);
        tcg_gen_andi_i32(cpu_psw_c, cpu_regs[rd], 0x00000001);
        gen_sXri[alith](cpu_regs[rd], cpu_regs[rd], 1);
    } else {
        tcg_gen_mov_i32(cpu_regs[rd], cpu_regs[rs]);
        tcg_gen_movi_i32(cpu_psw_c, 0);
    }
    tcg_gen_movi_i32(cpu_psw_o, 0);
    tcg_gen_mov_i32(cpu_psw_z, cpu_regs[rd]);
    tcg_gen_mov_i32(cpu_psw_s, cpu_regs[rd]);
}

static inline void shiftr_reg(uint32_t rd, uint32_t rs, unsigned int alith)
{
    TCGLabel *noshift, *done;
    TCGv count;
    static void (* const gen_sXri[])(TCGv ret, TCGv arg1, int arg2) = {
        tcg_gen_shri_i32, tcg_gen_sari_i32,
    };
    static void (* const gen_sXr[])(TCGv ret, TCGv arg1, TCGv arg2) = {
        tcg_gen_shr_i32, tcg_gen_sar_i32,
    };
    tcg_debug_assert(alith < 2);
    noshift = gen_new_label();
    done = gen_new_label();
    count = tcg_temp_new();
    /* if (cpu_regs[rs]) { */
    tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_regs[rs], 0, noshift);
    tcg_gen_andi_i32(count, cpu_regs[rs], 31);
    tcg_gen_subi_i32(count, count, 1);
    gen_sXr[alith](cpu_regs[rd], cpu_regs[rd], count);
    tcg_gen_andi_i32(cpu_psw_c, cpu_regs[rd], 0x00000001);
    gen_sXri[alith](cpu_regs[rd], cpu_regs[rd], 1);
    tcg_gen_br(done);
    /* } else { */
    gen_set_label(noshift);
    tcg_gen_movi_i32(cpu_psw_c, 0);
    /* } */
    gen_set_label(done);
    tcg_gen_movi_i32(cpu_psw_o, 0);
    tcg_gen_mov_i32(cpu_psw_z, cpu_regs[rd]);
    tcg_gen_mov_i32(cpu_psw_s, cpu_regs[rd]);
}

/* shar #imm:5, rd */
/* shar #imm:5, rs2, rd */
static bool trans_SHAR_irr(DisasContext *ctx, arg_SHAR_irr *a)
{
    shiftr_imm(a->rd, a->rs2, a->imm, 1);
    return true;
}

/* shar rs, rd */
static bool trans_SHAR_rr(DisasContext *ctx, arg_SHAR_rr *a)
{
    shiftr_reg(a->rd, a->rs, 1);
    return true;
}

/* shlr #imm:5, rd */
/* shlr #imm:5, rs2, rd */
static bool trans_SHLR_irr(DisasContext *ctx, arg_SHLR_irr *a)
{
    shiftr_imm(a->rd, a->rs2, a->imm, 0);
    return true;
}

/* shlr rs, rd */
static bool trans_SHLR_rr(DisasContext *ctx, arg_SHLR_rr *a)
{
    shiftr_reg(a->rd, a->rs, 0);
    return true;
}

/* rolc rd */
static bool trans_ROLC(DisasContext *ctx, arg_ROLC *a)
{
    TCGv tmp;
    tmp = tcg_temp_new();
    tcg_gen_shri_i32(tmp, cpu_regs[a->rd], 31);
    tcg_gen_shli_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1);
    tcg_gen_or_i32(cpu_regs[a->rd], cpu_regs[a->rd], cpu_psw_c);
    tcg_gen_mov_i32(cpu_psw_c, tmp);
    tcg_gen_mov_i32(cpu_psw_z, cpu_regs[a->rd]);
    tcg_gen_mov_i32(cpu_psw_s, cpu_regs[a->rd]);
    return true;
}

/* rorc rd */
static bool trans_RORC(DisasContext *ctx, arg_RORC *a)
{
    TCGv tmp;
    tmp = tcg_temp_new();
    tcg_gen_andi_i32(tmp, cpu_regs[a->rd], 0x00000001);
    tcg_gen_shri_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1);
    tcg_gen_shli_i32(cpu_psw_c, cpu_psw_c, 31);
    tcg_gen_or_i32(cpu_regs[a->rd], cpu_regs[a->rd], cpu_psw_c);
    tcg_gen_mov_i32(cpu_psw_c, tmp);
    tcg_gen_mov_i32(cpu_psw_z, cpu_regs[a->rd]);
    tcg_gen_mov_i32(cpu_psw_s, cpu_regs[a->rd]);
    return true;
}

enum {ROTR = 0, ROTL = 1};
enum {ROT_IMM = 0, ROT_REG = 1};
static inline void rx_rot(int ir, int dir, int rd, int src)
{
    switch (dir) {
    case ROTL:
        if (ir == ROT_IMM) {
            tcg_gen_rotli_i32(cpu_regs[rd], cpu_regs[rd], src);
        } else {
            tcg_gen_rotl_i32(cpu_regs[rd], cpu_regs[rd], cpu_regs[src]);
        }
        tcg_gen_andi_i32(cpu_psw_c, cpu_regs[rd], 0x00000001);
        break;
    case ROTR:
        if (ir == ROT_IMM) {
            tcg_gen_rotri_i32(cpu_regs[rd], cpu_regs[rd], src);
        } else {
            tcg_gen_rotr_i32(cpu_regs[rd], cpu_regs[rd], cpu_regs[src]);
        }
        tcg_gen_shri_i32(cpu_psw_c, cpu_regs[rd], 31);
        break;
    }
    tcg_gen_mov_i32(cpu_psw_z, cpu_regs[rd]);
    tcg_gen_mov_i32(cpu_psw_s, cpu_regs[rd]);
}

/* rotl #imm, rd */
static bool trans_ROTL_ir(DisasContext *ctx, arg_ROTL_ir *a)
{
    rx_rot(ROT_IMM, ROTL, a->rd, a->imm);
    return true;
}

/* rotl rs, rd */
static bool trans_ROTL_rr(DisasContext *ctx, arg_ROTL_rr *a)
{
    rx_rot(ROT_REG, ROTL, a->rd, a->rs);
    return true;
}

/* rotr #imm, rd */
static bool trans_ROTR_ir(DisasContext *ctx, arg_ROTR_ir *a)
{
    rx_rot(ROT_IMM, ROTR, a->rd, a->imm);
    return true;
}

/* rotr rs, rd */
static bool trans_ROTR_rr(DisasContext *ctx, arg_ROTR_rr *a)
{
    rx_rot(ROT_REG, ROTR, a->rd, a->rs);
    return true;
}

/* revl rs, rd */
static bool trans_REVL(DisasContext *ctx, arg_REVL *a)
{
    tcg_gen_bswap32_i32(cpu_regs[a->rd], cpu_regs[a->rs]);
    return true;
}

/* revw rs, rd */
static bool trans_REVW(DisasContext *ctx, arg_REVW *a)
{
    TCGv tmp;
    tmp = tcg_temp_new();
    tcg_gen_andi_i32(tmp, cpu_regs[a->rs], 0x00ff00ff);
    tcg_gen_shli_i32(tmp, tmp, 8);
    tcg_gen_shri_i32(cpu_regs[a->rd], cpu_regs[a->rs], 8);
    tcg_gen_andi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 0x00ff00ff);
    tcg_gen_or_i32(cpu_regs[a->rd], cpu_regs[a->rd], tmp);
    return true;
}

/* conditional branch helper */
static void rx_bcnd_main(DisasContext *ctx, int cd, int dst)
{
    DisasCompare dc;
    TCGLabel *t, *done;

    switch (cd) {
    case 0 ... 13:
        dc.temp = tcg_temp_new();
        psw_cond(&dc, cd);
        t = gen_new_label();
        done = gen_new_label();
        tcg_gen_brcondi_i32(dc.cond, dc.value, 0, t);
        gen_goto_tb(ctx, 0, ctx->base.pc_next);
        tcg_gen_br(done);
        gen_set_label(t);
        gen_goto_tb(ctx, 1, ctx->pc + dst);
        gen_set_label(done);
        break;
    case 14:
        /* always true case */
        gen_goto_tb(ctx, 0, ctx->pc + dst);
        break;
    case 15:
        /* always false case */
        /* Nothing to do */
        break;
    }
}

/* beq dsp:3 / bne dsp:3 */
/* beq dsp:8 / bne dsp:8 */
/* bc dsp:8 / bnc dsp:8 */
/* bgtu dsp:8 / bleu dsp:8 */
/* bpz dsp:8 / bn dsp:8 */
/* bge dsp:8 / blt dsp:8 */
/* bgt dsp:8 / ble dsp:8 */
/* bo dsp:8 / bno dsp:8 */
/* beq dsp:16 / bne dsp:16 */
static bool trans_BCnd(DisasContext *ctx, arg_BCnd *a)
{
    rx_bcnd_main(ctx, a->cd, a->dsp);
    return true;
}

/* bra dsp:3 */
/* bra dsp:8 */
/* bra dsp:16 */
/* bra dsp:24 */
static bool trans_BRA(DisasContext *ctx, arg_BRA *a)
{
    rx_bcnd_main(ctx, 14, a->dsp);
    return true;
}

/* bra rs */
static bool trans_BRA_l(DisasContext *ctx, arg_BRA_l *a)
{
    tcg_gen_addi_i32(cpu_pc, cpu_regs[a->rd], ctx->pc);
    ctx->base.is_jmp = DISAS_JUMP;
    return true;
}

static inline void rx_save_pc(DisasContext *ctx)
{
    TCGv pc = tcg_constant_i32(ctx->base.pc_next);
    push(pc);
}

/* jmp rs */
static bool trans_JMP(DisasContext *ctx, arg_JMP *a)
{
    tcg_gen_mov_i32(cpu_pc, cpu_regs[a->rs]);
    ctx->base.is_jmp = DISAS_JUMP;
    return true;
}

/* jsr rs */
static bool trans_JSR(DisasContext *ctx, arg_JSR *a)
{
    rx_save_pc(ctx);
    tcg_gen_mov_i32(cpu_pc, cpu_regs[a->rs]);
    ctx->base.is_jmp = DISAS_JUMP;
    return true;
}

/* bsr dsp:16 */
/* bsr dsp:24 */
static bool trans_BSR(DisasContext *ctx, arg_BSR *a)
{
    rx_save_pc(ctx);
    rx_bcnd_main(ctx, 14, a->dsp);
    return true;
}

/* bsr rs */
static bool trans_BSR_l(DisasContext *ctx, arg_BSR_l *a)
{
    rx_save_pc(ctx);
    tcg_gen_addi_i32(cpu_pc, cpu_regs[a->rd], ctx->pc);
    ctx->base.is_jmp = DISAS_JUMP;
    return true;
}

/* rts */
static bool trans_RTS(DisasContext *ctx, arg_RTS *a)
{
    pop(cpu_pc);
    ctx->base.is_jmp = DISAS_JUMP;
    return true;
}

/* nop */
static bool trans_NOP(DisasContext *ctx, arg_NOP *a)
{
    return true;
}

/* scmpu */
static bool trans_SCMPU(DisasContext *ctx, arg_SCMPU *a)
{
    gen_helper_scmpu(tcg_env);
    return true;
}

/* smovu */
static bool trans_SMOVU(DisasContext *ctx, arg_SMOVU *a)
{
    gen_helper_smovu(tcg_env);
    return true;
}

/* smovf */
static bool trans_SMOVF(DisasContext *ctx, arg_SMOVF *a)
{
    gen_helper_smovf(tcg_env);
    return true;
}

/* smovb */
static bool trans_SMOVB(DisasContext *ctx, arg_SMOVB *a)
{
    gen_helper_smovb(tcg_env);
    return true;
}

#define STRING(op)                              \
    do {                                        \
        TCGv size = tcg_constant_i32(a->sz);    \
        gen_helper_##op(tcg_env, size);         \
    } while (0)

/* suntil.<bwl> */
static bool trans_SUNTIL(DisasContext *ctx, arg_SUNTIL *a)
{
    STRING(suntil);
    return true;
}

/* swhile.<bwl> */
static bool trans_SWHILE(DisasContext *ctx, arg_SWHILE *a)
{
    STRING(swhile);
    return true;
}

/* sstr.<bwl> */
static bool trans_SSTR(DisasContext *ctx, arg_SSTR *a)
{
    STRING(sstr);
    return true;
}

/* rmpa.<bwl> */
static bool trans_RMPA(DisasContext *ctx, arg_RMPA *a)
{
    STRING(rmpa);
    return true;
}

static void rx_mul64hi(TCGv_i64 ret, int rs, int rs2)
{
    TCGv_i64 tmp0, tmp1;
    tmp0 = tcg_temp_new_i64();
    tmp1 = tcg_temp_new_i64();
    tcg_gen_ext_i32_i64(tmp0, cpu_regs[rs]);
    tcg_gen_sari_i64(tmp0, tmp0, 16);
    tcg_gen_ext_i32_i64(tmp1, cpu_regs[rs2]);
    tcg_gen_sari_i64(tmp1, tmp1, 16);
    tcg_gen_mul_i64(ret, tmp0, tmp1);
    tcg_gen_shli_i64(ret, ret, 16);
}

static void rx_mul64lo(TCGv_i64 ret, int rs, int rs2)
{
    TCGv_i64 tmp0, tmp1;
    tmp0 = tcg_temp_new_i64();
    tmp1 = tcg_temp_new_i64();
    tcg_gen_ext_i32_i64(tmp0, cpu_regs[rs]);
    tcg_gen_ext16s_i64(tmp0, tmp0);
    tcg_gen_ext_i32_i64(tmp1, cpu_regs[rs2]);
    tcg_gen_ext16s_i64(tmp1, tmp1);
    tcg_gen_mul_i64(ret, tmp0, tmp1);
    tcg_gen_shli_i64(ret, ret, 16);
}

/* mulhi rs,rs2 */
static bool trans_MULHI(DisasContext *ctx, arg_MULHI *a)
{
    rx_mul64hi(cpu_acc, a->rs, a->rs2);
    return true;
}

/* mullo rs,rs2 */
static bool trans_MULLO(DisasContext *ctx, arg_MULLO *a)
{
    rx_mul64lo(cpu_acc, a->rs, a->rs2);
    return true;
}

/* machi rs,rs2 */
static bool trans_MACHI(DisasContext *ctx, arg_MACHI *a)
{
    TCGv_i64 tmp;
    tmp = tcg_temp_new_i64();
    rx_mul64hi(tmp, a->rs, a->rs2);
    tcg_gen_add_i64(cpu_acc, cpu_acc, tmp);
    return true;
}

/* maclo rs,rs2 */
static bool trans_MACLO(DisasContext *ctx, arg_MACLO *a)
{
    TCGv_i64 tmp;
    tmp = tcg_temp_new_i64();
    rx_mul64lo(tmp, a->rs, a->rs2);
    tcg_gen_add_i64(cpu_acc, cpu_acc, tmp);
    return true;
}

/* mvfachi rd */
static bool trans_MVFACHI(DisasContext *ctx, arg_MVFACHI *a)
{
    tcg_gen_extrh_i64_i32(cpu_regs[a->rd], cpu_acc);
    return true;
}

/* mvfacmi rd */
static bool trans_MVFACMI(DisasContext *ctx, arg_MVFACMI *a)
{
    TCGv_i64 rd64;
    rd64 = tcg_temp_new_i64();
    tcg_gen_extract_i64(rd64, cpu_acc, 16, 32);
    tcg_gen_extrl_i64_i32(cpu_regs[a->rd], rd64);
    return true;
}

/* mvtachi rs */
static bool trans_MVTACHI(DisasContext *ctx, arg_MVTACHI *a)
{
    TCGv_i64 rs64;
    rs64 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(rs64, cpu_regs[a->rs]);
    tcg_gen_deposit_i64(cpu_acc, cpu_acc, rs64, 32, 32);
    return true;
}

/* mvtaclo rs */
static bool trans_MVTACLO(DisasContext *ctx, arg_MVTACLO *a)
{
    TCGv_i64 rs64;
    rs64 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(rs64, cpu_regs[a->rs]);
    tcg_gen_deposit_i64(cpu_acc, cpu_acc, rs64, 0, 32);
    return true;
}

/* racw #imm */
static bool trans_RACW(DisasContext *ctx, arg_RACW *a)
{
    TCGv imm = tcg_constant_i32(a->imm + 1);
    gen_helper_racw(tcg_env, imm);
    return true;
}

/* sat rd */
static bool trans_SAT(DisasContext *ctx, arg_SAT *a)
{
    TCGv tmp, z;
    tmp = tcg_temp_new();
    z = tcg_constant_i32(0);
    /* S == 1 -> 0xffffffff / S == 0 -> 0x00000000 */
    tcg_gen_sari_i32(tmp, cpu_psw_s, 31);
    /* S == 1 -> 0x7fffffff / S == 0 -> 0x80000000 */
    tcg_gen_xori_i32(tmp, tmp, 0x80000000);
    tcg_gen_movcond_i32(TCG_COND_LT, cpu_regs[a->rd],
                        cpu_psw_o, z, tmp, cpu_regs[a->rd]);
    return true;
}

/* satr */
static bool trans_SATR(DisasContext *ctx, arg_SATR *a)
{
    gen_helper_satr(tcg_env);
    return true;
}

#define cat3(a, b, c) a##b##c
#define FOP(name, op)                                           \
    static bool cat3(trans_, name, _ir)(DisasContext *ctx,      \
                                        cat3(arg_, name, _ir) * a) \
    {                                                           \
        TCGv imm = tcg_constant_i32(li(ctx, 0));                \
        gen_helper_##op(cpu_regs[a->rd], tcg_env,               \
                        cpu_regs[a->rd], imm);                  \
        return true;                                            \
    }                                                           \
    static bool cat3(trans_, name, _mr)(DisasContext *ctx,      \
                                        cat3(arg_, name, _mr) * a) \
    {                                                           \
        TCGv val, mem;                                          \
        mem = tcg_temp_new();                                   \
        val = rx_load_source(ctx, mem, a->ld, MO_32, a->rs);    \
        gen_helper_##op(cpu_regs[a->rd], tcg_env,               \
                        cpu_regs[a->rd], val);                  \
        return true;                                            \
    }

#define FCONVOP(name, op)                                       \
    static bool trans_##name(DisasContext *ctx, arg_##name * a) \
    {                                                           \
        TCGv val, mem;                                          \
        mem = tcg_temp_new();                                   \
        val = rx_load_source(ctx, mem, a->ld, MO_32, a->rs);    \
        gen_helper_##op(cpu_regs[a->rd], tcg_env, val);         \
        return true;                                            \
    }

FOP(FADD, fadd)
FOP(FSUB, fsub)
FOP(FMUL, fmul)
FOP(FDIV, fdiv)

/* fcmp #imm, rd */
static bool trans_FCMP_ir(DisasContext *ctx, arg_FCMP_ir * a)
{
    TCGv imm = tcg_constant_i32(li(ctx, 0));
    gen_helper_fcmp(tcg_env, cpu_regs[a->rd], imm);
    return true;
}

/* fcmp dsp[rs], rd */
/* fcmp rs, rd */
static bool trans_FCMP_mr(DisasContext *ctx, arg_FCMP_mr *a)
{
    TCGv val, mem;
    mem = tcg_temp_new();
    val = rx_load_source(ctx, mem, a->ld, MO_32, a->rs);
    gen_helper_fcmp(tcg_env, cpu_regs[a->rd], val);
    return true;
}

FCONVOP(FTOI, ftoi)
FCONVOP(ROUND, round)

/* itof rs, rd */
/* itof dsp[rs], rd */
static bool trans_ITOF(DisasContext *ctx, arg_ITOF * a)
{
    TCGv val, mem;
    mem = tcg_temp_new();
    val = rx_load_source(ctx, mem, a->ld, a->mi, a->rs);
    gen_helper_itof(cpu_regs[a->rd], tcg_env, val);
    return true;
}

static void rx_bsetm(TCGv mem, TCGv mask)
{
    TCGv val;
    val = tcg_temp_new();
    rx_gen_ld(MO_8, val, mem);
    tcg_gen_or_i32(val, val, mask);
    rx_gen_st(MO_8, val, mem);
}

static void rx_bclrm(TCGv mem, TCGv mask)
{
    TCGv val;
    val = tcg_temp_new();
    rx_gen_ld(MO_8, val, mem);
    tcg_gen_andc_i32(val, val, mask);
    rx_gen_st(MO_8, val, mem);
}

static void rx_btstm(TCGv mem, TCGv mask)
{
    TCGv val;
    val = tcg_temp_new();
    rx_gen_ld(MO_8, val, mem);
    tcg_gen_and_i32(val, val, mask);
    tcg_gen_setcondi_i32(TCG_COND_NE, cpu_psw_c, val, 0);
    tcg_gen_mov_i32(cpu_psw_z, cpu_psw_c);
}

static void rx_bnotm(TCGv mem, TCGv mask)
{
    TCGv val;
    val = tcg_temp_new();
    rx_gen_ld(MO_8, val, mem);
    tcg_gen_xor_i32(val, val, mask);
    rx_gen_st(MO_8, val, mem);
}

static void rx_bsetr(TCGv reg, TCGv mask)
{
    tcg_gen_or_i32(reg, reg, mask);
}

static void rx_bclrr(TCGv reg, TCGv mask)
{
    tcg_gen_andc_i32(reg, reg, mask);
}

static inline void rx_btstr(TCGv reg, TCGv mask)
{
    TCGv t0;
    t0 = tcg_temp_new();
    tcg_gen_and_i32(t0, reg, mask);
    tcg_gen_setcondi_i32(TCG_COND_NE, cpu_psw_c, t0, 0);
    tcg_gen_mov_i32(cpu_psw_z, cpu_psw_c);
}

static inline void rx_bnotr(TCGv reg, TCGv mask)
{
    tcg_gen_xor_i32(reg, reg, mask);
}
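/*
 * Note (a reading of the BITOP macro below, not an authoritative spec):
 * each BITOP(name, op) expansion generates four handlers per bit
 * instruction: #imm,dsp[rs] / #imm,rd / rs,rd / rd,dsp[rs].  The memory
 * forms operate on a single byte, so a register-supplied bit number is
 * masked to 0..7, while the register forms work on all 32 bits and mask
 * the bit number to 0..31.
 */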
#define BITOP(name, op)                                         \
    static bool cat3(trans_, name, _im)(DisasContext *ctx,      \
                                        cat3(arg_, name, _im) * a) \
    {                                                           \
        TCGv mask, mem, addr;                                   \
        mem = tcg_temp_new();                                   \
        mask = tcg_constant_i32(1 << a->imm);                   \
        addr = rx_index_addr(ctx, mem, a->ld, MO_8, a->rs);     \
        cat3(rx_, op, m)(addr, mask);                           \
        return true;                                            \
    }                                                           \
    static bool cat3(trans_, name, _ir)(DisasContext *ctx,      \
                                        cat3(arg_, name, _ir) * a) \
    {                                                           \
        TCGv mask;                                              \
        mask = tcg_constant_i32(1 << a->imm);                   \
        cat3(rx_, op, r)(cpu_regs[a->rd], mask);                \
        return true;                                            \
    }                                                           \
    static bool cat3(trans_, name, _rr)(DisasContext *ctx,      \
                                        cat3(arg_, name, _rr) * a) \
    {                                                           \
        TCGv mask, b;                                           \
        mask = tcg_temp_new();                                  \
        b = tcg_temp_new();                                     \
        tcg_gen_andi_i32(b, cpu_regs[a->rs], 31);               \
        tcg_gen_shl_i32(mask, tcg_constant_i32(1), b);          \
        cat3(rx_, op, r)(cpu_regs[a->rd], mask);                \
        return true;                                            \
    }                                                           \
    static bool cat3(trans_, name, _rm)(DisasContext *ctx,      \
                                        cat3(arg_, name, _rm) * a) \
    {                                                           \
        TCGv mask, mem, addr, b;                                \
        mask = tcg_temp_new();                                  \
        b = tcg_temp_new();                                     \
        tcg_gen_andi_i32(b, cpu_regs[a->rd], 7);                \
        tcg_gen_shl_i32(mask, tcg_constant_i32(1), b);          \
        mem = tcg_temp_new();                                   \
        addr = rx_index_addr(ctx, mem, a->ld, MO_8, a->rs);     \
        cat3(rx_, op, m)(addr, mask);                           \
        return true;                                            \
    }

BITOP(BSET, bset)
BITOP(BCLR, bclr)
BITOP(BTST, btst)
BITOP(BNOT, bnot)

static inline void bmcnd_op(TCGv val, TCGCond cond, int pos)
{
    TCGv bit;
    DisasCompare dc;
    dc.temp = tcg_temp_new();
    bit = tcg_temp_new();
    psw_cond(&dc, cond);
    tcg_gen_andi_i32(val, val, ~(1 << pos));
    tcg_gen_setcondi_i32(dc.cond, bit, dc.value, 0);
    tcg_gen_deposit_i32(val, val, bit, pos, 1);
}

/* bmcnd #imm, dsp[rd] */
static bool trans_BMCnd_im(DisasContext *ctx, arg_BMCnd_im *a)
{
    TCGv val, mem, addr;
    val = tcg_temp_new();
    mem = tcg_temp_new();
    addr = rx_index_addr(ctx, mem, a->ld, MO_8, a->rd);
    rx_gen_ld(MO_8, val, addr);
    bmcnd_op(val, a->cd, a->imm);
    rx_gen_st(MO_8, val, addr);
    return true;
}

/* bmcond #imm, rd */
static bool trans_BMCnd_ir(DisasContext *ctx, arg_BMCnd_ir *a)
{
    bmcnd_op(cpu_regs[a->rd], a->cd, a->imm);
    return true;
}

enum {
    PSW_C = 0,
    PSW_Z = 1,
    PSW_S = 2,
    PSW_O = 3,
    PSW_I = 8,
    PSW_U = 9,
};
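/*
 * Note (inferred from clrsetpsw() below): because the flags are stored in
 * the lazy representation described earlier, setting or clearing a flag
 * has to be re-encoded: C is written as 0/1, Z is "psw_z == 0" so setting
 * Z stores 0, S keeps the flag in the sign bit (0 or -1), and O keeps it
 * in bit 31 (val << 31).  The I and U bits are privileged; flipping U also
 * swaps the banked stack pointer held in R0 with cpu_usp/cpu_isp.
 */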
static inline void clrsetpsw(DisasContext *ctx, int cb, int val)
{
    if (cb < 8) {
        switch (cb) {
        case PSW_C:
            tcg_gen_movi_i32(cpu_psw_c, val);
            break;
        case PSW_Z:
            tcg_gen_movi_i32(cpu_psw_z, val == 0);
            break;
        case PSW_S:
            tcg_gen_movi_i32(cpu_psw_s, val ? -1 : 0);
            break;
        case PSW_O:
            tcg_gen_movi_i32(cpu_psw_o, val << 31);
            break;
        default:
            qemu_log_mask(LOG_GUEST_ERROR, "Invalid destination %d", cb);
            break;
        }
    } else if (is_privileged(ctx, 0)) {
        switch (cb) {
        case PSW_I:
            tcg_gen_movi_i32(cpu_psw_i, val);
            ctx->base.is_jmp = DISAS_UPDATE;
            break;
        case PSW_U:
            if (FIELD_EX32(ctx->tb_flags, PSW, U) != val) {
                ctx->tb_flags = FIELD_DP32(ctx->tb_flags, PSW, U, val);
                tcg_gen_movi_i32(cpu_psw_u, val);
                tcg_gen_mov_i32(val ? cpu_isp : cpu_usp, cpu_sp);
                tcg_gen_mov_i32(cpu_sp, val ? cpu_usp : cpu_isp);
            }
            break;
        default:
            qemu_log_mask(LOG_GUEST_ERROR, "Invalid destination %d", cb);
            break;
        }
    }
}

/* clrpsw psw */
static bool trans_CLRPSW(DisasContext *ctx, arg_CLRPSW *a)
{
    clrsetpsw(ctx, a->cb, 0);
    return true;
}

/* setpsw psw */
static bool trans_SETPSW(DisasContext *ctx, arg_SETPSW *a)
{
    clrsetpsw(ctx, a->cb, 1);
    return true;
}

/* mvtipl #imm */
static bool trans_MVTIPL(DisasContext *ctx, arg_MVTIPL *a)
{
    if (is_privileged(ctx, 1)) {
        tcg_gen_movi_i32(cpu_psw_ipl, a->imm);
        ctx->base.is_jmp = DISAS_UPDATE;
    }
    return true;
}

/* mvtc #imm, rd */
static bool trans_MVTC_i(DisasContext *ctx, arg_MVTC_i *a)
{
    TCGv imm;

    imm = tcg_constant_i32(a->imm);
    move_to_cr(ctx, imm, a->cr);
    return true;
}

/* mvtc rs, rd */
static bool trans_MVTC_r(DisasContext *ctx, arg_MVTC_r *a)
{
    move_to_cr(ctx, cpu_regs[a->rs], a->cr);
    return true;
}

/* mvfc rs, rd */
static bool trans_MVFC(DisasContext *ctx, arg_MVFC *a)
{
    move_from_cr(ctx, cpu_regs[a->rd], a->cr, ctx->pc);
    return true;
}

/* rtfi */
static bool trans_RTFI(DisasContext *ctx, arg_RTFI *a)
{
    TCGv psw;
    if (is_privileged(ctx, 1)) {
        psw = tcg_temp_new();
        tcg_gen_mov_i32(cpu_pc, cpu_bpc);
        tcg_gen_mov_i32(psw, cpu_bpsw);
        gen_helper_set_psw_rte(tcg_env, psw);
        ctx->base.is_jmp = DISAS_EXIT;
    }
    return true;
}

/* rte */
static bool trans_RTE(DisasContext *ctx, arg_RTE *a)
{
    TCGv psw;
    if (is_privileged(ctx, 1)) {
        psw = tcg_temp_new();
        pop(cpu_pc);
        pop(psw);
        gen_helper_set_psw_rte(tcg_env, psw);
        ctx->base.is_jmp = DISAS_EXIT;
    }
    return true;
}

/* brk */
static bool trans_BRK(DisasContext *ctx, arg_BRK *a)
{
    tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next);
    gen_helper_rxbrk(tcg_env);
    ctx->base.is_jmp = DISAS_NORETURN;
    return true;
}

/* int #imm */
static bool trans_INT(DisasContext *ctx, arg_INT *a)
{
    TCGv vec;

    tcg_debug_assert(a->imm < 0x100);
    vec = tcg_constant_i32(a->imm);
    tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next);
    gen_helper_rxint(tcg_env, vec);
    ctx->base.is_jmp = DISAS_NORETURN;
    return true;
}

/* wait */
static bool trans_WAIT(DisasContext *ctx, arg_WAIT *a)
{
    if (is_privileged(ctx, 1)) {
        tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next);
        gen_helper_wait(tcg_env);
    }
    return true;
}

static void rx_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    ctx->env = cpu_env(cs);
    ctx->tb_flags = ctx->base.tb->flags;
}

static void rx_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
{
}

static void rx_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(ctx->base.pc_next);
}

static void rx_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    uint32_t insn;

    ctx->pc = ctx->base.pc_next;
    insn = decode_load(ctx);
    if (!decode(ctx, insn)) {
        gen_helper_raise_illegal_instruction(tcg_env);
    }
}

static void rx_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    switch (ctx->base.is_jmp) {
    case DISAS_NEXT:
    case DISAS_TOO_MANY:
        gen_goto_tb(ctx, 0, dcbase->pc_next);
        break;
    case DISAS_JUMP:
        tcg_gen_lookup_and_goto_ptr();
        break;
    case DISAS_UPDATE:
        tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next);
        /* fall through */
    case DISAS_EXIT:
        tcg_gen_exit_tb(NULL, 0);
        break;
    case DISAS_NORETURN:
        break;
    default:
        g_assert_not_reached();
    }
}

static const TranslatorOps rx_tr_ops = {
    .init_disas_context = rx_tr_init_disas_context,
    .tb_start           = rx_tr_tb_start,
    .insn_start         = rx_tr_insn_start,
    .translate_insn     = rx_tr_translate_insn,
    .tb_stop            = rx_tr_tb_stop,
};

void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                           vaddr pc, void *host_pc)
{
    DisasContext dc;

    translator_loop(cs, tb, max_insns, pc, host_pc, &rx_tr_ops, &dc.base);
}

#define ALLOC_REGISTER(sym, name) \
    cpu_##sym = tcg_global_mem_new_i32(tcg_env, \
                                       offsetof(CPURXState, sym), name)

void rx_translate_init(void)
{
    static const char * const regnames[NUM_REGS] = {
        "R0", "R1", "R2", "R3", "R4", "R5", "R6", "R7",
        "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15"
    };
    int i;

    for (i = 0; i < NUM_REGS; i++) {
        cpu_regs[i] = tcg_global_mem_new_i32(tcg_env,
                                             offsetof(CPURXState, regs[i]),
                                             regnames[i]);
    }
    ALLOC_REGISTER(pc, "PC");
    ALLOC_REGISTER(psw_o, "PSW(O)");
    ALLOC_REGISTER(psw_s, "PSW(S)");
    ALLOC_REGISTER(psw_z, "PSW(Z)");
    ALLOC_REGISTER(psw_c, "PSW(C)");
    ALLOC_REGISTER(psw_u, "PSW(U)");
    ALLOC_REGISTER(psw_i, "PSW(I)");
    ALLOC_REGISTER(psw_pm, "PSW(PM)");
    ALLOC_REGISTER(psw_ipl, "PSW(IPL)");
    ALLOC_REGISTER(usp, "USP");
    ALLOC_REGISTER(fpsw, "FPSW");
    ALLOC_REGISTER(bpsw, "BPSW");
    ALLOC_REGISTER(bpc, "BPC");
    ALLOC_REGISTER(isp, "ISP");
    ALLOC_REGISTER(fintv, "FINTV");
    ALLOC_REGISTER(intb, "INTB");
    cpu_acc = tcg_global_mem_new_i64(tcg_env,
                                     offsetof(CPURXState, acc), "ACC");
}