/*
 * RX translation
 *
 * Copyright (c) 2019 Yoshinori Sato
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/bswap.h"
#include "qemu/qemu-print.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "exec/log.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef HELPER_H


/* Per-translation-block disassembly state for the RX front end. */
typedef struct DisasContext {
    DisasContextBase base;
    CPURXState *env;     /* used to read code bytes while decoding */
    uint32_t pc;         /* address of the instruction being translated */
    uint32_t tb_flags;   /* cached PSW bits that affect translation */
} DisasContext;

/* A lazily materialized TCG condition: compare "value" using "cond". */
typedef struct DisasCompare {
    TCGv value;
    TCGv temp;           /* scratch used when the condition must be computed */
    TCGCond cond;
} DisasCompare;

/*
 * Return the printable name of control register "cr":
 * "" for reserved slots, "illegal" for out-of-range numbers.
 */
const char *rx_crname(uint8_t cr)
{
    static const char *cr_names[] = {
        "psw", "pc", "usp", "fpsw", "", "", "", "",
        "bpsw", "bpc", "isp", "fintv", "intb", "", "", ""
    };
    if (cr >= ARRAY_SIZE(cr_names)) {
        return "illegal";
    }
    return cr_names[cr];
}

/* Target-specific values for dc->base.is_jmp.
 */
#define DISAS_JUMP    DISAS_TARGET_0
#define DISAS_UPDATE  DISAS_TARGET_1
#define DISAS_EXIT    DISAS_TARGET_2

/* global register indexes */
static TCGv cpu_regs[16];
static TCGv cpu_psw_o, cpu_psw_s, cpu_psw_z, cpu_psw_c;
static TCGv cpu_psw_i, cpu_psw_pm, cpu_psw_u, cpu_psw_ipl;
static TCGv cpu_usp, cpu_fpsw, cpu_bpsw, cpu_bpc, cpu_isp;
static TCGv cpu_fintv, cpu_intb, cpu_pc;
static TCGv_i64 cpu_acc;

/* r0 doubles as the currently selected stack pointer. */
#define cpu_sp cpu_regs[0]

#include "exec/gen-icount.h"

/* decoder helper */
/*
 * Fetch instruction bytes (i, n] from the code stream and merge them
 * into "insn", most-significant byte first.  Advances base.pc_next
 * past every byte read.
 */
static uint32_t decode_load_bytes(DisasContext *ctx, uint32_t insn,
                                  int i, int n)
{
    while (++i <= n) {
        uint8_t b = cpu_ldub_code(ctx->env, ctx->base.pc_next++);
        insn |= b << (32 - i * 8);
    }
    return insn;
}

/*
 * Consume an immediate operand of encoded size "sz" at the current
 * decode position:
 *   sz=1: signed 8 bits, sz=2: signed 16 bits, sz=3: signed 24 bits,
 *   sz=0: full 32 bits.
 */
static uint32_t li(DisasContext *ctx, int sz)
{
    int32_t tmp, addr;
    CPURXState *env = ctx->env;
    addr = ctx->base.pc_next;

    tcg_debug_assert(sz < 4);
    switch (sz) {
    case 1:
        ctx->base.pc_next += 1;
        return cpu_ldsb_code(env, addr);
    case 2:
        ctx->base.pc_next += 2;
        return cpu_ldsw_code(env, addr);
    case 3:
        ctx->base.pc_next += 3;
        /* 24-bit immediate: sign byte at addr + 2, low 16 bits at addr. */
        tmp = cpu_ldsb_code(env, addr + 2) << 16;
        tmp |= cpu_lduw_code(env, addr) & 0xffff;
        return tmp;
    case 0:
        ctx->base.pc_next += 4;
        return cpu_ldl_code(env, addr);
    }
    return 0;
}

/* Decode the 3-bit short branch displacement field (values 0-2 map up). */
static int bdsp_s(DisasContext *ctx, int d)
{
    /*
     * 0 -> 8
     * 1 -> 9
     * 2 -> 10
     * 3 -> 3
     * :
     * 7 -> 7
     */
    if (d < 3) {
        d += 8;
    }
    return d;
}

/* Include the auto-generated decoder.
 */
#include "decode-insns.c.inc"

/* Dump pc, packed PSW and r0..r15 for logging/monitor output. */
void rx_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    RXCPU *cpu = RX_CPU(cs);
    CPURXState *env = &cpu->env;
    int i;
    uint32_t psw;

    psw = rx_cpu_pack_psw(env);
    qemu_fprintf(f, "pc=0x%08x psw=0x%08x\n",
                 env->pc, psw);
    for (i = 0; i < 16; i += 4) {
        qemu_fprintf(f, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
                     i, env->regs[i], i + 1, env->regs[i + 1],
                     i + 2, env->regs[i + 2], i + 3, env->regs[i + 3]);
    }
}

/* Emit a direct (chained) or indirect jump to "dest"; terminates the TB. */
static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    if (translator_use_goto_tb(&dc->base, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_exit_tb(dc->base.tb, n);
    } else {
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_lookup_and_goto_ptr();
    }
    dc->base.is_jmp = DISAS_NORETURN;
}

/* generic load wrapper */
static inline void rx_gen_ld(unsigned int size, TCGv reg, TCGv mem)
{
    tcg_gen_qemu_ld_i32(reg, mem, 0, size | MO_SIGN | MO_TE);
}

/* unsigned load wrapper */
static inline void rx_gen_ldu(unsigned int size, TCGv reg, TCGv mem)
{
    tcg_gen_qemu_ld_i32(reg, mem, 0, size | MO_TE);
}

/* generic store wrapper */
static inline void rx_gen_st(unsigned int size, TCGv reg, TCGv mem)
{
    tcg_gen_qemu_st_i32(reg, mem, 0, size | MO_TE);
}

/* [ri, rb] */
/* Compute mem = rb + (ri << size): register-indexed effective address. */
static inline void rx_gen_regindex(DisasContext *ctx, TCGv mem,
                                   int size, int ri, int rb)
{
    tcg_gen_shli_i32(mem, cpu_regs[ri], size);
    tcg_gen_add_i32(mem, mem, cpu_regs[rb]);
}

/* dsp[reg] */
/*
 * Resolve a dsp[reg] operand and return the TCGv holding the address:
 *   ld=0: no displacement (returns the base register itself),
 *   ld=1: 8-bit displacement, ld=2: 16-bit displacement.
 * The displacement is consumed from the code stream and scaled by "size".
 */
static inline TCGv rx_index_addr(DisasContext *ctx, TCGv mem,
                                 int ld, int size, int reg)
{
    uint32_t dsp;

    tcg_debug_assert(ld < 3);
    switch (ld) {
    case 0:
        return cpu_regs[reg];
    case 1:
        dsp = cpu_ldub_code(ctx->env, ctx->base.pc_next) << size;
        tcg_gen_addi_i32(mem, cpu_regs[reg], dsp);
ctx->base.pc_next += 1;
        return mem;
    case 2:
        dsp = cpu_lduw_code(ctx->env, ctx->base.pc_next) << size;
        tcg_gen_addi_i32(mem, cpu_regs[reg], dsp);
        ctx->base.pc_next += 2;
        return mem;
    }
    return NULL;
}

/* Map the "mi" memory-interface field to a MemOp (b/w/l/uw/ub). */
static inline MemOp mi_to_mop(unsigned mi)
{
    static const MemOp mop[5] = { MO_SB, MO_SW, MO_UL, MO_UW, MO_UB };
    tcg_debug_assert(mi < 5);
    return mop[mi];
}

/* load source operand */
/*
 * Return the source operand: for ld < 3 it is loaded from memory into
 * "mem" (addressing mode ld, access type mi); for ld == 3 the operand
 * is register rs itself.
 */
static inline TCGv rx_load_source(DisasContext *ctx, TCGv mem,
                                  int ld, int mi, int rs)
{
    TCGv addr;
    MemOp mop;
    if (ld < 3) {
        mop = mi_to_mop(mi);
        addr = rx_index_addr(ctx, mem, ld, mop & MO_SIZE, rs);
        tcg_gen_qemu_ld_i32(mem, addr, 0, mop | MO_TE);
        return mem;
    } else {
        return cpu_regs[rs];
    }
}

/* Processor mode check */
/*
 * Return 1 in supervisor mode.  In user mode (PSW.PM set) return 0 and,
 * when "is_exception" is set, emit a privilege-violation exception.
 */
static int is_privileged(DisasContext *ctx, int is_exception)
{
    if (FIELD_EX32(ctx->tb_flags, PSW, PM)) {
        if (is_exception) {
            gen_helper_raise_privilege_violation(cpu_env);
        }
        return 0;
    } else {
        return 1;
    }
}

/* generate QEMU condition */
/*
 * Translate an RX 4-bit condition code into a DisasCompare that is
 * tested against zero.  Flags are kept in "computation" form: Z and S
 * hold a result value (Z flag set <=> stored value is zero, S flag is
 * the sign bit), C is 0/1.
 */
static void psw_cond(DisasCompare *dc, uint32_t cond)
{
    tcg_debug_assert(cond < 16);
    switch (cond) {
    case 0: /* z */
        dc->cond = TCG_COND_EQ;
        dc->value = cpu_psw_z;
        break;
    case 1: /* nz */
        dc->cond = TCG_COND_NE;
        dc->value = cpu_psw_z;
        break;
    case 2: /* c */
        dc->cond = TCG_COND_NE;
        dc->value = cpu_psw_c;
        break;
    case 3: /* nc */
        dc->cond = TCG_COND_EQ;
        dc->value = cpu_psw_c;
        break;
    case 4: /* gtu (C& ~Z) == 1 */
    case 5: /* leu (C& ~Z) == 0 */
        tcg_gen_setcondi_i32(TCG_COND_NE, dc->temp, cpu_psw_z, 0);
        tcg_gen_and_i32(dc->temp, dc->temp, cpu_psw_c);
        dc->cond = (cond == 4) ?
TCG_COND_NE : TCG_COND_EQ;
        dc->value = dc->temp;
        break;
    case 6: /* pz (S == 0) */
        dc->cond = TCG_COND_GE;
        dc->value = cpu_psw_s;
        break;
    case 7: /* n (S == 1) */
        dc->cond = TCG_COND_LT;
        dc->value = cpu_psw_s;
        break;
    case 8: /* ge (S^O)==0 */
    case 9: /* lt (S^O)==1 */
        tcg_gen_xor_i32(dc->temp, cpu_psw_o, cpu_psw_s);
        dc->cond = (cond == 8) ? TCG_COND_GE : TCG_COND_LT;
        dc->value = dc->temp;
        break;
    case 10: /* gt ((S^O)|Z)==0 */
    case 11: /* le ((S^O)|Z)==1 */
        /*
         * temp = Z value masked to 0 when S^O (i.e. "lt"):
         * gt <=> temp != 0, le <=> temp == 0.
         */
        tcg_gen_xor_i32(dc->temp, cpu_psw_o, cpu_psw_s);
        tcg_gen_sari_i32(dc->temp, dc->temp, 31);
        tcg_gen_andc_i32(dc->temp, cpu_psw_z, dc->temp);
        dc->cond = (cond == 10) ? TCG_COND_NE : TCG_COND_EQ;
        dc->value = dc->temp;
        break;
    case 12: /* o */
        dc->cond = TCG_COND_LT;
        dc->value = cpu_psw_o;
        break;
    case 13: /* no */
        dc->cond = TCG_COND_GE;
        dc->value = cpu_psw_o;
        break;
    case 14: /* always true */
        dc->cond = TCG_COND_ALWAYS;
        dc->value = dc->temp;
        break;
    case 15: /* always false */
        dc->cond = TCG_COND_NEVER;
        dc->value = dc->temp;
        break;
    }
}

/*
 * Read control register "cr" into "ret".  PC reads return "pc" as
 * supplied by the caller.  USP/ISP alias r0 (cpu_sp) depending on the
 * current PSW.U stack selection.
 */
static void move_from_cr(DisasContext *ctx, TCGv ret, int cr, uint32_t pc)
{
    switch (cr) {
    case 0: /* PSW */
        gen_helper_pack_psw(ret, cpu_env);
        break;
    case 1: /* PC */
        tcg_gen_movi_i32(ret, pc);
        break;
    case 2: /* USP */
        if (FIELD_EX32(ctx->tb_flags, PSW, U)) {
            tcg_gen_mov_i32(ret, cpu_sp);
        } else {
            tcg_gen_mov_i32(ret, cpu_usp);
        }
        break;
    case 3: /* FPSW */
        tcg_gen_mov_i32(ret, cpu_fpsw);
        break;
    case 8: /* BPSW */
        tcg_gen_mov_i32(ret, cpu_bpsw);
        break;
    case 9: /* BPC */
        tcg_gen_mov_i32(ret, cpu_bpc);
        break;
    case 10: /* ISP */
        if (FIELD_EX32(ctx->tb_flags, PSW, U)) {
            tcg_gen_mov_i32(ret, cpu_isp);
        } else {
            tcg_gen_mov_i32(ret, cpu_sp);
        }
        break;
    case 11: /* FINTV */
        tcg_gen_mov_i32(ret,
cpu_fintv);
        break;
    case 12: /* INTB */
        tcg_gen_mov_i32(ret, cpu_intb);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Unimplement control register %d", cr);
        /* Unimplement registers return 0 */
        tcg_gen_movi_i32(ret, 0);
        break;
    }
}

/*
 * Write "val" to control register "cr".  Registers 8 and up are
 * writable only in supervisor mode; user-mode writes are logged and
 * silently ignored (no exception is raised here).
 */
static void move_to_cr(DisasContext *ctx, TCGv val, int cr)
{
    if (cr >= 8 && !is_privileged(ctx, 0)) {
        /* Some control registers can only be written in privileged mode. */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "disallow control register write %s", rx_crname(cr));
        return;
    }
    switch (cr) {
    case 0: /* PSW */
        gen_helper_set_psw(cpu_env, val);
        if (is_privileged(ctx, 0)) {
            /* PSW.{I,U} may be updated here. exit TB. */
            ctx->base.is_jmp = DISAS_UPDATE;
        }
        break;
    /* case 1: to PC not supported */
    case 2: /* USP */
        if (FIELD_EX32(ctx->tb_flags, PSW, U)) {
            tcg_gen_mov_i32(cpu_sp, val);
        } else {
            tcg_gen_mov_i32(cpu_usp, val);
        }
        break;
    case 3: /* FPSW */
        gen_helper_set_fpsw(cpu_env, val);
        break;
    case 8: /* BPSW */
        tcg_gen_mov_i32(cpu_bpsw, val);
        break;
    case 9: /* BPC */
        tcg_gen_mov_i32(cpu_bpc, val);
        break;
    case 10: /* ISP */
        if (FIELD_EX32(ctx->tb_flags, PSW, U)) {
            tcg_gen_mov_i32(cpu_isp, val);
        } else {
            /* PSW.U clear: r0 currently is the interrupt stack pointer. */
            tcg_gen_mov_i32(cpu_sp, val);
        }
        break;
    case 11: /* FINTV */
        tcg_gen_mov_i32(cpu_fintv, val);
        break;
    case 12: /* INTB */
        tcg_gen_mov_i32(cpu_intb, val);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Unimplement control register %d", cr);
        break;
    }
}

/* Push a 32-bit value: pre-decrement SP by 4, then store. */
static void push(TCGv val)
{
    tcg_gen_subi_i32(cpu_sp, cpu_sp, 4);
    rx_gen_st(MO_32, val, cpu_sp);
}

/* Pop a 32-bit value: load, then post-increment SP by 4. */
static void pop(TCGv ret)
{
    rx_gen_ld(MO_32, ret, cpu_sp);
    tcg_gen_addi_i32(cpu_sp, cpu_sp, 4);
}

/* mov.<bwl> rs,dsp5[rd] */
static bool trans_MOV_rm(DisasContext *ctx, arg_MOV_rm *a)
{
    TCGv mem;
mem = tcg_temp_new();
    /* dsp5 is pre-scaled by the operand size. */
    tcg_gen_addi_i32(mem, cpu_regs[a->rd], a->dsp << a->sz);
    rx_gen_st(a->sz, cpu_regs[a->rs], mem);
    return true;
}

/* mov.<bwl> dsp5[rs],rd */
static bool trans_MOV_mr(DisasContext *ctx, arg_MOV_mr *a)
{
    TCGv mem;
    mem = tcg_temp_new();
    tcg_gen_addi_i32(mem, cpu_regs[a->rs], a->dsp << a->sz);
    rx_gen_ld(a->sz, cpu_regs[a->rd], mem);
    return true;
}

/* mov.l #uimm4,rd */
/* mov.l #uimm8,rd */
/* mov.l #imm,rd */
static bool trans_MOV_ir(DisasContext *ctx, arg_MOV_ir *a)
{
    tcg_gen_movi_i32(cpu_regs[a->rd], a->imm);
    return true;
}

/* mov.<bwl> #uimm8,dsp[rd] */
/* mov.<bwl> #imm, dsp[rd] */
static bool trans_MOV_im(DisasContext *ctx, arg_MOV_im *a)
{
    TCGv imm, mem;
    imm = tcg_constant_i32(a->imm);
    mem = tcg_temp_new();
    tcg_gen_addi_i32(mem, cpu_regs[a->rd], a->dsp << a->sz);
    rx_gen_st(a->sz, imm, mem);
    return true;
}

/* mov.<bwl> [ri,rb],rd */
static bool trans_MOV_ar(DisasContext *ctx, arg_MOV_ar *a)
{
    TCGv mem;
    mem = tcg_temp_new();
    rx_gen_regindex(ctx, mem, a->sz, a->ri, a->rb);
    rx_gen_ld(a->sz, cpu_regs[a->rd], mem);
    return true;
}

/* mov.<bwl> rd,[ri,rb] */
static bool trans_MOV_ra(DisasContext *ctx, arg_MOV_ra *a)
{
    TCGv mem;
    mem = tcg_temp_new();
    rx_gen_regindex(ctx, mem, a->sz, a->ri, a->rb);
    rx_gen_st(a->sz, cpu_regs[a->rs], mem);
    return true;
}

/* mov.<bwl> dsp[rs],dsp[rd] */
/* mov.<bwl> rs,dsp[rd] */
/* mov.<bwl> dsp[rs],rd */
/* mov.<bwl> rs,rd */
static bool trans_MOV_mm(DisasContext *ctx, arg_MOV_mm *a)
{
    /* Register-to-register form sign-extends to the operand size. */
    static void (* const mov[])(TCGv ret, TCGv arg) = {
        tcg_gen_ext8s_i32, tcg_gen_ext16s_i32, tcg_gen_mov_i32,
    };
    TCGv tmp, mem, addr;
    if (a->lds == 3 && a->ldd == 3) {
        /* mov.<bwl> rs,rd */
        mov[a->sz](cpu_regs[a->rd], cpu_regs[a->rs]);
        return true;
    }

507 mem = tcg_temp_new(); 508 if (a->lds == 3) { 509 /* mov.<bwl> rs,dsp[rd] */ 510 addr = rx_index_addr(ctx, mem, a->ldd, a->sz, a->rs); 511 rx_gen_st(a->sz, cpu_regs[a->rd], addr); 512 } else if (a->ldd == 3) { 513 /* mov.<bwl> dsp[rs],rd */ 514 addr = rx_index_addr(ctx, mem, a->lds, a->sz, a->rs); 515 rx_gen_ld(a->sz, cpu_regs[a->rd], addr); 516 } else { 517 /* mov.<bwl> dsp[rs],dsp[rd] */ 518 tmp = tcg_temp_new(); 519 addr = rx_index_addr(ctx, mem, a->lds, a->sz, a->rs); 520 rx_gen_ld(a->sz, tmp, addr); 521 addr = rx_index_addr(ctx, mem, a->ldd, a->sz, a->rd); 522 rx_gen_st(a->sz, tmp, addr); 523 } 524 return true; 525 } 526 527 /* mov.<bwl> rs,[rd+] */ 528 /* mov.<bwl> rs,[-rd] */ 529 static bool trans_MOV_rp(DisasContext *ctx, arg_MOV_rp *a) 530 { 531 TCGv val; 532 val = tcg_temp_new(); 533 tcg_gen_mov_i32(val, cpu_regs[a->rs]); 534 if (a->ad == 1) { 535 tcg_gen_subi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz); 536 } 537 rx_gen_st(a->sz, val, cpu_regs[a->rd]); 538 if (a->ad == 0) { 539 tcg_gen_addi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz); 540 } 541 return true; 542 } 543 544 /* mov.<bwl> [rd+],rs */ 545 /* mov.<bwl> [-rd],rs */ 546 static bool trans_MOV_pr(DisasContext *ctx, arg_MOV_pr *a) 547 { 548 TCGv val; 549 val = tcg_temp_new(); 550 if (a->ad == 1) { 551 tcg_gen_subi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz); 552 } 553 rx_gen_ld(a->sz, val, cpu_regs[a->rd]); 554 if (a->ad == 0) { 555 tcg_gen_addi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz); 556 } 557 tcg_gen_mov_i32(cpu_regs[a->rs], val); 558 return true; 559 } 560 561 /* movu.<bw> dsp5[rs],rd */ 562 /* movu.<bw> dsp[rs],rd */ 563 static bool trans_MOVU_mr(DisasContext *ctx, arg_MOVU_mr *a) 564 { 565 TCGv mem; 566 mem = tcg_temp_new(); 567 tcg_gen_addi_i32(mem, cpu_regs[a->rs], a->dsp << a->sz); 568 rx_gen_ldu(a->sz, cpu_regs[a->rd], mem); 569 return true; 570 } 571 572 /* movu.<bw> rs,rd */ 573 static bool trans_MOVU_rr(DisasContext *ctx, arg_MOVU_rr *a) 574 { 575 static 
void (* const ext[])(TCGv ret, TCGv arg) = {
        tcg_gen_ext8u_i32, tcg_gen_ext16u_i32,
    };
    ext[a->sz](cpu_regs[a->rd], cpu_regs[a->rs]);
    return true;
}

/* movu.<bw> [ri,rb],rd */
static bool trans_MOVU_ar(DisasContext *ctx, arg_MOVU_ar *a)
{
    TCGv mem;
    mem = tcg_temp_new();
    rx_gen_regindex(ctx, mem, a->sz, a->ri, a->rb);
    rx_gen_ldu(a->sz, cpu_regs[a->rd], mem);
    return true;
}

/* movu.<bw> [rd+],rs */
/* movu.<bw> [-rd],rs */
static bool trans_MOVU_pr(DisasContext *ctx, arg_MOVU_pr *a)
{
    TCGv val;
    val = tcg_temp_new();
    if (a->ad == 1) {
        /* pre-decrement form */
        tcg_gen_subi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz);
    }
    rx_gen_ldu(a->sz, val, cpu_regs[a->rd]);
    if (a->ad == 0) {
        /* post-increment form */
        tcg_gen_addi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz);
    }
    tcg_gen_mov_i32(cpu_regs[a->rs], val);
    return true;
}


/* pop rd */
static bool trans_POP(DisasContext *ctx, arg_POP *a)
{
    /* mov.l [r0+], rd */
    /*
     * Reuse the MOV_pr translator: rd=0 makes r0 (SP) the address
     * register, rs carries the destination register.
     */
    arg_MOV_rp mov_a;
    mov_a.rd = 0;
    mov_a.rs = a->rd;
    mov_a.ad = 0;
    mov_a.sz = MO_32;
    trans_MOV_pr(ctx, &mov_a);
    return true;
}

/* popc cr */
static bool trans_POPC(DisasContext *ctx, arg_POPC *a)
{
    TCGv val;
    val = tcg_temp_new();
    pop(val);
    move_to_cr(ctx, val, a->cr);
    return true;
}

/* popm rd-rd2 */
static bool trans_POPM(DisasContext *ctx, arg_POPM *a)
{
    int r;
    if (a->rd == 0 || a->rd >= a->rd2) {
        /* Invalid ranges are logged; the bounded loop below still runs. */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Invalid register ranges r%d-r%d", a->rd, a->rd2);
    }
    r = a->rd;
    while (r <= a->rd2 && r < 16) {
        pop(cpu_regs[r++]);
    }
    return true;
}


/* push.<bwl> rs */
static bool trans_PUSH_r(DisasContext *ctx, arg_PUSH_r *a)
{
    TCGv val;
    val = tcg_temp_new();
    /* Copy first so pushing SP itself stores the pre-decrement value. */
    tcg_gen_mov_i32(val, cpu_regs[a->rs]);
    /* SP always moves by 4, even for byte/word pushes. */
    tcg_gen_subi_i32(cpu_sp, cpu_sp, 4);
    rx_gen_st(a->sz, val, cpu_sp);
    return true;
}

/* push.<bwl> dsp[rs] */
static bool trans_PUSH_m(DisasContext *ctx, arg_PUSH_m *a)
{
    TCGv mem, val, addr;
    mem = tcg_temp_new();
    val = tcg_temp_new();
    addr = rx_index_addr(ctx, mem, a->ld, a->sz, a->rs);
    rx_gen_ld(a->sz, val, addr);
    /* SP always moves by 4, even for byte/word pushes. */
    tcg_gen_subi_i32(cpu_sp, cpu_sp, 4);
    rx_gen_st(a->sz, val, cpu_sp);
    return true;
}

/* pushc rx */
static bool trans_PUSHC(DisasContext *ctx, arg_PUSHC *a)
{
    TCGv val;
    val = tcg_temp_new();
    move_from_cr(ctx, val, a->cr, ctx->pc);
    push(val);
    return true;
}

/* pushm rs-rs2 */
static bool trans_PUSHM(DisasContext *ctx, arg_PUSHM *a)
{
    int r;

    if (a->rs == 0 || a->rs >= a->rs2) {
        /* Invalid ranges are logged; the bounded loop below still runs. */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Invalid register ranges r%d-r%d", a->rs, a->rs2);
    }
    /* Highest-numbered register is pushed first. */
    r = a->rs2;
    while (r >= a->rs && r >= 0) {
        push(cpu_regs[r--]);
    }
    return true;
}

/* xchg rs,rd */
static bool trans_XCHG_rr(DisasContext *ctx, arg_XCHG_rr *a)
{
    TCGv tmp;
    tmp = tcg_temp_new();
    tcg_gen_mov_i32(tmp, cpu_regs[a->rs]);
    tcg_gen_mov_i32(cpu_regs[a->rs], cpu_regs[a->rd]);
    tcg_gen_mov_i32(cpu_regs[a->rd], tmp);
    return true;
}

/* xchg dsp[rs].<mi>,rd */
static bool trans_XCHG_mr(DisasContext *ctx, arg_XCHG_mr *a)
{
    TCGv mem, addr;
    mem = tcg_temp_new();
    switch (a->mi) {
    case 0: /* dsp[rs].b */
    case 1: /* dsp[rs].w */
    case 2: /* dsp[rs].l */
        addr = rx_index_addr(ctx, mem, a->ld, a->mi, a->rs);
        break;
    case 3: /* dsp[rs].uw */
    case 4: /* dsp[rs].ub */
        /* mi 3/4 are unsigned word/byte: 4 - mi yields the size log2. */
        addr = rx_index_addr(ctx, mem, a->ld, 4 - a->mi, a->rs);
        break;
    default:
        g_assert_not_reached();
    }
    tcg_gen_atomic_xchg_i32(cpu_regs[a->rd], addr, cpu_regs[a->rd],
                            0, mi_to_mop(a->mi));
    return true;
}

/* rd = imm when compare(psw_z value, 0) satisfies "cond", else unchanged. */
static inline void stcond(TCGCond cond, int rd, int imm)
{
    TCGv z;
    TCGv _imm;
    z = tcg_constant_i32(0);
    _imm =
tcg_constant_i32(imm); 739 tcg_gen_movcond_i32(cond, cpu_regs[rd], cpu_psw_z, z, 740 _imm, cpu_regs[rd]); 741 } 742 743 /* stz #imm,rd */ 744 static bool trans_STZ(DisasContext *ctx, arg_STZ *a) 745 { 746 stcond(TCG_COND_EQ, a->rd, a->imm); 747 return true; 748 } 749 750 /* stnz #imm,rd */ 751 static bool trans_STNZ(DisasContext *ctx, arg_STNZ *a) 752 { 753 stcond(TCG_COND_NE, a->rd, a->imm); 754 return true; 755 } 756 757 /* sccnd.<bwl> rd */ 758 /* sccnd.<bwl> dsp:[rd] */ 759 static bool trans_SCCnd(DisasContext *ctx, arg_SCCnd *a) 760 { 761 DisasCompare dc; 762 TCGv val, mem, addr; 763 dc.temp = tcg_temp_new(); 764 psw_cond(&dc, a->cd); 765 if (a->ld < 3) { 766 val = tcg_temp_new(); 767 mem = tcg_temp_new(); 768 tcg_gen_setcondi_i32(dc.cond, val, dc.value, 0); 769 addr = rx_index_addr(ctx, mem, a->sz, a->ld, a->rd); 770 rx_gen_st(a->sz, val, addr); 771 } else { 772 tcg_gen_setcondi_i32(dc.cond, cpu_regs[a->rd], dc.value, 0); 773 } 774 return true; 775 } 776 777 /* rtsd #imm */ 778 static bool trans_RTSD_i(DisasContext *ctx, arg_RTSD_i *a) 779 { 780 tcg_gen_addi_i32(cpu_sp, cpu_sp, a->imm << 2); 781 pop(cpu_pc); 782 ctx->base.is_jmp = DISAS_JUMP; 783 return true; 784 } 785 786 /* rtsd #imm, rd-rd2 */ 787 static bool trans_RTSD_irr(DisasContext *ctx, arg_RTSD_irr *a) 788 { 789 int dst; 790 int adj; 791 792 if (a->rd2 >= a->rd) { 793 adj = a->imm - (a->rd2 - a->rd + 1); 794 } else { 795 adj = a->imm - (15 - a->rd + 1); 796 } 797 798 tcg_gen_addi_i32(cpu_sp, cpu_sp, adj << 2); 799 dst = a->rd; 800 while (dst <= a->rd2 && dst < 16) { 801 pop(cpu_regs[dst++]); 802 } 803 pop(cpu_pc); 804 ctx->base.is_jmp = DISAS_JUMP; 805 return true; 806 } 807 808 typedef void (*op2fn)(TCGv ret, TCGv arg1); 809 typedef void (*op3fn)(TCGv ret, TCGv arg1, TCGv arg2); 810 811 static inline void rx_gen_op_rr(op2fn opr, int dst, int src) 812 { 813 opr(cpu_regs[dst], cpu_regs[src]); 814 } 815 816 static inline void rx_gen_op_rrr(op3fn opr, int dst, int src, int src2) 817 { 818 
opr(cpu_regs[dst], cpu_regs[src], cpu_regs[src2]);
}

/* Apply a binary generator with an immediate second operand. */
static inline void rx_gen_op_irr(op3fn opr, int dst, int src, uint32_t src2)
{
    TCGv imm = tcg_constant_i32(src2);
    opr(cpu_regs[dst], cpu_regs[src], imm);
}

/* Apply a binary generator whose second operand is described by ld/mi. */
static inline void rx_gen_op_mr(op3fn opr, DisasContext *ctx,
                                int dst, int src, int ld, int mi)
{
    TCGv val, mem;
    mem = tcg_temp_new();
    val = rx_load_source(ctx, mem, ld, mi, src);
    opr(cpu_regs[dst], cpu_regs[dst], val);
}

/* AND; Z and S flags track the result value. */
static void rx_and(TCGv ret, TCGv arg1, TCGv arg2)
{
    tcg_gen_and_i32(cpu_psw_s, arg1, arg2);
    tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
    tcg_gen_mov_i32(ret, cpu_psw_s);
}

/* and #uimm:4, rd */
/* and #imm, rd */
static bool trans_AND_ir(DisasContext *ctx, arg_AND_ir *a)
{
    rx_gen_op_irr(rx_and, a->rd, a->rd, a->imm);
    return true;
}

/* and dsp[rs], rd */
/* and rs,rd */
static bool trans_AND_mr(DisasContext *ctx, arg_AND_mr *a)
{
    rx_gen_op_mr(rx_and, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}

/* and rs,rs2,rd */
static bool trans_AND_rrr(DisasContext *ctx, arg_AND_rrr *a)
{
    rx_gen_op_rrr(rx_and, a->rd, a->rs, a->rs2);
    return true;
}

/* OR; Z and S flags track the result value. */
static void rx_or(TCGv ret, TCGv arg1, TCGv arg2)
{
    tcg_gen_or_i32(cpu_psw_s, arg1, arg2);
    tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
    tcg_gen_mov_i32(ret, cpu_psw_s);
}

/* or #uimm:4, rd */
/* or #imm, rd */
static bool trans_OR_ir(DisasContext *ctx, arg_OR_ir *a)
{
    rx_gen_op_irr(rx_or, a->rd, a->rd, a->imm);
    return true;
}

/* or dsp[rs], rd */
/* or rs,rd */
static bool trans_OR_mr(DisasContext *ctx, arg_OR_mr *a)
{
    rx_gen_op_mr(rx_or, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}

/* or rs,rs2,rd */
static bool trans_OR_rrr(DisasContext *ctx, arg_OR_rrr *a)
{
    rx_gen_op_rrr(rx_or, a->rd, a->rs, a->rs2);
    return true;
}

/* XOR; Z and S flags track the result value. */
static void rx_xor(TCGv ret, TCGv arg1, TCGv arg2)
{
    tcg_gen_xor_i32(cpu_psw_s, arg1, arg2);
    tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
    tcg_gen_mov_i32(ret, cpu_psw_s);
}

/* xor #imm, rd */
static bool trans_XOR_ir(DisasContext *ctx, arg_XOR_ir *a)
{
    rx_gen_op_irr(rx_xor, a->rd, a->rd, a->imm);
    return true;
}

/* xor dsp[rs], rd */
/* xor rs,rd */
static bool trans_XOR_mr(DisasContext *ctx, arg_XOR_mr *a)
{
    rx_gen_op_mr(rx_xor, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}

/* AND for flags only; no result is written back (ret is unused). */
static void rx_tst(TCGv ret, TCGv arg1, TCGv arg2)
{
    tcg_gen_and_i32(cpu_psw_s, arg1, arg2);
    tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
}

/* tst #imm, rd */
static bool trans_TST_ir(DisasContext *ctx, arg_TST_ir *a)
{
    rx_gen_op_irr(rx_tst, a->rd, a->rd, a->imm);
    return true;
}

/* tst dsp[rs], rd */
/* tst rs, rd */
static bool trans_TST_mr(DisasContext *ctx, arg_TST_mr *a)
{
    rx_gen_op_mr(rx_tst, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}

/* Bitwise NOT; Z and S flags track the result value. */
static void rx_not(TCGv ret, TCGv arg1)
{
    tcg_gen_not_i32(ret, arg1);
    tcg_gen_mov_i32(cpu_psw_z, ret);
    tcg_gen_mov_i32(cpu_psw_s, ret);
}

/* not rd */
/* not rs, rd */
static bool trans_NOT_rr(DisasContext *ctx, arg_NOT_rr *a)
{
    rx_gen_op_rr(rx_not, a->rd, a->rs);
    return true;
}

/*
 * Negate: O flag set when arg1 == INT32_MIN, C set when the result is
 * zero.  NOTE(review): psw_o is written here as 0/1 while psw_cond()
 * tests its sign bit — confirm this matches the intended O semantics.
 */
static void rx_neg(TCGv ret, TCGv arg1)
{
    tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_psw_o, arg1, 0x80000000);
    tcg_gen_neg_i32(ret, arg1);
    tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_psw_c, ret, 0);
    tcg_gen_mov_i32(cpu_psw_z, ret);
    tcg_gen_mov_i32(cpu_psw_s, ret);
}


/* neg rd */
/* neg rs, rd */
static bool trans_NEG_rr(DisasContext *ctx, arg_NEG_rr *a)
{
    rx_gen_op_rr(rx_neg, a->rd, a->rs);
    return true;
}

/* ret = arg1 + arg2 + psw_c */
static void rx_adc(TCGv ret, TCGv arg1, TCGv arg2)
{
    TCGv z = tcg_constant_i32(0);
    /* Two double-word adds accumulate the carry-out into cpu_psw_c. */
    tcg_gen_add2_i32(cpu_psw_s, cpu_psw_c, arg1, z, cpu_psw_c, z);
    tcg_gen_add2_i32(cpu_psw_s, cpu_psw_c, cpu_psw_s, cpu_psw_c, arg2, z);
    /* Overflow: result sign differs from arg1 while arg1/arg2 agree. */
    tcg_gen_xor_i32(cpu_psw_o, cpu_psw_s, arg1);
    tcg_gen_xor_i32(cpu_psw_z, arg1, arg2);
    tcg_gen_andc_i32(cpu_psw_o, cpu_psw_o, cpu_psw_z);
    tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
    tcg_gen_mov_i32(ret, cpu_psw_s);
}

/* adc #imm, rd */
static bool trans_ADC_ir(DisasContext *ctx, arg_ADC_ir *a)
{
    rx_gen_op_irr(rx_adc, a->rd, a->rd, a->imm);
    return true;
}

/* adc rs, rd */
static bool trans_ADC_rr(DisasContext *ctx, arg_ADC_rr *a)
{
    rx_gen_op_rrr(rx_adc, a->rd, a->rd, a->rs);
    return true;
}

/* adc dsp[rs], rd */
static bool trans_ADC_mr(DisasContext *ctx, arg_ADC_mr *a)
{
    /* mi only 2 */
    if (a->mi != 2) {
        return false;
    }
    rx_gen_op_mr(rx_adc, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}

/* ret = arg1 + arg2 */
static void rx_add(TCGv ret, TCGv arg1, TCGv arg2)
{
    TCGv z = tcg_constant_i32(0);
    tcg_gen_add2_i32(cpu_psw_s, cpu_psw_c, arg1, z, arg2, z);
    /* Overflow: result sign differs from arg1 while arg1/arg2 agree. */
    tcg_gen_xor_i32(cpu_psw_o, cpu_psw_s, arg1);
    tcg_gen_xor_i32(cpu_psw_z, arg1, arg2);
    tcg_gen_andc_i32(cpu_psw_o, cpu_psw_o, cpu_psw_z);
    tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
    tcg_gen_mov_i32(ret, cpu_psw_s);
}

/* add #uimm4, rd */
/* add #imm, rs, rd */
static bool trans_ADD_irr(DisasContext *ctx, arg_ADD_irr *a)
{
    rx_gen_op_irr(rx_add, a->rd, a->rs2, a->imm);
    return true;
}

/* add rs, rd */
/* add dsp[rs], rd */
static bool trans_ADD_mr(DisasContext *ctx, arg_ADD_mr *a)
{
    rx_gen_op_mr(rx_add, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}

/* add rs, rs2, rd */
static bool trans_ADD_rrr(DisasContext *ctx, arg_ADD_rrr *a)
{
    rx_gen_op_rrr(rx_add, a->rd,
a->rs, a->rs2);
    return true;
}

/* ret = arg1 - arg2 */
/* Also computes CMP flags when called with ret == NULL. */
static void rx_sub(TCGv ret, TCGv arg1, TCGv arg2)
{
    tcg_gen_sub_i32(cpu_psw_s, arg1, arg2);
    /* C is the borrow-free flag: set when arg1 >= arg2 unsigned. */
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_psw_c, arg1, arg2);
    /* Overflow: result sign differs from arg1 while arg1/arg2 differ. */
    tcg_gen_xor_i32(cpu_psw_o, cpu_psw_s, arg1);
    tcg_gen_xor_i32(cpu_psw_z, arg1, arg2);
    tcg_gen_and_i32(cpu_psw_o, cpu_psw_o, cpu_psw_z);
    tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
    /* CMP not required return */
    if (ret) {
        tcg_gen_mov_i32(ret, cpu_psw_s);
    }
}

/* Compare: subtract for flags only; "dummy" is ignored. */
static void rx_cmp(TCGv dummy, TCGv arg1, TCGv arg2)
{
    rx_sub(NULL, arg1, arg2);
}

/* ret = arg1 - arg2 - !psw_c */
/* -> ret = arg1 + ~arg2 + psw_c */
static void rx_sbb(TCGv ret, TCGv arg1, TCGv arg2)
{
    TCGv temp;
    temp = tcg_temp_new();
    tcg_gen_not_i32(temp, arg2);
    rx_adc(ret, arg1, temp);
}

/* cmp #imm4, rs2 */
/* cmp #imm8, rs2 */
/* cmp #imm, rs2 */
static bool trans_CMP_ir(DisasContext *ctx, arg_CMP_ir *a)
{
    rx_gen_op_irr(rx_cmp, 0, a->rs2, a->imm);
    return true;
}

/* cmp rs, rs2 */
/* cmp dsp[rs], rs2 */
static bool trans_CMP_mr(DisasContext *ctx, arg_CMP_mr *a)
{
    rx_gen_op_mr(rx_cmp, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}

/* sub #imm4, rd */
static bool trans_SUB_ir(DisasContext *ctx, arg_SUB_ir *a)
{
    rx_gen_op_irr(rx_sub, a->rd, a->rd, a->imm);
    return true;
}

/* sub rs, rd */
/* sub dsp[rs], rd */
static bool trans_SUB_mr(DisasContext *ctx, arg_SUB_mr *a)
{
    rx_gen_op_mr(rx_sub, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}

/* sub rs2, rs, rd */
static bool trans_SUB_rrr(DisasContext *ctx, arg_SUB_rrr *a)
{
    /* rd = rs2 - rs */
    rx_gen_op_rrr(rx_sub, a->rd, a->rs2, a->rs);
    return true;
}

/* sbb rs, rd */
static bool trans_SBB_rr(DisasContext *ctx,
arg_SBB_rr *a)
{
    rx_gen_op_rrr(rx_sbb, a->rd, a->rd, a->rs);
    return true;
}

/* sbb dsp[rs], rd */
static bool trans_SBB_mr(DisasContext *ctx, arg_SBB_mr *a)
{
    /* mi only 2 */
    if (a->mi != 2) {
        return false;
    }
    rx_gen_op_mr(rx_sbb, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}

/* abs rd */
/* abs rs, rd */
static bool trans_ABS_rr(DisasContext *ctx, arg_ABS_rr *a)
{
    rx_gen_op_rr(tcg_gen_abs_i32, a->rd, a->rs);
    return true;
}

/* max #imm, rd */
static bool trans_MAX_ir(DisasContext *ctx, arg_MAX_ir *a)
{
    rx_gen_op_irr(tcg_gen_smax_i32, a->rd, a->rd, a->imm);
    return true;
}

/* max rs, rd */
/* max dsp[rs], rd */
static bool trans_MAX_mr(DisasContext *ctx, arg_MAX_mr *a)
{
    rx_gen_op_mr(tcg_gen_smax_i32, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}

/* min #imm, rd */
static bool trans_MIN_ir(DisasContext *ctx, arg_MIN_ir *a)
{
    rx_gen_op_irr(tcg_gen_smin_i32, a->rd, a->rd, a->imm);
    return true;
}

/* min rs, rd */
/* min dsp[rs], rd */
static bool trans_MIN_mr(DisasContext *ctx, arg_MIN_mr *a)
{
    rx_gen_op_mr(tcg_gen_smin_i32, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}

/* mul #uimm4, rd */
/* mul #imm, rd */
static bool trans_MUL_ir(DisasContext *ctx, arg_MUL_ir *a)
{
    rx_gen_op_irr(tcg_gen_mul_i32, a->rd, a->rd, a->imm);
    return true;
}

/* mul rs, rd */
/* mul dsp[rs], rd */
static bool trans_MUL_mr(DisasContext *ctx, arg_MUL_mr *a)
{
    rx_gen_op_mr(tcg_gen_mul_i32, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}

/* mul rs, rs2, rd */
static bool trans_MUL_rrr(DisasContext *ctx, arg_MUL_rrr *a)
{
    rx_gen_op_rrr(tcg_gen_mul_i32, a->rd, a->rs, a->rs2);
    return true;
}

/* emul #imm, rd
 */
static bool trans_EMUL_ir(DisasContext *ctx, arg_EMUL_ir *a)
{
    TCGv imm = tcg_constant_i32(a->imm);
    if (a->rd > 14) {
        /* rd must leave room for the high half; logged, then masked. */
        qemu_log_mask(LOG_GUEST_ERROR, "rd too large %d", a->rd);
    }
    /* 64-bit signed product: low half in rd, high half in rd+1. */
    tcg_gen_muls2_i32(cpu_regs[a->rd], cpu_regs[(a->rd + 1) & 15],
                      cpu_regs[a->rd], imm);
    return true;
}

/* emul rs, rd */
/* emul dsp[rs], rd */
static bool trans_EMUL_mr(DisasContext *ctx, arg_EMUL_mr *a)
{
    TCGv val, mem;
    if (a->rd > 14) {
        qemu_log_mask(LOG_GUEST_ERROR, "rd too large %d", a->rd);
    }
    mem = tcg_temp_new();
    val = rx_load_source(ctx, mem, a->ld, a->mi, a->rs);
    tcg_gen_muls2_i32(cpu_regs[a->rd], cpu_regs[(a->rd + 1) & 15],
                      cpu_regs[a->rd], val);
    return true;
}

/* emulu #imm, rd */
static bool trans_EMULU_ir(DisasContext *ctx, arg_EMULU_ir *a)
{
    TCGv imm = tcg_constant_i32(a->imm);
    if (a->rd > 14) {
        qemu_log_mask(LOG_GUEST_ERROR, "rd too large %d", a->rd);
    }
    /* 64-bit unsigned product: low half in rd, high half in rd+1. */
    tcg_gen_mulu2_i32(cpu_regs[a->rd], cpu_regs[(a->rd + 1) & 15],
                      cpu_regs[a->rd], imm);
    return true;
}

/* emulu rs, rd */
/* emulu dsp[rs], rd */
static bool trans_EMULU_mr(DisasContext *ctx, arg_EMULU_mr *a)
{
    TCGv val, mem;
    if (a->rd > 14) {
        qemu_log_mask(LOG_GUEST_ERROR, "rd too large %d", a->rd);
    }
    mem = tcg_temp_new();
    val = rx_load_source(ctx, mem, a->ld, a->mi, a->rs);
    tcg_gen_mulu2_i32(cpu_regs[a->rd], cpu_regs[(a->rd + 1) & 15],
                      cpu_regs[a->rd], val);
    return true;
}

/* Signed divide; full semantics live in the div helper. */
static void rx_div(TCGv ret, TCGv arg1, TCGv arg2)
{
    gen_helper_div(ret, cpu_env, arg1, arg2);
}

/* Unsigned divide; full semantics live in the divu helper. */
static void rx_divu(TCGv ret, TCGv arg1, TCGv arg2)
{
    gen_helper_divu(ret, cpu_env, arg1, arg2);
}

/* div #imm, rd */
static bool trans_DIV_ir(DisasContext *ctx, arg_DIV_ir *a)
{
    rx_gen_op_irr(rx_div, a->rd, a->rd, a->imm);
return true;
}

/* div rs, rd */
/* div dsp[rs], rd */
static bool trans_DIV_mr(DisasContext *ctx, arg_DIV_mr *a)
{
    rx_gen_op_mr(rx_div, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}

/* divu #imm, rd */
static bool trans_DIVU_ir(DisasContext *ctx, arg_DIVU_ir *a)
{
    rx_gen_op_irr(rx_divu, a->rd, a->rd, a->imm);
    return true;
}

/* divu rs, rd */
/* divu dsp[rs], rd */
static bool trans_DIVU_mr(DisasContext *ctx, arg_DIVU_mr *a)
{
    rx_gen_op_mr(rx_divu, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}


/* shll #imm:5, rd */
/* shll #imm:5, rs2, rd */
static bool trans_SHLL_irr(DisasContext *ctx, arg_SHLL_irr *a)
{
    TCGv tmp;
    tmp = tcg_temp_new();
    if (a->imm) {
        /* C gets the last bit shifted out; O is set unless the bits
           shifted out (plus the new sign) were all-zero or all-one,
           i.e. unless the signed value was preserved. */
        tcg_gen_sari_i32(cpu_psw_c, cpu_regs[a->rs2], 32 - a->imm);
        tcg_gen_shli_i32(cpu_regs[a->rd], cpu_regs[a->rs2], a->imm);
        tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_psw_o, cpu_psw_c, 0);
        tcg_gen_setcondi_i32(TCG_COND_EQ, tmp, cpu_psw_c, 0xffffffff);
        tcg_gen_or_i32(cpu_psw_o, cpu_psw_o, tmp);
        tcg_gen_setcondi_i32(TCG_COND_NE, cpu_psw_c, cpu_psw_c, 0);
    } else {
        /* Zero shift count: plain move, C and O cleared. */
        tcg_gen_mov_i32(cpu_regs[a->rd], cpu_regs[a->rs2]);
        tcg_gen_movi_i32(cpu_psw_c, 0);
        tcg_gen_movi_i32(cpu_psw_o, 0);
    }
    /* Z and S are tracked lazily as the raw result value. */
    tcg_gen_mov_i32(cpu_psw_z, cpu_regs[a->rd]);
    tcg_gen_mov_i32(cpu_psw_s, cpu_regs[a->rd]);
    return true;
}

/* shll rs, rd */
static bool trans_SHLL_rr(DisasContext *ctx, arg_SHLL_rr *a)
{
    TCGLabel *noshift, *done;
    TCGv count, tmp;

    noshift = gen_new_label();
    done = gen_new_label();
    /* if (cpu_regs[a->rs]) { */
    tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_regs[a->rs], 0, noshift);
    count = tcg_temp_new();
    tmp = tcg_temp_new();
    /* Shift count is taken modulo 32 from rs. */
    tcg_gen_andi_i32(tmp, cpu_regs[a->rs], 31);
    tcg_gen_sub_i32(count, tcg_constant_i32(32), tmp);
    tcg_gen_sar_i32(cpu_psw_c,
cpu_regs[a->rd], count);
    tcg_gen_shl_i32(cpu_regs[a->rd], cpu_regs[a->rd], tmp);
    /* O is set unless all bits shifted out matched the sign (same
       scheme as the immediate form); C is the last bit shifted out. */
    tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_psw_o, cpu_psw_c, 0);
    tcg_gen_setcondi_i32(TCG_COND_EQ, tmp, cpu_psw_c, 0xffffffff);
    tcg_gen_or_i32(cpu_psw_o, cpu_psw_o, tmp);
    tcg_gen_setcondi_i32(TCG_COND_NE, cpu_psw_c, cpu_psw_c, 0);
    tcg_gen_br(done);
    /* } else { */
    gen_set_label(noshift);
    tcg_gen_movi_i32(cpu_psw_c, 0);
    tcg_gen_movi_i32(cpu_psw_o, 0);
    /* } */
    gen_set_label(done);
    /* Z and S are tracked lazily as the raw result value. */
    tcg_gen_mov_i32(cpu_psw_z, cpu_regs[a->rd]);
    tcg_gen_mov_i32(cpu_psw_s, cpu_regs[a->rd]);
    return true;
}

/*
 * Common right-shift by immediate for SHLR (alith == 0, logical) and
 * SHAR (alith == 1, arithmetic).  The shift is done in two steps,
 * (imm - 1) then 1, so the last bit shifted out can be captured in C.
 */
static inline void shiftr_imm(uint32_t rd, uint32_t rs, uint32_t imm,
                              unsigned int alith)
{
    static void (* const gen_sXri[])(TCGv ret, TCGv arg1, int arg2) = {
        tcg_gen_shri_i32, tcg_gen_sari_i32,
    };
    tcg_debug_assert(alith < 2);
    if (imm) {
        gen_sXri[alith](cpu_regs[rd], cpu_regs[rs], imm - 1);
        tcg_gen_andi_i32(cpu_psw_c, cpu_regs[rd], 0x00000001);
        gen_sXri[alith](cpu_regs[rd], cpu_regs[rd], 1);
    } else {
        /* Zero count: move only, C cleared. */
        tcg_gen_mov_i32(cpu_regs[rd], cpu_regs[rs]);
        tcg_gen_movi_i32(cpu_psw_c, 0);
    }
    /* Right shifts never overflow; O is always cleared. */
    tcg_gen_movi_i32(cpu_psw_o, 0);
    tcg_gen_mov_i32(cpu_psw_z, cpu_regs[rd]);
    tcg_gen_mov_i32(cpu_psw_s, cpu_regs[rd]);
}

/*
 * Common right-shift by register for SHLR/SHAR; same two-step trick as
 * shiftr_imm to recover the carry, with a run-time zero-count bypass.
 */
static inline void shiftr_reg(uint32_t rd, uint32_t rs, unsigned int alith)
{
    TCGLabel *noshift, *done;
    TCGv count;
    static void (* const gen_sXri[])(TCGv ret, TCGv arg1, int arg2) = {
        tcg_gen_shri_i32, tcg_gen_sari_i32,
    };
    static void (* const gen_sXr[])(TCGv ret, TCGv arg1, TCGv arg2) = {
        tcg_gen_shr_i32, tcg_gen_sar_i32,
    };
    tcg_debug_assert(alith < 2);
    noshift = gen_new_label();
    done = gen_new_label();
    count = tcg_temp_new();
    /* if (cpu_regs[rs]) { */
    tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_regs[rs], 0, noshift);
    tcg_gen_andi_i32(count, cpu_regs[rs], 31);
tcg_gen_subi_i32(count, count, 1);
    gen_sXr[alith](cpu_regs[rd], cpu_regs[rd], count);
    /* C is the last bit shifted out. */
    tcg_gen_andi_i32(cpu_psw_c, cpu_regs[rd], 0x00000001);
    gen_sXri[alith](cpu_regs[rd], cpu_regs[rd], 1);
    tcg_gen_br(done);
    /* } else { */
    gen_set_label(noshift);
    tcg_gen_movi_i32(cpu_psw_c, 0);
    /* } */
    gen_set_label(done);
    tcg_gen_movi_i32(cpu_psw_o, 0);
    tcg_gen_mov_i32(cpu_psw_z, cpu_regs[rd]);
    tcg_gen_mov_i32(cpu_psw_s, cpu_regs[rd]);
}

/* shar #imm:5, rd */
/* shar #imm:5, rs2, rd */
static bool trans_SHAR_irr(DisasContext *ctx, arg_SHAR_irr *a)
{
    shiftr_imm(a->rd, a->rs2, a->imm, 1);
    return true;
}

/* shar rs, rd */
static bool trans_SHAR_rr(DisasContext *ctx, arg_SHAR_rr *a)
{
    shiftr_reg(a->rd, a->rs, 1);
    return true;
}

/* shlr #imm:5, rd */
/* shlr #imm:5, rs2, rd */
static bool trans_SHLR_irr(DisasContext *ctx, arg_SHLR_irr *a)
{
    shiftr_imm(a->rd, a->rs2, a->imm, 0);
    return true;
}

/* shlr rs, rd */
static bool trans_SHLR_rr(DisasContext *ctx, arg_SHLR_rr *a)
{
    shiftr_reg(a->rd, a->rs, 0);
    return true;
}

/* rolc rd */
/* Rotate left one bit through carry: old bit 31 -> C, old C -> bit 0. */
static bool trans_ROLC(DisasContext *ctx, arg_ROLC *a)
{
    TCGv tmp;
    tmp = tcg_temp_new();
    tcg_gen_shri_i32(tmp, cpu_regs[a->rd], 31);
    tcg_gen_shli_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1);
    tcg_gen_or_i32(cpu_regs[a->rd], cpu_regs[a->rd], cpu_psw_c);
    tcg_gen_mov_i32(cpu_psw_c, tmp);
    tcg_gen_mov_i32(cpu_psw_z, cpu_regs[a->rd]);
    tcg_gen_mov_i32(cpu_psw_s, cpu_regs[a->rd]);
    return true;
}

/* rorc rd */
/* Rotate right one bit through carry: old bit 0 -> C, old C -> bit 31. */
static bool trans_RORC(DisasContext *ctx, arg_RORC *a)
{
    TCGv tmp;
    tmp = tcg_temp_new();
    tcg_gen_andi_i32(tmp, cpu_regs[a->rd], 0x00000001);
    tcg_gen_shri_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1);
    tcg_gen_shli_i32(cpu_psw_c, cpu_psw_c, 31);
tcg_gen_or_i32(cpu_regs[a->rd], cpu_regs[a->rd], cpu_psw_c);
    tcg_gen_mov_i32(cpu_psw_c, tmp);
    tcg_gen_mov_i32(cpu_psw_z, cpu_regs[a->rd]);
    tcg_gen_mov_i32(cpu_psw_s, cpu_regs[a->rd]);
    return true;
}

enum {ROTR = 0, ROTL = 1};
enum {ROT_IMM = 0, ROT_REG = 1};
/*
 * Common rotate for ROTL/ROTR.  'ir' selects immediate vs register
 * count ('src' is an immediate value or a register number accordingly);
 * C receives the bit rotated across the word boundary (bit 0 for ROTL,
 * bit 31 for ROTR of the result).
 */
static inline void rx_rot(int ir, int dir, int rd, int src)
{
    switch (dir) {
    case ROTL:
        if (ir == ROT_IMM) {
            tcg_gen_rotli_i32(cpu_regs[rd], cpu_regs[rd], src);
        } else {
            tcg_gen_rotl_i32(cpu_regs[rd], cpu_regs[rd], cpu_regs[src]);
        }
        tcg_gen_andi_i32(cpu_psw_c, cpu_regs[rd], 0x00000001);
        break;
    case ROTR:
        if (ir == ROT_IMM) {
            tcg_gen_rotri_i32(cpu_regs[rd], cpu_regs[rd], src);
        } else {
            tcg_gen_rotr_i32(cpu_regs[rd], cpu_regs[rd], cpu_regs[src]);
        }
        tcg_gen_shri_i32(cpu_psw_c, cpu_regs[rd], 31);
        break;
    }
    tcg_gen_mov_i32(cpu_psw_z, cpu_regs[rd]);
    tcg_gen_mov_i32(cpu_psw_s, cpu_regs[rd]);
}

/* rotl #imm, rd */
static bool trans_ROTL_ir(DisasContext *ctx, arg_ROTL_ir *a)
{
    rx_rot(ROT_IMM, ROTL, a->rd, a->imm);
    return true;
}

/* rotl rs, rd */
static bool trans_ROTL_rr(DisasContext *ctx, arg_ROTL_rr *a)
{
    rx_rot(ROT_REG, ROTL, a->rd, a->rs);
    return true;
}

/* rotr #imm, rd */
static bool trans_ROTR_ir(DisasContext *ctx, arg_ROTR_ir *a)
{
    rx_rot(ROT_IMM, ROTR, a->rd, a->imm);
    return true;
}

/* rotr rs, rd */
static bool trans_ROTR_rr(DisasContext *ctx, arg_ROTR_rr *a)
{
    rx_rot(ROT_REG, ROTR, a->rd, a->rs);
    return true;
}

/* revl rs, rd */
/* Full 32-bit byte swap. */
static bool trans_REVL(DisasContext *ctx, arg_REVL *a)
{
    tcg_gen_bswap32_i32(cpu_regs[a->rd], cpu_regs[a->rs]);
    return true;
}

/* revw rs, rd */
/* Byte swap within each 16-bit half of the word. */
static bool trans_REVW(DisasContext *ctx, arg_REVW *a)
{
    TCGv tmp;
    tmp =
tcg_temp_new(); 1520 tcg_gen_andi_i32(tmp, cpu_regs[a->rs], 0x00ff00ff); 1521 tcg_gen_shli_i32(tmp, tmp, 8); 1522 tcg_gen_shri_i32(cpu_regs[a->rd], cpu_regs[a->rs], 8); 1523 tcg_gen_andi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 0x00ff00ff); 1524 tcg_gen_or_i32(cpu_regs[a->rd], cpu_regs[a->rd], tmp); 1525 return true; 1526 } 1527 1528 /* conditional branch helper */ 1529 static void rx_bcnd_main(DisasContext *ctx, int cd, int dst) 1530 { 1531 DisasCompare dc; 1532 TCGLabel *t, *done; 1533 1534 switch (cd) { 1535 case 0 ... 13: 1536 dc.temp = tcg_temp_new(); 1537 psw_cond(&dc, cd); 1538 t = gen_new_label(); 1539 done = gen_new_label(); 1540 tcg_gen_brcondi_i32(dc.cond, dc.value, 0, t); 1541 gen_goto_tb(ctx, 0, ctx->base.pc_next); 1542 tcg_gen_br(done); 1543 gen_set_label(t); 1544 gen_goto_tb(ctx, 1, ctx->pc + dst); 1545 gen_set_label(done); 1546 break; 1547 case 14: 1548 /* always true case */ 1549 gen_goto_tb(ctx, 0, ctx->pc + dst); 1550 break; 1551 case 15: 1552 /* always false case */ 1553 /* Nothing do */ 1554 break; 1555 } 1556 } 1557 1558 /* beq dsp:3 / bne dsp:3 */ 1559 /* beq dsp:8 / bne dsp:8 */ 1560 /* bc dsp:8 / bnc dsp:8 */ 1561 /* bgtu dsp:8 / bleu dsp:8 */ 1562 /* bpz dsp:8 / bn dsp:8 */ 1563 /* bge dsp:8 / blt dsp:8 */ 1564 /* bgt dsp:8 / ble dsp:8 */ 1565 /* bo dsp:8 / bno dsp:8 */ 1566 /* beq dsp:16 / bne dsp:16 */ 1567 static bool trans_BCnd(DisasContext *ctx, arg_BCnd *a) 1568 { 1569 rx_bcnd_main(ctx, a->cd, a->dsp); 1570 return true; 1571 } 1572 1573 /* bra dsp:3 */ 1574 /* bra dsp:8 */ 1575 /* bra dsp:16 */ 1576 /* bra dsp:24 */ 1577 static bool trans_BRA(DisasContext *ctx, arg_BRA *a) 1578 { 1579 rx_bcnd_main(ctx, 14, a->dsp); 1580 return true; 1581 } 1582 1583 /* bra rs */ 1584 static bool trans_BRA_l(DisasContext *ctx, arg_BRA_l *a) 1585 { 1586 tcg_gen_addi_i32(cpu_pc, cpu_regs[a->rd], ctx->pc); 1587 ctx->base.is_jmp = DISAS_JUMP; 1588 return true; 1589 } 1590 1591 static inline void rx_save_pc(DisasContext *ctx) 1592 { 1593 TCGv pc = 
tcg_constant_i32(ctx->base.pc_next);
    push(pc);
}

/* jmp rs */
static bool trans_JMP(DisasContext *ctx, arg_JMP *a)
{
    tcg_gen_mov_i32(cpu_pc, cpu_regs[a->rs]);
    ctx->base.is_jmp = DISAS_JUMP;
    return true;
}

/* jsr rs */
/* Indirect call: push return address, then jump to rs. */
static bool trans_JSR(DisasContext *ctx, arg_JSR *a)
{
    rx_save_pc(ctx);
    tcg_gen_mov_i32(cpu_pc, cpu_regs[a->rs]);
    ctx->base.is_jmp = DISAS_JUMP;
    return true;
}

/* bsr dsp:16 */
/* bsr dsp:24 */
static bool trans_BSR(DisasContext *ctx, arg_BSR *a)
{
    rx_save_pc(ctx);
    rx_bcnd_main(ctx, 14, a->dsp);
    return true;
}

/* bsr rs */
static bool trans_BSR_l(DisasContext *ctx, arg_BSR_l *a)
{
    rx_save_pc(ctx);
    tcg_gen_addi_i32(cpu_pc, cpu_regs[a->rd], ctx->pc);
    ctx->base.is_jmp = DISAS_JUMP;
    return true;
}

/* rts */
static bool trans_RTS(DisasContext *ctx, arg_RTS *a)
{
    pop(cpu_pc);
    ctx->base.is_jmp = DISAS_JUMP;
    return true;
}

/* nop */
static bool trans_NOP(DisasContext *ctx, arg_NOP *a)
{
    return true;
}

/* String operations are implemented entirely in helpers; they read
   their operands from fixed registers in env. */

/* scmpu */
static bool trans_SCMPU(DisasContext *ctx, arg_SCMPU *a)
{
    gen_helper_scmpu(cpu_env);
    return true;
}

/* smovu */
static bool trans_SMOVU(DisasContext *ctx, arg_SMOVU *a)
{
    gen_helper_smovu(cpu_env);
    return true;
}

/* smovf */
static bool trans_SMOVF(DisasContext *ctx, arg_SMOVF *a)
{
    gen_helper_smovf(cpu_env);
    return true;
}

/* smovb */
static bool trans_SMOVB(DisasContext *ctx, arg_SMOVB *a)
{
    gen_helper_smovb(cpu_env);
    return true;
}

/* Invoke a sized string helper with the element size from the insn. */
#define STRING(op) \
    do { \
        TCGv size = tcg_constant_i32(a->sz); \
        gen_helper_##op(cpu_env, size); \
    } while (0)

/* suntil.<bwl> */
static bool trans_SUNTIL(DisasContext *ctx,
arg_SUNTIL *a)
{
    STRING(suntil);
    return true;
}

/* swhile.<bwl> */
static bool trans_SWHILE(DisasContext *ctx, arg_SWHILE *a)
{
    STRING(swhile);
    return true;
}

/* sstr.<bwl> */
static bool trans_SSTR(DisasContext *ctx, arg_SSTR *a)
{
    STRING(sstr);
    return true;
}

/* rmpa.<bwl> */
static bool trans_RMPA(DisasContext *ctx, arg_RMPA *a)
{
    STRING(rmpa);
    return true;
}

/*
 * 16x16 -> 32 signed multiply of the HIGH halfwords of rs and rs2,
 * shifted left 16 into accumulator format.
 */
static void rx_mul64hi(TCGv_i64 ret, int rs, int rs2)
{
    TCGv_i64 tmp0, tmp1;
    tmp0 = tcg_temp_new_i64();
    tmp1 = tcg_temp_new_i64();
    tcg_gen_ext_i32_i64(tmp0, cpu_regs[rs]);
    tcg_gen_sari_i64(tmp0, tmp0, 16);
    tcg_gen_ext_i32_i64(tmp1, cpu_regs[rs2]);
    tcg_gen_sari_i64(tmp1, tmp1, 16);
    tcg_gen_mul_i64(ret, tmp0, tmp1);
    tcg_gen_shli_i64(ret, ret, 16);
}

/*
 * 16x16 -> 32 signed multiply of the LOW halfwords of rs and rs2,
 * shifted left 16 into accumulator format.
 */
static void rx_mul64lo(TCGv_i64 ret, int rs, int rs2)
{
    TCGv_i64 tmp0, tmp1;
    tmp0 = tcg_temp_new_i64();
    tmp1 = tcg_temp_new_i64();
    tcg_gen_ext_i32_i64(tmp0, cpu_regs[rs]);
    tcg_gen_ext16s_i64(tmp0, tmp0);
    tcg_gen_ext_i32_i64(tmp1, cpu_regs[rs2]);
    tcg_gen_ext16s_i64(tmp1, tmp1);
    tcg_gen_mul_i64(ret, tmp0, tmp1);
    tcg_gen_shli_i64(ret, ret, 16);
}

/* mulhi rs,rs2 */
static bool trans_MULHI(DisasContext *ctx, arg_MULHI *a)
{
    rx_mul64hi(cpu_acc, a->rs, a->rs2);
    return true;
}

/* mullo rs,rs2 */
static bool trans_MULLO(DisasContext *ctx, arg_MULLO *a)
{
    rx_mul64lo(cpu_acc, a->rs, a->rs2);
    return true;
}

/* machi rs,rs2 */
/* Multiply-accumulate into ACC using the high halfwords. */
static bool trans_MACHI(DisasContext *ctx, arg_MACHI *a)
{
    TCGv_i64 tmp;
    tmp = tcg_temp_new_i64();
    rx_mul64hi(tmp, a->rs, a->rs2);
    tcg_gen_add_i64(cpu_acc, cpu_acc, tmp);
    return true;
}

/* maclo rs,rs2 */
static bool trans_MACLO(DisasContext *ctx, arg_MACLO *a)
{
TCGv_i64 tmp;
    tmp = tcg_temp_new_i64();
    rx_mul64lo(tmp, a->rs, a->rs2);
    tcg_gen_add_i64(cpu_acc, cpu_acc, tmp);
    return true;
}

/* mvfachi rd */
/* rd = ACC[63:32]. */
static bool trans_MVFACHI(DisasContext *ctx, arg_MVFACHI *a)
{
    tcg_gen_extrh_i64_i32(cpu_regs[a->rd], cpu_acc);
    return true;
}

/* mvfacmi rd */
/* rd = ACC[47:16] (the middle 32 bits). */
static bool trans_MVFACMI(DisasContext *ctx, arg_MVFACMI *a)
{
    TCGv_i64 rd64;
    rd64 = tcg_temp_new_i64();
    tcg_gen_extract_i64(rd64, cpu_acc, 16, 32);
    tcg_gen_extrl_i64_i32(cpu_regs[a->rd], rd64);
    return true;
}

/* mvtachi rs */
/* ACC[63:32] = rs, low half preserved. */
static bool trans_MVTACHI(DisasContext *ctx, arg_MVTACHI *a)
{
    TCGv_i64 rs64;
    rs64 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(rs64, cpu_regs[a->rs]);
    tcg_gen_deposit_i64(cpu_acc, cpu_acc, rs64, 32, 32);
    return true;
}

/* mvtaclo rs */
/* ACC[31:0] = rs, high half preserved. */
static bool trans_MVTACLO(DisasContext *ctx, arg_MVTACLO *a)
{
    TCGv_i64 rs64;
    rs64 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(rs64, cpu_regs[a->rs]);
    tcg_gen_deposit_i64(cpu_acc, cpu_acc, rs64, 0, 32);
    return true;
}

/* racw #imm */
/* Round ACC word; the encoded imm is one less than the shift count. */
static bool trans_RACW(DisasContext *ctx, arg_RACW *a)
{
    TCGv imm = tcg_constant_i32(a->imm + 1);
    gen_helper_racw(cpu_env, imm);
    return true;
}

/* sat rd */
/* If O is set (bit 31 of cpu_psw_o), saturate rd toward the sign of
   the overflowing result recorded in S. */
static bool trans_SAT(DisasContext *ctx, arg_SAT *a)
{
    TCGv tmp, z;
    tmp = tcg_temp_new();
    z = tcg_constant_i32(0);
    /* S == 1 -> 0xffffffff / S == 0 -> 0x00000000 */
    tcg_gen_sari_i32(tmp, cpu_psw_s, 31);
    /* S == 1 -> 0x7fffffff / S == 0 -> 0x80000000 */
    tcg_gen_xori_i32(tmp, tmp, 0x80000000);
    tcg_gen_movcond_i32(TCG_COND_LT, cpu_regs[a->rd],
                        cpu_psw_o, z, tmp, cpu_regs[a->rd]);
    return true;
}

/* satr */
static bool trans_SATR(DisasContext *ctx, arg_SATR *a)
{
    gen_helper_satr(cpu_env);
    return true;
}

#define cat3(a, b, c) a##b##c
/*
 * Generate the two-operand FPU translators for 'name':
 * the #imm form (float literal loaded via li) and the rs/dsp[rs] form.
 * The operations themselves live in helpers (FPSW handling).
 */
#define FOP(name, op) \
    static bool cat3(trans_, name, _ir)(DisasContext *ctx, \
                                        cat3(arg_, name, _ir) * a) \
    { \
        TCGv imm = tcg_constant_i32(li(ctx, 0)); \
        gen_helper_##op(cpu_regs[a->rd], cpu_env, \
                        cpu_regs[a->rd], imm); \
        return true; \
    } \
    static bool cat3(trans_, name, _mr)(DisasContext *ctx, \
                                        cat3(arg_, name, _mr) * a) \
    { \
        TCGv val, mem; \
        mem = tcg_temp_new(); \
        val = rx_load_source(ctx, mem, a->ld, MO_32, a->rs); \
        gen_helper_##op(cpu_regs[a->rd], cpu_env, \
                        cpu_regs[a->rd], val); \
        return true; \
    }

/* One-operand float conversion (source may be register or memory). */
#define FCONVOP(name, op) \
    static bool trans_##name(DisasContext *ctx, arg_##name * a) \
    { \
        TCGv val, mem; \
        mem = tcg_temp_new(); \
        val = rx_load_source(ctx, mem, a->ld, MO_32, a->rs); \
        gen_helper_##op(cpu_regs[a->rd], cpu_env, val); \
        return true; \
    }

FOP(FADD, fadd)
FOP(FSUB, fsub)
FOP(FMUL, fmul)
FOP(FDIV, fdiv)

/* fcmp #imm, rd */
static bool trans_FCMP_ir(DisasContext *ctx, arg_FCMP_ir * a)
{
    TCGv imm = tcg_constant_i32(li(ctx, 0));
    gen_helper_fcmp(cpu_env, cpu_regs[a->rd], imm);
    return true;
}

/* fcmp dsp[rs], rd */
/* fcmp rs, rd */
static bool trans_FCMP_mr(DisasContext *ctx, arg_FCMP_mr *a)
{
    TCGv val, mem;
    mem = tcg_temp_new();
    val = rx_load_source(ctx, mem, a->ld, MO_32, a->rs);
    gen_helper_fcmp(cpu_env, cpu_regs[a->rd], val);
    return true;
}

FCONVOP(FTOI, ftoi)
FCONVOP(ROUND, round)

/* itof rs, rd */
/* itof dsp[rs], rd */
static bool trans_ITOF(DisasContext *ctx, arg_ITOF * a)
{
    TCGv val, mem;
    mem = tcg_temp_new();
    val = rx_load_source(ctx, mem, a->ld, a->mi, a->rs);
    gen_helper_itof(cpu_regs[a->rd], cpu_env, val);
    return true;
}

/* Read-modify-write byte in memory: set the bits in 'mask'. */
static void rx_bsetm(TCGv mem,
TCGv mask)
{
    TCGv val;
    val = tcg_temp_new();
    rx_gen_ld(MO_8, val, mem);
    tcg_gen_or_i32(val, val, mask);
    rx_gen_st(MO_8, val, mem);
}

/* Read-modify-write byte in memory: clear the bits in 'mask'. */
static void rx_bclrm(TCGv mem, TCGv mask)
{
    TCGv val;
    val = tcg_temp_new();
    rx_gen_ld(MO_8, val, mem);
    tcg_gen_andc_i32(val, val, mask);
    rx_gen_st(MO_8, val, mem);
}

/* Test byte in memory against 'mask'; C and Z track the result. */
static void rx_btstm(TCGv mem, TCGv mask)
{
    TCGv val;
    val = tcg_temp_new();
    rx_gen_ld(MO_8, val, mem);
    tcg_gen_and_i32(val, val, mask);
    tcg_gen_setcondi_i32(TCG_COND_NE, cpu_psw_c, val, 0);
    tcg_gen_mov_i32(cpu_psw_z, cpu_psw_c);
}

/* Read-modify-write byte in memory: toggle the bits in 'mask'. */
static void rx_bnotm(TCGv mem, TCGv mask)
{
    TCGv val;
    val = tcg_temp_new();
    rx_gen_ld(MO_8, val, mem);
    tcg_gen_xor_i32(val, val, mask);
    rx_gen_st(MO_8, val, mem);
}

/* Register forms of the same bit operations. */
static void rx_bsetr(TCGv reg, TCGv mask)
{
    tcg_gen_or_i32(reg, reg, mask);
}

static void rx_bclrr(TCGv reg, TCGv mask)
{
    tcg_gen_andc_i32(reg, reg, mask);
}

static inline void rx_btstr(TCGv reg, TCGv mask)
{
    TCGv t0;
    t0 = tcg_temp_new();
    tcg_gen_and_i32(t0, reg, mask);
    tcg_gen_setcondi_i32(TCG_COND_NE, cpu_psw_c, t0, 0);
    tcg_gen_mov_i32(cpu_psw_z, cpu_psw_c);
}

static inline void rx_bnotr(TCGv reg, TCGv mask)
{
    tcg_gen_xor_i32(reg, reg, mask);
}

/*
 * Generate the four addressing forms of each bit instruction:
 * _im (#imm, memory), _ir (#imm, register), _rr (register bit number,
 * register), _rm (register bit number, memory).  Memory forms use a
 * 3-bit bit number, register forms a 5-bit one.
 */
#define BITOP(name, op) \
    static bool cat3(trans_, name, _im)(DisasContext *ctx, \
                                        cat3(arg_, name, _im) * a) \
    { \
        TCGv mask, mem, addr; \
        mem = tcg_temp_new(); \
        mask = tcg_constant_i32(1 << a->imm); \
        addr = rx_index_addr(ctx, mem, a->ld, MO_8, a->rs); \
        cat3(rx_, op, m)(addr, mask); \
        return true; \
    } \
    static bool cat3(trans_, name, _ir)(DisasContext *ctx, \
                                        cat3(arg_, name, _ir) * a) \
    { \
        TCGv mask; \
        mask = tcg_constant_i32(1 << a->imm); \
        cat3(rx_, op, \
r)(cpu_regs[a->rd], mask); \
        return true; \
    } \
    static bool cat3(trans_, name, _rr)(DisasContext *ctx, \
                                        cat3(arg_, name, _rr) * a) \
    { \
        TCGv mask, b; \
        mask = tcg_temp_new(); \
        b = tcg_temp_new(); \
        tcg_gen_andi_i32(b, cpu_regs[a->rs], 31); \
        tcg_gen_shl_i32(mask, tcg_constant_i32(1), b); \
        cat3(rx_, op, r)(cpu_regs[a->rd], mask); \
        return true; \
    } \
    static bool cat3(trans_, name, _rm)(DisasContext *ctx, \
                                        cat3(arg_, name, _rm) * a) \
    { \
        TCGv mask, mem, addr, b; \
        mask = tcg_temp_new(); \
        b = tcg_temp_new(); \
        tcg_gen_andi_i32(b, cpu_regs[a->rd], 7); \
        tcg_gen_shl_i32(mask, tcg_constant_i32(1), b); \
        mem = tcg_temp_new(); \
        addr = rx_index_addr(ctx, mem, a->ld, MO_8, a->rs); \
        cat3(rx_, op, m)(addr, mask); \
        return true; \
    }

BITOP(BSET, bset)
BITOP(BCLR, bclr)
BITOP(BTST, btst)
BITOP(BNOT, bnot)

/* Deposit the truth value of condition 'cond' into bit 'pos' of val. */
static inline void bmcnd_op(TCGv val, TCGCond cond, int pos)
{
    TCGv bit;
    DisasCompare dc;
    dc.temp = tcg_temp_new();
    bit = tcg_temp_new();
    psw_cond(&dc, cond);
    tcg_gen_andi_i32(val, val, ~(1 << pos));
    tcg_gen_setcondi_i32(dc.cond, bit, dc.value, 0);
    tcg_gen_deposit_i32(val, val, bit, pos, 1);
}

/* bmcnd #imm, dsp[rd] */
static bool trans_BMCnd_im(DisasContext *ctx, arg_BMCnd_im *a)
{
    TCGv val, mem, addr;
    val = tcg_temp_new();
    mem = tcg_temp_new();
    addr = rx_index_addr(ctx, mem, a->ld, MO_8, a->rd);
    rx_gen_ld(MO_8, val, addr);
    bmcnd_op(val, a->cd, a->imm);
    rx_gen_st(MO_8, val, addr);
    return true;
}

/* bmcond #imm, rd */
static bool trans_BMCnd_ir(DisasContext *ctx, arg_BMCnd_ir *a)
{
    bmcnd_op(cpu_regs[a->rd], a->cd, a->imm);
    return true;
}

/* Bit numbers used by CLRPSW/SETPSW destination encoding. */
enum {
    PSW_C = 0,
    PSW_Z = 1,
    PSW_S = 2,
    PSW_O = 3,
    PSW_I = 8,
    PSW_U = 9,
};
2054 static inline void clrsetpsw(DisasContext *ctx, int cb, int val) 2055 { 2056 if (cb < 8) { 2057 switch (cb) { 2058 case PSW_C: 2059 tcg_gen_movi_i32(cpu_psw_c, val); 2060 break; 2061 case PSW_Z: 2062 tcg_gen_movi_i32(cpu_psw_z, val == 0); 2063 break; 2064 case PSW_S: 2065 tcg_gen_movi_i32(cpu_psw_s, val ? -1 : 0); 2066 break; 2067 case PSW_O: 2068 tcg_gen_movi_i32(cpu_psw_o, val << 31); 2069 break; 2070 default: 2071 qemu_log_mask(LOG_GUEST_ERROR, "Invalid distination %d", cb); 2072 break; 2073 } 2074 } else if (is_privileged(ctx, 0)) { 2075 switch (cb) { 2076 case PSW_I: 2077 tcg_gen_movi_i32(cpu_psw_i, val); 2078 ctx->base.is_jmp = DISAS_UPDATE; 2079 break; 2080 case PSW_U: 2081 if (FIELD_EX32(ctx->tb_flags, PSW, U) != val) { 2082 ctx->tb_flags = FIELD_DP32(ctx->tb_flags, PSW, U, val); 2083 tcg_gen_movi_i32(cpu_psw_u, val); 2084 tcg_gen_mov_i32(val ? cpu_isp : cpu_usp, cpu_sp); 2085 tcg_gen_mov_i32(cpu_sp, val ? cpu_usp : cpu_isp); 2086 } 2087 break; 2088 default: 2089 qemu_log_mask(LOG_GUEST_ERROR, "Invalid distination %d", cb); 2090 break; 2091 } 2092 } 2093 } 2094 2095 /* clrpsw psw */ 2096 static bool trans_CLRPSW(DisasContext *ctx, arg_CLRPSW *a) 2097 { 2098 clrsetpsw(ctx, a->cb, 0); 2099 return true; 2100 } 2101 2102 /* setpsw psw */ 2103 static bool trans_SETPSW(DisasContext *ctx, arg_SETPSW *a) 2104 { 2105 clrsetpsw(ctx, a->cb, 1); 2106 return true; 2107 } 2108 2109 /* mvtipl #imm */ 2110 static bool trans_MVTIPL(DisasContext *ctx, arg_MVTIPL *a) 2111 { 2112 if (is_privileged(ctx, 1)) { 2113 tcg_gen_movi_i32(cpu_psw_ipl, a->imm); 2114 ctx->base.is_jmp = DISAS_UPDATE; 2115 } 2116 return true; 2117 } 2118 2119 /* mvtc #imm, rd */ 2120 static bool trans_MVTC_i(DisasContext *ctx, arg_MVTC_i *a) 2121 { 2122 TCGv imm; 2123 2124 imm = tcg_constant_i32(a->imm); 2125 move_to_cr(ctx, imm, a->cr); 2126 return true; 2127 } 2128 2129 /* mvtc rs, rd */ 2130 static bool trans_MVTC_r(DisasContext *ctx, arg_MVTC_r *a) 2131 { 2132 move_to_cr(ctx, cpu_regs[a->rs], 
a->cr);
    return true;
}

/* mvfc rs, rd */
static bool trans_MVFC(DisasContext *ctx, arg_MVFC *a)
{
    /* ctx->pc is passed so reading PC as a control register works. */
    move_from_cr(ctx, cpu_regs[a->rd], a->cr, ctx->pc);
    return true;
}

/* rtfi */
/* Return from fast interrupt: restore pc/psw from BPC/BPSW. */
static bool trans_RTFI(DisasContext *ctx, arg_RTFI *a)
{
    TCGv psw;
    if (is_privileged(ctx, 1)) {
        psw = tcg_temp_new();
        tcg_gen_mov_i32(cpu_pc, cpu_bpc);
        tcg_gen_mov_i32(psw, cpu_bpsw);
        gen_helper_set_psw_rte(cpu_env, psw);
        ctx->base.is_jmp = DISAS_EXIT;
    }
    return true;
}

/* rte */
/* Return from exception: pop pc and psw from the stack. */
static bool trans_RTE(DisasContext *ctx, arg_RTE *a)
{
    TCGv psw;
    if (is_privileged(ctx, 1)) {
        psw = tcg_temp_new();
        pop(cpu_pc);
        pop(psw);
        gen_helper_set_psw_rte(cpu_env, psw);
        ctx->base.is_jmp = DISAS_EXIT;
    }
    return true;
}

/* brk */
static bool trans_BRK(DisasContext *ctx, arg_BRK *a)
{
    tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next);
    gen_helper_rxbrk(cpu_env);
    ctx->base.is_jmp = DISAS_NORETURN;
    return true;
}

/* int #imm */
static bool trans_INT(DisasContext *ctx, arg_INT *a)
{
    TCGv vec;

    /* The decoder only produces 8-bit vectors. */
    tcg_debug_assert(a->imm < 0x100);
    vec = tcg_constant_i32(a->imm);
    tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next);
    gen_helper_rxint(cpu_env, vec);
    ctx->base.is_jmp = DISAS_NORETURN;
    return true;
}

/* wait */
static bool trans_WAIT(DisasContext *ctx, arg_WAIT *a)
{
    if (is_privileged(ctx, 1)) {
        tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next);
        gen_helper_wait(cpu_env);
    }
    return true;
}

/* Translator hooks (TranslatorOps). */

static void rx_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    CPURXState *env = cs->env_ptr;
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    ctx->env = env;
    ctx->tb_flags = ctx->base.tb->flags;
}

static void rx_tr_tb_start(DisasContextBase
*dcbase, CPUState *cs)
{
}

static void rx_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(ctx->base.pc_next);
}

static void rx_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    uint32_t insn;

    /* Remember the insn start address; branch displacements are
       relative to it (see rx_bcnd_main). */
    ctx->pc = ctx->base.pc_next;
    insn = decode_load(ctx);
    if (!decode(ctx, insn)) {
        gen_helper_raise_illegal_instruction(cpu_env);
    }
}

static void rx_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    switch (ctx->base.is_jmp) {
    case DISAS_NEXT:
    case DISAS_TOO_MANY:
        gen_goto_tb(ctx, 0, dcbase->pc_next);
        break;
    case DISAS_JUMP:
        /* cpu_pc already holds the target; try a TB lookup. */
        tcg_gen_lookup_and_goto_ptr();
        break;
    case DISAS_UPDATE:
        tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next);
        /* fall through */
    case DISAS_EXIT:
        tcg_gen_exit_tb(NULL, 0);
        break;
    case DISAS_NORETURN:
        break;
    default:
        g_assert_not_reached();
    }
}

static void rx_tr_disas_log(const DisasContextBase *dcbase,
                            CPUState *cs, FILE *logfile)
{
    fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
    target_disas(logfile, cs, dcbase->pc_first, dcbase->tb->size);
}

static const TranslatorOps rx_tr_ops = {
    .init_disas_context = rx_tr_init_disas_context,
    .tb_start           = rx_tr_tb_start,
    .insn_start         = rx_tr_insn_start,
    .translate_insn     = rx_tr_translate_insn,
    .tb_stop            = rx_tr_tb_stop,
    .disas_log          = rx_tr_disas_log,
};

void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                           target_ulong pc, void *host_pc)
{
    DisasContext dc;

    translator_loop(cs, tb, max_insns, pc, host_pc, &rx_tr_ops, &dc.base);
}

/* Register a 32-bit CPURXState field as a named TCG global. */
#define ALLOC_REGISTER(sym, name) \
    cpu_##sym = tcg_global_mem_new_i32(cpu_env, \
                                       offsetof(CPURXState, sym), name)

/*
 * One-time translator initialisation: create the TCG globals backing
 * the general registers, the decomposed PSW flag fields, the control
 * registers and the 64-bit accumulator.
 */
void rx_translate_init(void)
{
    static const char * const regnames[NUM_REGS] = {
        "R0", "R1", "R2", "R3", "R4", "R5", "R6", "R7",
        "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15"
    };
    int i;

    for (i = 0; i < NUM_REGS; i++) {
        cpu_regs[i] = tcg_global_mem_new_i32(cpu_env,
                                             offsetof(CPURXState, regs[i]),
                                             regnames[i]);
    }
    ALLOC_REGISTER(pc, "PC");
    ALLOC_REGISTER(psw_o, "PSW(O)");
    ALLOC_REGISTER(psw_s, "PSW(S)");
    ALLOC_REGISTER(psw_z, "PSW(Z)");
    ALLOC_REGISTER(psw_c, "PSW(C)");
    ALLOC_REGISTER(psw_u, "PSW(U)");
    ALLOC_REGISTER(psw_i, "PSW(I)");
    ALLOC_REGISTER(psw_pm, "PSW(PM)");
    ALLOC_REGISTER(psw_ipl, "PSW(IPL)");
    ALLOC_REGISTER(usp, "USP");
    ALLOC_REGISTER(fpsw, "FPSW");
    ALLOC_REGISTER(bpsw, "BPSW");
    ALLOC_REGISTER(bpc, "BPC");
    ALLOC_REGISTER(isp, "ISP");
    ALLOC_REGISTER(fintv, "FINTV");
    ALLOC_REGISTER(intb, "INTB");
    /* ACC is 64-bit and cannot use the 32-bit macro above. */
    cpu_acc = tcg_global_mem_new_i64(cpu_env,
                                     offsetof(CPURXState, acc), "ACC");
}