/*
 * SH4 translation
 *
 * Copyright (c) 2005 Samuel Tardieu
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#define DEBUG_DISAS

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "exec/cpu_ldst.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/log.h"


typedef struct DisasContext {
    struct TranslationBlock *tb;
    target_ulong pc;
    uint16_t opcode;
    uint32_t tbflags;  /* should stay unmodified during the TB translation */
    uint32_t envflags; /* should stay in sync with env->flags using TCG ops */
    int bstate;
    int memidx;
    uint32_t delayed_pc;
    int singlestep_enabled;
    uint32_t features;
    int has_movcal;
} DisasContext;

#if defined(CONFIG_USER_ONLY)
#define IS_USER(ctx) 1
#else
#define IS_USER(ctx) (!(ctx->tbflags & (1u << SR_MD)))
#endif

enum {
    BS_NONE   = 0, /* We go out of the TB without reaching a branch or an
                    * exception condition
                    */
    BS_STOP   = 1, /* We want to stop translation for any reason */
    BS_BRANCH = 2, /* We reached a branch condition */
    BS_EXCP   = 3, /* We reached an exception condition */
};

/* global register indexes */
static TCGv_env cpu_env;
static TCGv cpu_gregs[24];
static TCGv cpu_sr, cpu_sr_m, cpu_sr_q, cpu_sr_t;
static TCGv cpu_pc, cpu_ssr, cpu_spc, cpu_gbr;
static TCGv cpu_vbr, cpu_sgr, cpu_dbr, cpu_mach, cpu_macl;
static TCGv cpu_pr, cpu_fpscr, cpu_fpul, cpu_ldst;
static TCGv cpu_fregs[32];

/* internal register indexes */
static TCGv cpu_flags, cpu_delayed_pc, cpu_delayed_cond;

#include "exec/gen-icount.h"

void sh4_translate_init(void)
{
    int i;
    static int done_init = 0;
    static const char * const gregnames[24] = {
        "R0_BANK0", "R1_BANK0", "R2_BANK0", "R3_BANK0",
        "R4_BANK0", "R5_BANK0", "R6_BANK0", "R7_BANK0",
        "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
        "R0_BANK1", "R1_BANK1", "R2_BANK1", "R3_BANK1",
        "R4_BANK1", "R5_BANK1", "R6_BANK1", "R7_BANK1"
    };
    static const char * const fregnames[32] = {
        "FPR0_BANK0", "FPR1_BANK0", "FPR2_BANK0", "FPR3_BANK0",
        "FPR4_BANK0", "FPR5_BANK0", "FPR6_BANK0", "FPR7_BANK0",
        "FPR8_BANK0", "FPR9_BANK0", "FPR10_BANK0", "FPR11_BANK0",
        "FPR12_BANK0", "FPR13_BANK0", "FPR14_BANK0", "FPR15_BANK0",
        "FPR0_BANK1", "FPR1_BANK1", "FPR2_BANK1", "FPR3_BANK1",
        "FPR4_BANK1", "FPR5_BANK1", "FPR6_BANK1", "FPR7_BANK1",
        "FPR8_BANK1", "FPR9_BANK1", "FPR10_BANK1", "FPR11_BANK1",
        "FPR12_BANK1", "FPR13_BANK1", "FPR14_BANK1", "FPR15_BANK1",
    };

    if (done_init) {
        return;
    }

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    tcg_ctx.tcg_env = cpu_env;

    for (i = 0; i < 24; i++) {
        cpu_gregs[i] = tcg_global_mem_new_i32(cpu_env,
                                              offsetof(CPUSH4State, gregs[i]),
                                              gregnames[i]);
    }
    cpu_pc = tcg_global_mem_new_i32(cpu_env,
                                    offsetof(CPUSH4State, pc), "PC");
    cpu_sr = tcg_global_mem_new_i32(cpu_env,
                                    offsetof(CPUSH4State, sr), "SR");
    cpu_sr_m = tcg_global_mem_new_i32(cpu_env,
                                      offsetof(CPUSH4State, sr_m), "SR_M");
    cpu_sr_q = tcg_global_mem_new_i32(cpu_env,
                                      offsetof(CPUSH4State, sr_q), "SR_Q");
    cpu_sr_t = tcg_global_mem_new_i32(cpu_env,
                                      offsetof(CPUSH4State, sr_t), "SR_T");
    cpu_ssr = tcg_global_mem_new_i32(cpu_env,
                                     offsetof(CPUSH4State, ssr), "SSR");
    cpu_spc = tcg_global_mem_new_i32(cpu_env,
                                     offsetof(CPUSH4State, spc), "SPC");
    cpu_gbr = tcg_global_mem_new_i32(cpu_env,
                                     offsetof(CPUSH4State, gbr), "GBR");
    cpu_vbr = tcg_global_mem_new_i32(cpu_env,
                                     offsetof(CPUSH4State, vbr), "VBR");
    cpu_sgr = tcg_global_mem_new_i32(cpu_env,
                                     offsetof(CPUSH4State, sgr), "SGR");
    cpu_dbr = tcg_global_mem_new_i32(cpu_env,
                                     offsetof(CPUSH4State, dbr), "DBR");
    cpu_mach = tcg_global_mem_new_i32(cpu_env,
                                      offsetof(CPUSH4State, mach), "MACH");
    cpu_macl = tcg_global_mem_new_i32(cpu_env,
                                      offsetof(CPUSH4State, macl), "MACL");
    cpu_pr = tcg_global_mem_new_i32(cpu_env,
                                    offsetof(CPUSH4State, pr), "PR");
    cpu_fpscr = tcg_global_mem_new_i32(cpu_env,
                                       offsetof(CPUSH4State, fpscr), "FPSCR");
    cpu_fpul = tcg_global_mem_new_i32(cpu_env,
                                      offsetof(CPUSH4State, fpul), "FPUL");

    cpu_flags = tcg_global_mem_new_i32(cpu_env,
                                       offsetof(CPUSH4State, flags), "_flags_");
    cpu_delayed_pc = tcg_global_mem_new_i32(cpu_env,
                                            offsetof(CPUSH4State, delayed_pc),
                                            "_delayed_pc_");
    cpu_delayed_cond = tcg_global_mem_new_i32(cpu_env,
                                              offsetof(CPUSH4State,
                                                       delayed_cond),
                                              "_delayed_cond_");
    cpu_ldst = tcg_global_mem_new_i32(cpu_env,
                                      offsetof(CPUSH4State, ldst), "_ldst_");

    for (i = 0; i < 32; i++) {
        cpu_fregs[i] = tcg_global_mem_new_i32(cpu_env,
                                              offsetof(CPUSH4State, fregs[i]),
                                              fregnames[i]);
    }

    done_init = 1;
}

void superh_cpu_dump_state(CPUState *cs, FILE *f,
                           fprintf_function cpu_fprintf, int flags)
{
    SuperHCPU *cpu = SUPERH_CPU(cs);
    CPUSH4State *env = &cpu->env;
    int i;
    cpu_fprintf(f, "pc=0x%08x sr=0x%08x pr=0x%08x fpscr=0x%08x\n",
                env->pc, cpu_read_sr(env), env->pr, env->fpscr);
    cpu_fprintf(f, "spc=0x%08x ssr=0x%08x gbr=0x%08x vbr=0x%08x\n",
                env->spc, env->ssr, env->gbr, env->vbr);
    cpu_fprintf(f, "sgr=0x%08x dbr=0x%08x delayed_pc=0x%08x fpul=0x%08x\n",
                env->sgr, env->dbr, env->delayed_pc, env->fpul);
    for (i = 0; i < 24; i += 4) {
        cpu_fprintf(f, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
                    i, env->gregs[i], i + 1, env->gregs[i + 1],
                    i + 2, env->gregs[i + 2], i + 3, env->gregs[i + 3]);
    }
    if (env->flags & DELAY_SLOT) {
        cpu_fprintf(f, "in delay slot (delayed_pc=0x%08x)\n",
                    env->delayed_pc);
    } else if (env->flags & DELAY_SLOT_CONDITIONAL) {
        cpu_fprintf(f, "in conditional delay slot (delayed_pc=0x%08x)\n",
                    env->delayed_pc);
    }
}

static void gen_read_sr(TCGv dst)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_shli_i32(t0, cpu_sr_q, SR_Q);
    tcg_gen_or_i32(dst, cpu_sr, t0);
    tcg_gen_shli_i32(t0, cpu_sr_m, SR_M);
    tcg_gen_or_i32(dst, dst, t0);
    tcg_gen_shli_i32(t0, cpu_sr_t, SR_T);
    tcg_gen_or_i32(dst, dst, t0);
    tcg_temp_free_i32(t0);
}
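
/* SR is kept split: the Q, M and T bits live in dedicated TCG globals
   (cpu_sr_q, cpu_sr_m, cpu_sr_t) while cpu_sr holds the remaining bits
   with Q/M/T cleared; gen_read_sr/gen_write_sr convert between the
   packed and the split representation. */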
static void gen_write_sr(TCGv src)
{
    tcg_gen_andi_i32(cpu_sr, src,
                     ~((1u << SR_Q) | (1u << SR_M) | (1u << SR_T)));
    tcg_gen_extract_i32(cpu_sr_q, src, SR_Q, 1);
    tcg_gen_extract_i32(cpu_sr_m, src, SR_M, 1);
    tcg_gen_extract_i32(cpu_sr_t, src, SR_T, 1);
}

static inline void gen_save_cpu_state(DisasContext *ctx, bool save_pc)
{
    if (save_pc) {
        tcg_gen_movi_i32(cpu_pc, ctx->pc);
    }
    if (ctx->delayed_pc != (uint32_t) -1) {
        tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
    }
    if ((ctx->tbflags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL))
        != ctx->envflags) {
        tcg_gen_movi_i32(cpu_flags, ctx->envflags);
    }
}

static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest)
{
    if (unlikely(ctx->singlestep_enabled)) {
        return false;
    }

#ifndef CONFIG_USER_ONLY
    return (ctx->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}

static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
{
    if (use_goto_tb(ctx, dest)) {
        /* Use a direct jump if in same page and singlestep not enabled */
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_exit_tb((uintptr_t)ctx->tb + n);
    } else {
        tcg_gen_movi_i32(cpu_pc, dest);
        if (ctx->singlestep_enabled) {
            gen_helper_debug(cpu_env);
        }
        tcg_gen_exit_tb(0);
    }
}

static void gen_jump(DisasContext * ctx)
{
    if (ctx->delayed_pc == (uint32_t) -1) {
        /* Target is not statically known, it comes necessarily from a
           delayed jump as immediate jumps are conditional jumps */
        tcg_gen_mov_i32(cpu_pc, cpu_delayed_pc);
        tcg_gen_discard_i32(cpu_delayed_pc);
        if (ctx->singlestep_enabled) {
            gen_helper_debug(cpu_env);
        }
        tcg_gen_exit_tb(0);
    } else {
        gen_goto_tb(ctx, 0, ctx->delayed_pc);
    }
}

/* Immediate conditional jump (bt or bf) */
static void gen_conditional_jump(DisasContext * ctx,
                                 target_ulong ift, target_ulong ifnott)
{
    TCGLabel *l1 = gen_new_label();
    gen_save_cpu_state(ctx, false);
    tcg_gen_brcondi_i32(TCG_COND_NE, cpu_sr_t, 0, l1);
    gen_goto_tb(ctx, 0, ifnott);
    gen_set_label(l1);
    gen_goto_tb(ctx, 1, ift);
    ctx->bstate = BS_BRANCH;
}

/* Delayed conditional jump (bt or bf) */
static void gen_delayed_conditional_jump(DisasContext * ctx)
{
    TCGLabel *l1;
    TCGv ds;

    l1 = gen_new_label();
    ds = tcg_temp_new();
    tcg_gen_mov_i32(ds, cpu_delayed_cond);
    tcg_gen_discard_i32(cpu_delayed_cond);
    tcg_gen_brcondi_i32(TCG_COND_NE, ds, 0, l1);
    gen_goto_tb(ctx, 1, ctx->pc + 2);
    gen_set_label(l1);
    gen_jump(ctx);
}

static inline void gen_load_fpr64(TCGv_i64 t, int reg)
{
    tcg_gen_concat_i32_i64(t, cpu_fregs[reg + 1], cpu_fregs[reg]);
}

static inline void gen_store_fpr64(TCGv_i64 t, int reg)
{
    tcg_gen_extr_i64_i32(cpu_fregs[reg + 1], cpu_fregs[reg], t);
}

#define B3_0 (ctx->opcode & 0xf)
#define B6_4 ((ctx->opcode >> 4) & 0x7)
#define B7_4 ((ctx->opcode >> 4) & 0xf)
#define B7_0 (ctx->opcode & 0xff)
#define B7_0s ((int32_t) (int8_t) (ctx->opcode & 0xff))
#define B11_0s (ctx->opcode & 0x800 ? 0xfffff000 | (ctx->opcode & 0xfff) : \
                (ctx->opcode & 0xfff))
#define B11_8 ((ctx->opcode >> 8) & 0xf)
#define B15_12 ((ctx->opcode >> 12) & 0xf)
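
/* R0..R7 exist in two banks; REG() selects BANK1 only when both SR.MD
   (privileged mode) and SR.RB are set, otherwise BANK0. ALTREG() picks
   the opposite bank and is used by the ldc/stc Rn_BANK instructions. */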
#define REG(x) ((x) < 8 && (ctx->tbflags & (1u << SR_MD))\
                && (ctx->tbflags & (1u << SR_RB))\
                ? (cpu_gregs[x + 16]) : (cpu_gregs[x]))

#define ALTREG(x) ((x) < 8 && (!(ctx->tbflags & (1u << SR_MD))\
                   || !(ctx->tbflags & (1u << SR_RB)))\
                   ? (cpu_gregs[x + 16]) : (cpu_gregs[x]))

#define FREG(x) (ctx->tbflags & FPSCR_FR ? (x) ^ 0x10 : (x))
#define XHACK(x) ((((x) & 1) << 4) | ((x) & 0xe))
#define XREG(x) (ctx->tbflags & FPSCR_FR ? XHACK(x) ^ 0x10 : XHACK(x))
#define DREG(x) FREG(x) /* Assumes lsb of (x) is always 0 */

#define CHECK_NOT_DELAY_SLOT \
    if (ctx->envflags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {     \
        gen_save_cpu_state(ctx, true);                               \
        gen_helper_raise_slot_illegal_instruction(cpu_env);          \
        ctx->bstate = BS_EXCP;                                       \
        return;                                                      \
    }

#define CHECK_PRIVILEGED \
    if (IS_USER(ctx)) {                                              \
        gen_save_cpu_state(ctx, true);                               \
        if (ctx->envflags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { \
            gen_helper_raise_slot_illegal_instruction(cpu_env);      \
        } else {                                                     \
            gen_helper_raise_illegal_instruction(cpu_env);           \
        }                                                            \
        ctx->bstate = BS_EXCP;                                       \
        return;                                                      \
    }

#define CHECK_FPU_ENABLED \
    if (ctx->tbflags & (1u << SR_FD)) {                              \
        gen_save_cpu_state(ctx, true);                               \
        if (ctx->envflags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { \
            gen_helper_raise_slot_fpu_disable(cpu_env);              \
        } else {                                                     \
            gen_helper_raise_fpu_disable(cpu_env);                   \
        }                                                            \
        ctx->bstate = BS_EXCP;                                       \
        return;                                                      \
    }
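
/* Each recognised opcode case below ends with "return"; falling through
   to the end of _decode_opc raises an illegal instruction (or slot
   illegal instruction) exception. */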
static void _decode_opc(DisasContext * ctx)
{
    /* This code tries to make movca.l emulation sufficiently
       accurate for Linux purposes. This instruction writes
       memory, and prior to that, always allocates a cache line.
       It is used in two contexts:
       - in memcpy, where data is copied in blocks, the first write
       to a block uses movca.l for performance.
       - in arch/sh/mm/cache-sh4.c, the movca.l + ocbi combination is used
       to flush the cache. Here, the data written by movca.l is never
       written to memory, and the data written is just bogus.

       To simulate this, when we emulate movca.l we store the value to
       memory, but we also remember the previous content. If we see ocbi,
       we check if movca.l for that address was done previously. If so,
       the write should not have hit the memory, so we restore the
       previous content. When we see an instruction that is neither
       movca.l nor ocbi, the previous content is discarded.

       To optimize, we only try to flush stores when we're at the start of
       TB, or if we already saw movca.l in this TB and did not flush stores
       yet. */
    if (ctx->has_movcal) {
        int opcode = ctx->opcode & 0xf0ff;
        if (opcode != 0x0093    /* ocbi */
            && opcode != 0x00c3 /* movca.l */) {
            gen_helper_discard_movcal_backup(cpu_env);
            ctx->has_movcal = 0;
        }
    }

#if 0
    fprintf(stderr, "Translating opcode 0x%04x\n", ctx->opcode);
#endif

    switch (ctx->opcode) {
    case 0x0019: /* div0u */
        tcg_gen_movi_i32(cpu_sr_m, 0);
        tcg_gen_movi_i32(cpu_sr_q, 0);
        tcg_gen_movi_i32(cpu_sr_t, 0);
        return;
    case 0x000b: /* rts */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_mov_i32(cpu_delayed_pc, cpu_pr);
        ctx->envflags |= DELAY_SLOT;
        ctx->delayed_pc = (uint32_t) -1;
        return;
    case 0x0028: /* clrmac */
        tcg_gen_movi_i32(cpu_mach, 0);
        tcg_gen_movi_i32(cpu_macl, 0);
        return;
    case 0x0048: /* clrs */
        tcg_gen_andi_i32(cpu_sr, cpu_sr, ~(1u << SR_S));
        return;
    case 0x0008: /* clrt */
        tcg_gen_movi_i32(cpu_sr_t, 0);
        return;
    case 0x0038: /* ldtlb */
        CHECK_PRIVILEGED
        gen_helper_ldtlb(cpu_env);
        return;
    case 0x002b: /* rte */
        CHECK_PRIVILEGED
        CHECK_NOT_DELAY_SLOT
        gen_write_sr(cpu_ssr);
        tcg_gen_mov_i32(cpu_delayed_pc, cpu_spc);
        ctx->envflags |= DELAY_SLOT;
        ctx->delayed_pc = (uint32_t) -1;
        return;
    case 0x0058: /* sets */
        tcg_gen_ori_i32(cpu_sr, cpu_sr, (1u << SR_S));
        return;
    case 0x0018: /* sett */
        tcg_gen_movi_i32(cpu_sr_t, 1);
        return;
    case 0xfbfd: /* frchg */
        tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_FR);
        ctx->bstate = BS_STOP;
        return;
    case 0xf3fd: /* fschg */
        tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_SZ);
        ctx->bstate = BS_STOP;
        return;
    case 0x0009: /* nop */
        return;
    case 0x001b: /* sleep */
        CHECK_PRIVILEGED
        tcg_gen_movi_i32(cpu_pc, ctx->pc + 2);
        gen_helper_sleep(cpu_env);
        return;
    }

    switch (ctx->opcode & 0xf000) {
    case 0x1000: /* mov.l Rm,@(disp,Rn) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, REG(B11_8), B3_0 * 4);
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
            tcg_temp_free(addr);
        }
        return;
    case 0x5000: /* mov.l @(disp,Rm),Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 4);
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
            tcg_temp_free(addr);
        }
        return;
    case 0xe000: /* mov #imm,Rn */
        tcg_gen_movi_i32(REG(B11_8), B7_0s);
        return;
    case 0x9000: /* mov.w @(disp,PC),Rn */
        {
            TCGv addr = tcg_const_i32(ctx->pc + 4 + B7_0 * 2);
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESW);
            tcg_temp_free(addr);
        }
        return;
    case 0xd000: /* mov.l @(disp,PC),Rn */
        {
            TCGv addr = tcg_const_i32((ctx->pc + 4 + B7_0 * 4) & ~3);
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
            tcg_temp_free(addr);
        }
        return;
    case 0x7000: /* add #imm,Rn */
        tcg_gen_addi_i32(REG(B11_8), REG(B11_8), B7_0s);
        return;
    case 0xa000: /* bra disp */
        CHECK_NOT_DELAY_SLOT
        ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
        ctx->envflags |= DELAY_SLOT;
        return;
    case 0xb000: /* bsr disp */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
        ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
        ctx->envflags |= DELAY_SLOT;
        return;
    }
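
    /* Delayed branches: bra/bsr above only record the branch target in
       ctx->delayed_pc and set DELAY_SLOT in ctx->envflags; the actual
       jump is emitted by decode_opc() once the delay-slot instruction
       has been translated. */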
    switch (ctx->opcode & 0xf00f) {
    case 0x6003: /* mov Rm,Rn */
        tcg_gen_mov_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x2000: /* mov.b Rm,@Rn */
        tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_UB);
        return;
    case 0x2001: /* mov.w Rm,@Rn */
        tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_TEUW);
        return;
    case 0x2002: /* mov.l Rm,@Rn */
        tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_TEUL);
        return;
    case 0x6000: /* mov.b @Rm,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
        return;
    case 0x6001: /* mov.w @Rm,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESW);
        return;
    case 0x6002: /* mov.l @Rm,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESL);
        return;
    case 0x2004: /* mov.b Rm,@-Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_subi_i32(addr, REG(B11_8), 1);
            /* might cause re-execution */
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
            tcg_gen_mov_i32(REG(B11_8), addr); /* modify register status */
            tcg_temp_free(addr);
        }
        return;
    case 0x2005: /* mov.w Rm,@-Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_subi_i32(addr, REG(B11_8), 2);
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUW);
            tcg_gen_mov_i32(REG(B11_8), addr);
            tcg_temp_free(addr);
        }
        return;
    case 0x2006: /* mov.l Rm,@-Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_subi_i32(addr, REG(B11_8), 4);
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
            tcg_gen_mov_i32(REG(B11_8), addr);
            tcg_temp_free(addr);
        }
        return;
    case 0x6004: /* mov.b @Rm+,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
        if (B11_8 != B7_4) {
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 1);
        }
        return;
    case 0x6005: /* mov.w @Rm+,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESW);
        if (B11_8 != B7_4) {
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
        }
        return;
    case 0x6006: /* mov.l @Rm+,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESL);
        if (B11_8 != B7_4) {
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
        }
        return;
    case 0x0004: /* mov.b Rm,@(R0,Rn) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B11_8), REG(0));
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
            tcg_temp_free(addr);
        }
        return;
    case 0x0005: /* mov.w Rm,@(R0,Rn) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B11_8), REG(0));
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUW);
            tcg_temp_free(addr);
        }
        return;
    case 0x0006: /* mov.l Rm,@(R0,Rn) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B11_8), REG(0));
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
            tcg_temp_free(addr);
        }
        return;
    case 0x000c: /* mov.b @(R0,Rm),Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B7_4), REG(0));
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_SB);
            tcg_temp_free(addr);
        }
        return;
    case 0x000d: /* mov.w @(R0,Rm),Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B7_4), REG(0));
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESW);
            tcg_temp_free(addr);
        }
        return;
    case 0x000e: /* mov.l @(R0,Rm),Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B7_4), REG(0));
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
            tcg_temp_free(addr);
        }
        return;
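    /* swap.b swaps the two low bytes of Rm into Rn; the upper 16 bits
       are copied from Rm unchanged. */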
    case 0x6008: /* swap.b Rm,Rn */
        {
            TCGv low = tcg_temp_new();
            tcg_gen_ext16u_i32(low, REG(B7_4));
            tcg_gen_bswap16_i32(low, low);
            tcg_gen_deposit_i32(REG(B11_8), REG(B7_4), low, 0, 16);
            tcg_temp_free(low);
        }
        return;
    case 0x6009: /* swap.w Rm,Rn */
        tcg_gen_rotli_i32(REG(B11_8), REG(B7_4), 16);
        return;
    case 0x200d: /* xtrct Rm,Rn */
        {
            TCGv high, low;
            high = tcg_temp_new();
            tcg_gen_shli_i32(high, REG(B7_4), 16);
            low = tcg_temp_new();
            tcg_gen_shri_i32(low, REG(B11_8), 16);
            tcg_gen_or_i32(REG(B11_8), high, low);
            tcg_temp_free(low);
            tcg_temp_free(high);
        }
        return;
    case 0x300c: /* add Rm,Rn */
        tcg_gen_add_i32(REG(B11_8), REG(B11_8), REG(B7_4));
        return;
    case 0x300e: /* addc Rm,Rn */
        {
            TCGv t0, t1;
            t0 = tcg_const_tl(0);
            t1 = tcg_temp_new();
            tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0);
            tcg_gen_add2_i32(REG(B11_8), cpu_sr_t,
                             REG(B11_8), t0, t1, cpu_sr_t);
            tcg_temp_free(t0);
            tcg_temp_free(t1);
        }
        return;
    case 0x300f: /* addv Rm,Rn */
        {
            TCGv t0, t1, t2;
            t0 = tcg_temp_new();
            tcg_gen_add_i32(t0, REG(B7_4), REG(B11_8));
            t1 = tcg_temp_new();
            tcg_gen_xor_i32(t1, t0, REG(B11_8));
            t2 = tcg_temp_new();
            tcg_gen_xor_i32(t2, REG(B7_4), REG(B11_8));
            tcg_gen_andc_i32(cpu_sr_t, t1, t2);
            tcg_temp_free(t2);
            tcg_gen_shri_i32(cpu_sr_t, cpu_sr_t, 31);
            tcg_temp_free(t1);
            tcg_gen_mov_i32(REG(B11_8), t0);
            tcg_temp_free(t0);
        }
        return;
    case 0x2009: /* and Rm,Rn */
        tcg_gen_and_i32(REG(B11_8), REG(B11_8), REG(B7_4));
        return;
    case 0x3000: /* cmp/eq Rm,Rn */
        tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, REG(B11_8), REG(B7_4));
        return;
    case 0x3003: /* cmp/ge Rm,Rn */
        tcg_gen_setcond_i32(TCG_COND_GE, cpu_sr_t, REG(B11_8), REG(B7_4));
        return;
    case 0x3007: /* cmp/gt Rm,Rn */
        tcg_gen_setcond_i32(TCG_COND_GT, cpu_sr_t, REG(B11_8), REG(B7_4));
        return;
    case 0x3006: /* cmp/hi Rm,Rn */
        tcg_gen_setcond_i32(TCG_COND_GTU, cpu_sr_t, REG(B11_8), REG(B7_4));
        return;
    case 0x3002: /* cmp/hs Rm,Rn */
        tcg_gen_setcond_i32(TCG_COND_GEU, cpu_sr_t, REG(B11_8), REG(B7_4));
        return;
    case 0x200c: /* cmp/str Rm,Rn */
        {
            TCGv cmp1 = tcg_temp_new();
            TCGv cmp2 = tcg_temp_new();
            /* T is set if any byte of Rm equals the corresponding byte
               of Rn, i.e. if Rm ^ Rn contains a zero byte. */
            tcg_gen_xor_i32(cmp2, REG(B7_4), REG(B11_8));
            tcg_gen_subi_i32(cmp1, cmp2, 0x01010101);
            tcg_gen_andc_i32(cmp1, cmp1, cmp2);
            tcg_gen_andi_i32(cmp1, cmp1, 0x80808080);
            tcg_gen_setcondi_i32(TCG_COND_NE, cpu_sr_t, cmp1, 0);
            tcg_temp_free(cmp2);
            tcg_temp_free(cmp1);
        }
        return;
    case 0x2007: /* div0s Rm,Rn */
        tcg_gen_shri_i32(cpu_sr_q, REG(B11_8), 31);    /* SR_Q */
        tcg_gen_shri_i32(cpu_sr_m, REG(B7_4), 31);     /* SR_M */
        tcg_gen_xor_i32(cpu_sr_t, cpu_sr_q, cpu_sr_m); /* SR_T */
        return;
    case 0x3004: /* div1 Rm,Rn */
        {
            TCGv t0 = tcg_temp_new();
            TCGv t1 = tcg_temp_new();
            TCGv t2 = tcg_temp_new();
            TCGv zero = tcg_const_i32(0);

            /* shift left arg1, saving the bit being pushed out and inserting
               T on the right */
            tcg_gen_shri_i32(t0, REG(B11_8), 31);
            tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
            tcg_gen_or_i32(REG(B11_8), REG(B11_8), cpu_sr_t);

            /* Add or subtract arg0 from arg1 depending if Q == M. To avoid
               using 64-bit temps, we compute arg0's high part from q ^ m, so
               that it is 0x00000000 when adding the value or 0xffffffff when
               subtracting it. */
            tcg_gen_xor_i32(t1, cpu_sr_q, cpu_sr_m);
            tcg_gen_subi_i32(t1, t1, 1);
            tcg_gen_neg_i32(t2, REG(B7_4));
            tcg_gen_movcond_i32(TCG_COND_EQ, t2, t1, zero, REG(B7_4), t2);
            tcg_gen_add2_i32(REG(B11_8), t1, REG(B11_8), zero, t2, t1);

            /* compute T and Q depending on carry */
            tcg_gen_andi_i32(t1, t1, 1);
            tcg_gen_xor_i32(t1, t1, t0);
            tcg_gen_xori_i32(cpu_sr_t, t1, 1);
            tcg_gen_xor_i32(cpu_sr_q, cpu_sr_m, t1);

            tcg_temp_free(zero);
            tcg_temp_free(t2);
            tcg_temp_free(t1);
            tcg_temp_free(t0);
        }
        return;
    case 0x300d: /* dmuls.l Rm,Rn */
        tcg_gen_muls2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
        return;
    case 0x3005: /* dmulu.l Rm,Rn */
        tcg_gen_mulu2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
        return;
    case 0x600e: /* exts.b Rm,Rn */
        tcg_gen_ext8s_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x600f: /* exts.w Rm,Rn */
        tcg_gen_ext16s_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x600c: /* extu.b Rm,Rn */
        tcg_gen_ext8u_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x600d: /* extu.w Rm,Rn */
        tcg_gen_ext16u_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x000f: /* mac.l @Rm+,@Rn+ */
        {
            TCGv arg0, arg1;
            arg0 = tcg_temp_new();
            tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx, MO_TESL);
            arg1 = tcg_temp_new();
            tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx, MO_TESL);
            gen_helper_macl(cpu_env, arg0, arg1);
            tcg_temp_free(arg1);
            tcg_temp_free(arg0);
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
            tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
        }
        return;
    case 0x400f: /* mac.w @Rm+,@Rn+ */
        {
            TCGv arg0, arg1;
            arg0 = tcg_temp_new();
            tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx, MO_TESL);
            arg1 = tcg_temp_new();
            tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx, MO_TESL);
            gen_helper_macw(cpu_env, arg0, arg1);
            tcg_temp_free(arg1);
            tcg_temp_free(arg0);
            tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 2);
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
        }
        return;
    case 0x0007: /* mul.l Rm,Rn */
        tcg_gen_mul_i32(cpu_macl, REG(B7_4), REG(B11_8));
        return;
    case 0x200f: /* muls.w Rm,Rn */
        {
            TCGv arg0, arg1;
            arg0 = tcg_temp_new();
            tcg_gen_ext16s_i32(arg0, REG(B7_4));
            arg1 = tcg_temp_new();
            tcg_gen_ext16s_i32(arg1, REG(B11_8));
            tcg_gen_mul_i32(cpu_macl, arg0, arg1);
            tcg_temp_free(arg1);
            tcg_temp_free(arg0);
        }
        return;
    case 0x200e: /* mulu.w Rm,Rn */
        {
            TCGv arg0, arg1;
            arg0 = tcg_temp_new();
            tcg_gen_ext16u_i32(arg0, REG(B7_4));
            arg1 = tcg_temp_new();
            tcg_gen_ext16u_i32(arg1, REG(B11_8));
            tcg_gen_mul_i32(cpu_macl, arg0, arg1);
            tcg_temp_free(arg1);
            tcg_temp_free(arg0);
        }
        return;
    case 0x600b: /* neg Rm,Rn */
        tcg_gen_neg_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x600a: /* negc Rm,Rn */
        {
            TCGv t0 = tcg_const_i32(0);
            tcg_gen_add2_i32(REG(B11_8), cpu_sr_t,
                             REG(B7_4), t0, cpu_sr_t, t0);
            tcg_gen_sub2_i32(REG(B11_8), cpu_sr_t,
                             t0, t0, REG(B11_8), cpu_sr_t);
            tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
            tcg_temp_free(t0);
        }
        return;
    case 0x6007: /* not Rm,Rn */
        tcg_gen_not_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x200b: /* or Rm,Rn */
        tcg_gen_or_i32(REG(B11_8), REG(B11_8), REG(B7_4));
        return;
    case 0x400c: /* shad Rm,Rn */
        {
            TCGv t0 = tcg_temp_new();
            TCGv t1 = tcg_temp_new();
            TCGv t2 = tcg_temp_new();

            tcg_gen_andi_i32(t0, REG(B7_4), 0x1f);

            /* positive case: shift to the left */
            tcg_gen_shl_i32(t1, REG(B11_8), t0);

            /* negative case: shift to the right in two steps to
               correctly handle the -32 case */
            tcg_gen_xori_i32(t0, t0, 0x1f);
            tcg_gen_sar_i32(t2, REG(B11_8), t0);
            tcg_gen_sari_i32(t2, t2, 1);

            /* select between the two cases */
            tcg_gen_movi_i32(t0, 0);
            tcg_gen_movcond_i32(TCG_COND_GE, REG(B11_8), REG(B7_4), t0, t1, t2);

            tcg_temp_free(t0);
            tcg_temp_free(t1);
            tcg_temp_free(t2);
        }
        return;
    case 0x400d: /* shld Rm,Rn */
        {
            TCGv t0 = tcg_temp_new();
            TCGv t1 = tcg_temp_new();
            TCGv t2 = tcg_temp_new();

            tcg_gen_andi_i32(t0, REG(B7_4), 0x1f);

            /* positive case: shift to the left */
            tcg_gen_shl_i32(t1, REG(B11_8), t0);

            /* negative case: shift to the right in two steps to
               correctly handle the -32 case */
            tcg_gen_xori_i32(t0, t0, 0x1f);
            tcg_gen_shr_i32(t2, REG(B11_8), t0);
            tcg_gen_shri_i32(t2, t2, 1);

            /* select between the two cases */
            tcg_gen_movi_i32(t0, 0);
            tcg_gen_movcond_i32(TCG_COND_GE, REG(B11_8), REG(B7_4), t0, t1, t2);

            tcg_temp_free(t0);
            tcg_temp_free(t1);
            tcg_temp_free(t2);
        }
        return;
    case 0x3008: /* sub Rm,Rn */
        tcg_gen_sub_i32(REG(B11_8), REG(B11_8), REG(B7_4));
        return;
    case 0x300a: /* subc Rm,Rn */
        {
            TCGv t0, t1;
            t0 = tcg_const_tl(0);
            t1 = tcg_temp_new();
            tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0);
            tcg_gen_sub2_i32(REG(B11_8), cpu_sr_t,
                             REG(B11_8), t0, t1, cpu_sr_t);
            tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
            tcg_temp_free(t0);
            tcg_temp_free(t1);
        }
        return;
    case 0x300b: /* subv Rm,Rn */
        {
            TCGv t0, t1, t2;
            t0 = tcg_temp_new();
            tcg_gen_sub_i32(t0, REG(B11_8), REG(B7_4));
            t1 = tcg_temp_new();
            tcg_gen_xor_i32(t1, t0, REG(B7_4));
            t2 = tcg_temp_new();
            tcg_gen_xor_i32(t2, REG(B11_8), REG(B7_4));
            tcg_gen_and_i32(t1, t1, t2);
            tcg_temp_free(t2);
            tcg_gen_shri_i32(cpu_sr_t, t1, 31);
            tcg_temp_free(t1);
            tcg_gen_mov_i32(REG(B11_8), t0);
            tcg_temp_free(t0);
        }
        return;
    case 0x2008: /* tst Rm,Rn */
        {
            TCGv val = tcg_temp_new();
            tcg_gen_and_i32(val, REG(B7_4), REG(B11_8));
            tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
            tcg_temp_free(val);
        }
        return;
    case 0x200a: /* xor Rm,Rn */
        tcg_gen_xor_i32(REG(B11_8), REG(B11_8), REG(B7_4));
        return;
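    /* fmov group: with FPSCR.SZ set these are 64-bit moves operating on
       a pair of single-precision registers (DR/XD, selected via XREG);
       otherwise they move a single FRn. */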
    case 0xf00c: /* fmov {F,D,X}Rm,{F,D,X}Rn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        if (ctx->tbflags & FPSCR_SZ) {
            TCGv_i64 fp = tcg_temp_new_i64();
            gen_load_fpr64(fp, XREG(B7_4));
            gen_store_fpr64(fp, XREG(B11_8));
            tcg_temp_free_i64(fp);
        } else {
            tcg_gen_mov_i32(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
        }
        return;
    case 0xf00a: /* fmov {F,D,X}Rm,@Rn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        if (ctx->tbflags & FPSCR_SZ) {
            TCGv addr_hi = tcg_temp_new();
            int fr = XREG(B7_4);
            tcg_gen_addi_i32(addr_hi, REG(B11_8), 4);
            tcg_gen_qemu_st_i32(cpu_fregs[fr], REG(B11_8),
                                ctx->memidx, MO_TEUL);
            tcg_gen_qemu_st_i32(cpu_fregs[fr + 1], addr_hi,
                                ctx->memidx, MO_TEUL);
            tcg_temp_free(addr_hi);
        } else {
            tcg_gen_qemu_st_i32(cpu_fregs[FREG(B7_4)], REG(B11_8),
                                ctx->memidx, MO_TEUL);
        }
        return;
    case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        if (ctx->tbflags & FPSCR_SZ) {
            TCGv addr_hi = tcg_temp_new();
            int fr = XREG(B11_8);
            tcg_gen_addi_i32(addr_hi, REG(B7_4), 4);
            tcg_gen_qemu_ld_i32(cpu_fregs[fr], REG(B7_4), ctx->memidx, MO_TEUL);
            tcg_gen_qemu_ld_i32(cpu_fregs[fr + 1], addr_hi, ctx->memidx, MO_TEUL);
            tcg_temp_free(addr_hi);
        } else {
            tcg_gen_qemu_ld_i32(cpu_fregs[FREG(B11_8)], REG(B7_4),
                                ctx->memidx, MO_TEUL);
        }
        return;
    case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        if (ctx->tbflags & FPSCR_SZ) {
            TCGv addr_hi = tcg_temp_new();
            int fr = XREG(B11_8);
            tcg_gen_addi_i32(addr_hi, REG(B7_4), 4);
            tcg_gen_qemu_ld_i32(cpu_fregs[fr], REG(B7_4), ctx->memidx, MO_TEUL);
            tcg_gen_qemu_ld_i32(cpu_fregs[fr + 1], addr_hi, ctx->memidx, MO_TEUL);
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 8);
            tcg_temp_free(addr_hi);
        } else {
            tcg_gen_qemu_ld_i32(cpu_fregs[FREG(B11_8)], REG(B7_4),
                                ctx->memidx, MO_TEUL);
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
        }
        return;
    case 0xf00b: /* fmov {F,D,X}Rm,@-Rn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        {
            TCGv addr = tcg_temp_new_i32();
            tcg_gen_subi_i32(addr, REG(B11_8), 4);
            if (ctx->tbflags & FPSCR_SZ) {
                int fr = XREG(B7_4);
                tcg_gen_qemu_st_i32(cpu_fregs[fr + 1], addr, ctx->memidx, MO_TEUL);
                tcg_gen_subi_i32(addr, addr, 4);
                tcg_gen_qemu_st_i32(cpu_fregs[fr], addr, ctx->memidx, MO_TEUL);
            } else {
                tcg_gen_qemu_st_i32(cpu_fregs[FREG(B7_4)], addr,
                                    ctx->memidx, MO_TEUL);
            }
            tcg_gen_mov_i32(REG(B11_8), addr);
            tcg_temp_free(addr);
        }
        return;
    case 0xf006: /* fmov @(R0,Rm),{F,D,X}Rn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        {
            TCGv addr = tcg_temp_new_i32();
            tcg_gen_add_i32(addr, REG(B7_4), REG(0));
            if (ctx->tbflags & FPSCR_SZ) {
                int fr = XREG(B11_8);
                tcg_gen_qemu_ld_i32(cpu_fregs[fr], addr,
                                    ctx->memidx, MO_TEUL);
                tcg_gen_addi_i32(addr, addr, 4);
                tcg_gen_qemu_ld_i32(cpu_fregs[fr + 1], addr,
                                    ctx->memidx, MO_TEUL);
            } else {
                tcg_gen_qemu_ld_i32(cpu_fregs[FREG(B11_8)], addr,
                                    ctx->memidx, MO_TEUL);
            }
            tcg_temp_free(addr);
        }
        return;
    case 0xf007: /* fmov {F,D,X}Rm,@(R0,Rn) - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B11_8), REG(0));
            if (ctx->tbflags & FPSCR_SZ) {
                int fr = XREG(B7_4);
                tcg_gen_qemu_st_i32(cpu_fregs[fr], addr,
                                    ctx->memidx, MO_TEUL);
                tcg_gen_addi_i32(addr, addr, 4);
                tcg_gen_qemu_st_i32(cpu_fregs[fr + 1], addr,
                                    ctx->memidx, MO_TEUL);
            } else {
                tcg_gen_qemu_st_i32(cpu_fregs[FREG(B7_4)], addr,
                                    ctx->memidx, MO_TEUL);
            }
            tcg_temp_free(addr);
        }
        return;
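    /* Binary FP arithmetic: with FPSCR.PR set these operate on double
       precision DRn/DRm; in that mode encodings with the low bit of
       either register field set (opcode & 0x0110) are illegal. */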
    case 0xf000: /* fadd Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
    case 0xf001: /* fsub Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
    case 0xf002: /* fmul Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
    case 0xf003: /* fdiv Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
    case 0xf004: /* fcmp/eq Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
    case 0xf005: /* fcmp/gt Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
        {
            CHECK_FPU_ENABLED
            if (ctx->tbflags & FPSCR_PR) {
                TCGv_i64 fp0, fp1;

                if (ctx->opcode & 0x0110) {
                    break; /* illegal instruction */
                }
                fp0 = tcg_temp_new_i64();
                fp1 = tcg_temp_new_i64();
                gen_load_fpr64(fp0, DREG(B11_8));
                gen_load_fpr64(fp1, DREG(B7_4));
                switch (ctx->opcode & 0xf00f) {
                case 0xf000: /* fadd Rm,Rn */
                    gen_helper_fadd_DT(fp0, cpu_env, fp0, fp1);
                    break;
                case 0xf001: /* fsub Rm,Rn */
                    gen_helper_fsub_DT(fp0, cpu_env, fp0, fp1);
                    break;
                case 0xf002: /* fmul Rm,Rn */
                    gen_helper_fmul_DT(fp0, cpu_env, fp0, fp1);
                    break;
                case 0xf003: /* fdiv Rm,Rn */
                    gen_helper_fdiv_DT(fp0, cpu_env, fp0, fp1);
                    break;
                case 0xf004: /* fcmp/eq Rm,Rn */
                    gen_helper_fcmp_eq_DT(cpu_env, fp0, fp1);
                    return;
                case 0xf005: /* fcmp/gt Rm,Rn */
                    gen_helper_fcmp_gt_DT(cpu_env, fp0, fp1);
                    return;
                }
                gen_store_fpr64(fp0, DREG(B11_8));
                tcg_temp_free_i64(fp0);
                tcg_temp_free_i64(fp1);
            } else {
                switch (ctx->opcode & 0xf00f) {
                case 0xf000: /* fadd Rm,Rn */
                    gen_helper_fadd_FT(cpu_fregs[FREG(B11_8)], cpu_env,
                                       cpu_fregs[FREG(B11_8)],
                                       cpu_fregs[FREG(B7_4)]);
                    break;
                case 0xf001: /* fsub Rm,Rn */
                    gen_helper_fsub_FT(cpu_fregs[FREG(B11_8)], cpu_env,
                                       cpu_fregs[FREG(B11_8)],
                                       cpu_fregs[FREG(B7_4)]);
                    break;
                case 0xf002: /* fmul Rm,Rn */
                    gen_helper_fmul_FT(cpu_fregs[FREG(B11_8)], cpu_env,
                                       cpu_fregs[FREG(B11_8)],
                                       cpu_fregs[FREG(B7_4)]);
                    break;
                case 0xf003: /* fdiv Rm,Rn */
                    gen_helper_fdiv_FT(cpu_fregs[FREG(B11_8)], cpu_env,
                                       cpu_fregs[FREG(B11_8)],
                                       cpu_fregs[FREG(B7_4)]);
                    break;
                case 0xf004: /* fcmp/eq Rm,Rn */
                    gen_helper_fcmp_eq_FT(cpu_env, cpu_fregs[FREG(B11_8)],
                                          cpu_fregs[FREG(B7_4)]);
                    return;
                case 0xf005: /* fcmp/gt Rm,Rn */
                    gen_helper_fcmp_gt_FT(cpu_env, cpu_fregs[FREG(B11_8)],
                                          cpu_fregs[FREG(B7_4)]);
                    return;
                }
            }
        }
        return;
    case 0xf00e: /* fmac FR0,FRm,FRn */
        {
            CHECK_FPU_ENABLED
            if (ctx->tbflags & FPSCR_PR) {
                break; /* illegal instruction */
            } else {
                gen_helper_fmac_FT(cpu_fregs[FREG(B11_8)], cpu_env,
                                   cpu_fregs[FREG(0)], cpu_fregs[FREG(B7_4)],
                                   cpu_fregs[FREG(B11_8)]);
                return;
            }
        }
    }

    switch (ctx->opcode & 0xff00) {
    case 0xc900: /* and #imm,R0 */
        tcg_gen_andi_i32(REG(0), REG(0), B7_0);
        return;
    case 0xcd00: /* and.b #imm,@(R0,GBR) */
        {
            TCGv addr, val;
            addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(0), cpu_gbr);
            val = tcg_temp_new();
            tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
            tcg_gen_andi_i32(val, val, B7_0);
            tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
            tcg_temp_free(val);
            tcg_temp_free(addr);
        }
        return;
    case 0x8b00: /* bf label */
        CHECK_NOT_DELAY_SLOT
        gen_conditional_jump(ctx, ctx->pc + 2, ctx->pc + 4 + B7_0s * 2);
        return;
    case 0x8f00: /* bf/s label */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_xori_i32(cpu_delayed_cond, cpu_sr_t, 1);
        ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2;
        ctx->envflags |= DELAY_SLOT_CONDITIONAL;
        return;
    case 0x8900: /* bt label */
        CHECK_NOT_DELAY_SLOT
        gen_conditional_jump(ctx, ctx->pc + 4 + B7_0s * 2, ctx->pc + 2);
        return;
    case 0x8d00: /* bt/s label */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_mov_i32(cpu_delayed_cond, cpu_sr_t);
        ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2;
        ctx->envflags |= DELAY_SLOT_CONDITIONAL;
        return;
    case 0x8800: /* cmp/eq #imm,R0 */
        tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, REG(0), B7_0s);
        return;
    case 0xc400: /* mov.b @(disp,GBR),R0 */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
            tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
            tcg_temp_free(addr);
        }
        return;
    case 0xc500: /* mov.w @(disp,GBR),R0 */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
            tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW);
            tcg_temp_free(addr);
        }
        return;
    case 0xc600: /* mov.l @(disp,GBR),R0 */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
            tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESL);
            tcg_temp_free(addr);
        }
        return;
    case 0xc000: /* mov.b R0,@(disp,GBR) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
            tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
            tcg_temp_free(addr);
        }
        return;
    case 0xc100: /* mov.w R0,@(disp,GBR) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
            tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW);
            tcg_temp_free(addr);
        }
        return;
    case 0xc200: /* mov.l R0,@(disp,GBR) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
            tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUL);
            tcg_temp_free(addr);
        }
        return;
    case 0x8000: /* mov.b R0,@(disp,Rn) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
            tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
            tcg_temp_free(addr);
        }
        return;
    case 0x8100: /* mov.w R0,@(disp,Rn) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
            tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW);
            tcg_temp_free(addr);
        }
        return;
    case 0x8400: /* mov.b @(disp,Rn),R0 */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
            tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
            tcg_temp_free(addr);
        }
        return;
    case 0x8500: /* mov.w @(disp,Rn),R0 */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
            tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW);
            tcg_temp_free(addr);
        }
        return;
    case 0xc700: /* mova @(disp,PC),R0 */
        tcg_gen_movi_i32(REG(0), ((ctx->pc & 0xfffffffc) + 4 + B7_0 * 4) & ~3);
        return;
    case 0xcb00: /* or #imm,R0 */
        tcg_gen_ori_i32(REG(0), REG(0), B7_0);
        return;
    case 0xcf00: /* or.b #imm,@(R0,GBR) */
        {
            TCGv addr, val;
            addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(0), cpu_gbr);
            val = tcg_temp_new();
            tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
            tcg_gen_ori_i32(val, val, B7_0);
            tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
            tcg_temp_free(val);
            tcg_temp_free(addr);
        }
        return;
    case 0xc300: /* trapa #imm */
        {
            TCGv imm;
            CHECK_NOT_DELAY_SLOT
            gen_save_cpu_state(ctx, true);
            imm = tcg_const_i32(B7_0);
            gen_helper_trapa(cpu_env, imm);
            tcg_temp_free(imm);
            ctx->bstate = BS_EXCP;
        }
        return;
    case 0xc800: /* tst #imm,R0 */
        {
            TCGv val = tcg_temp_new();
            tcg_gen_andi_i32(val, REG(0), B7_0);
            tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
            tcg_temp_free(val);
        }
        return;
    case 0xcc00: /* tst.b #imm,@(R0,GBR) */
        {
            TCGv val = tcg_temp_new();
            tcg_gen_add_i32(val, REG(0), cpu_gbr);
            tcg_gen_qemu_ld_i32(val, val, ctx->memidx, MO_UB);
            tcg_gen_andi_i32(val, val, B7_0);
            tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
            tcg_temp_free(val);
        }
        return;
    case 0xca00: /* xor #imm,R0 */
        tcg_gen_xori_i32(REG(0), REG(0), B7_0);
        return;
    case 0xce00: /* xor.b #imm,@(R0,GBR) */
        {
            TCGv addr, val;
            addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(0), cpu_gbr);
            val = tcg_temp_new();
            tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
            tcg_gen_xori_i32(val, val, B7_0);
            tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
            tcg_temp_free(val);
            tcg_temp_free(addr);
        }
        return;
    }

    switch (ctx->opcode & 0xf08f) {
    case 0x408e: /* ldc Rm,Rn_BANK */
        CHECK_PRIVILEGED
        tcg_gen_mov_i32(ALTREG(B6_4), REG(B11_8));
        return;
    case 0x4087: /* ldc.l @Rm+,Rn_BANK */
        CHECK_PRIVILEGED
        tcg_gen_qemu_ld_i32(ALTREG(B6_4), REG(B11_8), ctx->memidx, MO_TESL);
        tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
        return;
    case 0x0082: /* stc Rm_BANK,Rn */
        CHECK_PRIVILEGED
        tcg_gen_mov_i32(REG(B11_8), ALTREG(B6_4));
        return;
    case 0x4083: /* stc.l Rm_BANK,@-Rn */
        CHECK_PRIVILEGED
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_subi_i32(addr, REG(B11_8), 4);
            tcg_gen_qemu_st_i32(ALTREG(B6_4), addr, ctx->memidx, MO_TEUL);
            tcg_gen_mov_i32(REG(B11_8), addr);
            tcg_temp_free(addr);
        }
        return;
    }

    switch (ctx->opcode & 0xf0ff) {
    case 0x0023: /* braf Rn */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_addi_i32(cpu_delayed_pc, REG(B11_8), ctx->pc + 4);
        ctx->envflags |= DELAY_SLOT;
        ctx->delayed_pc = (uint32_t) -1;
        return;
    case 0x0003: /* bsrf Rn */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
        tcg_gen_add_i32(cpu_delayed_pc, REG(B11_8), cpu_pr);
        ctx->envflags |= DELAY_SLOT;
        ctx->delayed_pc = (uint32_t) -1;
        return;
    case 0x4015: /* cmp/pl Rn */
        tcg_gen_setcondi_i32(TCG_COND_GT, cpu_sr_t, REG(B11_8), 0);
        return;
    case 0x4011: /* cmp/pz Rn */
        tcg_gen_setcondi_i32(TCG_COND_GE, cpu_sr_t, REG(B11_8), 0);
        return;
    case 0x4010: /* dt Rn */
        tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 1);
        tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, REG(B11_8), 0);
        return;
    case 0x402b: /* jmp @Rn */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
        ctx->envflags |= DELAY_SLOT;
        ctx->delayed_pc = (uint32_t) -1;
        return;
    case 0x400b: /* jsr @Rn */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
        tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
        ctx->envflags |= DELAY_SLOT;
        ctx->delayed_pc = (uint32_t) -1;
        return;
    case 0x400e: /* ldc Rm,SR */
        CHECK_PRIVILEGED
        {
            TCGv val = tcg_temp_new();
            tcg_gen_andi_i32(val, REG(B11_8), 0x700083f3);
            gen_write_sr(val);
            tcg_temp_free(val);
            ctx->bstate = BS_STOP;
        }
        return;
    case 0x4007: /* ldc.l @Rm+,SR */
        CHECK_PRIVILEGED
        {
            TCGv val = tcg_temp_new();
            tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx, MO_TESL);
            tcg_gen_andi_i32(val, val, 0x700083f3);
            gen_write_sr(val);
            tcg_temp_free(val);
            tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
            ctx->bstate = BS_STOP;
        }
        return;
    case 0x0002: /* stc SR,Rn */
        CHECK_PRIVILEGED
        gen_read_sr(REG(B11_8));
        return;
    case 0x4003: /* stc SR,@-Rn */
        CHECK_PRIVILEGED
        {
            TCGv addr = tcg_temp_new();
            TCGv val = tcg_temp_new();
            tcg_gen_subi_i32(addr, REG(B11_8), 4);
            gen_read_sr(val);
            tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL);
            tcg_gen_mov_i32(REG(B11_8), addr);
            tcg_temp_free(val);
            tcg_temp_free(addr);
        }
        return;
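/* LD/ST expand to the register and post-increment/pre-decrement memory
   forms of lds/sts (or ldc/stc) for one control or system register;
   prechk inserts an optional privilege or FPU-enable check. */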
#define LD(reg,ldnum,ldpnum,prechk) \
  case ldnum:                                                         \
    prechk                                                            \
    tcg_gen_mov_i32(cpu_##reg, REG(B11_8));                           \
    return;                                                           \
  case ldpnum:                                                        \
    prechk                                                            \
    tcg_gen_qemu_ld_i32(cpu_##reg, REG(B11_8), ctx->memidx, MO_TESL); \
    tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);                      \
    return;
#define ST(reg,stnum,stpnum,prechk) \
  case stnum:                                                         \
    prechk                                                            \
    tcg_gen_mov_i32(REG(B11_8), cpu_##reg);                           \
    return;                                                           \
  case stpnum:                                                        \
    prechk                                                            \
    {                                                                 \
        TCGv addr = tcg_temp_new();                                   \
        tcg_gen_subi_i32(addr, REG(B11_8), 4);                        \
        tcg_gen_qemu_st_i32(cpu_##reg, addr, ctx->memidx, MO_TEUL);   \
        tcg_gen_mov_i32(REG(B11_8), addr);                            \
        tcg_temp_free(addr);                                          \
    }                                                                 \
    return;
#define LDST(reg,ldnum,ldpnum,stnum,stpnum,prechk) \
    LD(reg,ldnum,ldpnum,prechk) \
    ST(reg,stnum,stpnum,prechk)
    LDST(gbr,  0x401e, 0x4017, 0x0012, 0x4013, {})
    LDST(vbr,  0x402e, 0x4027, 0x0022, 0x4023, CHECK_PRIVILEGED)
    LDST(ssr,  0x403e, 0x4037, 0x0032, 0x4033, CHECK_PRIVILEGED)
    LDST(spc,  0x404e, 0x4047, 0x0042, 0x4043, CHECK_PRIVILEGED)
    ST(sgr,  0x003a, 0x4032, CHECK_PRIVILEGED)
    LD(sgr,  0x403a, 0x4036, CHECK_PRIVILEGED if (!(ctx->features & SH_FEATURE_SH4A)) break;)
    LDST(dbr,  0x40fa, 0x40f6, 0x00fa, 0x40f2, CHECK_PRIVILEGED)
    LDST(mach, 0x400a, 0x4006, 0x000a, 0x4002, {})
    LDST(macl, 0x401a, 0x4016, 0x001a, 0x4012, {})
    LDST(pr,   0x402a, 0x4026, 0x002a, 0x4022, {})
    LDST(fpul, 0x405a, 0x4056, 0x005a, 0x4052, {CHECK_FPU_ENABLED})
    case 0x406a: /* lds Rm,FPSCR */
        CHECK_FPU_ENABLED
        gen_helper_ld_fpscr(cpu_env, REG(B11_8));
        ctx->bstate = BS_STOP;
        return;
    case 0x4066: /* lds.l @Rm+,FPSCR */
        CHECK_FPU_ENABLED
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_qemu_ld_i32(addr, REG(B11_8), ctx->memidx, MO_TESL);
            tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
            gen_helper_ld_fpscr(cpu_env, addr);
            tcg_temp_free(addr);
            ctx->bstate = BS_STOP;
        }
        return;
    case 0x006a: /* sts FPSCR,Rn */
        CHECK_FPU_ENABLED
        tcg_gen_andi_i32(REG(B11_8), cpu_fpscr, 0x003fffff);
        return;
    case 0x4062: /* sts FPSCR,@-Rn */
        CHECK_FPU_ENABLED
        {
            TCGv addr, val;
            val = tcg_temp_new();
            tcg_gen_andi_i32(val, cpu_fpscr, 0x003fffff);
            addr = tcg_temp_new();
            tcg_gen_subi_i32(addr, REG(B11_8), 4);
            tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL);
            tcg_gen_mov_i32(REG(B11_8), addr);
            tcg_temp_free(addr);
            tcg_temp_free(val);
        }
        return;
    case 0x00c3: /* movca.l R0,@Rm */
        {
            TCGv val = tcg_temp_new();
            tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx, MO_TEUL);
            gen_helper_movcal(cpu_env, REG(B11_8), val);
            tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
            tcg_temp_free(val);
        }
        ctx->has_movcal = 1;
        return;
    case 0x40a9: /* movua.l @Rm,R0 */
        /* Load non-boundary-aligned data */
        if (ctx->features & SH_FEATURE_SH4A) {
            tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx,
                                MO_TEUL | MO_UNALN);
            return;
        }
        break;
    case 0x40e9: /* movua.l @Rm+,R0 */
        /* Load non-boundary-aligned data */
        if (ctx->features & SH_FEATURE_SH4A) {
            tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx,
                                MO_TEUL | MO_UNALN);
            tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
            return;
        }
        break;
    case 0x0029: /* movt Rn */
        tcg_gen_mov_i32(REG(B11_8), cpu_sr_t);
        return;
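    /* movli.l/movco.l implement load-linked/store-conditional through the
       cpu_ldst flag alone: movli.l sets it, movco.l tests and clears it.
       No other stores are monitored, so this is a simplification of the
       real LDST link behaviour. */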
    case 0x0073:
        /* MOVCO.L
         *     LDST -> T
         *     If (T == 1) R0 -> (Rn)
         *     0 -> LDST
         */
        if (ctx->features & SH_FEATURE_SH4A) {
            TCGLabel *label = gen_new_label();
            tcg_gen_mov_i32(cpu_sr_t, cpu_ldst);
            tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ldst, 0, label);
            tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
            gen_set_label(label);
            tcg_gen_movi_i32(cpu_ldst, 0);
            return;
        } else {
            break;
        }
    case 0x0063:
        /* MOVLI.L @Rm,R0
         *     1 -> LDST
         *     (Rm) -> R0
         *     When interrupt/exception
         *     occurred 0 -> LDST
         */
        if (ctx->features & SH_FEATURE_SH4A) {
            tcg_gen_movi_i32(cpu_ldst, 0);
            tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TESL);
            tcg_gen_movi_i32(cpu_ldst, 1);
            return;
        } else {
            break;
        }
    case 0x0093: /* ocbi @Rn */
        {
            gen_helper_ocbi(cpu_env, REG(B11_8));
        }
        return;
    case 0x00a3: /* ocbp @Rn */
    case 0x00b3: /* ocbwb @Rn */
        /* These instructions are supposed to do nothing in case of
           a cache miss. Given that we only partially emulate caches
           it is safe to simply ignore them. */
        return;
    case 0x0083: /* pref @Rn */
        return;
    case 0x00d3: /* prefi @Rn */
        if (ctx->features & SH_FEATURE_SH4A) {
            return;
        } else {
            break;
        }
    case 0x00e3: /* icbi @Rn */
        if (ctx->features & SH_FEATURE_SH4A) {
            return;
        } else {
            break;
        }
    case 0x00ab: /* synco */
        if (ctx->features & SH_FEATURE_SH4A) {
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
            return;
        }
        break;
    case 0x4024: /* rotcl Rn */
        {
            TCGv tmp = tcg_temp_new();
            tcg_gen_mov_i32(tmp, cpu_sr_t);
            tcg_gen_shri_i32(cpu_sr_t, REG(B11_8), 31);
            tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
            tcg_gen_or_i32(REG(B11_8), REG(B11_8), tmp);
            tcg_temp_free(tmp);
        }
        return;
    case 0x4025: /* rotcr Rn */
        {
            TCGv tmp = tcg_temp_new();
            tcg_gen_shli_i32(tmp, cpu_sr_t, 31);
            tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
            tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
            tcg_gen_or_i32(REG(B11_8), REG(B11_8), tmp);
            tcg_temp_free(tmp);
        }
        return;
    case 0x4004: /* rotl Rn */
        tcg_gen_rotli_i32(REG(B11_8), REG(B11_8), 1);
        tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
        return;
    case 0x4005: /* rotr Rn */
        tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
        tcg_gen_rotri_i32(REG(B11_8), REG(B11_8), 1);
        return;
    case 0x4000: /* shll Rn */
    case 0x4020: /* shal Rn */
        tcg_gen_shri_i32(cpu_sr_t, REG(B11_8), 31);
        tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
        return;
    case 0x4021: /* shar Rn */
        tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
        tcg_gen_sari_i32(REG(B11_8), REG(B11_8), 1);
        return;
    case 0x4001: /* shlr Rn */
        tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
        tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
        return;
    case 0x4008: /* shll2 Rn */
        tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 2);
        return;
    case 0x4018: /* shll8 Rn */
        tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 8);
        return;
    case 0x4028: /* shll16 Rn */
        tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 16);
        return;
    case 0x4009: /* shlr2 Rn */
        tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 2);
        return;
    case 0x4019: /* shlr8 Rn */
        tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 8);
        return;
    case 0x4029: /* shlr16 Rn */
        tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 16);
        return;
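    /* tas.b atomically sets bit 7 of the byte at @Rn; T reflects whether
       the byte was zero before the update. */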
    case 0x401b: /* tas.b @Rn */
        {
            TCGv val = tcg_const_i32(0x80);
            tcg_gen_atomic_fetch_or_i32(val, REG(B11_8), val,
                                        ctx->memidx, MO_UB);
            tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
            tcg_temp_free(val);
        }
        return;
    case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        tcg_gen_mov_i32(cpu_fregs[FREG(B11_8)], cpu_fpul);
        return;
    case 0xf01d: /* flds FRm,FPUL - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        tcg_gen_mov_i32(cpu_fpul, cpu_fregs[FREG(B11_8)]);
        return;
    case 0xf02d: /* float FPUL,FRn/DRn - FPSCR: R[PR,Enable.I]/W[Cause,Flag] */
        CHECK_FPU_ENABLED
        if (ctx->tbflags & FPSCR_PR) {
            TCGv_i64 fp;
            if (ctx->opcode & 0x0100) {
                break; /* illegal instruction */
            }
            fp = tcg_temp_new_i64();
            gen_helper_float_DT(fp, cpu_env, cpu_fpul);
            gen_store_fpr64(fp, DREG(B11_8));
            tcg_temp_free_i64(fp);
        } else {
            gen_helper_float_FT(cpu_fregs[FREG(B11_8)], cpu_env, cpu_fpul);
        }
        return;
    case 0xf03d: /* ftrc FRm/DRm,FPUL - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
        CHECK_FPU_ENABLED
        if (ctx->tbflags & FPSCR_PR) {
            TCGv_i64 fp;
            if (ctx->opcode & 0x0100) {
                break; /* illegal instruction */
            }
            fp = tcg_temp_new_i64();
            gen_load_fpr64(fp, DREG(B11_8));
            gen_helper_ftrc_DT(cpu_fpul, cpu_env, fp);
            tcg_temp_free_i64(fp);
        } else {
            gen_helper_ftrc_FT(cpu_fpul, cpu_env, cpu_fregs[FREG(B11_8)]);
        }
        return;
    case 0xf04d: /* fneg FRn/DRn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        {
            gen_helper_fneg_T(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)]);
        }
        return;
    case 0xf05d: /* fabs FRn/DRn */
        CHECK_FPU_ENABLED
        if (ctx->tbflags & FPSCR_PR) {
            if (ctx->opcode & 0x0100) {
                break; /* illegal instruction */
            }
            TCGv_i64 fp = tcg_temp_new_i64();
            gen_load_fpr64(fp, DREG(B11_8));
            gen_helper_fabs_DT(fp, fp);
            gen_store_fpr64(fp, DREG(B11_8));
            tcg_temp_free_i64(fp);
        } else {
            gen_helper_fabs_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)]);
        }
        return;
    case 0xf06d: /* fsqrt FRn */
        CHECK_FPU_ENABLED
        if (ctx->tbflags & FPSCR_PR) {
            if (ctx->opcode & 0x0100) {
                break; /* illegal instruction */
            }
            TCGv_i64 fp = tcg_temp_new_i64();
            gen_load_fpr64(fp, DREG(B11_8));
            gen_helper_fsqrt_DT(fp, cpu_env, fp);
            gen_store_fpr64(fp, DREG(B11_8));
            tcg_temp_free_i64(fp);
        } else {
            gen_helper_fsqrt_FT(cpu_fregs[FREG(B11_8)], cpu_env,
                                cpu_fregs[FREG(B11_8)]);
        }
        return;
    case 0xf07d: /* fsrra FRn */
        CHECK_FPU_ENABLED
        break;
    case 0xf08d: /* fldi0 FRn - FPSCR: R[PR] */
        CHECK_FPU_ENABLED
        if (!(ctx->tbflags & FPSCR_PR)) {
            tcg_gen_movi_i32(cpu_fregs[FREG(B11_8)], 0);
        }
        return;
    case 0xf09d: /* fldi1 FRn - FPSCR: R[PR] */
        CHECK_FPU_ENABLED
        if (!(ctx->tbflags & FPSCR_PR)) {
            tcg_gen_movi_i32(cpu_fregs[FREG(B11_8)], 0x3f800000);
        }
        return;
    case 0xf0ad: /* fcnvsd FPUL,DRn */
        CHECK_FPU_ENABLED
        {
            TCGv_i64 fp = tcg_temp_new_i64();
            gen_helper_fcnvsd_FT_DT(fp, cpu_env, cpu_fpul);
            gen_store_fpr64(fp, DREG(B11_8));
            tcg_temp_free_i64(fp);
        }
        return;
    case 0xf0bd: /* fcnvds DRn,FPUL */
        CHECK_FPU_ENABLED
        {
            TCGv_i64 fp = tcg_temp_new_i64();
            gen_load_fpr64(fp, DREG(B11_8));
            gen_helper_fcnvds_DT_FT(cpu_fpul, cpu_env, fp);
            tcg_temp_free_i64(fp);
        }
        return;
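    /* Vector ops, single precision only: fipr computes the inner product
       of FVm and FVn, ftrv multiplies FVn by the XMTRX matrix; the FV
       indexes are taken from opcode bits 8-11. */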
    case 0xf0ed: /* fipr FVm,FVn */
        CHECK_FPU_ENABLED
        if ((ctx->tbflags & FPSCR_PR) == 0) {
            TCGv m, n;
            m = tcg_const_i32((ctx->opcode >> 8) & 3);
            n = tcg_const_i32((ctx->opcode >> 10) & 3);
            gen_helper_fipr(cpu_env, m, n);
            tcg_temp_free(m);
            tcg_temp_free(n);
            return;
        }
        break;
    case 0xf0fd: /* ftrv XMTRX,FVn */
        CHECK_FPU_ENABLED
        if ((ctx->opcode & 0x0300) == 0x0100 &&
            (ctx->tbflags & FPSCR_PR) == 0) {
            TCGv n;
            n = tcg_const_i32((ctx->opcode >> 10) & 3);
            gen_helper_ftrv(cpu_env, n);
            tcg_temp_free(n);
            return;
        }
        break;
    }
#if 0
    fprintf(stderr, "unknown instruction 0x%04x at pc 0x%08x\n",
            ctx->opcode, ctx->pc);
    fflush(stderr);
#endif
    gen_save_cpu_state(ctx, true);
    if (ctx->envflags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {
        gen_helper_raise_slot_illegal_instruction(cpu_env);
    } else {
        gen_helper_raise_illegal_instruction(cpu_env);
    }
    ctx->bstate = BS_EXCP;
}

static void decode_opc(DisasContext * ctx)
{
    uint32_t old_flags = ctx->envflags;

    _decode_opc(ctx);

    if (old_flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {
        /* go out of the delay slot */
        ctx->envflags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
        tcg_gen_movi_i32(cpu_flags, ctx->envflags);
        ctx->bstate = BS_BRANCH;
        if (old_flags & DELAY_SLOT_CONDITIONAL) {
            gen_delayed_conditional_jump(ctx);
        } else if (old_flags & DELAY_SLOT) {
            gen_jump(ctx);
        }
    }
}

void gen_intermediate_code(CPUSH4State * env, struct TranslationBlock *tb)
{
    SuperHCPU *cpu = sh_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    DisasContext ctx;
    target_ulong pc_start;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    ctx.pc = pc_start;
    ctx.tbflags = (uint32_t)tb->flags;
    ctx.envflags = tb->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    ctx.bstate = BS_NONE;
    ctx.memidx = (ctx.tbflags & (1u << SR_MD)) == 0 ? 1 : 0;
    /* We don't know if the delayed pc came from a dynamic or static branch,
       so assume it is a dynamic branch.  */
    ctx.delayed_pc = -1; /* use delayed pc from env pointer */
    ctx.tb = tb;
    ctx.singlestep_enabled = cs->singlestep_enabled;
    ctx.features = env->features;
    ctx.has_movcal = (ctx.tbflags & TB_FLAG_PENDING_MOVCA);

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    gen_tb_start(tb);
    while (ctx.bstate == BS_NONE && !tcg_op_buf_full()) {
        tcg_gen_insn_start(ctx.pc, ctx.envflags);
        num_insns++;

        if (unlikely(cpu_breakpoint_test(cs, ctx.pc, BP_ANY))) {
            /* We have hit a breakpoint - make sure PC is up-to-date */
            gen_save_cpu_state(&ctx, true);
            gen_helper_debug(cpu_env);
            ctx.bstate = BS_EXCP;
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order for it to be properly
               cleared -- thus we increment the PC here so that the logic
               setting tb->size below does the right thing.  */
            ctx.pc += 2;
            break;
        }

        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        ctx.opcode = cpu_lduw_code(env, ctx.pc);
        decode_opc(&ctx);
        ctx.pc += 2;
        if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0) {
            break;
        }
        if (cs->singlestep_enabled) {
            break;
        }
        if (num_insns >= max_insns) {
            break;
        }
        if (singlestep) {
            break;
        }
    }
    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }
    if (cs->singlestep_enabled) {
        gen_save_cpu_state(&ctx, true);
        gen_helper_debug(cpu_env);
    } else {
        switch (ctx.bstate) {
        case BS_STOP:
            gen_save_cpu_state(&ctx, true);
            tcg_gen_exit_tb(0);
            break;
        case BS_NONE:
            gen_save_cpu_state(&ctx, false);
            gen_goto_tb(&ctx, 0, ctx.pc);
            break;
        case BS_EXCP:
            /* fall through */
        case BS_BRANCH:
        default:
            break;
        }
    }

    gen_tb_end(tb, num_insns);

    tb->size = ctx.pc - pc_start;
    tb->icount = num_insns;

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(pc_start)) {
        qemu_log_lock();
        qemu_log("IN:\n"); /* , lookup_symbol(pc_start)); */
        log_target_disas(cs, pc_start, ctx.pc - pc_start, 0);
        qemu_log("\n");
        qemu_log_unlock();
    }
#endif
}

void restore_state_to_opc(CPUSH4State *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->pc = data[0];
    env->flags = data[1];
    /* Theoretically delayed_pc should also be restored. In practice the
       branch instruction is re-executed after exception, so the delayed
       branch target will be recomputed. */
}