/*
 * SH4 translation
 *
 * Copyright (c) 2005 Samuel Tardieu
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#define DEBUG_DISAS

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "exec/cpu_ldst.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/log.h"


typedef struct DisasContext {
    struct TranslationBlock *tb;
    target_ulong pc;
    uint16_t opcode;
    uint32_t tbflags;  /* should stay unmodified during the TB translation */
    uint32_t envflags; /* should stay in sync with env->flags using TCG ops */
    int bstate;
    int memidx;
    int gbank;
    uint32_t delayed_pc;
    int singlestep_enabled;
    uint32_t features;
    int has_movcal;
} DisasContext;

#if defined(CONFIG_USER_ONLY)
#define IS_USER(ctx) 1
#else
#define IS_USER(ctx) (!(ctx->tbflags & (1u << SR_MD)))
#endif

enum {
    BS_NONE   = 0, /* We go out of the TB without reaching a branch or an
                    * exception condition
                    */
    BS_STOP   = 1, /* We want to stop translation for any reason */
    BS_BRANCH = 2, /* We reached a branch condition */
    BS_EXCP   = 3, /* We reached an exception condition */
};

/* global register indexes */
static TCGv_env cpu_env;
static TCGv cpu_gregs[32];
static TCGv cpu_sr, cpu_sr_m, cpu_sr_q, cpu_sr_t;
static TCGv cpu_pc, cpu_ssr, cpu_spc, cpu_gbr;
static TCGv cpu_vbr, cpu_sgr, cpu_dbr, cpu_mach, cpu_macl;
static TCGv cpu_pr, cpu_fpscr, cpu_fpul, cpu_ldst;
static TCGv cpu_fregs[32];

/* internal register indexes */
static TCGv cpu_flags, cpu_delayed_pc, cpu_delayed_cond;

#include "exec/gen-icount.h"

void sh4_translate_init(void)
{
    int i;
    static int done_init = 0;
    static const char * const gregnames[24] = {
        "R0_BANK0", "R1_BANK0", "R2_BANK0", "R3_BANK0",
        "R4_BANK0", "R5_BANK0", "R6_BANK0", "R7_BANK0",
        "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
        "R0_BANK1", "R1_BANK1", "R2_BANK1", "R3_BANK1",
        "R4_BANK1", "R5_BANK1", "R6_BANK1", "R7_BANK1"
    };
    static const char * const fregnames[32] = {
        "FPR0_BANK0", "FPR1_BANK0", "FPR2_BANK0", "FPR3_BANK0",
        "FPR4_BANK0", "FPR5_BANK0", "FPR6_BANK0", "FPR7_BANK0",
        "FPR8_BANK0", "FPR9_BANK0", "FPR10_BANK0", "FPR11_BANK0",
        "FPR12_BANK0", "FPR13_BANK0", "FPR14_BANK0", "FPR15_BANK0",
        "FPR0_BANK1", "FPR1_BANK1", "FPR2_BANK1", "FPR3_BANK1",
        "FPR4_BANK1", "FPR5_BANK1", "FPR6_BANK1", "FPR7_BANK1",
        "FPR8_BANK1", "FPR9_BANK1", "FPR10_BANK1", "FPR11_BANK1",
        "FPR12_BANK1", "FPR13_BANK1", "FPR14_BANK1", "FPR15_BANK1",
    };

    if (done_init) {
        return;
    }

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    tcg_ctx.tcg_env = cpu_env;

    for (i = 0; i < 24; i++) {
        cpu_gregs[i] = tcg_global_mem_new_i32(cpu_env,
                                              offsetof(CPUSH4State, gregs[i]),
                                              gregnames[i]);
    }
    memcpy(cpu_gregs + 24, cpu_gregs + 8, 8 * sizeof(TCGv));

    cpu_pc = tcg_global_mem_new_i32(cpu_env,
                                    offsetof(CPUSH4State, pc), "PC");
    cpu_sr = tcg_global_mem_new_i32(cpu_env,
                                    offsetof(CPUSH4State, sr), "SR");
    cpu_sr_m = tcg_global_mem_new_i32(cpu_env,
                                      offsetof(CPUSH4State, sr_m), "SR_M");
    cpu_sr_q = tcg_global_mem_new_i32(cpu_env,
                                      offsetof(CPUSH4State, sr_q), "SR_Q");
    cpu_sr_t = tcg_global_mem_new_i32(cpu_env,
                                      offsetof(CPUSH4State, sr_t), "SR_T");
    cpu_ssr = tcg_global_mem_new_i32(cpu_env,
                                     offsetof(CPUSH4State, ssr), "SSR");
    cpu_spc = tcg_global_mem_new_i32(cpu_env,
                                     offsetof(CPUSH4State, spc), "SPC");
    cpu_gbr = tcg_global_mem_new_i32(cpu_env,
                                     offsetof(CPUSH4State, gbr), "GBR");
    cpu_vbr = tcg_global_mem_new_i32(cpu_env,
                                     offsetof(CPUSH4State, vbr), "VBR");
    cpu_sgr = tcg_global_mem_new_i32(cpu_env,
                                     offsetof(CPUSH4State, sgr), "SGR");
    cpu_dbr = tcg_global_mem_new_i32(cpu_env,
                                     offsetof(CPUSH4State, dbr), "DBR");
    cpu_mach = tcg_global_mem_new_i32(cpu_env,
                                      offsetof(CPUSH4State, mach), "MACH");
    cpu_macl = tcg_global_mem_new_i32(cpu_env,
                                      offsetof(CPUSH4State, macl), "MACL");
    cpu_pr = tcg_global_mem_new_i32(cpu_env,
                                    offsetof(CPUSH4State, pr), "PR");
    cpu_fpscr = tcg_global_mem_new_i32(cpu_env,
                                       offsetof(CPUSH4State, fpscr), "FPSCR");
    cpu_fpul = tcg_global_mem_new_i32(cpu_env,
                                      offsetof(CPUSH4State, fpul), "FPUL");

    cpu_flags = tcg_global_mem_new_i32(cpu_env,
                                       offsetof(CPUSH4State, flags), "_flags_");
    cpu_delayed_pc = tcg_global_mem_new_i32(cpu_env,
                                            offsetof(CPUSH4State, delayed_pc),
                                            "_delayed_pc_");
    cpu_delayed_cond = tcg_global_mem_new_i32(cpu_env,
                                              offsetof(CPUSH4State,
                                                       delayed_cond),
                                              "_delayed_cond_");
    cpu_ldst = tcg_global_mem_new_i32(cpu_env,
                                      offsetof(CPUSH4State, ldst), "_ldst_");

    for (i = 0; i < 32; i++) {
        cpu_fregs[i] = tcg_global_mem_new_i32(cpu_env,
                                              offsetof(CPUSH4State, fregs[i]),
                                              fregnames[i]);
    }

    done_init = 1;
}

void superh_cpu_dump_state(CPUState *cs, FILE *f,
                           fprintf_function cpu_fprintf, int flags)
{
    SuperHCPU *cpu = SUPERH_CPU(cs);
    CPUSH4State *env = &cpu->env;
    int i;
    cpu_fprintf(f, "pc=0x%08x sr=0x%08x pr=0x%08x fpscr=0x%08x\n",
                env->pc, cpu_read_sr(env), env->pr, env->fpscr);
    cpu_fprintf(f, "spc=0x%08x ssr=0x%08x gbr=0x%08x vbr=0x%08x\n",
                env->spc, env->ssr, env->gbr, env->vbr);
    cpu_fprintf(f, "sgr=0x%08x dbr=0x%08x delayed_pc=0x%08x fpul=0x%08x\n",
                env->sgr, env->dbr, env->delayed_pc, env->fpul);
    for (i = 0; i < 24; i += 4) {
        cpu_fprintf(f, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
                    i, env->gregs[i], i + 1, env->gregs[i + 1],
                    i + 2, env->gregs[i + 2], i + 3, env->gregs[i + 3]);
    }
    if (env->flags & DELAY_SLOT) {
        cpu_fprintf(f, "in delay slot (delayed_pc=0x%08x)\n",
                    env->delayed_pc);
    } else if (env->flags & DELAY_SLOT_CONDITIONAL) {
        cpu_fprintf(f, "in conditional delay slot (delayed_pc=0x%08x)\n",
                    env->delayed_pc);
    } else if (env->flags & DELAY_SLOT_RTE) {
        cpu_fprintf(f, "in rte delay slot (delayed_pc=0x%08x)\n",
                    env->delayed_pc);
    }
}
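
/* SR is kept split across TCG globals: the Q, M and T bits live in
   cpu_sr_q/cpu_sr_m/cpu_sr_t so that arithmetic ops can update them
   directly, while cpu_sr holds the remaining bits with those three
   cleared (see gen_write_sr below).  gen_read_sr rebuilds the full
   architectural value.  */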
static void gen_read_sr(TCGv dst)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_shli_i32(t0, cpu_sr_q, SR_Q);
    tcg_gen_or_i32(dst, cpu_sr, t0);
    tcg_gen_shli_i32(t0, cpu_sr_m, SR_M);
    tcg_gen_or_i32(dst, dst, t0);
    tcg_gen_shli_i32(t0, cpu_sr_t, SR_T);
    tcg_gen_or_i32(dst, dst, t0);
    tcg_temp_free_i32(t0);
}

static void gen_write_sr(TCGv src)
{
    tcg_gen_andi_i32(cpu_sr, src,
                     ~((1u << SR_Q) | (1u << SR_M) | (1u << SR_T)));
    tcg_gen_extract_i32(cpu_sr_q, src, SR_Q, 1);
    tcg_gen_extract_i32(cpu_sr_m, src, SR_M, 1);
    tcg_gen_extract_i32(cpu_sr_t, src, SR_T, 1);
}

static inline void gen_save_cpu_state(DisasContext *ctx, bool save_pc)
{
    if (save_pc) {
        tcg_gen_movi_i32(cpu_pc, ctx->pc);
    }
    if (ctx->delayed_pc != (uint32_t) -1) {
        tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
    }
    if ((ctx->tbflags & TB_FLAG_ENVFLAGS_MASK) != ctx->envflags) {
        tcg_gen_movi_i32(cpu_flags, ctx->envflags);
    }
}

static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest)
{
    if (unlikely(ctx->singlestep_enabled)) {
        return false;
    }
    if (ctx->tbflags & GUSA_EXCLUSIVE) {
        return false;
    }
#ifndef CONFIG_USER_ONLY
    return (ctx->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}

static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
{
    if (use_goto_tb(ctx, dest)) {
        /* Use a direct jump if in same page and singlestep not enabled */
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_exit_tb((uintptr_t)ctx->tb + n);
    } else {
        tcg_gen_movi_i32(cpu_pc, dest);
        if (ctx->singlestep_enabled) {
            gen_helper_debug(cpu_env);
        }
        tcg_gen_exit_tb(0);
    }
}

static void gen_jump(DisasContext * ctx)
{
    if (ctx->delayed_pc == (uint32_t) -1) {
        /* Target is not statically known; it comes necessarily from a
           delayed jump, as immediate jumps are conditional jumps */
        tcg_gen_mov_i32(cpu_pc, cpu_delayed_pc);
        tcg_gen_discard_i32(cpu_delayed_pc);
        if (ctx->singlestep_enabled) {
            gen_helper_debug(cpu_env);
        }
        tcg_gen_exit_tb(0);
    } else {
        gen_goto_tb(ctx, 0, ctx->delayed_pc);
    }
}

/* Immediate conditional jump (bt or bf) */
static void gen_conditional_jump(DisasContext *ctx, target_ulong dest,
                                 bool jump_if_true)
{
    TCGLabel *l1 = gen_new_label();
    TCGCond cond_not_taken = jump_if_true ? TCG_COND_EQ : TCG_COND_NE;

    if (ctx->tbflags & GUSA_EXCLUSIVE) {
        /* When in an exclusive region, we must continue to the end.
           Therefore, exit the region on a taken branch, but otherwise
           fall through to the next instruction.  */
        tcg_gen_brcondi_i32(cond_not_taken, cpu_sr_t, 0, l1);
        tcg_gen_movi_i32(cpu_flags, ctx->envflags & ~GUSA_MASK);
        /* Note that this won't actually use a goto_tb opcode because we
           disallow it in use_goto_tb, but it handles exit + singlestep.  */
        gen_goto_tb(ctx, 0, dest);
        gen_set_label(l1);
        return;
    }

    gen_save_cpu_state(ctx, false);
    tcg_gen_brcondi_i32(cond_not_taken, cpu_sr_t, 0, l1);
    gen_goto_tb(ctx, 0, dest);
    gen_set_label(l1);
    gen_goto_tb(ctx, 1, ctx->pc + 2);
    ctx->bstate = BS_BRANCH;
}

/* Delayed conditional jump (bt or bf) */
static void gen_delayed_conditional_jump(DisasContext * ctx)
{
    TCGLabel *l1 = gen_new_label();
    TCGv ds = tcg_temp_new();

    tcg_gen_mov_i32(ds, cpu_delayed_cond);
    tcg_gen_discard_i32(cpu_delayed_cond);

    if (ctx->tbflags & GUSA_EXCLUSIVE) {
        /* When in an exclusive region, we must continue to the end.
           Therefore, exit the region on a taken branch, but otherwise
           fall through to the next instruction.  */
        tcg_gen_brcondi_i32(TCG_COND_EQ, ds, 0, l1);

        /* Leave the gUSA region.  */
        tcg_gen_movi_i32(cpu_flags, ctx->envflags & ~GUSA_MASK);
        gen_jump(ctx);

        gen_set_label(l1);
        return;
    }

    tcg_gen_brcondi_i32(TCG_COND_NE, ds, 0, l1);
    gen_goto_tb(ctx, 1, ctx->pc + 2);
    gen_set_label(l1);
    gen_jump(ctx);
}
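
/* A double-precision DRn occupies the register pair FPRn:FPRn+1, the
   even register holding the high 32 bits of the value.  */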
static inline void gen_load_fpr64(TCGv_i64 t, int reg)
{
    tcg_gen_concat_i32_i64(t, cpu_fregs[reg + 1], cpu_fregs[reg]);
}

static inline void gen_store_fpr64(TCGv_i64 t, int reg)
{
    tcg_gen_extr_i64_i32(cpu_fregs[reg + 1], cpu_fregs[reg], t);
}

#define B3_0 (ctx->opcode & 0xf)
#define B6_4 ((ctx->opcode >> 4) & 0x7)
#define B7_4 ((ctx->opcode >> 4) & 0xf)
#define B7_0 (ctx->opcode & 0xff)
#define B7_0s ((int32_t) (int8_t) (ctx->opcode & 0xff))
#define B11_0s (ctx->opcode & 0x800 ? 0xfffff000 | (ctx->opcode & 0xfff) : \
  (ctx->opcode & 0xfff))
#define B11_8 ((ctx->opcode >> 8) & 0xf)
#define B15_12 ((ctx->opcode >> 12) & 0xf)

#define REG(x)    cpu_gregs[(x) ^ ctx->gbank]
#define ALTREG(x) cpu_gregs[(x) ^ ctx->gbank ^ 0x10]

#define FREG(x) cpu_fregs[ctx->tbflags & FPSCR_FR ? (x) ^ 0x10 : (x)]
#define XHACK(x) ((((x) & 1) << 4) | ((x) & 0xe))
#define XREG(x) FREG(XHACK(x))
/* Assumes lsb of (x) is always 0 */
#define DREG(x) (ctx->tbflags & FPSCR_FR ? (x) ^ 0x10 : (x))

#define CHECK_NOT_DELAY_SLOT \
    if (ctx->envflags & DELAY_SLOT_MASK) {                  \
        gen_save_cpu_state(ctx, true);                      \
        gen_helper_raise_slot_illegal_instruction(cpu_env); \
        ctx->bstate = BS_EXCP;                              \
        return;                                             \
    }

#define CHECK_PRIVILEGED \
    if (IS_USER(ctx)) {                                         \
        gen_save_cpu_state(ctx, true);                          \
        if (ctx->envflags & DELAY_SLOT_MASK) {                  \
            gen_helper_raise_slot_illegal_instruction(cpu_env); \
        } else {                                                \
            gen_helper_raise_illegal_instruction(cpu_env);      \
        }                                                       \
        ctx->bstate = BS_EXCP;                                  \
        return;                                                 \
    }

#define CHECK_FPU_ENABLED \
    if (ctx->tbflags & (1u << SR_FD)) {                \
        gen_save_cpu_state(ctx, true);                 \
        if (ctx->envflags & DELAY_SLOT_MASK) {         \
            gen_helper_raise_slot_fpu_disable(cpu_env); \
        } else {                                       \
            gen_helper_raise_fpu_disable(cpu_env);     \
        }                                              \
        ctx->bstate = BS_EXCP;                         \
        return;                                        \
    }

static void _decode_opc(DisasContext * ctx)
{
    /* This code tries to make movcal emulation sufficiently
       accurate for Linux purposes.  This instruction writes
       memory, and prior to that, always allocates a cache line.
       It is used in two contexts:
       - in memcpy, where data is copied in blocks, the first write
       to a block uses movca.l for performance.
       - in arch/sh/mm/cache-sh4.c, the movca.l + ocbi combination is
       used to flush the cache.  Here, the data written by movca.l is
       never written to memory, and the data written is just bogus.

       To simulate this, we simulate movca.l: we store the value to
       memory, but we also remember the previous content.  If we see
       ocbi, we check if movca.l for that address was done previously.
       If so, the write should not have hit the memory, so we restore
       the previous content.
       When we see an instruction that is neither movca.l
       nor ocbi, the previous content is discarded.

       To optimize, we only try to flush stores when we're at the start of
       TB, or if we already saw movca.l in this TB and did not flush stores
       yet.
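
       As a sketch of the cache-flush idiom (register choice illustrative):
           movca.l r0,@r4   ! line allocated, r0 stored, old contents backed up
           ocbi    @r4      ! line invalidated, backup written back to memory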
    */
    if (ctx->has_movcal) {
        int opcode = ctx->opcode & 0xf0ff;
        if (opcode != 0x0093 /* ocbi */
            && opcode != 0x00c3 /* movca.l */) {
            gen_helper_discard_movcal_backup(cpu_env);
            ctx->has_movcal = 0;
        }
    }

#if 0
    fprintf(stderr, "Translating opcode 0x%04x\n", ctx->opcode);
#endif

    switch (ctx->opcode) {
    case 0x0019: /* div0u */
        tcg_gen_movi_i32(cpu_sr_m, 0);
        tcg_gen_movi_i32(cpu_sr_q, 0);
        tcg_gen_movi_i32(cpu_sr_t, 0);
        return;
    case 0x000b: /* rts */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_mov_i32(cpu_delayed_pc, cpu_pr);
        ctx->envflags |= DELAY_SLOT;
        ctx->delayed_pc = (uint32_t) -1;
        return;
    case 0x0028: /* clrmac */
        tcg_gen_movi_i32(cpu_mach, 0);
        tcg_gen_movi_i32(cpu_macl, 0);
        return;
    case 0x0048: /* clrs */
        tcg_gen_andi_i32(cpu_sr, cpu_sr, ~(1u << SR_S));
        return;
    case 0x0008: /* clrt */
        tcg_gen_movi_i32(cpu_sr_t, 0);
        return;
    case 0x0038: /* ldtlb */
        CHECK_PRIVILEGED
        gen_helper_ldtlb(cpu_env);
        return;
    case 0x002b: /* rte */
        CHECK_PRIVILEGED
        CHECK_NOT_DELAY_SLOT
        gen_write_sr(cpu_ssr);
        tcg_gen_mov_i32(cpu_delayed_pc, cpu_spc);
        ctx->envflags |= DELAY_SLOT_RTE;
        ctx->delayed_pc = (uint32_t) -1;
        ctx->bstate = BS_STOP;
        return;
    case 0x0058: /* sets */
        tcg_gen_ori_i32(cpu_sr, cpu_sr, (1u << SR_S));
        return;
    case 0x0018: /* sett */
        tcg_gen_movi_i32(cpu_sr_t, 1);
        return;
    case 0xfbfd: /* frchg */
        tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_FR);
        ctx->bstate = BS_STOP;
        return;
    case 0xf3fd: /* fschg */
        tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_SZ);
        ctx->bstate = BS_STOP;
        return;
    case 0x0009: /* nop */
        return;
    case 0x001b: /* sleep */
        CHECK_PRIVILEGED
        tcg_gen_movi_i32(cpu_pc, ctx->pc + 2);
        gen_helper_sleep(cpu_env);
        return;
    }

    switch (ctx->opcode & 0xf000) {
    case 0x1000: /* mov.l Rm,@(disp,Rn) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, REG(B11_8), B3_0 * 4);
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
            tcg_temp_free(addr);
        }
        return;
    case 0x5000: /* mov.l @(disp,Rm),Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 4);
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
            tcg_temp_free(addr);
        }
        return;
    case 0xe000: /* mov #imm,Rn */
#ifdef CONFIG_USER_ONLY
        /* Detect the start of a gUSA region.  If so, update envflags
           and end the TB.  This will allow us to see the end of the
           region (stored in R0) in the next TB.
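
           A region entry looks like "mov #-6,r15": the negative immediate
           is minus the region's length in bytes, and R0 holds the address
           of the region's end (see decode_gusa below).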
        */
        if (B11_8 == 15 && B7_0s < 0 && parallel_cpus) {
            ctx->envflags = deposit32(ctx->envflags, GUSA_SHIFT, 8, B7_0s);
            ctx->bstate = BS_STOP;
        }
#endif
        tcg_gen_movi_i32(REG(B11_8), B7_0s);
        return;
    case 0x9000: /* mov.w @(disp,PC),Rn */
        {
            TCGv addr = tcg_const_i32(ctx->pc + 4 + B7_0 * 2);
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESW);
            tcg_temp_free(addr);
        }
        return;
    case 0xd000: /* mov.l @(disp,PC),Rn */
        {
            TCGv addr = tcg_const_i32((ctx->pc + 4 + B7_0 * 4) & ~3);
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
            tcg_temp_free(addr);
        }
        return;
    case 0x7000: /* add #imm,Rn */
        tcg_gen_addi_i32(REG(B11_8), REG(B11_8), B7_0s);
        return;
    case 0xa000: /* bra disp */
        CHECK_NOT_DELAY_SLOT
        ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
        ctx->envflags |= DELAY_SLOT;
        return;
    case 0xb000: /* bsr disp */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
        ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
        ctx->envflags |= DELAY_SLOT;
        return;
    }

    switch (ctx->opcode & 0xf00f) {
    case 0x6003: /* mov Rm,Rn */
        tcg_gen_mov_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x2000: /* mov.b Rm,@Rn */
        tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_UB);
        return;
    case 0x2001: /* mov.w Rm,@Rn */
        tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_TEUW);
        return;
    case 0x2002: /* mov.l Rm,@Rn */
        tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_TEUL);
        return;
    case 0x6000: /* mov.b @Rm,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
        return;
    case 0x6001: /* mov.w @Rm,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESW);
        return;
    case 0x6002: /* mov.l @Rm,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESL);
        return;
    case 0x2004: /* mov.b Rm,@-Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_subi_i32(addr, REG(B11_8), 1);
            /* might cause re-execution */
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
            tcg_gen_mov_i32(REG(B11_8), addr); /* modify register status */
            tcg_temp_free(addr);
        }
        return;
    case 0x2005: /* mov.w Rm,@-Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_subi_i32(addr, REG(B11_8), 2);
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUW);
            tcg_gen_mov_i32(REG(B11_8), addr);
            tcg_temp_free(addr);
        }
        return;
    case 0x2006: /* mov.l Rm,@-Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_subi_i32(addr, REG(B11_8), 4);
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
            tcg_gen_mov_i32(REG(B11_8), addr);
            tcg_temp_free(addr);
        }
        return;
    case 0x6004: /* mov.b @Rm+,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
        if (B11_8 != B7_4) {
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 1);
        }
        return;
    case 0x6005: /* mov.w @Rm+,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESW);
        if (B11_8 != B7_4) {
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
        }
        return;
    case 0x6006: /* mov.l @Rm+,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESL);
        if (B11_8 != B7_4) {
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
        }
        return;
    case 0x0004: /* mov.b Rm,@(R0,Rn) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B11_8), REG(0));
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
            tcg_temp_free(addr);
        }
        return;
    case 0x0005: /* mov.w Rm,@(R0,Rn) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B11_8), REG(0));
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUW);
            tcg_temp_free(addr);
        }
        return;
    case 0x0006: /* mov.l Rm,@(R0,Rn) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B11_8), REG(0));
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
            tcg_temp_free(addr);
        }
        return;
    case 0x000c: /* mov.b @(R0,Rm),Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B7_4), REG(0));
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_SB);
            tcg_temp_free(addr);
        }
        return;
    case 0x000d: /* mov.w @(R0,Rm),Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B7_4), REG(0));
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESW);
            tcg_temp_free(addr);
        }
        return;
    case 0x000e: /* mov.l @(R0,Rm),Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B7_4), REG(0));
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
            tcg_temp_free(addr);
        }
        return;
    case 0x6008: /* swap.b Rm,Rn */
        {
            TCGv low = tcg_temp_new();
            tcg_gen_ext16u_i32(low, REG(B7_4));
            tcg_gen_bswap16_i32(low, low);
            tcg_gen_deposit_i32(REG(B11_8), REG(B7_4), low, 0, 16);
            tcg_temp_free(low);
        }
        return;
    case 0x6009: /* swap.w Rm,Rn */
        tcg_gen_rotli_i32(REG(B11_8), REG(B7_4), 16);
        return;
    case 0x200d: /* xtrct Rm,Rn */
        {
            TCGv high, low;
            high = tcg_temp_new();
            tcg_gen_shli_i32(high, REG(B7_4), 16);
            low = tcg_temp_new();
            tcg_gen_shri_i32(low, REG(B11_8), 16);
            tcg_gen_or_i32(REG(B11_8), high, low);
            tcg_temp_free(low);
            tcg_temp_free(high);
        }
        return;
    case 0x300c: /* add Rm,Rn */
        tcg_gen_add_i32(REG(B11_8), REG(B11_8), REG(B7_4));
        return;
    case 0x300e: /* addc Rm,Rn */
        {
            TCGv t0, t1;
            t0 = tcg_const_tl(0);
            t1 = tcg_temp_new();
            tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0);
            tcg_gen_add2_i32(REG(B11_8), cpu_sr_t,
                             REG(B11_8), t0, t1, cpu_sr_t);
            tcg_temp_free(t0);
            tcg_temp_free(t1);
        }
        return;
    case 0x300f: /* addv Rm,Rn */
        {
            TCGv t0, t1, t2;
            t0 = tcg_temp_new();
            tcg_gen_add_i32(t0, REG(B7_4), REG(B11_8));
            t1 = tcg_temp_new();
            tcg_gen_xor_i32(t1, t0, REG(B11_8));
            t2 = tcg_temp_new();
            tcg_gen_xor_i32(t2, REG(B7_4), REG(B11_8));
            tcg_gen_andc_i32(cpu_sr_t, t1, t2);
            tcg_temp_free(t2);
            tcg_gen_shri_i32(cpu_sr_t, cpu_sr_t, 31);
            tcg_temp_free(t1);
            tcg_gen_mov_i32(REG(B11_8), t0);
            tcg_temp_free(t0);
        }
        return;
    case 0x2009: /* and Rm,Rn */
        tcg_gen_and_i32(REG(B11_8), REG(B11_8), REG(B7_4));
        return;
    case 0x3000: /* cmp/eq Rm,Rn */
        tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, REG(B11_8), REG(B7_4));
        return;
    case 0x3003: /* cmp/ge Rm,Rn */
        tcg_gen_setcond_i32(TCG_COND_GE, cpu_sr_t, REG(B11_8), REG(B7_4));
        return;
    case 0x3007: /* cmp/gt Rm,Rn */
        tcg_gen_setcond_i32(TCG_COND_GT, cpu_sr_t, REG(B11_8), REG(B7_4));
        return;
    case 0x3006: /* cmp/hi Rm,Rn */
        tcg_gen_setcond_i32(TCG_COND_GTU, cpu_sr_t, REG(B11_8), REG(B7_4));
        return;
    case 0x3002: /* cmp/hs Rm,Rn */
        tcg_gen_setcond_i32(TCG_COND_GEU, cpu_sr_t, REG(B11_8), REG(B7_4));
        return;
    case 0x200c: /* cmp/str Rm,Rn */
        {
            TCGv cmp1 = tcg_temp_new();
            TCGv cmp2 = tcg_temp_new();

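            /* T must be set iff some byte of Rm equals the corresponding
               byte of Rn, i.e. iff Rm ^ Rn contains a zero byte; the usual
               (x - 0x01010101) & ~x & 0x80808080 trick detects one.  */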
            tcg_gen_xor_i32(cmp2, REG(B7_4), REG(B11_8));
            tcg_gen_subi_i32(cmp1, cmp2, 0x01010101);
            tcg_gen_andc_i32(cmp1, cmp1, cmp2);
            tcg_gen_andi_i32(cmp1, cmp1, 0x80808080);
            tcg_gen_setcondi_i32(TCG_COND_NE, cpu_sr_t, cmp1, 0);
            tcg_temp_free(cmp2);
            tcg_temp_free(cmp1);
        }
        return;
    case 0x2007: /* div0s Rm,Rn */
        tcg_gen_shri_i32(cpu_sr_q, REG(B11_8), 31); /* SR_Q */
        tcg_gen_shri_i32(cpu_sr_m, REG(B7_4), 31);  /* SR_M */
        tcg_gen_xor_i32(cpu_sr_t, cpu_sr_q, cpu_sr_m); /* SR_T */
        return;
    case 0x3004: /* div1 Rm,Rn */
        {
            TCGv t0 = tcg_temp_new();
            TCGv t1 = tcg_temp_new();
            TCGv t2 = tcg_temp_new();
            TCGv zero = tcg_const_i32(0);

            /* shift left arg1, saving the bit being pushed out and inserting
               T on the right */
            tcg_gen_shri_i32(t0, REG(B11_8), 31);
            tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
            tcg_gen_or_i32(REG(B11_8), REG(B11_8), cpu_sr_t);

            /* Add or subtract arg0 from arg1 depending if Q == M.  To avoid
               using 64-bit temps, we compute arg0's high part from q ^ m, so
               that it is 0x00000000 when adding the value or 0xffffffff when
               subtracting it. */
            tcg_gen_xor_i32(t1, cpu_sr_q, cpu_sr_m);
            tcg_gen_subi_i32(t1, t1, 1);
            tcg_gen_neg_i32(t2, REG(B7_4));
            tcg_gen_movcond_i32(TCG_COND_EQ, t2, t1, zero, REG(B7_4), t2);
            tcg_gen_add2_i32(REG(B11_8), t1, REG(B11_8), zero, t2, t1);

            /* compute T and Q depending on carry */
            tcg_gen_andi_i32(t1, t1, 1);
            tcg_gen_xor_i32(t1, t1, t0);
            tcg_gen_xori_i32(cpu_sr_t, t1, 1);
            tcg_gen_xor_i32(cpu_sr_q, cpu_sr_m, t1);

            tcg_temp_free(zero);
            tcg_temp_free(t2);
            tcg_temp_free(t1);
            tcg_temp_free(t0);
        }
        return;
    case 0x300d: /* dmuls.l Rm,Rn */
        tcg_gen_muls2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
        return;
    case 0x3005: /* dmulu.l Rm,Rn */
        tcg_gen_mulu2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
        return;
    case 0x600e: /* exts.b Rm,Rn */
        tcg_gen_ext8s_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x600f: /* exts.w Rm,Rn */
        tcg_gen_ext16s_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x600c: /* extu.b Rm,Rn */
        tcg_gen_ext8u_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x600d: /* extu.w Rm,Rn */
        tcg_gen_ext16u_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x000f: /* mac.l @Rm+,@Rn+ */
        {
            TCGv arg0, arg1;
            arg0 = tcg_temp_new();
            tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx, MO_TESL);
            arg1 = tcg_temp_new();
            tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx, MO_TESL);
            gen_helper_macl(cpu_env, arg0, arg1);
            tcg_temp_free(arg1);
            tcg_temp_free(arg0);
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
            tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
        }
        return;
    case 0x400f: /* mac.w @Rm+,@Rn+ */
        {
            TCGv arg0, arg1;
            arg0 = tcg_temp_new();
            tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx, MO_TESL);
            arg1 = tcg_temp_new();
            tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx, MO_TESL);
            gen_helper_macw(cpu_env, arg0, arg1);
            tcg_temp_free(arg1);
            tcg_temp_free(arg0);
            tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 2);
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
        }
        return;
    case 0x0007: /* mul.l Rm,Rn */
        tcg_gen_mul_i32(cpu_macl, REG(B7_4), REG(B11_8));
        return;
    case 0x200f: /* muls.w Rm,Rn */
        {
            TCGv arg0, arg1;
            arg0 = tcg_temp_new();
            tcg_gen_ext16s_i32(arg0, REG(B7_4));
            arg1 = tcg_temp_new();
            tcg_gen_ext16s_i32(arg1, REG(B11_8));
            tcg_gen_mul_i32(cpu_macl, arg0, arg1);
            tcg_temp_free(arg1);
            tcg_temp_free(arg0);
        }
        return;
    case 0x200e: /* mulu.w Rm,Rn */
        {
            TCGv arg0, arg1;
            arg0 = tcg_temp_new();
            tcg_gen_ext16u_i32(arg0, REG(B7_4));
            arg1 = tcg_temp_new();
            tcg_gen_ext16u_i32(arg1, REG(B11_8));
            tcg_gen_mul_i32(cpu_macl, arg0, arg1);
            tcg_temp_free(arg1);
            tcg_temp_free(arg0);
        }
        return;
    case 0x600b: /* neg Rm,Rn */
        tcg_gen_neg_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x600a: /* negc Rm,Rn */
        {
            TCGv t0 = tcg_const_i32(0);
            tcg_gen_add2_i32(REG(B11_8), cpu_sr_t,
                             REG(B7_4), t0, cpu_sr_t, t0);
            tcg_gen_sub2_i32(REG(B11_8), cpu_sr_t,
                             t0, t0, REG(B11_8), cpu_sr_t);
            tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
            tcg_temp_free(t0);
        }
        return;
    case 0x6007: /* not Rm,Rn */
        tcg_gen_not_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x200b: /* or Rm,Rn */
        tcg_gen_or_i32(REG(B11_8), REG(B11_8), REG(B7_4));
        return;
    case 0x400c: /* shad Rm,Rn */
        {
            TCGv t0 = tcg_temp_new();
            TCGv t1 = tcg_temp_new();
            TCGv t2 = tcg_temp_new();

            tcg_gen_andi_i32(t0, REG(B7_4), 0x1f);

            /* positive case: shift to the left */
            tcg_gen_shl_i32(t1, REG(B11_8), t0);

            /* negative case: shift to the right in two steps to
               correctly handle the -32 case */
            tcg_gen_xori_i32(t0, t0, 0x1f);
            tcg_gen_sar_i32(t2, REG(B11_8), t0);
            tcg_gen_sari_i32(t2, t2, 1);

            /* select between the two cases */
            tcg_gen_movi_i32(t0, 0);
            tcg_gen_movcond_i32(TCG_COND_GE, REG(B11_8), REG(B7_4), t0, t1, t2);

            tcg_temp_free(t0);
            tcg_temp_free(t1);
            tcg_temp_free(t2);
        }
        return;
    case 0x400d: /* shld Rm,Rn */
        {
            TCGv t0 = tcg_temp_new();
            TCGv t1 = tcg_temp_new();
            TCGv t2 = tcg_temp_new();

            tcg_gen_andi_i32(t0, REG(B7_4), 0x1f);

            /* positive case: shift to the left */
            tcg_gen_shl_i32(t1, REG(B11_8), t0);

            /* negative case: shift to the right in two steps to
               correctly handle the -32 case */
            tcg_gen_xori_i32(t0, t0, 0x1f);
            tcg_gen_shr_i32(t2, REG(B11_8), t0);
            tcg_gen_shri_i32(t2, t2, 1);

            /* select between the two cases */
            tcg_gen_movi_i32(t0, 0);
            tcg_gen_movcond_i32(TCG_COND_GE, REG(B11_8), REG(B7_4), t0, t1, t2);

            tcg_temp_free(t0);
            tcg_temp_free(t1);
            tcg_temp_free(t2);
        }
        return;
    case 0x3008: /* sub Rm,Rn */
        tcg_gen_sub_i32(REG(B11_8), REG(B11_8), REG(B7_4));
        return;
    case 0x300a: /* subc Rm,Rn */
        {
            TCGv t0, t1;
            t0 = tcg_const_tl(0);
            t1 = tcg_temp_new();
            tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0);
            tcg_gen_sub2_i32(REG(B11_8), cpu_sr_t,
                             REG(B11_8), t0, t1, cpu_sr_t);
            tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
            tcg_temp_free(t0);
            tcg_temp_free(t1);
        }
        return;
    case 0x300b: /* subv Rm,Rn */
        {
            TCGv t0, t1, t2;
            t0 = tcg_temp_new();
            tcg_gen_sub_i32(t0, REG(B11_8), REG(B7_4));
            t1 = tcg_temp_new();
            tcg_gen_xor_i32(t1, t0, REG(B7_4));
            t2 = tcg_temp_new();
            tcg_gen_xor_i32(t2, REG(B11_8), REG(B7_4));
            tcg_gen_and_i32(t1, t1, t2);
            tcg_temp_free(t2);
            tcg_gen_shri_i32(cpu_sr_t, t1, 31);
            tcg_temp_free(t1);
            tcg_gen_mov_i32(REG(B11_8), t0);
            tcg_temp_free(t0);
        }
        return;
    case 0x2008: /* tst Rm,Rn */
        {
            TCGv val = tcg_temp_new();
            tcg_gen_and_i32(val, REG(B7_4), REG(B11_8));
            tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
            tcg_temp_free(val);
        }
        return;
    case 0x200a: /* xor Rm,Rn */
        tcg_gen_xor_i32(REG(B11_8), REG(B11_8), REG(B7_4));
        return;
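    /* For the fmov cases below: when FPSCR.SZ is set, fmov moves a 64-bit
       register pair.  XHACK maps the encoding onto the flat cpu_fregs
       index: the low bit selects the opposite bank (the XD registers),
       the remaining bits give the even register of the pair.  */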
    case 0xf00c: /* fmov {F,D,X}Rm,{F,D,X}Rn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        if (ctx->tbflags & FPSCR_SZ) {
            TCGv_i64 fp = tcg_temp_new_i64();
            gen_load_fpr64(fp, XHACK(B7_4));
            gen_store_fpr64(fp, XHACK(B11_8));
            tcg_temp_free_i64(fp);
        } else {
            tcg_gen_mov_i32(FREG(B11_8), FREG(B7_4));
        }
        return;
    case 0xf00a: /* fmov {F,D,X}Rm,@Rn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        if (ctx->tbflags & FPSCR_SZ) {
            TCGv addr_hi = tcg_temp_new();
            int fr = XHACK(B7_4);
            tcg_gen_addi_i32(addr_hi, REG(B11_8), 4);
            tcg_gen_qemu_st_i32(FREG(fr), REG(B11_8), ctx->memidx, MO_TEUL);
            tcg_gen_qemu_st_i32(FREG(fr + 1), addr_hi, ctx->memidx, MO_TEUL);
            tcg_temp_free(addr_hi);
        } else {
            tcg_gen_qemu_st_i32(FREG(B7_4), REG(B11_8), ctx->memidx, MO_TEUL);
        }
        return;
    case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        if (ctx->tbflags & FPSCR_SZ) {
            TCGv addr_hi = tcg_temp_new();
            int fr = XHACK(B11_8);
            tcg_gen_addi_i32(addr_hi, REG(B7_4), 4);
            tcg_gen_qemu_ld_i32(FREG(fr), REG(B7_4), ctx->memidx, MO_TEUL);
            tcg_gen_qemu_ld_i32(FREG(fr + 1), addr_hi, ctx->memidx, MO_TEUL);
            tcg_temp_free(addr_hi);
        } else {
            tcg_gen_qemu_ld_i32(FREG(B11_8), REG(B7_4), ctx->memidx, MO_TEUL);
        }
        return;
    case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        if (ctx->tbflags & FPSCR_SZ) {
            TCGv addr_hi = tcg_temp_new();
            int fr = XHACK(B11_8);
            tcg_gen_addi_i32(addr_hi, REG(B7_4), 4);
            tcg_gen_qemu_ld_i32(FREG(fr), REG(B7_4), ctx->memidx, MO_TEUL);
            tcg_gen_qemu_ld_i32(FREG(fr + 1), addr_hi, ctx->memidx, MO_TEUL);
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 8);
            tcg_temp_free(addr_hi);
        } else {
            tcg_gen_qemu_ld_i32(FREG(B11_8), REG(B7_4), ctx->memidx, MO_TEUL);
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
        }
        return;
    case 0xf00b: /* fmov {F,D,X}Rm,@-Rn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        {
            TCGv addr = tcg_temp_new_i32();
            tcg_gen_subi_i32(addr, REG(B11_8), 4);
            if (ctx->tbflags & FPSCR_SZ) {
                int fr = XHACK(B7_4);
                tcg_gen_qemu_st_i32(FREG(fr + 1), addr, ctx->memidx, MO_TEUL);
                tcg_gen_subi_i32(addr, addr, 4);
                tcg_gen_qemu_st_i32(FREG(fr), addr, ctx->memidx, MO_TEUL);
            } else {
                tcg_gen_qemu_st_i32(FREG(B7_4), addr, ctx->memidx, MO_TEUL);
            }
            tcg_gen_mov_i32(REG(B11_8), addr);
            tcg_temp_free(addr);
        }
        return;
    case 0xf006: /* fmov @(R0,Rm),{F,D,X}Rn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        {
            TCGv addr = tcg_temp_new_i32();
            tcg_gen_add_i32(addr, REG(B7_4), REG(0));
            if (ctx->tbflags & FPSCR_SZ) {
                int fr = XHACK(B11_8);
                tcg_gen_qemu_ld_i32(FREG(fr), addr, ctx->memidx, MO_TEUL);
                tcg_gen_addi_i32(addr, addr, 4);
                tcg_gen_qemu_ld_i32(FREG(fr + 1), addr, ctx->memidx, MO_TEUL);
            } else {
                tcg_gen_qemu_ld_i32(FREG(B11_8), addr, ctx->memidx, MO_TEUL);
            }
            tcg_temp_free(addr);
        }
        return;
    case 0xf007: /* fmov {F,D,X}Rm,@(R0,Rn) - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B11_8), REG(0));
            if (ctx->tbflags & FPSCR_SZ) {
                int fr = XHACK(B7_4);
                tcg_gen_qemu_st_i32(FREG(fr), addr, ctx->memidx, MO_TEUL);
                tcg_gen_addi_i32(addr, addr, 4);
                tcg_gen_qemu_st_i32(FREG(fr + 1), addr, ctx->memidx, MO_TEUL);
            } else {
                tcg_gen_qemu_st_i32(FREG(B7_4), addr, ctx->memidx, MO_TEUL);
            }
            tcg_temp_free(addr);
        }
        return;
    case 0xf000: /* fadd Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
    case 0xf001: /* fsub Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
    case 0xf002: /* fmul Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
    case 0xf003: /* fdiv Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
    case 0xf004: /* fcmp/eq Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
    case 0xf005: /* fcmp/gt Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
        {
            CHECK_FPU_ENABLED
            if (ctx->tbflags & FPSCR_PR) {
                TCGv_i64 fp0, fp1;

                if (ctx->opcode & 0x0110) {
                    break; /* illegal instruction */
                }
                fp0 = tcg_temp_new_i64();
                fp1 = tcg_temp_new_i64();
                gen_load_fpr64(fp0, DREG(B11_8));
                gen_load_fpr64(fp1, DREG(B7_4));
                switch (ctx->opcode & 0xf00f) {
                case 0xf000: /* fadd Rm,Rn */
                    gen_helper_fadd_DT(fp0, cpu_env, fp0, fp1);
                    break;
                case 0xf001: /* fsub Rm,Rn */
                    gen_helper_fsub_DT(fp0, cpu_env, fp0, fp1);
                    break;
                case 0xf002: /* fmul Rm,Rn */
                    gen_helper_fmul_DT(fp0, cpu_env, fp0, fp1);
                    break;
                case 0xf003: /* fdiv Rm,Rn */
                    gen_helper_fdiv_DT(fp0, cpu_env, fp0, fp1);
                    break;
                case 0xf004: /* fcmp/eq Rm,Rn */
                    gen_helper_fcmp_eq_DT(cpu_sr_t, cpu_env, fp0, fp1);
                    tcg_temp_free_i64(fp0);
                    tcg_temp_free_i64(fp1);
                    return;
                case 0xf005: /* fcmp/gt Rm,Rn */
                    gen_helper_fcmp_gt_DT(cpu_sr_t, cpu_env, fp0, fp1);
                    tcg_temp_free_i64(fp0);
                    tcg_temp_free_i64(fp1);
                    return;
                }
                gen_store_fpr64(fp0, DREG(B11_8));
                tcg_temp_free_i64(fp0);
                tcg_temp_free_i64(fp1);
            } else {
                switch (ctx->opcode & 0xf00f) {
                case 0xf000: /* fadd Rm,Rn */
                    gen_helper_fadd_FT(FREG(B11_8), cpu_env,
                                       FREG(B11_8), FREG(B7_4));
                    break;
                case 0xf001: /* fsub Rm,Rn */
                    gen_helper_fsub_FT(FREG(B11_8), cpu_env,
                                       FREG(B11_8), FREG(B7_4));
                    break;
                case 0xf002: /* fmul Rm,Rn */
                    gen_helper_fmul_FT(FREG(B11_8), cpu_env,
                                       FREG(B11_8), FREG(B7_4));
                    break;
                case 0xf003: /* fdiv Rm,Rn */
                    gen_helper_fdiv_FT(FREG(B11_8), cpu_env,
                                       FREG(B11_8), FREG(B7_4));
                    break;
                case 0xf004: /* fcmp/eq Rm,Rn */
                    gen_helper_fcmp_eq_FT(cpu_sr_t, cpu_env,
                                          FREG(B11_8), FREG(B7_4));
                    return;
                case 0xf005: /* fcmp/gt Rm,Rn */
                    gen_helper_fcmp_gt_FT(cpu_sr_t, cpu_env,
                                          FREG(B11_8), FREG(B7_4));
                    return;
                }
            }
        }
        return;
    case 0xf00e: /* fmac FR0,RM,Rn */
        {
            CHECK_FPU_ENABLED
            if (ctx->tbflags & FPSCR_PR) {
                break; /* illegal instruction */
            } else {
                gen_helper_fmac_FT(FREG(B11_8), cpu_env,
                                   FREG(0), FREG(B7_4), FREG(B11_8));
                return;
            }
        }
    }

    switch (ctx->opcode & 0xff00) {
    case 0xc900: /* and #imm,R0 */
        tcg_gen_andi_i32(REG(0), REG(0), B7_0);
        return;
    case 0xcd00: /* and.b #imm,@(R0,GBR) */
        {
            TCGv addr, val;
            addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(0), cpu_gbr);
            val = tcg_temp_new();
            tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
            tcg_gen_andi_i32(val, val, B7_0);
            tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
            tcg_temp_free(val);
            tcg_temp_free(addr);
        }
        return;
    case 0x8b00: /* bf label */
        CHECK_NOT_DELAY_SLOT
        gen_conditional_jump(ctx, ctx->pc + 4 + B7_0s * 2, false);
        return;
    case 0x8f00: /* bf/s label */
        CHECK_NOT_DELAY_SLOT
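        /* The branch is taken when T == 0, so record !T as the delayed
           condition; the delay slot executes first, then
           gen_delayed_conditional_jump consumes it.  */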
        tcg_gen_xori_i32(cpu_delayed_cond, cpu_sr_t, 1);
        ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2;
        ctx->envflags |= DELAY_SLOT_CONDITIONAL;
        return;
    case 0x8900: /* bt label */
        CHECK_NOT_DELAY_SLOT
        gen_conditional_jump(ctx, ctx->pc + 4 + B7_0s * 2, true);
        return;
    case 0x8d00: /* bt/s label */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_mov_i32(cpu_delayed_cond, cpu_sr_t);
        ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2;
        ctx->envflags |= DELAY_SLOT_CONDITIONAL;
        return;
    case 0x8800: /* cmp/eq #imm,R0 */
        tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, REG(0), B7_0s);
        return;
    case 0xc400: /* mov.b @(disp,GBR),R0 */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
            tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
            tcg_temp_free(addr);
        }
        return;
    case 0xc500: /* mov.w @(disp,GBR),R0 */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
            tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW);
            tcg_temp_free(addr);
        }
        return;
    case 0xc600: /* mov.l @(disp,GBR),R0 */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
            tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESL);
            tcg_temp_free(addr);
        }
        return;
    case 0xc000: /* mov.b R0,@(disp,GBR) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
            tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
            tcg_temp_free(addr);
        }
        return;
    case 0xc100: /* mov.w R0,@(disp,GBR) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
            tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW);
            tcg_temp_free(addr);
        }
        return;
    case 0xc200: /* mov.l R0,@(disp,GBR) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
            tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUL);
            tcg_temp_free(addr);
        }
        return;
    case 0x8000: /* mov.b R0,@(disp,Rn) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
            tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
            tcg_temp_free(addr);
        }
        return;
    case 0x8100: /* mov.w R0,@(disp,Rn) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
            tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW);
            tcg_temp_free(addr);
        }
        return;
    case 0x8400: /* mov.b @(disp,Rn),R0 */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
            tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
            tcg_temp_free(addr);
        }
        return;
    case 0x8500: /* mov.w @(disp,Rn),R0 */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
            tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW);
            tcg_temp_free(addr);
        }
        return;
    case 0xc700: /* mova @(disp,PC),R0 */
        tcg_gen_movi_i32(REG(0), ((ctx->pc & 0xfffffffc) + 4 + B7_0 * 4) & ~3);
        return;
    case 0xcb00: /* or #imm,R0 */
        tcg_gen_ori_i32(REG(0), REG(0), B7_0);
        return;
    case 0xcf00: /* or.b #imm,@(R0,GBR) */
        {
            TCGv addr, val;
            addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(0), cpu_gbr);
            val = tcg_temp_new();
            tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
            tcg_gen_ori_i32(val, val, B7_0);
            tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
            tcg_temp_free(val);
            tcg_temp_free(addr);
        }
        return;
    case 0xc300: /* trapa #imm */
        {
            TCGv imm;
            CHECK_NOT_DELAY_SLOT
            gen_save_cpu_state(ctx, true);
            imm = tcg_const_i32(B7_0);
            gen_helper_trapa(cpu_env, imm);
            tcg_temp_free(imm);
            ctx->bstate = BS_EXCP;
        }
        return;
    case 0xc800: /* tst #imm,R0 */
        {
            TCGv val = tcg_temp_new();
            tcg_gen_andi_i32(val, REG(0), B7_0);
            tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
            tcg_temp_free(val);
        }
        return;
    case 0xcc00: /* tst.b #imm,@(R0,GBR) */
        {
            TCGv val = tcg_temp_new();
            tcg_gen_add_i32(val, REG(0), cpu_gbr);
            tcg_gen_qemu_ld_i32(val, val, ctx->memidx, MO_UB);
            tcg_gen_andi_i32(val, val, B7_0);
            tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
            tcg_temp_free(val);
        }
        return;
    case 0xca00: /* xor #imm,R0 */
        tcg_gen_xori_i32(REG(0), REG(0), B7_0);
        return;
    case 0xce00: /* xor.b #imm,@(R0,GBR) */
        {
            TCGv addr, val;
            addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(0), cpu_gbr);
            val = tcg_temp_new();
            tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
            tcg_gen_xori_i32(val, val, B7_0);
            tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
            tcg_temp_free(val);
            tcg_temp_free(addr);
        }
        return;
    }

    switch (ctx->opcode & 0xf08f) {
    case 0x408e: /* ldc Rm,Rn_BANK */
        CHECK_PRIVILEGED
        tcg_gen_mov_i32(ALTREG(B6_4), REG(B11_8));
        return;
    case 0x4087: /* ldc.l @Rm+,Rn_BANK */
        CHECK_PRIVILEGED
        tcg_gen_qemu_ld_i32(ALTREG(B6_4), REG(B11_8), ctx->memidx, MO_TESL);
        tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
        return;
    case 0x0082: /* stc Rm_BANK,Rn */
        CHECK_PRIVILEGED
        tcg_gen_mov_i32(REG(B11_8), ALTREG(B6_4));
        return;
    case 0x4083: /* stc.l Rm_BANK,@-Rn */
        CHECK_PRIVILEGED
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_subi_i32(addr, REG(B11_8), 4);
            tcg_gen_qemu_st_i32(ALTREG(B6_4), addr, ctx->memidx, MO_TEUL);
            tcg_gen_mov_i32(REG(B11_8), addr);
            tcg_temp_free(addr);
        }
        return;
    }

    switch (ctx->opcode & 0xf0ff) {
    case 0x0023: /* braf Rn */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_addi_i32(cpu_delayed_pc, REG(B11_8), ctx->pc + 4);
        ctx->envflags |= DELAY_SLOT;
        ctx->delayed_pc = (uint32_t) -1;
        return;
    case 0x0003: /* bsrf Rn */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
        tcg_gen_add_i32(cpu_delayed_pc, REG(B11_8), cpu_pr);
        ctx->envflags |= DELAY_SLOT;
        ctx->delayed_pc = (uint32_t) -1;
        return;
    case 0x4015: /* cmp/pl Rn */
        tcg_gen_setcondi_i32(TCG_COND_GT, cpu_sr_t, REG(B11_8), 0);
        return;
    case 0x4011: /* cmp/pz Rn */
        tcg_gen_setcondi_i32(TCG_COND_GE, cpu_sr_t, REG(B11_8), 0);
        return;
    case 0x4010: /* dt Rn */
        tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 1);
        tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, REG(B11_8), 0);
        return;
    case 0x402b: /* jmp @Rn */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
        ctx->envflags |= DELAY_SLOT;
        ctx->delayed_pc = (uint32_t) -1;
        return;
    case 0x400b: /* jsr @Rn */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
        tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
        ctx->envflags |= DELAY_SLOT;
        ctx->delayed_pc = (uint32_t) -1;
        return;
    case 0x400e: /* ldc Rm,SR */
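        /* Only the architecturally defined SR bits are writable; the
           mask 0x700083f3 covers MD, RB, BL, FD, M, Q, IMASK, S and T.  */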
        CHECK_PRIVILEGED
        {
            TCGv val = tcg_temp_new();
            tcg_gen_andi_i32(val, REG(B11_8), 0x700083f3);
            gen_write_sr(val);
            tcg_temp_free(val);
            ctx->bstate = BS_STOP;
        }
        return;
    case 0x4007: /* ldc.l @Rm+,SR */
        CHECK_PRIVILEGED
        {
            TCGv val = tcg_temp_new();
            tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx, MO_TESL);
            tcg_gen_andi_i32(val, val, 0x700083f3);
            gen_write_sr(val);
            tcg_temp_free(val);
            tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
            ctx->bstate = BS_STOP;
        }
        return;
    case 0x0002: /* stc SR,Rn */
        CHECK_PRIVILEGED
        gen_read_sr(REG(B11_8));
        return;
    case 0x4003: /* stc SR,@-Rn */
        CHECK_PRIVILEGED
        {
            TCGv addr = tcg_temp_new();
            TCGv val = tcg_temp_new();
            tcg_gen_subi_i32(addr, REG(B11_8), 4);
            gen_read_sr(val);
            tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL);
            tcg_gen_mov_i32(REG(B11_8), addr);
            tcg_temp_free(val);
            tcg_temp_free(addr);
        }
        return;
#define LD(reg,ldnum,ldpnum,prechk) \
  case ldnum:                                                         \
    prechk                                                            \
    tcg_gen_mov_i32(cpu_##reg, REG(B11_8));                           \
    return;                                                           \
  case ldpnum:                                                        \
    prechk                                                            \
    tcg_gen_qemu_ld_i32(cpu_##reg, REG(B11_8), ctx->memidx, MO_TESL); \
    tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);                      \
    return;
#define ST(reg,stnum,stpnum,prechk) \
  case stnum:                                                         \
    prechk                                                            \
    tcg_gen_mov_i32(REG(B11_8), cpu_##reg);                           \
    return;                                                           \
  case stpnum:                                                        \
    prechk                                                            \
    {                                                                 \
        TCGv addr = tcg_temp_new();                                   \
        tcg_gen_subi_i32(addr, REG(B11_8), 4);                        \
        tcg_gen_qemu_st_i32(cpu_##reg, addr, ctx->memidx, MO_TEUL);   \
        tcg_gen_mov_i32(REG(B11_8), addr);                            \
        tcg_temp_free(addr);                                          \
    }                                                                 \
    return;
#define LDST(reg,ldnum,ldpnum,stnum,stpnum,prechk) \
    LD(reg,ldnum,ldpnum,prechk) \
    ST(reg,stnum,stpnum,prechk)
    LDST(gbr,  0x401e, 0x4017, 0x0012, 0x4013, {})
    LDST(vbr,  0x402e, 0x4027, 0x0022, 0x4023, CHECK_PRIVILEGED)
    LDST(ssr,  0x403e, 0x4037, 0x0032, 0x4033, CHECK_PRIVILEGED)
    LDST(spc,  0x404e, 0x4047, 0x0042, 0x4043, CHECK_PRIVILEGED)
    ST(sgr,  0x003a, 0x4032, CHECK_PRIVILEGED)
    LD(sgr,  0x403a, 0x4036, CHECK_PRIVILEGED if (!(ctx->features & SH_FEATURE_SH4A)) break;)
    LDST(dbr,  0x40fa, 0x40f6, 0x00fa, 0x40f2, CHECK_PRIVILEGED)
    LDST(mach, 0x400a, 0x4006, 0x000a, 0x4002, {})
    LDST(macl, 0x401a, 0x4016, 0x001a, 0x4012, {})
    LDST(pr,   0x402a, 0x4026, 0x002a, 0x4022, {})
    LDST(fpul, 0x405a, 0x4056, 0x005a, 0x4052, {CHECK_FPU_ENABLED})
    case 0x406a: /* lds Rm,FPSCR */
        CHECK_FPU_ENABLED
        gen_helper_ld_fpscr(cpu_env, REG(B11_8));
        ctx->bstate = BS_STOP;
        return;
    case 0x4066: /* lds.l @Rm+,FPSCR */
        CHECK_FPU_ENABLED
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_qemu_ld_i32(addr, REG(B11_8), ctx->memidx, MO_TESL);
            tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
            gen_helper_ld_fpscr(cpu_env, addr);
            tcg_temp_free(addr);
            ctx->bstate = BS_STOP;
        }
        return;
    case 0x006a: /* sts FPSCR,Rn */
        CHECK_FPU_ENABLED
        tcg_gen_andi_i32(REG(B11_8), cpu_fpscr, 0x003fffff);
        return;
    case 0x4062: /* sts FPSCR,@-Rn */
        CHECK_FPU_ENABLED
        {
            TCGv addr, val;
            val = tcg_temp_new();
            tcg_gen_andi_i32(val, cpu_fpscr, 0x003fffff);
            addr = tcg_temp_new();
            tcg_gen_subi_i32(addr, REG(B11_8), 4);
            tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL);
            tcg_gen_mov_i32(REG(B11_8), addr);
            tcg_temp_free(addr);
            tcg_temp_free(val);
        }
        return;
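    /* movca.l writes through to memory, but records the previous contents
       of the line so that a following ocbi can undo the store; see the
       comment at the top of _decode_opc.  */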
    case 0x00c3: /* movca.l R0,@Rn */
        {
            TCGv val = tcg_temp_new();
            tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx, MO_TEUL);
            gen_helper_movcal(cpu_env, REG(B11_8), val);
            tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
            tcg_temp_free(val);
        }
        ctx->has_movcal = 1;
        return;
    case 0x40a9: /* movua.l @Rm,R0 */
        /* Load non-boundary-aligned data */
        if (ctx->features & SH_FEATURE_SH4A) {
            tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx,
                                MO_TEUL | MO_UNALN);
            return;
        }
        break;
    case 0x40e9: /* movua.l @Rm+,R0 */
        /* Load non-boundary-aligned data */
        if (ctx->features & SH_FEATURE_SH4A) {
            tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx,
                                MO_TEUL | MO_UNALN);
            tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
            return;
        }
        break;
    case 0x0029: /* movt Rn */
        tcg_gen_mov_i32(REG(B11_8), cpu_sr_t);
        return;
    case 0x0073:
        /* MOVCO.L
         *     LDST -> T
         *     If (T == 1) R0 -> (Rn)
         *     0 -> LDST
         */
        if (ctx->features & SH_FEATURE_SH4A) {
            TCGLabel *label = gen_new_label();
            tcg_gen_mov_i32(cpu_sr_t, cpu_ldst);
            tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ldst, 0, label);
            tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
            gen_set_label(label);
            tcg_gen_movi_i32(cpu_ldst, 0);
            return;
        } else {
            break;
        }
    case 0x0063:
        /* MOVLI.L @Rm,R0
         *     1 -> LDST
         *     (Rm) -> R0
         *     When interrupt/exception
         *     occurred 0 -> LDST
         */
        if (ctx->features & SH_FEATURE_SH4A) {
            tcg_gen_movi_i32(cpu_ldst, 0);
            tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TESL);
            tcg_gen_movi_i32(cpu_ldst, 1);
            return;
        } else {
            break;
        }
    case 0x0093: /* ocbi @Rn */
        gen_helper_ocbi(cpu_env, REG(B11_8));
        return;
    case 0x00a3: /* ocbp @Rn */
    case 0x00b3: /* ocbwb @Rn */
        /* These instructions are supposed to do nothing in case of
           a cache miss.  Given that we only partially emulate caches
           it is safe to simply ignore them.
        */
        return;
    case 0x0083: /* pref @Rn */
        return;
    case 0x00d3: /* prefi @Rn */
        if (ctx->features & SH_FEATURE_SH4A) {
            return;
        } else {
            break;
        }
    case 0x00e3: /* icbi @Rn */
        if (ctx->features & SH_FEATURE_SH4A) {
            return;
        } else {
            break;
        }
    case 0x00ab: /* synco */
        if (ctx->features & SH_FEATURE_SH4A) {
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
            return;
        }
        break;
    case 0x4024: /* rotcl Rn */
        {
            TCGv tmp = tcg_temp_new();
            tcg_gen_mov_i32(tmp, cpu_sr_t);
            tcg_gen_shri_i32(cpu_sr_t, REG(B11_8), 31);
            tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
            tcg_gen_or_i32(REG(B11_8), REG(B11_8), tmp);
            tcg_temp_free(tmp);
        }
        return;
    case 0x4025: /* rotcr Rn */
        {
            TCGv tmp = tcg_temp_new();
            tcg_gen_shli_i32(tmp, cpu_sr_t, 31);
            tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
            tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
            tcg_gen_or_i32(REG(B11_8), REG(B11_8), tmp);
            tcg_temp_free(tmp);
        }
        return;
    case 0x4004: /* rotl Rn */
        tcg_gen_rotli_i32(REG(B11_8), REG(B11_8), 1);
        tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
        return;
    case 0x4005: /* rotr Rn */
        tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
        tcg_gen_rotri_i32(REG(B11_8), REG(B11_8), 1);
        return;
    case 0x4000: /* shll Rn */
    case 0x4020: /* shal Rn */
        tcg_gen_shri_i32(cpu_sr_t, REG(B11_8), 31);
        tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
        return;
    case 0x4021: /* shar Rn */
        tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
        tcg_gen_sari_i32(REG(B11_8), REG(B11_8), 1);
        return;
    case 0x4001: /* shlr Rn */
        tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
        tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
        return;
    case 0x4008: /* shll2 Rn */
        tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 2);
        return;
    case 0x4018: /* shll8 Rn */
        tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 8);
        return;
    case 0x4028: /* shll16 Rn */
        tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 16);
        return;
    case 0x4009: /* shlr2 Rn */
        tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 2);
        return;
    case 0x4019: /* shlr8 Rn */
        tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 8);
        return;
    case 0x4029: /* shlr16 Rn */
        tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 16);
        return;
    case 0x401b: /* tas.b @Rn */
        {
            TCGv val = tcg_const_i32(0x80);
            tcg_gen_atomic_fetch_or_i32(val, REG(B11_8), val,
                                        ctx->memidx, MO_UB);
            tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
            tcg_temp_free(val);
        }
        return;
    case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        tcg_gen_mov_i32(FREG(B11_8), cpu_fpul);
        return;
    case 0xf01d: /* flds FRm,FPUL - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        tcg_gen_mov_i32(cpu_fpul, FREG(B11_8));
        return;
    case 0xf02d: /* float FPUL,FRn/DRn - FPSCR: R[PR,Enable.I]/W[Cause,Flag] */
        CHECK_FPU_ENABLED
        if (ctx->tbflags & FPSCR_PR) {
            TCGv_i64 fp;
            if (ctx->opcode & 0x0100) {
                break; /* illegal instruction */
            }
            fp = tcg_temp_new_i64();
            gen_helper_float_DT(fp, cpu_env, cpu_fpul);
            gen_store_fpr64(fp, DREG(B11_8));
            tcg_temp_free_i64(fp);
        } else {
            gen_helper_float_FT(FREG(B11_8), cpu_env, cpu_fpul);
        }
        return;
    case 0xf03d: /* ftrc FRm/DRm,FPUL - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
        CHECK_FPU_ENABLED
        if (ctx->tbflags & FPSCR_PR) {
            TCGv_i64 fp;
            if (ctx->opcode & 0x0100) {
                break; /* illegal instruction */
            }
            fp = tcg_temp_new_i64();
            gen_load_fpr64(fp, DREG(B11_8));
            gen_helper_ftrc_DT(cpu_fpul, cpu_env, fp);
            tcg_temp_free_i64(fp);
        } else {
            gen_helper_ftrc_FT(cpu_fpul, cpu_env, FREG(B11_8));
        }
        return;
    case 0xf04d: /* fneg FRn/DRn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        tcg_gen_xori_i32(FREG(B11_8), FREG(B11_8), 0x80000000);
        return;
    case 0xf05d: /* fabs FRn/DRn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        tcg_gen_andi_i32(FREG(B11_8), FREG(B11_8), 0x7fffffff);
        return;
    case 0xf06d: /* fsqrt FRn */
        CHECK_FPU_ENABLED
        if (ctx->tbflags & FPSCR_PR) {
            if (ctx->opcode & 0x0100) {
                break; /* illegal instruction */
            }
            TCGv_i64 fp = tcg_temp_new_i64();
            gen_load_fpr64(fp, DREG(B11_8));
            gen_helper_fsqrt_DT(fp, cpu_env, fp);
            gen_store_fpr64(fp, DREG(B11_8));
            tcg_temp_free_i64(fp);
        } else {
            gen_helper_fsqrt_FT(FREG(B11_8), cpu_env, FREG(B11_8));
        }
        return;
    case 0xf07d: /* fsrra FRn */
        CHECK_FPU_ENABLED
        break;
    case 0xf08d: /* fldi0 FRn - FPSCR: R[PR] */
        CHECK_FPU_ENABLED
        if (!(ctx->tbflags & FPSCR_PR)) {
            tcg_gen_movi_i32(FREG(B11_8), 0);
        }
        return;
    case 0xf09d: /* fldi1 FRn - FPSCR: R[PR] */
        CHECK_FPU_ENABLED
        if (!(ctx->tbflags & FPSCR_PR)) {
            tcg_gen_movi_i32(FREG(B11_8), 0x3f800000);
        }
        return;
    case 0xf0ad: /* fcnvsd FPUL,DRn */
        CHECK_FPU_ENABLED
        {
            TCGv_i64 fp = tcg_temp_new_i64();
            gen_helper_fcnvsd_FT_DT(fp, cpu_env, cpu_fpul);
            gen_store_fpr64(fp, DREG(B11_8));
            tcg_temp_free_i64(fp);
        }
        return;
    case 0xf0bd: /* fcnvds DRn,FPUL */
        CHECK_FPU_ENABLED
        {
            TCGv_i64 fp = tcg_temp_new_i64();
            gen_load_fpr64(fp, DREG(B11_8));
            gen_helper_fcnvds_DT_FT(cpu_fpul, cpu_env, fp);
            tcg_temp_free_i64(fp);
        }
        return;
    case 0xf0ed: /* fipr FVm,FVn */
        CHECK_FPU_ENABLED
        if ((ctx->tbflags & FPSCR_PR) == 0) {
            TCGv m, n;
            m = tcg_const_i32((ctx->opcode >> 8) & 3);
            n = tcg_const_i32((ctx->opcode >> 10) & 3);
            gen_helper_fipr(cpu_env, m, n);
            tcg_temp_free(m);
            tcg_temp_free(n);
            return;
        }
        break;
    case 0xf0fd: /* ftrv XMTRX,FVn */
        CHECK_FPU_ENABLED
        if ((ctx->opcode & 0x0300) == 0x0100 &&
            (ctx->tbflags & FPSCR_PR) == 0) {
            TCGv n;
            n = tcg_const_i32((ctx->opcode >> 10) & 3);
            gen_helper_ftrv(cpu_env, n);
            tcg_temp_free(n);
            return;
        }
        break;
    }
#if 0
    fprintf(stderr, "unknown instruction 0x%04x at pc 0x%08x\n",
            ctx->opcode, ctx->pc);
    fflush(stderr);
#endif
    gen_save_cpu_state(ctx, true);
    if (ctx->envflags & DELAY_SLOT_MASK) {
        gen_helper_raise_slot_illegal_instruction(cpu_env);
    } else {
        gen_helper_raise_illegal_instruction(cpu_env);
    }
    ctx->bstate = BS_EXCP;
}

static void decode_opc(DisasContext * ctx)
{
    uint32_t old_flags = ctx->envflags;

    _decode_opc(ctx);

    if (old_flags & DELAY_SLOT_MASK) {
        /* go out of the delay slot */
        ctx->envflags &= ~DELAY_SLOT_MASK;

        /* When in an exclusive region, we must continue to the end
           for conditional branches.  */
        if (ctx->tbflags & GUSA_EXCLUSIVE
            && old_flags & DELAY_SLOT_CONDITIONAL) {
            gen_delayed_conditional_jump(ctx);
            return;
        }
        /* Otherwise this is probably an invalid gUSA region.
           Drop the GUSA bits so the next TB doesn't see them.  */
        ctx->envflags &= ~GUSA_MASK;

        tcg_gen_movi_i32(cpu_flags, ctx->envflags);
        ctx->bstate = BS_BRANCH;
        if (old_flags & DELAY_SLOT_CONDITIONAL) {
            gen_delayed_conditional_jump(ctx);
        } else {
            gen_jump(ctx);
        }
    }
}

#ifdef CONFIG_USER_ONLY
/* For uniprocessors, SH4 uses optimistic restartable atomic sequences.
   Upon an interrupt, a real kernel would simply notice magic values in
   the registers and reset the PC to the start of the sequence.

   For QEMU, we cannot do this in quite the same way.  Instead, we notice
   the normal start of such a sequence (mov #-x,r15).  While we can handle
   any sequence via cpu_exec_step_atomic, we can recognize the "normal"
   sequences and transform them into atomic operations as seen by the host.
*/
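/* As an illustration, a region modeled on the kernel's gUSA atomic-add
 * sequence looks like this (a sketch; actual register allocation varies):
 *
 *      mova   1f,r0       ! r0 = address of the end point
 *      mov    r15,r1      ! save r15
 *      mov    #-6,r15     ! enter gUSA: r15 = -(region length in bytes)
 *      mov.l  @r2,r3      ! load the old value
 *      add    r4,r3       ! compute the new value
 *      mov.l  r3,@r2      ! store it back
 *   1: mov    r1,r15      ! leave gUSA: restore r15
 *
 * decode_gusa pattern-matches the load/op/store core and, where possible,
 * replaces it with a single host atomic operation.  */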
#ifdef CONFIG_USER_ONLY
/* For uniprocessors, SH4 uses optimistic restartable atomic sequences.
   Upon an interrupt, a real kernel would simply notice magic values in
   the registers and reset the PC to the start of the sequence.

   For QEMU, we cannot do this in quite the same way.  Instead, we notice
   the normal start of such a sequence (mov #-x,r15).  While we can handle
   any sequence via cpu_exec_step_atomic, we can recognize the "normal"
   sequences and transform them into atomic operations as seen by the host.
*/
static int decode_gusa(DisasContext *ctx, CPUSH4State *env, int *pmax_insns)
{
    uint16_t insns[5];
    int ld_adr, ld_dst, ld_mop;
    int op_dst, op_src, op_opc;
    int mv_src, mt_dst, st_src, st_mop;
    TCGv op_arg;

    uint32_t pc = ctx->pc;
    uint32_t pc_end = ctx->tb->cs_base;
    int backup = sextract32(ctx->tbflags, GUSA_SHIFT, 8);
    int max_insns = (pc_end - pc) / 2;
    int i;

    if (pc != pc_end + backup || max_insns < 2) {
        /* This is a malformed gUSA region.  Don't do anything special,
           since the interpreter is likely to get confused.  */
        ctx->envflags &= ~GUSA_MASK;
        return 0;
    }

    if (ctx->tbflags & GUSA_EXCLUSIVE) {
        /* Regardless of single-stepping or the end of the page,
           we must complete execution of the gUSA region while
           holding the exclusive lock.  */
        *pmax_insns = max_insns;
        return 0;
    }

    /* The state machine below will consume only a few insns.
       If there are more than that in a region, fail now.  */
    if (max_insns > ARRAY_SIZE(insns)) {
        goto fail;
    }

    /* Read all of the insns for the region.  */
    for (i = 0; i < max_insns; ++i) {
        insns[i] = cpu_lduw_code(env, pc + i * 2);
    }

    ld_adr = ld_dst = ld_mop = -1;
    mv_src = -1;
    op_dst = op_src = op_opc = -1;
    mt_dst = -1;
    st_src = st_mop = -1;
    TCGV_UNUSED(op_arg);
    i = 0;

#define NEXT_INSN \
    do { if (i >= max_insns) goto fail; ctx->opcode = insns[i++]; } while (0)
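
    /*
     * Illustrative example (added, not from the original source): a
     * typical gUSA atomic-add region as emitted by gcc/glibc looks like
     *
     *     mova   1f,r0        ! r0 = end-of-region address (tb->cs_base)
     *     mov    r15,r1       ! save the real stack pointer
     *     mov    #-6,r15      ! enter gUSA mode; r15 = -(region length)
     *  0: mov.l  @r2,r3       ! the load,      matched first below
     *     add    r4,r3        ! the operation, matched next
     *     mov.l  r3,@r2       ! the store,     matched last
     *  1: mov    r1,r15       ! leave gUSA mode
     *
     * which the state machine below turns into a single host atomic add.
     */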
    /*
     * Expect a load to begin the region.
     */
    NEXT_INSN;
    switch (ctx->opcode & 0xf00f) {
    case 0x6000: /* mov.b @Rm,Rn */
        ld_mop = MO_SB;
        break;
    case 0x6001: /* mov.w @Rm,Rn */
        ld_mop = MO_TESW;
        break;
    case 0x6002: /* mov.l @Rm,Rn */
        ld_mop = MO_TESL;
        break;
    default:
        goto fail;
    }
    ld_adr = B7_4;
    ld_dst = B11_8;
    if (ld_adr == ld_dst) {
        goto fail;
    }
    /* Unless we see a mov, any two-operand operation must use ld_dst.  */
    op_dst = ld_dst;

    /*
     * Expect an optional register move.
     */
    NEXT_INSN;
    switch (ctx->opcode & 0xf00f) {
    case 0x6003: /* mov Rm,Rn */
        /* Here we want to recognize ld_dst being saved for later consumption,
           or for another input register being copied so that ld_dst need not
           be clobbered during the operation.  */
        op_dst = B11_8;
        mv_src = B7_4;
        if (op_dst == ld_dst) {
            /* Overwriting the load output.  */
            goto fail;
        }
        if (mv_src != ld_dst) {
            /* Copying a new input; constrain op_src to match the load.  */
            op_src = ld_dst;
        }
        break;

    default:
        /* Put back and re-examine as operation.  */
        --i;
    }

    /*
     * Expect the operation.
     */
    NEXT_INSN;
    switch (ctx->opcode & 0xf00f) {
    case 0x300c: /* add Rm,Rn */
        op_opc = INDEX_op_add_i32;
        goto do_reg_op;
    case 0x2009: /* and Rm,Rn */
        op_opc = INDEX_op_and_i32;
        goto do_reg_op;
    case 0x200a: /* xor Rm,Rn */
        op_opc = INDEX_op_xor_i32;
        goto do_reg_op;
    case 0x200b: /* or Rm,Rn */
        op_opc = INDEX_op_or_i32;
    do_reg_op:
        /* The operation register should be as expected, and the
           other input cannot depend on the load.  */
        if (op_dst != B11_8) {
            goto fail;
        }
        if (op_src < 0) {
            /* Unconstrained input.  */
            op_src = B7_4;
        } else if (op_src == B7_4) {
            /* Constrained input matched load.  All operations are
               commutative; "swap" them by "moving" the load output
               to the (implicit) first argument and the move source
               to the (explicit) second argument.  */
            op_src = mv_src;
        } else {
            goto fail;
        }
        op_arg = REG(op_src);
        break;

    case 0x6007: /* not Rm,Rn */
        if (ld_dst != B7_4 || mv_src >= 0) {
            goto fail;
        }
        op_dst = B11_8;
        op_opc = INDEX_op_xor_i32;
        op_arg = tcg_const_i32(-1);
        break;

    case 0x7000 ... 0x700f: /* add #imm,Rn */
        if (op_dst != B11_8 || mv_src >= 0) {
            goto fail;
        }
        op_opc = INDEX_op_add_i32;
        op_arg = tcg_const_i32(B7_0s);
        break;

    case 0x3000: /* cmp/eq Rm,Rn */
        /* Looking for the middle of a compare-and-swap sequence,
           beginning with the compare.  Operands can be either order,
           but with only one overlapping the load.  */
        if ((ld_dst == B11_8) + (ld_dst == B7_4) != 1 || mv_src >= 0) {
            goto fail;
        }
        op_opc = INDEX_op_setcond_i32; /* placeholder */
        op_src = (ld_dst == B11_8 ? B7_4 : B11_8);
        op_arg = REG(op_src);

        NEXT_INSN;
        switch (ctx->opcode & 0xff00) {
        case 0x8b00: /* bf label */
        case 0x8f00: /* bf/s label */
            if (pc + (i + 1 + B7_0s) * 2 != pc_end) {
                goto fail;
            }
            if ((ctx->opcode & 0xff00) == 0x8b00) { /* bf label */
                break;
            }
            /* We're looking to unconditionally modify Rn with the
               result of the comparison, within the delay slot of
               the branch.  This is used by older gcc.  */
            NEXT_INSN;
            if ((ctx->opcode & 0xf0ff) == 0x0029) { /* movt Rn */
                mt_dst = B11_8;
            } else {
                goto fail;
            }
            break;

        default:
            goto fail;
        }
        break;

    case 0x2008: /* tst Rm,Rn */
        /* Looking for a compare-and-swap against zero.  */
        if (ld_dst != B11_8 || ld_dst != B7_4 || mv_src >= 0) {
            goto fail;
        }
        op_opc = INDEX_op_setcond_i32;
        op_arg = tcg_const_i32(0);

        NEXT_INSN;
        if ((ctx->opcode & 0xff00) != 0x8900 /* bt label */
            || pc + (i + 1 + B7_0s) * 2 != pc_end) {
            goto fail;
        }
        break;

    default:
        /* Put back and re-examine as store.  */
        --i;
    }
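
    /*
     * Illustrative example (added): a compare-and-swap region, as
     * matched by the cmp/eq path above (register numbers hypothetical):
     *
     *  0: mov.l  @r2,r3      ! load the old value
     *     cmp/eq r3,r4       ! compare against the expected value
     *     bf     1f          ! branch to pc_end if different
     *     mov.l  r5,@r2      ! store the new value
     *  1:
     *
     * This is emitted below as a single tcg_gen_atomic_cmpxchg_i32.
     */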
    /*
     * Expect the store.
     */
    /* The store must be the last insn.  */
    if (i != max_insns - 1) {
        goto fail;
    }
    NEXT_INSN;
    switch (ctx->opcode & 0xf00f) {
    case 0x2000: /* mov.b Rm,@Rn */
        st_mop = MO_UB;
        break;
    case 0x2001: /* mov.w Rm,@Rn */
        st_mop = MO_UW;
        break;
    case 0x2002: /* mov.l Rm,@Rn */
        st_mop = MO_UL;
        break;
    default:
        goto fail;
    }
    /* The store must match the load.  */
    if (ld_adr != B11_8 || st_mop != (ld_mop & MO_SIZE)) {
        goto fail;
    }
    st_src = B7_4;

#undef NEXT_INSN
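
    /*
     * Illustrative summary (added): at this point the matcher has
     * reduced the region to one of the following shapes, keyed by
     * op_opc in the switch below:
     *
     *   -1                     load; store           -> atomic exchange
     *   INDEX_op_add_i32       load; add;      store -> atomic add
     *   INDEX_op_{and,or,xor}  load; op;       store -> atomic and/or/xor
     *   INDEX_op_setcond_i32   load; cmp; bf;  store -> atomic cmpxchg
     */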
    /*
     * Emit the operation.
     */
    tcg_gen_insn_start(pc, ctx->envflags);
    switch (op_opc) {
    case -1:
        /* No operation found.  Look for exchange pattern.  */
        if (st_src == ld_dst || mv_src >= 0) {
            goto fail;
        }
        tcg_gen_atomic_xchg_i32(REG(ld_dst), REG(ld_adr), REG(st_src),
                                ctx->memidx, ld_mop);
        break;

    case INDEX_op_add_i32:
        if (op_dst != st_src) {
            goto fail;
        }
        if (op_dst == ld_dst && st_mop == MO_UL) {
            tcg_gen_atomic_add_fetch_i32(REG(ld_dst), REG(ld_adr),
                                         op_arg, ctx->memidx, ld_mop);
        } else {
            tcg_gen_atomic_fetch_add_i32(REG(ld_dst), REG(ld_adr),
                                         op_arg, ctx->memidx, ld_mop);
            if (op_dst != ld_dst) {
                /* Note that mop sizes < 4 cannot use add_fetch
                   because it won't carry into the higher bits.  */
                tcg_gen_add_i32(REG(op_dst), REG(ld_dst), op_arg);
            }
        }
        break;

    case INDEX_op_and_i32:
        if (op_dst != st_src) {
            goto fail;
        }
        if (op_dst == ld_dst) {
            tcg_gen_atomic_and_fetch_i32(REG(ld_dst), REG(ld_adr),
                                         op_arg, ctx->memidx, ld_mop);
        } else {
            tcg_gen_atomic_fetch_and_i32(REG(ld_dst), REG(ld_adr),
                                         op_arg, ctx->memidx, ld_mop);
            tcg_gen_and_i32(REG(op_dst), REG(ld_dst), op_arg);
        }
        break;

    case INDEX_op_or_i32:
        if (op_dst != st_src) {
            goto fail;
        }
        if (op_dst == ld_dst) {
            tcg_gen_atomic_or_fetch_i32(REG(ld_dst), REG(ld_adr),
                                        op_arg, ctx->memidx, ld_mop);
        } else {
            tcg_gen_atomic_fetch_or_i32(REG(ld_dst), REG(ld_adr),
                                        op_arg, ctx->memidx, ld_mop);
            tcg_gen_or_i32(REG(op_dst), REG(ld_dst), op_arg);
        }
        break;

    case INDEX_op_xor_i32:
        if (op_dst != st_src) {
            goto fail;
        }
        if (op_dst == ld_dst) {
            tcg_gen_atomic_xor_fetch_i32(REG(ld_dst), REG(ld_adr),
                                         op_arg, ctx->memidx, ld_mop);
        } else {
            tcg_gen_atomic_fetch_xor_i32(REG(ld_dst), REG(ld_adr),
                                         op_arg, ctx->memidx, ld_mop);
            tcg_gen_xor_i32(REG(op_dst), REG(ld_dst), op_arg);
        }
        break;

    case INDEX_op_setcond_i32:
        if (st_src == ld_dst) {
            goto fail;
        }
        tcg_gen_atomic_cmpxchg_i32(REG(ld_dst), REG(ld_adr), op_arg,
                                   REG(st_src), ctx->memidx, ld_mop);
        tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, REG(ld_dst), op_arg);
        if (mt_dst >= 0) {
            tcg_gen_mov_i32(REG(mt_dst), cpu_sr_t);
        }
        break;

    default:
        g_assert_not_reached();
    }

    /* If op_src is not a valid register, then op_arg was a constant.  */
    if (op_src < 0) {
        tcg_temp_free_i32(op_arg);
    }

    /* The entire region has been translated.  */
    ctx->envflags &= ~GUSA_MASK;
    ctx->pc = pc_end;
    return max_insns;

 fail:
    qemu_log_mask(LOG_UNIMP, "Unrecognized gUSA sequence %08x-%08x\n",
                  pc, pc_end);

    /* Restart with the EXCLUSIVE bit set, within a TB run via
       cpu_exec_step_atomic holding the exclusive lock.  */
    tcg_gen_insn_start(pc, ctx->envflags);
    ctx->envflags |= GUSA_EXCLUSIVE;
    gen_save_cpu_state(ctx, false);
    gen_helper_exclusive(cpu_env);
    ctx->bstate = BS_EXCP;

    /* We're not executing an instruction, but we must report one for the
       purposes of accounting within the TB.  We might as well report the
       entire region consumed via ctx->pc so that it's immediately available
       in the disassembly dump.  */
    ctx->pc = pc_end;
    return 1;
}
#endif

void gen_intermediate_code(CPUSH4State *env, struct TranslationBlock *tb)
{
    SuperHCPU *cpu = sh_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    DisasContext ctx;
    target_ulong pc_start;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    ctx.pc = pc_start;
    ctx.tbflags = (uint32_t)tb->flags;
    ctx.envflags = tb->flags & TB_FLAG_ENVFLAGS_MASK;
    ctx.bstate = BS_NONE;
    ctx.memidx = (ctx.tbflags & (1u << SR_MD)) == 0 ? 1 : 0;
    /* We don't know if the delayed pc came from a dynamic or static branch,
       so assume it is a dynamic branch.  */
    ctx.delayed_pc = -1; /* use delayed pc from env pointer */
    ctx.tb = tb;
    ctx.singlestep_enabled = cs->singlestep_enabled;
    ctx.features = env->features;
    ctx.has_movcal = (ctx.tbflags & TB_FLAG_PENDING_MOVCA);
    ctx.gbank = ((ctx.tbflags & (1 << SR_MD)) &&
                 (ctx.tbflags & (1 << SR_RB))) * 0x10;

    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    max_insns = MIN(max_insns, TCG_MAX_INSNS);

    /* Since the ISA is fixed-width, we can bound by the number
       of instructions remaining on the page.  */
    num_insns = -(ctx.pc | TARGET_PAGE_MASK) / 2;
    max_insns = MIN(max_insns, num_insns);
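
    /*
     * Worked example (added): with 4 KiB pages, TARGET_PAGE_MASK is
     * 0xfffff000, so for ctx.pc == 0x8c000ffc the expression
     * -(ctx.pc | TARGET_PAGE_MASK) evaluates to -(0xfffffffc) == 4
     * bytes remaining on the page, i.e. at most 2 more insns.
     */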

    /* Single stepping means just that.  */
    if (ctx.singlestep_enabled || singlestep) {
        max_insns = 1;
    }

    gen_tb_start(tb);
    num_insns = 0;

#ifdef CONFIG_USER_ONLY
    if (ctx.tbflags & GUSA_MASK) {
        num_insns = decode_gusa(&ctx, env, &max_insns);
    }
#endif

    while (ctx.bstate == BS_NONE
           && num_insns < max_insns
           && !tcg_op_buf_full()) {
        tcg_gen_insn_start(ctx.pc, ctx.envflags);
        num_insns++;

        if (unlikely(cpu_breakpoint_test(cs, ctx.pc, BP_ANY))) {
            /* We have hit a breakpoint - make sure PC is up-to-date.  */
            gen_save_cpu_state(&ctx, true);
            gen_helper_debug(cpu_env);
            ctx.bstate = BS_EXCP;
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order for it to be properly
               cleared -- thus we increment the PC here so that the logic
               setting tb->size below does the right thing.  */
            ctx.pc += 2;
            break;
        }

        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        ctx.opcode = cpu_lduw_code(env, ctx.pc);
        decode_opc(&ctx);
        ctx.pc += 2;
    }
    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    if (ctx.tbflags & GUSA_EXCLUSIVE) {
        /* Ending the region of exclusivity.  Clear the bits.  */
        ctx.envflags &= ~GUSA_MASK;
    }

    if (cs->singlestep_enabled) {
        gen_save_cpu_state(&ctx, true);
        gen_helper_debug(cpu_env);
    } else {
        switch (ctx.bstate) {
        case BS_STOP:
            gen_save_cpu_state(&ctx, true);
            tcg_gen_exit_tb(0);
            break;
        case BS_NONE:
            gen_save_cpu_state(&ctx, false);
            gen_goto_tb(&ctx, 0, ctx.pc);
            break;
        case BS_EXCP:
            /* fall through */
        case BS_BRANCH:
        default:
            break;
        }
    }

    gen_tb_end(tb, num_insns);

    tb->size = ctx.pc - pc_start;
    tb->icount = num_insns;

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(pc_start)) {
        qemu_log_lock();
        qemu_log("IN:\n"); /* , lookup_symbol(pc_start)); */
        log_target_disas(cs, pc_start, ctx.pc - pc_start, 0);
        qemu_log("\n");
        qemu_log_unlock();
    }
#endif
}

void restore_state_to_opc(CPUSH4State *env, TranslationBlock *tb,
                          target_ulong *data)
{
    /* data[] holds the values recorded by tcg_gen_insn_start(pc, envflags). */
    env->pc = data[0];
    env->flags = data[1];
    /* Theoretically delayed_pc should also be restored.  In practice the
       branch instruction is re-executed after exception, so the delayed
       branch target will be recomputed.  */
}