/*
 * m68k translation
 *
 * Copyright (c) 2005-2007 CodeSourcery
 * Written by Paul Brook
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "qemu/log.h"
#include "exec/cpu_ldst.h"
#include "exec/translator.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/log.h"
#include "fpu/softfloat.h"


//#define DEBUG_DISPATCH 1

/* Declare one TCG global per fixed CPU-state field listed in qregs.def.  */
#define DEFO32(name, offset) static TCGv QREG_##name;
#define DEFO64(name, offset) static TCGv_i64 QREG_##name;
#include "qregs.def"
#undef DEFO32
#undef DEFO64

static TCGv_i32 cpu_halted;
static TCGv_i32 cpu_exception_index;

/* Backing storage for register names: 8 "Dn" + 8 "An" (3 bytes each
   including NUL) and 4 "ACCn" (5 bytes each).  */
static char cpu_reg_names[2 * 8 * 3 + 5 * 4];
static TCGv cpu_dregs[8];
static TCGv cpu_aregs[8];
static TCGv_i64 cpu_macc[4];

/* Extract a 3-bit register number from INSN at bit position POS.  */
#define REG(insn, pos) (((insn) >> (pos)) & 7)
#define DREG(insn, pos) cpu_dregs[REG(insn, pos)]
#define AREG(insn, pos) get_areg(s, REG(insn, pos))
#define MACREG(acc) cpu_macc[acc]
#define QREG_SP get_areg(s, 7)

/* Sentinel returned by the EA generators for invalid addressing modes;
   created at an out-of-range env offset and only ever compared against.  */
static TCGv NULL_QREG;
#define IS_NULL_QREG(t) (t == NULL_QREG)
/* Used to distinguish stores from bad addressing modes.  */
static TCGv store_dummy;

#include "exec/gen-icount.h"

/* One-time creation of all TCG globals used by the m68k translator.  */
void m68k_tcg_init(void)
{
    char *p;
    int i;

#define DEFO32(name, offset) \
    QREG_##name = tcg_global_mem_new_i32(cpu_env, \
        offsetof(CPUM68KState, offset), #name);
#define DEFO64(name, offset) \
    QREG_##name = tcg_global_mem_new_i64(cpu_env, \
        offsetof(CPUM68KState, offset), #name);
#include "qregs.def"
#undef DEFO32
#undef DEFO64

    /* halted/exception_index live in CPUState, which precedes env inside
       M68kCPU, hence the negative offset from cpu_env.  */
    cpu_halted = tcg_global_mem_new_i32(cpu_env,
                                        -offsetof(M68kCPU, env) +
                                        offsetof(CPUState, halted), "HALTED");
    cpu_exception_index = tcg_global_mem_new_i32(cpu_env,
                                                 -offsetof(M68kCPU, env) +
                                                 offsetof(CPUState, exception_index),
                                                 "EXCEPTION");

    p = cpu_reg_names;
    for (i = 0; i < 8; i++) {
        sprintf(p, "D%d", i);
        cpu_dregs[i] = tcg_global_mem_new(cpu_env,
                                          offsetof(CPUM68KState, dregs[i]), p);
        p += 3;
        sprintf(p, "A%d", i);
        cpu_aregs[i] = tcg_global_mem_new(cpu_env,
                                          offsetof(CPUM68KState, aregs[i]), p);
        p += 3;
    }
    for (i = 0; i < 4; i++) {
        sprintf(p, "ACC%d", i);
        cpu_macc[i] = tcg_global_mem_new_i64(cpu_env,
                                         offsetof(CPUM68KState, macc[i]), p);
        p += 5;
    }

    /* Dummy globals at impossible offsets; used only as sentinel values.  */
    NULL_QREG = tcg_global_mem_new(cpu_env, -4, "NULL");
    store_dummy = tcg_global_mem_new(cpu_env, -8, "NULL");
}

/* internal defines */
typedef struct DisasContext {
    CPUM68KState *env;
    target_ulong insn_pc; /* Start of the current instruction.  */
    target_ulong pc;
    int is_jmp;
    CCOp cc_op; /* Current CC operation */
    int cc_op_synced;   /* Nonzero when env->cc_op matches cc_op.  */
    struct TranslationBlock *tb;
    int singlestep_enabled;
    TCGv_i64 mactmp;
    int done_mac;
    /* Bitmask of address registers with a pending writeback, plus the
       temps holding their new values (see delay_set_areg).  */
    int writeback_mask;
    TCGv writeback[8];
#define MAX_TO_RELEASE 8
    /* Temps to be freed at the end of the current instruction.  */
    int release_count;
    TCGv release[MAX_TO_RELEASE];
} DisasContext;

/* Reset the per-instruction list of temps to release.  */
static void init_release_array(DisasContext *s)
{
#ifdef CONFIG_DEBUG_TCG
    memset(s->release, 0, sizeof(s->release));
#endif
    s->release_count = 0;
}

/* Free all temps queued by mark_to_release and reset the list.  */
static void do_release(DisasContext *s)
{
    int i;
    for (i = 0; i < s->release_count; i++) {
        tcg_temp_free(s->release[i]);
    }
    init_release_array(s);
}

/* Queue TMP to be freed at the end of the instruction; returns TMP.  */
static TCGv mark_to_release(DisasContext *s, TCGv tmp)
{
    g_assert(s->release_count < MAX_TO_RELEASE);
    return s->release[s->release_count++] = tmp;
}

/* Return the current value of address register REGNO, honouring any
   writeback still pending for it.  */
static TCGv get_areg(DisasContext *s, unsigned regno)
{
    if (s->writeback_mask & (1 << regno)) {
        return s->writeback[regno];
    } else {
        return cpu_aregs[regno];
    }
}

/* Record a deferred update of address register REGNO to VAL.  If
   GIVE_TEMP, ownership of VAL (a temp) transfers to the context;
   otherwise VAL is copied into a fresh temp.  */
static void delay_set_areg(DisasContext *s, unsigned regno,
                           TCGv val, bool give_temp)
{
    if (s->writeback_mask & (1 << regno)) {
        if (give_temp) {
            tcg_temp_free(s->writeback[regno]);
            s->writeback[regno] = val;
        } else {
            tcg_gen_mov_i32(s->writeback[regno], val);
        }
    } else {
        s->writeback_mask |= 1 << regno;
        if (give_temp) {
            s->writeback[regno] = val;
        } else {
            TCGv tmp = tcg_temp_new();
            s->writeback[regno] = tmp;
            tcg_gen_mov_i32(tmp, val);
        }
    }
}

/* Commit all pending address-register writebacks and free their temps.  */
static void do_writebacks(DisasContext *s)
{
    unsigned mask = s->writeback_mask;
    if (mask) {
        s->writeback_mask = 0;
        do {
            unsigned regno = ctz32(mask);
            tcg_gen_mov_i32(cpu_aregs[regno], s->writeback[regno]);
            tcg_temp_free(s->writeback[regno]);
            mask &= mask - 1;   /* clear lowest set bit */
        } while (mask);
    }
}

/* is_jmp field values */
#define DISAS_JUMP DISAS_TARGET_0 /* only pc was modified dynamically */
#define DISAS_UPDATE DISAS_TARGET_1 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP DISAS_TARGET_2 /* only pc was modified statically */
#define DISAS_JUMP_NEXT DISAS_TARGET_3

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s)   (!(s->tb->flags & TB_FLAGS_MSR_S))
#define SFC_INDEX(s) ((s->tb->flags & TB_FLAGS_SFC_S) ? \
                      MMU_KERNEL_IDX : MMU_USER_IDX)
#define DFC_INDEX(s) ((s->tb->flags & TB_FLAGS_DFC_S) ? \
                      MMU_KERNEL_IDX : MMU_USER_IDX)
#endif

typedef void (*disas_proc)(CPUM68KState *env, DisasContext *s, uint16_t insn);

#ifdef DEBUG_DISPATCH
/* Debug build: wrap each handler so every dispatch is logged.  */
#define DISAS_INSN(name)                                                \
    static void real_disas_##name(CPUM68KState *env, DisasContext *s,   \
                                  uint16_t insn);                       \
    static void disas_##name(CPUM68KState *env, DisasContext *s,        \
                             uint16_t insn)                             \
    {                                                                   \
        qemu_log("Dispatch " #name "\n");                               \
        real_disas_##name(env, s, insn);                                \
    }                                                                   \
    static void real_disas_##name(CPUM68KState *env, DisasContext *s,   \
                                  uint16_t insn)
#else
#define DISAS_INSN(name)                                                \
    static void disas_##name(CPUM68KState *env, DisasContext *s,        \
                             uint16_t insn)
#endif

/* For each CC_OP variant, the set of CCF_* flags whose cached values
   are still meaningful and must be preserved across a CC_OP switch.  */
static const uint8_t cc_op_live[CC_OP_NB] = {
    [CC_OP_DYNAMIC] = CCF_C | CCF_V | CCF_Z | CCF_N | CCF_X,
    [CC_OP_FLAGS] = CCF_C | CCF_V | CCF_Z | CCF_N | CCF_X,
    [CC_OP_ADDB ... CC_OP_ADDL] = CCF_X | CCF_N | CCF_V,
    [CC_OP_SUBB ... CC_OP_SUBL] = CCF_X | CCF_N | CCF_V,
    [CC_OP_CMPB ... CC_OP_CMPL] = CCF_X | CCF_N | CCF_V,
    [CC_OP_LOGIC] = CCF_X | CCF_N
};

/* Change the translator's condition-code state to OP, discarding any
   cached flag values that OP no longer keeps live.  */
static void set_cc_op(DisasContext *s, CCOp op)
{
    CCOp old_op = s->cc_op;
    int dead;

    if (old_op == op) {
        return;
    }
    s->cc_op = op;
    s->cc_op_synced = 0;

    /* Discard CC computation that will no longer be used.
       Note that X and N are never dead.  */
    dead = cc_op_live[old_op] & ~cc_op_live[op];
    if (dead & CCF_C) {
        tcg_gen_discard_i32(QREG_CC_C);
    }
    if (dead & CCF_Z) {
        tcg_gen_discard_i32(QREG_CC_Z);
    }
    if (dead & CCF_V) {
        tcg_gen_discard_i32(QREG_CC_V);
    }
}

/* Update the CPU env CC_OP state.  */
static void update_cc_op(DisasContext *s)
{
    if (!s->cc_op_synced) {
        s->cc_op_synced = 1;
        tcg_gen_movi_i32(QREG_CC_OP, s->cc_op);
    }
}

/* Generate a jump to an immediate address.  */
static void gen_jmp_im(DisasContext *s, uint32_t dest)
{
    update_cc_op(s);
    tcg_gen_movi_i32(QREG_PC, dest);
    s->is_jmp = DISAS_JUMP;
}

/* Generate a jump to the address in qreg DEST.  */
static void gen_jmp(DisasContext *s, TCGv dest)
{
    update_cc_op(s);
    tcg_gen_mov_i32(QREG_PC, dest);
    s->is_jmp = DISAS_JUMP;
}

/* Emit a call to the raise_exception helper with exception number NR.  */
static void gen_raise_exception(int nr)
{
    TCGv_i32 tmp = tcg_const_i32(nr);

    gen_helper_raise_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

/* Set PC to WHERE, then raise exception NR.  */
static void gen_exception(DisasContext *s, uint32_t where, int nr)
{
    gen_jmp_im(s, where);
    gen_raise_exception(nr);
}

/* Raise an address error at the start of the current instruction.  */
static inline void gen_addr_fault(DisasContext *s)
{
    gen_exception(s, s->insn_pc, EXCP_ADDRESS);
}

/* Generate a load from the specified address.  Narrow values are
   sign extended to full register width.
 */
static inline TCGv gen_load(DisasContext *s, int opsize, TCGv addr,
                            int sign, int index)
{
    TCGv tmp;
    tmp = tcg_temp_new_i32();
    switch(opsize) {
    case OS_BYTE:
        if (sign)
            tcg_gen_qemu_ld8s(tmp, addr, index);
        else
            tcg_gen_qemu_ld8u(tmp, addr, index);
        break;
    case OS_WORD:
        if (sign)
            tcg_gen_qemu_ld16s(tmp, addr, index);
        else
            tcg_gen_qemu_ld16u(tmp, addr, index);
        break;
    case OS_LONG:
        tcg_gen_qemu_ld32u(tmp, addr, index);
        break;
    default:
        g_assert_not_reached();
    }
    return tmp;
}

/* Generate a store of VAL (low OPSIZE bytes) to ADDR.  */
static inline void gen_store(DisasContext *s, int opsize, TCGv addr, TCGv val,
                             int index)
{
    switch(opsize) {
    case OS_BYTE:
        tcg_gen_qemu_st8(val, addr, index);
        break;
    case OS_WORD:
        tcg_gen_qemu_st16(val, addr, index);
        break;
    case OS_LONG:
        tcg_gen_qemu_st32(val, addr, index);
        break;
    default:
        g_assert_not_reached();
    }
}

typedef enum {
    EA_STORE,
    EA_LOADU,   /* zero-extending load */
    EA_LOADS    /* sign-extending load */
} ea_what;

/* Generate a zero-extending load if WHAT is EA_LOADU, a sign-extending
   load if EA_LOADS, otherwise (EA_STORE) generate a store of VAL.
   Stores return store_dummy; loads return a temp queued for release.  */
static TCGv gen_ldst(DisasContext *s, int opsize, TCGv addr, TCGv val,
                     ea_what what, int index)
{
    if (what == EA_STORE) {
        gen_store(s, opsize, addr, val, index);
        return store_dummy;
    } else {
        return mark_to_release(s, gen_load(s, opsize, addr,
                                           what == EA_LOADS, index));
    }
}

/* Read a 16-bit immediate constant */
static inline uint16_t read_im16(CPUM68KState *env, DisasContext *s)
{
    uint16_t im;
    im = cpu_lduw_code(env, s->pc);
    s->pc += 2;
    return im;
}

/* Read an 8-bit immediate constant; note this consumes a full 16-bit
   extension word and returns its low byte.  */
static inline uint8_t read_im8(CPUM68KState *env, DisasContext *s)
{
    return read_im16(env, s);
}

/* Read a 32-bit immediate constant.
 */
static inline uint32_t read_im32(CPUM68KState *env, DisasContext *s)
{
    uint32_t im;
    im = read_im16(env, s) << 16;
    im |= 0xffff & read_im16(env, s);
    return im;
}

/* Read a 64-bit immediate constant.  */
static inline uint64_t read_im64(CPUM68KState *env, DisasContext *s)
{
    uint64_t im;
    im = (uint64_t)read_im32(env, s) << 32;
    im |= (uint64_t)read_im32(env, s);
    return im;
}

/* Calculate an address index from extension word EXT: the selected
   Dn/An register, optionally sign-extended from 16 bits and scaled.
   TMP is scratch; the result may alias TMP or the register itself.  */
static TCGv gen_addr_index(DisasContext *s, uint16_t ext, TCGv tmp)
{
    TCGv add;
    int scale;

    add = (ext & 0x8000) ? AREG(ext, 12) : DREG(ext, 12);
    if ((ext & 0x800) == 0) {
        /* W/L bit clear: use only the sign-extended low word.  */
        tcg_gen_ext16s_i32(tmp, add);
        add = tmp;
    }
    scale = (ext >> 9) & 3;
    if (scale != 0) {
        tcg_gen_shli_i32(tmp, add, scale);
        add = tmp;
    }
    return add;
}

/* Handle a base + index + displacement effective address.
   A NULL_QREG base means pc-relative.  Returns NULL_QREG when the
   extension word is not supported by the current CPU features.  */
static TCGv gen_lea_indexed(CPUM68KState *env, DisasContext *s, TCGv base)
{
    uint32_t offset;
    uint16_t ext;
    TCGv add;
    TCGv tmp;
    uint32_t bd, od;

    offset = s->pc;
    ext = read_im16(env, s);

    if ((ext & 0x800) == 0 && !m68k_feature(s->env, M68K_FEATURE_WORD_INDEX))
        return NULL_QREG;

    if (m68k_feature(s->env, M68K_FEATURE_M68000) &&
        !m68k_feature(s->env, M68K_FEATURE_SCALED_INDEX)) {
        /* No scaled index on this CPU: force scale bits to zero.  */
        ext &= ~(3 << 9);
    }

    if (ext & 0x100) {
        /* full extension word format */
        if (!m68k_feature(s->env, M68K_FEATURE_EXT_FULL))
            return NULL_QREG;

        if ((ext & 0x30) > 0x10) {
            /* base displacement */
            if ((ext & 0x30) == 0x20) {
                bd = (int16_t)read_im16(env, s);
            } else {
                bd = read_im32(env, s);
            }
        } else {
            bd = 0;
        }
        tmp = mark_to_release(s, tcg_temp_new());
        if ((ext & 0x44) == 0) {
            /* pre-index */
            add = gen_addr_index(s, ext, tmp);
        } else {
            add = NULL_QREG;
        }
        if ((ext & 0x80) == 0) {
            /* base not suppressed */
            if (IS_NULL_QREG(base)) {
                /* pc-relative: fold pc and displacement into a constant.  */
                base = mark_to_release(s, tcg_const_i32(offset + bd));
                bd = 0;
            }
            if (!IS_NULL_QREG(add)) {
                tcg_gen_add_i32(tmp, add, base);
                add = tmp;
            } else {
                add = base;
            }
        }
        if (!IS_NULL_QREG(add)) {
            if (bd != 0) {
                tcg_gen_addi_i32(tmp, add, bd);
                add = tmp;
            }
        } else {
            add = mark_to_release(s, tcg_const_i32(bd));
        }
        if ((ext & 3) != 0) {
            /* memory indirect */
            base = mark_to_release(s, gen_load(s, OS_LONG, add, 0, IS_USER(s)));
            if ((ext & 0x44) == 4) {
                /* post-index */
                add = gen_addr_index(s, ext, tmp);
                tcg_gen_add_i32(tmp, add, base);
                add = tmp;
            } else {
                add = base;
            }
            if ((ext & 3) > 1) {
                /* outer displacement */
                if ((ext & 3) == 2) {
                    od = (int16_t)read_im16(env, s);
                } else {
                    od = read_im32(env, s);
                }
            } else {
                od = 0;
            }
            if (od != 0) {
                tcg_gen_addi_i32(tmp, add, od);
                add = tmp;
            }
        }
    } else {
        /* brief extension word format */
        tmp = mark_to_release(s, tcg_temp_new());
        add = gen_addr_index(s, ext, tmp);
        if (!IS_NULL_QREG(base)) {
            tcg_gen_add_i32(tmp, add, base);
            if ((int8_t)ext)
                tcg_gen_addi_i32(tmp, tmp, (int8_t)ext);
        } else {
            tcg_gen_addi_i32(tmp, add, offset + (int8_t)ext);
        }
        add = tmp;
    }
    return add;
}

/* Sign or zero extend a value.  */

static inline void gen_ext(TCGv res, TCGv val, int opsize, int sign)
{
    switch (opsize) {
    case OS_BYTE:
        if (sign) {
            tcg_gen_ext8s_i32(res, val);
        } else {
            tcg_gen_ext8u_i32(res, val);
        }
        break;
    case OS_WORD:
        if (sign) {
            tcg_gen_ext16s_i32(res, val);
        } else {
            tcg_gen_ext16u_i32(res, val);
        }
        break;
    case OS_LONG:
        tcg_gen_mov_i32(res, val);
        break;
    default:
        g_assert_not_reached();
    }
}

/* Evaluate all the CC flags.
 */

/* Materialize the cached CC state into concrete C/V/Z/N/X flag values
   (CC_OP_FLAGS form), emitting the minimal computation for the
   current cc_op.  */
static void gen_flush_flags(DisasContext *s)
{
    TCGv t0, t1;

    switch (s->cc_op) {
    case CC_OP_FLAGS:
        return;

    case CC_OP_ADDB:
    case CC_OP_ADDW:
    case CC_OP_ADDL:
        tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
        /* Compute signed overflow for addition.  */
        t0 = tcg_temp_new();
        t1 = tcg_temp_new();
        tcg_gen_sub_i32(t0, QREG_CC_N, QREG_CC_V);
        gen_ext(t0, t0, s->cc_op - CC_OP_ADDB, 1);
        tcg_gen_xor_i32(t1, QREG_CC_N, QREG_CC_V);
        tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, t0);
        tcg_temp_free(t0);
        tcg_gen_andc_i32(QREG_CC_V, t1, QREG_CC_V);
        tcg_temp_free(t1);
        break;

    case CC_OP_SUBB:
    case CC_OP_SUBW:
    case CC_OP_SUBL:
        tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
        /* Compute signed overflow for subtraction.  */
        t0 = tcg_temp_new();
        t1 = tcg_temp_new();
        tcg_gen_add_i32(t0, QREG_CC_N, QREG_CC_V);
        gen_ext(t0, t0, s->cc_op - CC_OP_SUBB, 1);
        tcg_gen_xor_i32(t1, QREG_CC_N, t0);
        tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, t0);
        tcg_temp_free(t0);
        tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, t1);
        tcg_temp_free(t1);
        break;

    case CC_OP_CMPB:
    case CC_OP_CMPW:
    case CC_OP_CMPL:
        /* For CMP, N holds the destination and V the source operand.  */
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_C, QREG_CC_N, QREG_CC_V);
        tcg_gen_sub_i32(QREG_CC_Z, QREG_CC_N, QREG_CC_V);
        gen_ext(QREG_CC_Z, QREG_CC_Z, s->cc_op - CC_OP_CMPB, 1);
        /* Compute signed overflow for subtraction.  */
        t0 = tcg_temp_new();
        tcg_gen_xor_i32(t0, QREG_CC_Z, QREG_CC_N);
        tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, QREG_CC_N);
        tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, t0);
        tcg_temp_free(t0);
        tcg_gen_mov_i32(QREG_CC_N, QREG_CC_Z);
        break;

    case CC_OP_LOGIC:
        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
        tcg_gen_movi_i32(QREG_CC_C, 0);
        tcg_gen_movi_i32(QREG_CC_V, 0);
        break;

    case CC_OP_DYNAMIC:
        gen_helper_flush_flags(cpu_env, QREG_CC_OP);
        s->cc_op_synced = 1;
        break;

    default:
        t0 = tcg_const_i32(s->cc_op);
        gen_helper_flush_flags(cpu_env, t0);
        tcg_temp_free(t0);
        s->cc_op_synced = 1;
        break;
    }

    /* Note that flush_flags also assigned to env->cc_op.  */
    s->cc_op = CC_OP_FLAGS;
}

/* Return VAL extended per OPSIZE/SIGN; for OS_LONG VAL itself is
   returned, otherwise a new temp queued for release.  */
static inline TCGv gen_extend(DisasContext *s, TCGv val, int opsize, int sign)
{
    TCGv tmp;

    if (opsize == OS_LONG) {
        tmp = val;
    } else {
        tmp = mark_to_release(s, tcg_temp_new());
        gen_ext(tmp, val, opsize, sign);
    }

    return tmp;
}

/* Set CC state for the result of a logic operation.  */
static void gen_logic_cc(DisasContext *s, TCGv val, int opsize)
{
    gen_ext(QREG_CC_N, val, opsize, 1);
    set_cc_op(s, CC_OP_LOGIC);
}

/* Set CC state for a compare: N = dest, V = src, op = CMP<size>.  */
static void gen_update_cc_cmp(DisasContext *s, TCGv dest, TCGv src, int opsize)
{
    tcg_gen_mov_i32(QREG_CC_N, dest);
    tcg_gen_mov_i32(QREG_CC_V, src);
    set_cc_op(s, CC_OP_CMPB + opsize);
}

/* Record the operands of an add/sub for later flag materialization.  */
static void gen_update_cc_add(TCGv dest, TCGv src, int opsize)
{
    gen_ext(QREG_CC_N, dest, opsize, 1);
    tcg_gen_mov_i32(QREG_CC_V, src);
}

/* Size in bytes of an operand of size OPSIZE.  */
static inline int opsize_bytes(int opsize)
{
    switch (opsize) {
    case OS_BYTE: return 1;
    case OS_WORD: return 2;
    case OS_LONG: return 4;
    case OS_SINGLE: return 4;
    case OS_DOUBLE: return 8;
    case OS_EXTENDED: return 12;
    case OS_PACKED: return 12;
    default:
        g_assert_not_reached();
    }
}

/* Decode the standard 2-bit size field at bits 7:6 of INSN.  */
static inline int insn_opsize(int insn)
{
    switch ((insn >> 6) & 3) {
    case 0: return OS_BYTE;
    case 1: return OS_WORD;
    case 2: return OS_LONG;
    default:
        g_assert_not_reached();
    }
}

/* Decode the 3-bit FPU format field at bit position POS of EXT.  */
static inline int ext_opsize(int ext, int pos)
{
    switch ((ext >> pos) & 7) {
    case 0: return OS_LONG;
    case 1: return OS_SINGLE;
    case 2: return OS_EXTENDED;
    case 3: return OS_PACKED;
    case 4: return OS_WORD;
    case 5: return OS_DOUBLE;
    case 6: return OS_BYTE;
    default:
        g_assert_not_reached();
    }
}

/* Assign value to a register.  If the width is less than the register width
   only the low part of the register is set.  */
static void gen_partset_reg(int opsize, TCGv reg, TCGv val)
{
    TCGv tmp;
    switch (opsize) {
    case OS_BYTE:
        tcg_gen_andi_i32(reg, reg, 0xffffff00);
        tmp = tcg_temp_new();
        tcg_gen_ext8u_i32(tmp, val);
        tcg_gen_or_i32(reg, reg, tmp);
        tcg_temp_free(tmp);
        break;
    case OS_WORD:
        tcg_gen_andi_i32(reg, reg, 0xffff0000);
        tmp = tcg_temp_new();
        tcg_gen_ext16u_i32(tmp, val);
        tcg_gen_or_i32(reg, reg, tmp);
        tcg_temp_free(tmp);
        break;
    case OS_LONG:
    case OS_SINGLE:
        tcg_gen_mov_i32(reg, val);
        break;
    default:
        g_assert_not_reached();
    }
}

/* Generate code for an "effective address".  Does not adjust the base
   register for autoincrement addressing modes.  Returns NULL_QREG for
   modes with no address (register direct, immediate, unsupported).  */
static TCGv gen_lea_mode(CPUM68KState *env, DisasContext *s,
                         int mode, int reg0, int opsize)
{
    TCGv reg;
    TCGv tmp;
    uint16_t ext;
    uint32_t offset;

    switch (mode) {
    case 0: /* Data register direct.  */
    case 1: /* Address register direct.  */
        return NULL_QREG;
    case 3: /* Indirect postincrement.  */
        if (opsize == OS_UNSIZED) {
            return NULL_QREG;
        }
        /* fallthru */
    case 2: /* Indirect register */
        return get_areg(s, reg0);
    case 4: /* Indirect predecrement.  */
        if (opsize == OS_UNSIZED) {
            return NULL_QREG;
        }
        reg = get_areg(s, reg0);
        tmp = mark_to_release(s, tcg_temp_new());
        if (reg0 == 7 && opsize == OS_BYTE &&
            m68k_feature(s->env, M68K_FEATURE_M68000)) {
            /* Byte push/pop via SP moves 2 bytes to keep SP word-aligned.  */
            tcg_gen_subi_i32(tmp, reg, 2);
        } else {
            tcg_gen_subi_i32(tmp, reg, opsize_bytes(opsize));
        }
        return tmp;
    case 5: /* Indirect displacement.  */
        reg = get_areg(s, reg0);
        tmp = mark_to_release(s, tcg_temp_new());
        ext = read_im16(env, s);
        tcg_gen_addi_i32(tmp, reg, (int16_t)ext);
        return tmp;
    case 6: /* Indirect index + displacement.  */
        reg = get_areg(s, reg0);
        return gen_lea_indexed(env, s, reg);
    case 7: /* Other */
        switch (reg0) {
        case 0: /* Absolute short.  */
            offset = (int16_t)read_im16(env, s);
            return mark_to_release(s, tcg_const_i32(offset));
        case 1: /* Absolute long.  */
            offset = read_im32(env, s);
            return mark_to_release(s, tcg_const_i32(offset));
        case 2: /* pc displacement  */
            offset = s->pc;
            offset += (int16_t)read_im16(env, s);
            return mark_to_release(s, tcg_const_i32(offset));
        case 3: /* pc index+displacement.  */
            return gen_lea_indexed(env, s, NULL_QREG);
        case 4: /* Immediate.  */
        default:
            return NULL_QREG;
        }
    }
    /* Should never happen.  */
    return NULL_QREG;
}

/* Decode mode/register from INSN and generate the effective address.  */
static TCGv gen_lea(CPUM68KState *env, DisasContext *s, uint16_t insn,
                    int opsize)
{
    int mode = extract32(insn, 3, 3);
    int reg0 = REG(insn, 0);
    return gen_lea_mode(env, s, mode, reg0, opsize);
}

/* Generate code to load/store a value from/into an EA.  If WHAT > 0 this is
   a write otherwise it is a read (0 == sign extend, -1 == zero extend).
   ADDRP is non-null for readwrite operands.
 */
static TCGv gen_ea_mode(CPUM68KState *env, DisasContext *s, int mode, int reg0,
                        int opsize, TCGv val, TCGv *addrp, ea_what what,
                        int index)
{
    TCGv reg, tmp, result;
    int32_t offset;

    switch (mode) {
    case 0: /* Data register direct.  */
        reg = cpu_dregs[reg0];
        if (what == EA_STORE) {
            gen_partset_reg(opsize, reg, val);
            return store_dummy;
        } else {
            return gen_extend(s, reg, opsize, what == EA_LOADS);
        }
    case 1: /* Address register direct.  */
        reg = get_areg(s, reg0);
        if (what == EA_STORE) {
            tcg_gen_mov_i32(reg, val);
            return store_dummy;
        } else {
            return gen_extend(s, reg, opsize, what == EA_LOADS);
        }
    case 2: /* Indirect register */
        reg = get_areg(s, reg0);
        return gen_ldst(s, opsize, reg, val, what, index);
    case 3: /* Indirect postincrement.  */
        reg = get_areg(s, reg0);
        result = gen_ldst(s, opsize, reg, val, what, index);
        /* Only increment on the final access of a read-modify-write.  */
        if (what == EA_STORE || !addrp) {
            TCGv tmp = tcg_temp_new();
            if (reg0 == 7 && opsize == OS_BYTE &&
                m68k_feature(s->env, M68K_FEATURE_M68000)) {
                /* Byte accesses through SP move 2 to keep it word-aligned.  */
                tcg_gen_addi_i32(tmp, reg, 2);
            } else {
                tcg_gen_addi_i32(tmp, reg, opsize_bytes(opsize));
            }
            delay_set_areg(s, reg0, tmp, true);
        }
        return result;
    case 4: /* Indirect predecrement.  */
        if (addrp && what == EA_STORE) {
            /* Reuse the address computed by the earlier read.  */
            tmp = *addrp;
        } else {
            tmp = gen_lea_mode(env, s, mode, reg0, opsize);
            if (IS_NULL_QREG(tmp)) {
                return tmp;
            }
            if (addrp) {
                *addrp = tmp;
            }
        }
        result = gen_ldst(s, opsize, tmp, val, what, index);
        if (what == EA_STORE || !addrp) {
            delay_set_areg(s, reg0, tmp, false);
        }
        return result;
    case 5: /* Indirect displacement.  */
    case 6: /* Indirect index + displacement.  */
    do_indirect:
        if (addrp && what == EA_STORE) {
            tmp = *addrp;
        } else {
            tmp = gen_lea_mode(env, s, mode, reg0, opsize);
            if (IS_NULL_QREG(tmp)) {
                return tmp;
            }
            if (addrp) {
                *addrp = tmp;
            }
        }
        return gen_ldst(s, opsize, tmp, val, what, index);
    case 7: /* Other */
        switch (reg0) {
        case 0: /* Absolute short.  */
        case 1: /* Absolute long.  */
        case 2: /* pc displacement  */
        case 3: /* pc index+displacement.  */
            goto do_indirect;
        case 4: /* Immediate.  */
            /* Sign extend values for consistency.  */
            switch (opsize) {
            case OS_BYTE:
                if (what == EA_LOADS) {
                    offset = (int8_t)read_im8(env, s);
                } else {
                    offset = read_im8(env, s);
                }
                break;
            case OS_WORD:
                if (what == EA_LOADS) {
                    offset = (int16_t)read_im16(env, s);
                } else {
                    offset = read_im16(env, s);
                }
                break;
            case OS_LONG:
                offset = read_im32(env, s);
                break;
            default:
                g_assert_not_reached();
            }
            return mark_to_release(s, tcg_const_i32(offset));
        default:
            return NULL_QREG;
        }
    }
    /* Should never happen.  */
    return NULL_QREG;
}

/* Decode mode/register from INSN and load/store through the EA.  */
static TCGv gen_ea(CPUM68KState *env, DisasContext *s, uint16_t insn,
                   int opsize, TCGv val, TCGv *addrp, ea_what what, int index)
{
    int mode = extract32(insn, 3, 3);
    int reg0 = REG(insn, 0);
    return gen_ea_mode(env, s, mode, reg0, opsize, val, addrp, what, index);
}

/* Return a host pointer to FP register FREG within env.  */
static TCGv_ptr gen_fp_ptr(int freg)
{
    TCGv_ptr fp = tcg_temp_new_ptr();
    tcg_gen_addi_ptr(fp, cpu_env, offsetof(CPUM68KState, fregs[freg]));
    return fp;
}

/* Return a host pointer to env->fp_result.  */
static TCGv_ptr gen_fp_result_ptr(void)
{
    TCGv_ptr fp = tcg_temp_new_ptr();
    tcg_gen_addi_ptr(fp, cpu_env, offsetof(CPUM68KState, fp_result));
    return fp;
}

/* Copy one FPReg to another: the 16-bit l.upper field and the
   64-bit l.lower field.  */
static void gen_fp_move(TCGv_ptr dest, TCGv_ptr src)
{
    TCGv t32;
    TCGv_i64 t64;

    t32 = tcg_temp_new();
    tcg_gen_ld16u_i32(t32, src, offsetof(FPReg, l.upper));
    tcg_gen_st16_i32(t32, dest, offsetof(FPReg, l.upper));
    tcg_temp_free(t32);

    t64 = tcg_temp_new_i64();
    tcg_gen_ld_i64(t64, src, offsetof(FPReg, l.lower));
    tcg_gen_st_i64(t64, dest, offsetof(FPReg, l.lower));
    tcg_temp_free_i64(t64);
}

/* Load a value of size OPSIZE from ADDR and convert it into the FP
   register pointed to by FP via the ext* helpers.  */
static void gen_load_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp,
                        int index)
{
    TCGv tmp;
    TCGv_i64 t64;

    t64 = tcg_temp_new_i64();
    tmp = tcg_temp_new();
    switch (opsize) {
    case OS_BYTE:
        tcg_gen_qemu_ld8s(tmp, addr, index);
        gen_helper_exts32(cpu_env, fp, tmp);
        break;
    case OS_WORD:
        tcg_gen_qemu_ld16s(tmp, addr, index);
        gen_helper_exts32(cpu_env, fp, tmp);
        break;
    case OS_LONG:
        tcg_gen_qemu_ld32u(tmp, addr, index);
        gen_helper_exts32(cpu_env, fp, tmp);
        break;
    case OS_SINGLE:
        tcg_gen_qemu_ld32u(tmp, addr, index);
        gen_helper_extf32(cpu_env, fp, tmp);
        break;
    case OS_DOUBLE:
        tcg_gen_qemu_ld64(t64, addr, index);
        gen_helper_extf64(cpu_env, fp, t64);
        break;
    case OS_EXTENDED:
        if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) {
            gen_exception(s, s->insn_pc, EXCP_FP_UNIMP);
            break;
        }
        /* 96-bit extended format: 16-bit exponent word (padded to 32),
           then the 64-bit mantissa.  */
        tcg_gen_qemu_ld32u(tmp, addr, index);
        tcg_gen_shri_i32(tmp, tmp, 16);
        tcg_gen_st16_i32(tmp, fp, offsetof(FPReg, l.upper));
        tcg_gen_addi_i32(tmp, addr, 4);
        tcg_gen_qemu_ld64(t64, tmp, index);
        tcg_gen_st_i64(t64, fp, offsetof(FPReg, l.lower));
        break;
    case OS_PACKED:
        /* unimplemented data type on 68040/ColdFire
         * FIXME if needed for another FPU
         */
        gen_exception(s, s->insn_pc, EXCP_FP_UNIMP);
        break;
    default:
        g_assert_not_reached();
    }
    tcg_temp_free(tmp);
    tcg_temp_free_i64(t64);
}

/* Convert the FP register pointed to by FP via the red* helpers and
   store a value of size OPSIZE to ADDR.  */
static void gen_store_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp,
                         int index)
{
    TCGv tmp;
    TCGv_i64 t64;

    t64 = tcg_temp_new_i64();
    tmp = tcg_temp_new();
    switch (opsize) {
    case OS_BYTE:
        gen_helper_reds32(tmp, cpu_env, fp);
        tcg_gen_qemu_st8(tmp, addr, index);
        break;
    case OS_WORD:
        gen_helper_reds32(tmp, cpu_env, fp);
        tcg_gen_qemu_st16(tmp, addr, index);
        break;
    case OS_LONG:
        gen_helper_reds32(tmp, cpu_env, fp);
        tcg_gen_qemu_st32(tmp, addr, index);
        break;
    case OS_SINGLE:
        gen_helper_redf32(tmp, cpu_env, fp);
        tcg_gen_qemu_st32(tmp, addr, index);
        break;
    case OS_DOUBLE:
        gen_helper_redf64(t64, cpu_env, fp);
        tcg_gen_qemu_st64(t64, addr, index);
        break;
    case OS_EXTENDED:
        if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) {
            gen_exception(s, s->insn_pc, EXCP_FP_UNIMP);
            break;
        }
        tcg_gen_ld16u_i32(tmp, fp, offsetof(FPReg, l.upper));
        tcg_gen_shli_i32(tmp, tmp, 16);
        tcg_gen_qemu_st32(tmp, addr, index);
        tcg_gen_addi_i32(tmp, addr, 4);
        tcg_gen_ld_i64(t64, fp, offsetof(FPReg, l.lower));
        tcg_gen_qemu_st64(t64, tmp, index);
        break;
    case OS_PACKED:
        /* unimplemented data type on 68040/ColdFire
         * FIXME if needed for another FPU
         */
        gen_exception(s, s->insn_pc, EXCP_FP_UNIMP);
        break;
    default:
        g_assert_not_reached();
    }
    tcg_temp_free(tmp);
    tcg_temp_free_i64(t64);
}

/* Dispatch an FP load or store according to WHAT.  */
static void gen_ldst_fp(DisasContext *s, int opsize, TCGv addr,
                        TCGv_ptr fp, ea_what what, int index)
{
    if (what == EA_STORE) {
        gen_store_fp(s, opsize, addr, fp, index);
    } else {
        gen_load_fp(s, opsize, addr, fp, index);
    }
}

/* FP analogue of gen_ea_mode: move between the EA described by
   MODE/REG0 and the FP register pointed to by FP.
   Returns 0 on success, -1 for an invalid mode/size combination.  */
static int gen_ea_mode_fp(CPUM68KState *env, DisasContext *s, int mode,
                          int reg0, int opsize, TCGv_ptr fp, ea_what what,
                          int index)
{
    TCGv reg, addr, tmp;
    TCGv_i64 t64;

    switch (mode) {
    case 0: /* Data register direct.  */
        reg = cpu_dregs[reg0];
        if (what == EA_STORE) {
            switch (opsize) {
            case OS_BYTE:
            case OS_WORD:
            case OS_LONG:
                gen_helper_reds32(reg, cpu_env, fp);
                break;
            case OS_SINGLE:
                gen_helper_redf32(reg, cpu_env, fp);
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            tmp = tcg_temp_new();
            switch (opsize) {
            case OS_BYTE:
                tcg_gen_ext8s_i32(tmp, reg);
                gen_helper_exts32(cpu_env, fp, tmp);
                break;
            case OS_WORD:
                tcg_gen_ext16s_i32(tmp, reg);
                gen_helper_exts32(cpu_env, fp, tmp);
                break;
            case OS_LONG:
                gen_helper_exts32(cpu_env, fp, reg);
                break;
            case OS_SINGLE:
                gen_helper_extf32(cpu_env, fp, reg);
                break;
            default:
                g_assert_not_reached();
            }
            tcg_temp_free(tmp);
        }
        return 0;
    case 1: /* Address register direct.  */
        return -1;
    case 2: /* Indirect register */
        addr = get_areg(s, reg0);
        gen_ldst_fp(s, opsize, addr, fp, what, index);
        return 0;
    case 3: /* Indirect postincrement.  */
        addr = cpu_aregs[reg0];
        gen_ldst_fp(s, opsize, addr, fp, what, index);
        tcg_gen_addi_i32(addr, addr, opsize_bytes(opsize));
        return 0;
    case 4: /* Indirect predecrement.  */
        addr = gen_lea_mode(env, s, mode, reg0, opsize);
        if (IS_NULL_QREG(addr)) {
            return -1;
        }
        gen_ldst_fp(s, opsize, addr, fp, what, index);
        tcg_gen_mov_i32(cpu_aregs[reg0], addr);
        return 0;
    case 5: /* Indirect displacement.  */
    case 6: /* Indirect index + displacement.  */
    do_indirect:
        addr = gen_lea_mode(env, s, mode, reg0, opsize);
        if (IS_NULL_QREG(addr)) {
            return -1;
        }
        gen_ldst_fp(s, opsize, addr, fp, what, index);
        return 0;
    case 7: /* Other */
        switch (reg0) {
        case 0: /* Absolute short.  */
        case 1: /* Absolute long.  */
        case 2: /* pc displacement  */
        case 3: /* pc index+displacement.  */
            goto do_indirect;
        case 4: /* Immediate.  */
            if (what == EA_STORE) {
                return -1;
            }
            switch (opsize) {
            case OS_BYTE:
                tmp = tcg_const_i32((int8_t)read_im8(env, s));
                gen_helper_exts32(cpu_env, fp, tmp);
                tcg_temp_free(tmp);
                break;
            case OS_WORD:
                tmp = tcg_const_i32((int16_t)read_im16(env, s));
                gen_helper_exts32(cpu_env, fp, tmp);
                tcg_temp_free(tmp);
                break;
            case OS_LONG:
                tmp = tcg_const_i32(read_im32(env, s));
                gen_helper_exts32(cpu_env, fp, tmp);
                tcg_temp_free(tmp);
                break;
            case OS_SINGLE:
                tmp = tcg_const_i32(read_im32(env, s));
                gen_helper_extf32(cpu_env, fp, tmp);
                tcg_temp_free(tmp);
                break;
            case OS_DOUBLE:
                t64 = tcg_const_i64(read_im64(env, s));
                gen_helper_extf64(cpu_env, fp, t64);
                tcg_temp_free_i64(t64);
                break;
            case OS_EXTENDED:
                if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) {
                    gen_exception(s, s->insn_pc, EXCP_FP_UNIMP);
                    break;
                }
                tmp = tcg_const_i32(read_im32(env, s) >> 16);
                tcg_gen_st16_i32(tmp, fp, offsetof(FPReg, l.upper));
                tcg_temp_free(tmp);
                t64 = tcg_const_i64(read_im64(env, s));
                tcg_gen_st_i64(t64, fp, offsetof(FPReg, l.lower));
                tcg_temp_free_i64(t64);
                break;
            case OS_PACKED:
                /* unimplemented data type on 68040/ColdFire
                 * FIXME if needed for another FPU
                 */
                gen_exception(s, s->insn_pc, EXCP_FP_UNIMP);
                break;
            default:
                g_assert_not_reached();
            }
            return 0;
        default:
            return -1;
        }
    }
    return -1;
}

/* Decode mode/register from INSN and move between the EA and FP.  */
static int gen_ea_fp(CPUM68KState *env, DisasContext *s, uint16_t insn,
                     int opsize, TCGv_ptr fp, ea_what what, int index)
{
    int mode = extract32(insn, 3, 3);
    int reg0 = REG(insn, 0);
    return gen_ea_mode_fp(env, s, mode, reg0, opsize, fp, what, index);
}

/* A comparison (tcond applied to v1 vs v2) describing a condition code.
   g1/g2 are set when v1/v2 are pre-existing globals (QREG_CC_*) rather
   than temps owned by the caller, and so must not be freed.  */
typedef struct {
    TCGCond tcond;
    bool g1;
    bool g2;
    TCGv v1;
    TCGv v2;
} DisasCompare;

static void gen_cc_cond(DisasCompare *c, DisasContext *s, int cond)
{
    TCGv tmp, tmp2;
    TCGCond tcond;
    CCOp op = s->cc_op;

    /* The CC_OP_CMP form can handle most normal comparisons directly.
     */
    if (op == CC_OP_CMPB || op == CC_OP_CMPW || op == CC_OP_CMPL) {
        /* N and V still hold the compare operands; use them directly.  */
        c->g1 = c->g2 = 1;
        c->v1 = QREG_CC_N;
        c->v2 = QREG_CC_V;
        switch (cond) {
        case 2: /* HI */
        case 3: /* LS */
            tcond = TCG_COND_LEU;
            goto done;
        case 4: /* CC */
        case 5: /* CS */
            tcond = TCG_COND_LTU;
            goto done;
        case 6: /* NE */
        case 7: /* EQ */
            tcond = TCG_COND_EQ;
            goto done;
        case 10: /* PL */
        case 11: /* MI */
            /* Materialize the subtraction result, sign-extended to the
             * compare width, and test its sign against zero.
             */
            c->g1 = c->g2 = 0;
            c->v2 = tcg_const_i32(0);
            c->v1 = tmp = tcg_temp_new();
            tcg_gen_sub_i32(tmp, QREG_CC_N, QREG_CC_V);
            gen_ext(tmp, tmp, op - CC_OP_CMPB, 1);
            /* fallthru */
        case 12: /* GE */
        case 13: /* LT */
            tcond = TCG_COND_LT;
            goto done;
        case 14: /* GT */
        case 15: /* LE */
            tcond = TCG_COND_LE;
            goto done;
        }
    }

    c->g1 = 1;
    c->g2 = 0;
    c->v2 = tcg_const_i32(0);

    switch (cond) {
    case 0: /* T */
    case 1: /* F */
        c->v1 = c->v2;
        tcond = TCG_COND_NEVER;
        goto done;
    case 14: /* GT (!(Z || (N ^ V))) */
    case 15: /* LE (Z || (N ^ V)) */
        /* Logic operations clear V, which simplifies LE to (Z || N),
           and since Z and N are co-located, this becomes a normal
           comparison vs N. */
        if (op == CC_OP_LOGIC) {
            c->v1 = QREG_CC_N;
            tcond = TCG_COND_LE;
            goto done;
        }
        break;
    case 12: /* GE (!(N ^ V)) */
    case 13: /* LT (N ^ V) */
        /* Logic operations clear V, which simplifies this to N. */
        if (op != CC_OP_LOGIC) {
            break;
        }
        /* fallthru */
    case 10: /* PL (!N) */
    case 11: /* MI (N) */
        /* Several cases represent N normally. */
        if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
            op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL ||
            op == CC_OP_LOGIC) {
            c->v1 = QREG_CC_N;
            tcond = TCG_COND_LT;
            goto done;
        }
        break;
    case 6: /* NE (!Z) */
    case 7: /* EQ (Z) */
        /* Some cases fold Z into N. */
        if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
            op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL ||
            op == CC_OP_LOGIC) {
            tcond = TCG_COND_EQ;
            c->v1 = QREG_CC_N;
            goto done;
        }
        break;
    case 4: /* CC (!C) */
    case 5: /* CS (C) */
        /* Some cases fold C into X. */
        if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
            op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL) {
            tcond = TCG_COND_NE;
            c->v1 = QREG_CC_X;
            goto done;
        }
        /* fallthru */
    case 8: /* VC (!V) */
    case 9: /* VS (V) */
        /* Logic operations clear V and C. */
        if (op == CC_OP_LOGIC) {
            tcond = TCG_COND_NEVER;
            c->v1 = c->v2;
            goto done;
        }
        break;
    }

    /* Otherwise, flush flag state to CC_OP_FLAGS. */
    gen_flush_flags(s);

    switch (cond) {
    case 0: /* T */
    case 1: /* F */
    default:
        /* Invalid, or handled above. */
        abort();
    case 2: /* HI (!C && !Z) -> !(C || Z)*/
    case 3: /* LS (C || Z) */
        c->v1 = tmp = tcg_temp_new();
        c->g1 = 0;
        tcg_gen_setcond_i32(TCG_COND_EQ, tmp, QREG_CC_Z, c->v2);
        tcg_gen_or_i32(tmp, tmp, QREG_CC_C);
        tcond = TCG_COND_NE;
        break;
    case 4: /* CC (!C) */
    case 5: /* CS (C) */
        c->v1 = QREG_CC_C;
        tcond = TCG_COND_NE;
        break;
    case 6: /* NE (!Z) */
    case 7: /* EQ (Z) */
        c->v1 = QREG_CC_Z;
        tcond = TCG_COND_EQ;
        break;
    case 8: /* VC (!V) */
    case 9: /* VS (V) */
        /* V is stored in bit 31; test via signed-less-than-zero.  */
        c->v1 = QREG_CC_V;
        tcond = TCG_COND_LT;
        break;
    case 10: /* PL (!N) */
    case 11: /* MI (N) */
        c->v1 = QREG_CC_N;
        tcond = TCG_COND_LT;
        break;
    case 12: /* GE (!(N ^ V)) */
    case 13: /* LT (N ^ V) */
        c->v1 = tmp = tcg_temp_new();
        c->g1 = 0;
        tcg_gen_xor_i32(tmp, QREG_CC_N, QREG_CC_V);
        tcond = TCG_COND_LT;
        break;
    case 14: /* GT (!(Z || (N ^ V))) */
    case 15: /* LE (Z || (N ^ V)) */
        /* -(Z == 0 ? 0 : 1) | (N ^ V): negative iff LE holds.  */
        c->v1 = tmp = tcg_temp_new();
        c->g1 = 0;
        tcg_gen_setcond_i32(TCG_COND_EQ, tmp, QREG_CC_Z, c->v2);
        tcg_gen_neg_i32(tmp, tmp);
        tmp2 = tcg_temp_new();
        tcg_gen_xor_i32(tmp2, QREG_CC_N, QREG_CC_V);
        tcg_gen_or_i32(tmp, tmp, tmp2);
        tcg_temp_free(tmp2);
        tcond = TCG_COND_LT;
        break;
    }

 done:
    /* Even condition codes are the negation of their odd counterparts.  */
    if ((cond & 1) == 0) {
        tcond = tcg_invert_cond(tcond);
    }
    c->tcond = tcond;
}

/* Free any temporaries allocated by gen_cc_cond. */
static void free_cond(DisasCompare *c)
{
    if (!c->g1) {
        tcg_temp_free(c->v1);
    }
    if (!c->g2) {
        tcg_temp_free(c->v2);
    }
}

/* Branch to L1 when condition code COND is true. */
static void gen_jmpcc(DisasContext *s, int cond, TCGLabel *l1)
{
    DisasCompare c;

    gen_cc_cond(&c, s, cond);
    update_cc_op(s);
    tcg_gen_brcond_i32(c.tcond, c.v1, c.v2, l1);
    free_cond(&c);
}

/* Force a TB lookup after an instruction that changes the CPU state. */
static void gen_lookup_tb(DisasContext *s)
{
    update_cc_op(s);
    tcg_gen_movi_i32(QREG_PC, s->pc);
    s->is_jmp = DISAS_UPDATE;
}

/* Load a source operand; returns from the enclosing DISAS_INSN on an
 * addressing-mode fault.
 */
#define SRC_EA(env, result, opsize, op_sign, addrp) do {                \
        result = gen_ea(env, s, insn, opsize, NULL_QREG, addrp,         \
                        op_sign ? EA_LOADS : EA_LOADU, IS_USER(s));     \
        if (IS_NULL_QREG(result)) {                                     \
            gen_addr_fault(s);                                          \
            return;                                                     \
        }                                                               \
    } while (0)

/* Store VAL to the destination operand; returns from the enclosing
 * DISAS_INSN on an addressing-mode fault.
 */
#define DEST_EA(env, insn, opsize, val, addrp) do {                     \
        TCGv ea_result = gen_ea(env, s, insn, opsize, val, addrp,       \
                                EA_STORE, IS_USER(s));                  \
        if (IS_NULL_QREG(ea_result)) {                                  \
            gen_addr_fault(s);                                          \
            return;                                                     \
        }                                                               \
    } while (0)

/* Direct TB chaining is only safe within the same guest page.  */
static inline bool use_goto_tb(DisasContext *s, uint32_t dest)
{
#ifndef CONFIG_USER_ONLY
    return (s->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
           (s->insn_pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}

/* Generate a jump to an immediate address.
*/ 1487 static void gen_jmp_tb(DisasContext *s, int n, uint32_t dest) 1488 { 1489 if (unlikely(s->singlestep_enabled)) { 1490 gen_exception(s, dest, EXCP_DEBUG); 1491 } else if (use_goto_tb(s, dest)) { 1492 tcg_gen_goto_tb(n); 1493 tcg_gen_movi_i32(QREG_PC, dest); 1494 tcg_gen_exit_tb((uintptr_t)s->tb + n); 1495 } else { 1496 gen_jmp_im(s, dest); 1497 tcg_gen_exit_tb(0); 1498 } 1499 s->is_jmp = DISAS_TB_JUMP; 1500 } 1501 1502 DISAS_INSN(scc) 1503 { 1504 DisasCompare c; 1505 int cond; 1506 TCGv tmp; 1507 1508 cond = (insn >> 8) & 0xf; 1509 gen_cc_cond(&c, s, cond); 1510 1511 tmp = tcg_temp_new(); 1512 tcg_gen_setcond_i32(c.tcond, tmp, c.v1, c.v2); 1513 free_cond(&c); 1514 1515 tcg_gen_neg_i32(tmp, tmp); 1516 DEST_EA(env, insn, OS_BYTE, tmp, NULL); 1517 tcg_temp_free(tmp); 1518 } 1519 1520 DISAS_INSN(dbcc) 1521 { 1522 TCGLabel *l1; 1523 TCGv reg; 1524 TCGv tmp; 1525 int16_t offset; 1526 uint32_t base; 1527 1528 reg = DREG(insn, 0); 1529 base = s->pc; 1530 offset = (int16_t)read_im16(env, s); 1531 l1 = gen_new_label(); 1532 gen_jmpcc(s, (insn >> 8) & 0xf, l1); 1533 1534 tmp = tcg_temp_new(); 1535 tcg_gen_ext16s_i32(tmp, reg); 1536 tcg_gen_addi_i32(tmp, tmp, -1); 1537 gen_partset_reg(OS_WORD, reg, tmp); 1538 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, -1, l1); 1539 gen_jmp_tb(s, 1, base + offset); 1540 gen_set_label(l1); 1541 gen_jmp_tb(s, 0, s->pc); 1542 } 1543 1544 DISAS_INSN(undef_mac) 1545 { 1546 gen_exception(s, s->insn_pc, EXCP_LINEA); 1547 } 1548 1549 DISAS_INSN(undef_fpu) 1550 { 1551 gen_exception(s, s->insn_pc, EXCP_LINEF); 1552 } 1553 1554 DISAS_INSN(undef) 1555 { 1556 /* ??? This is both instructions that are as yet unimplemented 1557 for the 680x0 series, as well as those that are implemented 1558 but actually illegal for CPU32 or pre-68020. 
*/ 1559 qemu_log_mask(LOG_UNIMP, "Illegal instruction: %04x @ %08x", 1560 insn, s->insn_pc); 1561 gen_exception(s, s->insn_pc, EXCP_UNSUPPORTED); 1562 } 1563 1564 DISAS_INSN(mulw) 1565 { 1566 TCGv reg; 1567 TCGv tmp; 1568 TCGv src; 1569 int sign; 1570 1571 sign = (insn & 0x100) != 0; 1572 reg = DREG(insn, 9); 1573 tmp = tcg_temp_new(); 1574 if (sign) 1575 tcg_gen_ext16s_i32(tmp, reg); 1576 else 1577 tcg_gen_ext16u_i32(tmp, reg); 1578 SRC_EA(env, src, OS_WORD, sign, NULL); 1579 tcg_gen_mul_i32(tmp, tmp, src); 1580 tcg_gen_mov_i32(reg, tmp); 1581 gen_logic_cc(s, tmp, OS_LONG); 1582 tcg_temp_free(tmp); 1583 } 1584 1585 DISAS_INSN(divw) 1586 { 1587 int sign; 1588 TCGv src; 1589 TCGv destr; 1590 1591 /* divX.w <EA>,Dn 32/16 -> 16r:16q */ 1592 1593 sign = (insn & 0x100) != 0; 1594 1595 /* dest.l / src.w */ 1596 1597 SRC_EA(env, src, OS_WORD, sign, NULL); 1598 destr = tcg_const_i32(REG(insn, 9)); 1599 if (sign) { 1600 gen_helper_divsw(cpu_env, destr, src); 1601 } else { 1602 gen_helper_divuw(cpu_env, destr, src); 1603 } 1604 tcg_temp_free(destr); 1605 1606 set_cc_op(s, CC_OP_FLAGS); 1607 } 1608 1609 DISAS_INSN(divl) 1610 { 1611 TCGv num, reg, den; 1612 int sign; 1613 uint16_t ext; 1614 1615 ext = read_im16(env, s); 1616 1617 sign = (ext & 0x0800) != 0; 1618 1619 if (ext & 0x400) { 1620 if (!m68k_feature(s->env, M68K_FEATURE_QUAD_MULDIV)) { 1621 gen_exception(s, s->insn_pc, EXCP_ILLEGAL); 1622 return; 1623 } 1624 1625 /* divX.l <EA>, Dr:Dq 64/32 -> 32r:32q */ 1626 1627 SRC_EA(env, den, OS_LONG, 0, NULL); 1628 num = tcg_const_i32(REG(ext, 12)); 1629 reg = tcg_const_i32(REG(ext, 0)); 1630 if (sign) { 1631 gen_helper_divsll(cpu_env, num, reg, den); 1632 } else { 1633 gen_helper_divull(cpu_env, num, reg, den); 1634 } 1635 tcg_temp_free(reg); 1636 tcg_temp_free(num); 1637 set_cc_op(s, CC_OP_FLAGS); 1638 return; 1639 } 1640 1641 /* divX.l <EA>, Dq 32/32 -> 32q */ 1642 /* divXl.l <EA>, Dr:Dq 32/32 -> 32r:32q */ 1643 1644 SRC_EA(env, den, OS_LONG, 0, NULL); 1645 num = 
tcg_const_i32(REG(ext, 12)); 1646 reg = tcg_const_i32(REG(ext, 0)); 1647 if (sign) { 1648 gen_helper_divsl(cpu_env, num, reg, den); 1649 } else { 1650 gen_helper_divul(cpu_env, num, reg, den); 1651 } 1652 tcg_temp_free(reg); 1653 tcg_temp_free(num); 1654 1655 set_cc_op(s, CC_OP_FLAGS); 1656 } 1657 1658 static void bcd_add(TCGv dest, TCGv src) 1659 { 1660 TCGv t0, t1; 1661 1662 /* dest10 = dest10 + src10 + X 1663 * 1664 * t1 = src 1665 * t2 = t1 + 0x066 1666 * t3 = t2 + dest + X 1667 * t4 = t2 ^ dest 1668 * t5 = t3 ^ t4 1669 * t6 = ~t5 & 0x110 1670 * t7 = (t6 >> 2) | (t6 >> 3) 1671 * return t3 - t7 1672 */ 1673 1674 /* t1 = (src + 0x066) + dest + X 1675 * = result with some possible exceding 0x6 1676 */ 1677 1678 t0 = tcg_const_i32(0x066); 1679 tcg_gen_add_i32(t0, t0, src); 1680 1681 t1 = tcg_temp_new(); 1682 tcg_gen_add_i32(t1, t0, dest); 1683 tcg_gen_add_i32(t1, t1, QREG_CC_X); 1684 1685 /* we will remove exceding 0x6 where there is no carry */ 1686 1687 /* t0 = (src + 0x0066) ^ dest 1688 * = t1 without carries 1689 */ 1690 1691 tcg_gen_xor_i32(t0, t0, dest); 1692 1693 /* extract the carries 1694 * t0 = t0 ^ t1 1695 * = only the carries 1696 */ 1697 1698 tcg_gen_xor_i32(t0, t0, t1); 1699 1700 /* generate 0x1 where there is no carry 1701 * and for each 0x10, generate a 0x6 1702 */ 1703 1704 tcg_gen_shri_i32(t0, t0, 3); 1705 tcg_gen_not_i32(t0, t0); 1706 tcg_gen_andi_i32(t0, t0, 0x22); 1707 tcg_gen_add_i32(dest, t0, t0); 1708 tcg_gen_add_i32(dest, dest, t0); 1709 tcg_temp_free(t0); 1710 1711 /* remove the exceding 0x6 1712 * for digits that have not generated a carry 1713 */ 1714 1715 tcg_gen_sub_i32(dest, t1, dest); 1716 tcg_temp_free(t1); 1717 } 1718 1719 static void bcd_sub(TCGv dest, TCGv src) 1720 { 1721 TCGv t0, t1, t2; 1722 1723 /* dest10 = dest10 - src10 - X 1724 * = bcd_add(dest + 1 - X, 0x199 - src) 1725 */ 1726 1727 /* t0 = 0x066 + (0x199 - src) */ 1728 1729 t0 = tcg_temp_new(); 1730 tcg_gen_subfi_i32(t0, 0x1ff, src); 1731 1732 /* t1 = t0 + dest + 1 - X*/ 
1733 1734 t1 = tcg_temp_new(); 1735 tcg_gen_add_i32(t1, t0, dest); 1736 tcg_gen_addi_i32(t1, t1, 1); 1737 tcg_gen_sub_i32(t1, t1, QREG_CC_X); 1738 1739 /* t2 = t0 ^ dest */ 1740 1741 t2 = tcg_temp_new(); 1742 tcg_gen_xor_i32(t2, t0, dest); 1743 1744 /* t0 = t1 ^ t2 */ 1745 1746 tcg_gen_xor_i32(t0, t1, t2); 1747 1748 /* t2 = ~t0 & 0x110 1749 * t0 = (t2 >> 2) | (t2 >> 3) 1750 * 1751 * to fit on 8bit operands, changed in: 1752 * 1753 * t2 = ~(t0 >> 3) & 0x22 1754 * t0 = t2 + t2 1755 * t0 = t0 + t2 1756 */ 1757 1758 tcg_gen_shri_i32(t2, t0, 3); 1759 tcg_gen_not_i32(t2, t2); 1760 tcg_gen_andi_i32(t2, t2, 0x22); 1761 tcg_gen_add_i32(t0, t2, t2); 1762 tcg_gen_add_i32(t0, t0, t2); 1763 tcg_temp_free(t2); 1764 1765 /* return t1 - t0 */ 1766 1767 tcg_gen_sub_i32(dest, t1, t0); 1768 tcg_temp_free(t0); 1769 tcg_temp_free(t1); 1770 } 1771 1772 static void bcd_flags(TCGv val) 1773 { 1774 tcg_gen_andi_i32(QREG_CC_C, val, 0x0ff); 1775 tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_C); 1776 1777 tcg_gen_extract_i32(QREG_CC_C, val, 8, 1); 1778 1779 tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C); 1780 } 1781 1782 DISAS_INSN(abcd_reg) 1783 { 1784 TCGv src; 1785 TCGv dest; 1786 1787 gen_flush_flags(s); /* !Z is sticky */ 1788 1789 src = gen_extend(s, DREG(insn, 0), OS_BYTE, 0); 1790 dest = gen_extend(s, DREG(insn, 9), OS_BYTE, 0); 1791 bcd_add(dest, src); 1792 gen_partset_reg(OS_BYTE, DREG(insn, 9), dest); 1793 1794 bcd_flags(dest); 1795 } 1796 1797 DISAS_INSN(abcd_mem) 1798 { 1799 TCGv src, dest, addr; 1800 1801 gen_flush_flags(s); /* !Z is sticky */ 1802 1803 /* Indirect pre-decrement load (mode 4) */ 1804 1805 src = gen_ea_mode(env, s, 4, REG(insn, 0), OS_BYTE, 1806 NULL_QREG, NULL, EA_LOADU, IS_USER(s)); 1807 dest = gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE, 1808 NULL_QREG, &addr, EA_LOADU, IS_USER(s)); 1809 1810 bcd_add(dest, src); 1811 1812 gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE, dest, &addr, 1813 EA_STORE, IS_USER(s)); 1814 1815 bcd_flags(dest); 1816 } 1817 1818 
DISAS_INSN(sbcd_reg)
{
    TCGv src, dest;

    gen_flush_flags(s); /* !Z is sticky */

    src = gen_extend(s, DREG(insn, 0), OS_BYTE, 0);
    dest = gen_extend(s, DREG(insn, 9), OS_BYTE, 0);

    bcd_sub(dest, src);

    gen_partset_reg(OS_BYTE, DREG(insn, 9), dest);

    bcd_flags(dest);
}

DISAS_INSN(sbcd_mem)
{
    TCGv src, dest, addr;

    gen_flush_flags(s); /* !Z is sticky */

    /* Indirect pre-decrement load (mode 4) */

    src = gen_ea_mode(env, s, 4, REG(insn, 0), OS_BYTE,
                      NULL_QREG, NULL, EA_LOADU, IS_USER(s));
    dest = gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE,
                       NULL_QREG, &addr, EA_LOADU, IS_USER(s));

    bcd_sub(dest, src);

    gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE, dest, &addr,
                EA_STORE, IS_USER(s));

    bcd_flags(dest);
}

/* NBCD: BCD negate, implemented as 0 - operand - X. */
DISAS_INSN(nbcd)
{
    TCGv src, dest;
    TCGv addr;

    gen_flush_flags(s); /* !Z is sticky */

    SRC_EA(env, src, OS_BYTE, 0, &addr);

    dest = tcg_const_i32(0);
    bcd_sub(dest, src);

    DEST_EA(env, insn, OS_BYTE, dest, &addr);

    bcd_flags(dest);

    tcg_temp_free(dest);
}

/* ADD/SUB with an EA operand; bit 8 selects <EA> op Dn -> <EA>
 * versus Dn op <EA> -> Dn, bit 14 selects add versus sub.
 */
DISAS_INSN(addsub)
{
    TCGv reg;
    TCGv dest;
    TCGv src;
    TCGv tmp;
    TCGv addr;
    int add;
    int opsize;

    add = (insn & 0x4000) != 0;
    opsize = insn_opsize(insn);
    reg = gen_extend(s, DREG(insn, 9), opsize, 1);
    dest = tcg_temp_new();
    if (insn & 0x100) {
        SRC_EA(env, tmp, opsize, 1, &addr);
        src = reg;
    } else {
        tmp = reg;
        SRC_EA(env, src, opsize, 1, NULL);
    }
    if (add) {
        tcg_gen_add_i32(dest, tmp, src);
        /* Carry out iff the (truncated) sum is below an addend.  */
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, src);
        set_cc_op(s, CC_OP_ADDB + opsize);
    } else {
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, tmp, src);
        tcg_gen_sub_i32(dest, tmp, src);
        set_cc_op(s, CC_OP_SUBB + opsize);
    }
    gen_update_cc_add(dest, src, opsize);
    if (insn & 0x100) {
        DEST_EA(env, insn, opsize, dest, &addr);
    } else {
        gen_partset_reg(opsize, DREG(insn, 9), dest);
    }
    tcg_temp_free(dest);
}

/* Reverse the order of the bits in REG. */
DISAS_INSN(bitrev)
{
    TCGv reg;
    reg = DREG(insn, 0);
    gen_helper_bitrev(reg, reg);
}

/* BTST/BCHG/BCLR/BSET with the bit number in a data register. */
DISAS_INSN(bitop_reg)
{
    int opsize;
    int op;
    TCGv src1;
    TCGv src2;
    TCGv tmp;
    TCGv addr;
    TCGv dest;

    /* Memory operands are byte-sized; register operands are long.  */
    if ((insn & 0x38) != 0)
        opsize = OS_BYTE;
    else
        opsize = OS_LONG;
    op = (insn >> 6) & 3;
    SRC_EA(env, src1, opsize, 0, op ? &addr: NULL);

    gen_flush_flags(s);
    src2 = tcg_temp_new();
    if (opsize == OS_BYTE)
        tcg_gen_andi_i32(src2, DREG(insn, 9), 7);
    else
        tcg_gen_andi_i32(src2, DREG(insn, 9), 31);

    tmp = tcg_const_i32(1);
    tcg_gen_shl_i32(tmp, tmp, src2);
    tcg_temp_free(src2);

    /* Z is set from the tested bit before any modification.  */
    tcg_gen_and_i32(QREG_CC_Z, src1, tmp);

    dest = tcg_temp_new();
    switch (op) {
    case 1: /* bchg */
        tcg_gen_xor_i32(dest, src1, tmp);
        break;
    case 2: /* bclr */
        tcg_gen_andc_i32(dest, src1, tmp);
        break;
    case 3: /* bset */
        tcg_gen_or_i32(dest, src1, tmp);
        break;
    default: /* btst */
        break;
    }
    tcg_temp_free(tmp);
    if (op) {
        DEST_EA(env, insn, opsize, dest, &addr);
    }
    tcg_temp_free(dest);
}

/* ColdFire SATS: saturate Dn on the overflow flag. */
DISAS_INSN(sats)
{
    TCGv reg;
    reg = DREG(insn, 0);
    gen_flush_flags(s);
    gen_helper_sats(reg, reg, QREG_CC_V);
    gen_logic_cc(s, reg, OS_LONG);
}

/* Push VAL as a longword onto the stack. */
static void gen_push(DisasContext *s, TCGv val)
{
    TCGv tmp;

    tmp = tcg_temp_new();
    tcg_gen_subi_i32(tmp, QREG_SP, 4);
    gen_store(s, OS_LONG, tmp, val, IS_USER(s));
    tcg_gen_mov_i32(QREG_SP, tmp);
    tcg_temp_free(tmp);
}

/* Map a MOVEM register index 0..15 onto D0-D7/A0-A7. */
static TCGv mreg(int reg)
{
    if (reg < 8) {
        /* Dx */
        return cpu_dregs[reg];
    }
    /* Ax */
    return cpu_aregs[reg & 7];
}

DISAS_INSN(movem)
{
    TCGv addr, incr, tmp, r[16];
    int is_load = (insn & 0x0400) != 0;
    int opsize = (insn & 0x40) != 0 ? OS_LONG : OS_WORD;
    uint16_t mask = read_im16(env, s);
    int mode = extract32(insn, 3, 3);
    int reg0 = REG(insn, 0);
    int i;

    tmp = cpu_aregs[reg0];

    switch (mode) {
    case 0: /* data register direct */
    case 1: /* addr register direct */
    do_addr_fault:
        gen_addr_fault(s);
        return;

    case 2: /* indirect */
        break;

    case 3: /* indirect post-increment */
        if (!is_load) {
            /* post-increment is not allowed */
            goto do_addr_fault;
        }
        break;

    case 4: /* indirect pre-decrement */
        if (is_load) {
            /* pre-decrement is not allowed */
            goto do_addr_fault;
        }
        /* We want a bare copy of the address reg, without any pre-decrement
           adjustment, as gen_lea would provide.  */
        break;

    default:
        tmp = gen_lea_mode(env, s, mode, reg0, opsize);
        if (IS_NULL_QREG(tmp)) {
            goto do_addr_fault;
        }
        break;
    }

    addr = tcg_temp_new();
    tcg_gen_mov_i32(addr, tmp);
    incr = tcg_const_i32(opsize_bytes(opsize));

    if (is_load) {
        /* memory to register */
        /* Load everything first, then commit, so a fault mid-list does
         * not leave registers partially updated.
         */
        for (i = 0; i < 16; i++) {
            if (mask & (1 << i)) {
                r[i] = gen_load(s, opsize, addr, 1, IS_USER(s));
                tcg_gen_add_i32(addr, addr, incr);
            }
        }
        for (i = 0; i < 16; i++) {
            if (mask & (1 << i)) {
                tcg_gen_mov_i32(mreg(i), r[i]);
                tcg_temp_free(r[i]);
            }
        }
        if (mode == 3) {
            /* post-increment: movem (An)+,X */
            tcg_gen_mov_i32(cpu_aregs[reg0], addr);
        }
    } else {
        /* register to memory */
        if (mode == 4) {
            /* pre-decrement: movem X,-(An) */
            /* The mask is bit-reversed for pre-decrement mode.  */
            for (i = 15; i >= 0; i--) {
                if ((mask << i) & 0x8000) {
                    tcg_gen_sub_i32(addr, addr, incr);
                    if (reg0 + 8 == i &&
                        m68k_feature(s->env, M68K_FEATURE_EXT_FULL)) {
                        /* M68020+: if the addressing register is the
                         * register moved to memory, the value written
                         * is the initial value decremented by the size of
                         * the operation, regardless of how many actual
                         * stores have been performed until this point.
                         * M68000/M68010: the value is the initial value.
                         */
                        tmp = tcg_temp_new();
                        tcg_gen_sub_i32(tmp, cpu_aregs[reg0], incr);
                        gen_store(s, opsize, addr, tmp, IS_USER(s));
                        tcg_temp_free(tmp);
                    } else {
                        gen_store(s, opsize, addr, mreg(i), IS_USER(s));
                    }
                }
            }
            tcg_gen_mov_i32(cpu_aregs[reg0], addr);
        } else {
            for (i = 0; i < 16; i++) {
                if (mask & (1 << i)) {
                    gen_store(s, opsize, addr, mreg(i), IS_USER(s));
                    tcg_gen_add_i32(addr, addr, incr);
                }
            }
        }
    }

    tcg_temp_free(incr);
    tcg_temp_free(addr);
}

/* MOVEP: transfer 2 or 4 bytes between Dn and alternate memory bytes
 * starting at (An)+displacement; bit 6 selects long, bit 7 direction.
 */
DISAS_INSN(movep)
{
    uint8_t i;
    int16_t displ;
    TCGv reg;
    TCGv addr;
    TCGv abuf;
    TCGv dbuf;

    displ = read_im16(env, s);

    addr = AREG(insn, 0);
    reg = DREG(insn, 9);

    abuf = tcg_temp_new();
    tcg_gen_addi_i32(abuf, addr, displ);
    dbuf = tcg_temp_new();

    if (insn & 0x40) {
        i = 4;
    } else {
        i = 2;
    }

    if (insn & 0x80) {
        /* register to memory, most-significant byte first */
        for ( ; i > 0 ; i--) {
            tcg_gen_shri_i32(dbuf, reg, (i - 1) * 8);
            tcg_gen_qemu_st8(dbuf, abuf, IS_USER(s));
            if (i > 1) {
                tcg_gen_addi_i32(abuf, abuf, 2);
            }
        }
    } else {
        /* memory to register, bytes deposited high to low */
        for ( ; i > 0 ; i--) {
            tcg_gen_qemu_ld8u(dbuf, abuf, IS_USER(s));
            tcg_gen_deposit_i32(reg, reg, dbuf, (i - 1) * 8, 8);
            if (i > 1) {
                tcg_gen_addi_i32(abuf, abuf, 2);
            }
        }
    }
    tcg_temp_free(abuf);
    tcg_temp_free(dbuf);
}

/* BTST/BCHG/BCLR/BSET with an immediate bit number. */
DISAS_INSN(bitop_im)
{
    int opsize;
    int op;
    TCGv src1;
    uint32_t mask;
    int bitnum;
    TCGv tmp;
    TCGv addr;

    /* Memory operands are byte-sized; register operands are long.  */
    if ((insn & 0x38) != 0)
        opsize = OS_BYTE;
    else
        opsize = OS_LONG;
    op = (insn >> 6) & 3;

    bitnum = read_im16(env, s);
    if (m68k_feature(s->env, M68K_FEATURE_M68000)) {
        if (bitnum & 0xfe00) {
            disas_undef(env, s, insn);
            return;
        }
    } else {
        if (bitnum & 0xff00) {
            disas_undef(env, s, insn);
            return;
        }
    }

    SRC_EA(env, src1, opsize, 0, op ? &addr: NULL);

    gen_flush_flags(s);
    if (opsize == OS_BYTE)
        bitnum &= 7;
    else
        bitnum &= 31;
    mask = 1 << bitnum;

    /* Z is set from the tested bit before any modification.  */
    tcg_gen_andi_i32(QREG_CC_Z, src1, mask);

    if (op) {
        tmp = tcg_temp_new();
        switch (op) {
        case 1: /* bchg */
            tcg_gen_xori_i32(tmp, src1, mask);
            break;
        case 2: /* bclr */
            tcg_gen_andi_i32(tmp, src1, ~mask);
            break;
        case 3: /* bset */
            tcg_gen_ori_i32(tmp, src1, mask);
            break;
        default: /* btst */
            break;
        }
        DEST_EA(env, insn, opsize, tmp, &addr);
        tcg_temp_free(tmp);
    }
}

/* Assemble the CCR value from the lazy flag state; caller frees. */
static TCGv gen_get_ccr(DisasContext *s)
{
    TCGv dest;

    update_cc_op(s);
    dest = tcg_temp_new();
    gen_helper_get_ccr(dest, cpu_env);
    return dest;
}

/* Assemble the full SR (system byte | CCR); caller frees. */
static TCGv gen_get_sr(DisasContext *s)
{
    TCGv ccr;
    TCGv sr;

    ccr = gen_get_ccr(s);
    sr = tcg_temp_new();
    tcg_gen_andi_i32(sr, QREG_SR, 0xffe0);
    tcg_gen_or_i32(sr, sr, ccr);
    return sr;
}

/* Set SR (or only CCR) from the immediate VAL. */
static void gen_set_sr_im(DisasContext *s, uint16_t val, int ccr_only)
{
    if (ccr_only) {
        /* Note the per-flag storage conventions: V and N keep the flag
         * in bit 31, Z is zero-iff-set.
         */
        tcg_gen_movi_i32(QREG_CC_C, val & CCF_C ? 1 : 0);
        tcg_gen_movi_i32(QREG_CC_V, val & CCF_V ? -1 : 0);
        tcg_gen_movi_i32(QREG_CC_Z, val & CCF_Z ? 0 : 1);
        tcg_gen_movi_i32(QREG_CC_N, val & CCF_N ? -1 : 0);
        tcg_gen_movi_i32(QREG_CC_X, val & CCF_X ? 1 : 0);
    } else {
        TCGv sr = tcg_const_i32(val);
        gen_helper_set_sr(cpu_env, sr);
        tcg_temp_free(sr);
    }
    set_cc_op(s, CC_OP_FLAGS);
}

/* Set SR (or only CCR) from the TCG value VAL. */
static void gen_set_sr(DisasContext *s, TCGv val, int ccr_only)
{
    if (ccr_only) {
        gen_helper_set_ccr(cpu_env, val);
    } else {
        gen_helper_set_sr(cpu_env, val);
    }
    set_cc_op(s, CC_OP_FLAGS);
}

/* MOVE to SR/CCR: immediate form or any word-sized EA. */
static void gen_move_to_sr(CPUM68KState *env, DisasContext *s, uint16_t insn,
                           bool ccr_only)
{
    if ((insn & 0x3f) == 0x3c) {
        uint16_t val;
        val = read_im16(env, s);
        gen_set_sr_im(s, val, ccr_only);
    } else {
        TCGv src;
        SRC_EA(env, src, OS_WORD, 0, NULL);
        gen_set_sr(s, src, ccr_only);
    }
}

/* ORI/ANDI/SUBI/ADDI/EORI/CMPI #imm,<EA> — including the #imm,SR and
 * #imm,CCR encodings of the logical forms.
 */
DISAS_INSN(arith_im)
{
    int op;
    TCGv im;
    TCGv src1;
    TCGv dest;
    TCGv addr;
    int opsize;
    bool with_SR = ((insn & 0x3f) == 0x3c);

    op = (insn >> 9) & 7;
    opsize = insn_opsize(insn);
    switch (opsize) {
    case OS_BYTE:
        im = tcg_const_i32((int8_t)read_im8(env, s));
        break;
    case OS_WORD:
        im = tcg_const_i32((int16_t)read_im16(env, s));
        break;
    case OS_LONG:
        im = tcg_const_i32(read_im32(env, s));
        break;
    default:
        g_assert_not_reached();
    }

    if (with_SR) {
        /* SR/CCR can only be used with andi/eori/ori */
        if (op == 2 || op == 3 || op == 6) {
            disas_undef(env, s, insn);
            return;
        }
        switch (opsize) {
        case OS_BYTE:
            src1 = gen_get_ccr(s);
            break;
        case OS_WORD:
            if (IS_USER(s)) {
                gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
                return;
            }
            src1 = gen_get_sr(s);
            break;
        default:
            /* OS_LONG; others already g_assert_not_reached.  */
            disas_undef(env, s, insn);
            return;
        }
    } else {
        /* cmpi does not write back, so needs no destination address.  */
        SRC_EA(env, src1, opsize, 1, (op == 6) ? NULL : &addr);
    }
    dest = tcg_temp_new();
    switch (op) {
    case 0: /* ori */
        tcg_gen_or_i32(dest, src1, im);
        if (with_SR) {
            gen_set_sr(s, dest, opsize == OS_BYTE);
        } else {
            DEST_EA(env, insn, opsize, dest, &addr);
            gen_logic_cc(s, dest, opsize);
        }
        break;
    case 1: /* andi */
        tcg_gen_and_i32(dest, src1, im);
        if (with_SR) {
            gen_set_sr(s, dest, opsize == OS_BYTE);
        } else {
            DEST_EA(env, insn, opsize, dest, &addr);
            gen_logic_cc(s, dest, opsize);
        }
        break;
    case 2: /* subi */
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, src1, im);
        tcg_gen_sub_i32(dest, src1, im);
        gen_update_cc_add(dest, im, opsize);
        set_cc_op(s, CC_OP_SUBB + opsize);
        DEST_EA(env, insn, opsize, dest, &addr);
        break;
    case 3: /* addi */
        tcg_gen_add_i32(dest, src1, im);
        gen_update_cc_add(dest, im, opsize);
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, im);
        set_cc_op(s, CC_OP_ADDB + opsize);
        DEST_EA(env, insn, opsize, dest, &addr);
        break;
    case 5: /* eori */
        tcg_gen_xor_i32(dest, src1, im);
        if (with_SR) {
            gen_set_sr(s, dest, opsize == OS_BYTE);
        } else {
            DEST_EA(env, insn, opsize, dest, &addr);
            gen_logic_cc(s, dest, opsize);
        }
        break;
    case 6: /* cmpi */
        gen_update_cc_cmp(s, src1, im, opsize);
        break;
    default:
        abort();
    }
    tcg_temp_free(im);
    tcg_temp_free(dest);
}

/* CAS: single compare-and-swap on a memory operand. */
DISAS_INSN(cas)
{
    int opsize;
    TCGv addr;
    uint16_t ext;
    TCGv load;
    TCGv cmp;
    TCGMemOp opc;

    switch ((insn >> 9) & 3) {
    case 1:
        opsize = OS_BYTE;
        opc = MO_SB;
        break;
    case 2:
        opsize = OS_WORD;
        opc = MO_TESW;
        break;
    case 3:
        opsize = OS_LONG;
        opc = MO_TESL;
        break;
    default:
        g_assert_not_reached();
    }

    ext = read_im16(env, s);

    /* cas Dc,Du,<EA> */

    addr = gen_lea(env, s, insn, opsize);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    cmp = gen_extend(s, DREG(ext, 0), opsize, 1);

    /* if  <EA> == Dc then
     *     <EA> = Du
     *     Dc = <EA> (because <EA> == Dc)
     * else
     *     Dc = <EA>
     */

    load = tcg_temp_new();
    tcg_gen_atomic_cmpxchg_i32(load, addr, cmp, DREG(ext, 6),
                               IS_USER(s), opc);
    /* update flags before setting cmp to load */
    gen_update_cc_cmp(s, load, cmp, opsize);
    gen_partset_reg(opsize, DREG(ext, 0), load);

    tcg_temp_free(load);

    /* For (An)+ / -(An) EAs, commit the address update after the access.  */
    switch (extract32(insn, 3, 3)) {
    case 3: /* Indirect postincrement. */
        tcg_gen_addi_i32(AREG(insn, 0), addr, opsize_bytes(opsize));
        break;
    case 4: /* Indirect predecrememnt. */
        tcg_gen_mov_i32(AREG(insn, 0), addr);
        break;
    }
}

/* CAS2.W: dual word compare-and-swap, via helper (non-atomic fallback). */
DISAS_INSN(cas2w)
{
    uint16_t ext1, ext2;
    TCGv addr1, addr2;
    TCGv regs;

    /* cas2 Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) */

    ext1 = read_im16(env, s);

    if (ext1 & 0x8000) {
        /* Address Register */
        addr1 = AREG(ext1, 12);
    } else {
        /* Data Register */
        addr1 = DREG(ext1, 12);
    }

    ext2 = read_im16(env, s);
    if (ext2 & 0x8000) {
        /* Address Register */
        addr2 = AREG(ext2, 12);
    } else {
        /* Data Register */
        addr2 = DREG(ext2, 12);
    }

    /* if (R1) == Dc1 && (R2) == Dc2 then
     *     (R1) = Du1
     *     (R2) = Du2
     * else
     *     Dc1 = (R1)
     *     Dc2 = (R2)
     */

    /* Pack the four register numbers into one helper argument.  */
    regs = tcg_const_i32(REG(ext2, 6) |
                         (REG(ext1, 6) << 3) |
                         (REG(ext2, 0) << 6) |
                         (REG(ext1, 0) << 9));
    if (tb_cflags(s->tb) & CF_PARALLEL) {
        /* No atomic two-word cmpxchg; retranslate serially.  */
        gen_helper_exit_atomic(cpu_env);
    } else {
        gen_helper_cas2w(cpu_env, regs, addr1, addr2);
    }
    tcg_temp_free(regs);

    /* Note that cas2w also assigned to env->cc_op.
     */
    s->cc_op = CC_OP_CMPW;
    s->cc_op_synced = 1;
}

/* CAS2.L: dual longword compare-and-swap, with a parallel-safe helper. */
DISAS_INSN(cas2l)
{
    uint16_t ext1, ext2;
    TCGv addr1, addr2, regs;

    /* cas2 Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) */

    ext1 = read_im16(env, s);

    if (ext1 & 0x8000) {
        /* Address Register */
        addr1 = AREG(ext1, 12);
    } else {
        /* Data Register */
        addr1 = DREG(ext1, 12);
    }

    ext2 = read_im16(env, s);
    if (ext2 & 0x8000) {
        /* Address Register */
        addr2 = AREG(ext2, 12);
    } else {
        /* Data Register */
        addr2 = DREG(ext2, 12);
    }

    /* if (R1) == Dc1 && (R2) == Dc2 then
     *     (R1) = Du1
     *     (R2) = Du2
     * else
     *     Dc1 = (R1)
     *     Dc2 = (R2)
     */

    /* Pack the four register numbers into one helper argument.  */
    regs = tcg_const_i32(REG(ext2, 6) |
                         (REG(ext1, 6) << 3) |
                         (REG(ext2, 0) << 6) |
                         (REG(ext1, 0) << 9));
    if (tb_cflags(s->tb) & CF_PARALLEL) {
        gen_helper_cas2l_parallel(cpu_env, regs, addr1, addr2);
    } else {
        gen_helper_cas2l(cpu_env, regs, addr1, addr2);
    }
    tcg_temp_free(regs);

    /* Note that cas2l also assigned to env->cc_op. */
    s->cc_op = CC_OP_CMPL;
    s->cc_op_synced = 1;
}

/* ColdFire BYTEREV: byte-swap Dn. */
DISAS_INSN(byterev)
{
    TCGv reg;

    reg = DREG(insn, 0);
    tcg_gen_bswap32_i32(reg, reg);
}

/* MOVE/MOVEA: the general data movement instruction. */
DISAS_INSN(move)
{
    TCGv src;
    TCGv dest;
    int op;
    int opsize;

    switch (insn >> 12) {
    case 1: /* move.b */
        opsize = OS_BYTE;
        break;
    case 2: /* move.l */
        opsize = OS_LONG;
        break;
    case 3: /* move.w */
        opsize = OS_WORD;
        break;
    default:
        abort();
    }
    SRC_EA(env, src, opsize, 1, NULL);
    op = (insn >> 6) & 7;
    if (op == 1) {
        /* movea */
        /* The value will already have been sign extended. */
        dest = AREG(insn, 9);
        tcg_gen_mov_i32(dest, src);
    } else {
        /* normal move */
        uint16_t dest_ea;
        /* Destination mode/reg fields are swapped relative to a source EA;
         * rebuild an EA word in source order for DEST_EA.
         */
        dest_ea = ((insn >> 9) & 7) | (op << 3);
        DEST_EA(env, dest_ea, opsize, src, NULL);
        /* This will be correct because loads sign extend.  */
        gen_logic_cc(s, src, opsize);
    }
}

/* NEGX: negate with extend, result left in QREG_CC_N. */
DISAS_INSN(negx)
{
    TCGv z;
    TCGv src;
    TCGv addr;
    int opsize;

    opsize = insn_opsize(insn);
    SRC_EA(env, src, opsize, 1, &addr);

    gen_flush_flags(s); /* compute old Z */

    /* Perform substract with borrow.
     * (X, N) =  -(src + X);
     */

    z = tcg_const_i32(0);
    tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, src, z, QREG_CC_X, z);
    tcg_gen_sub2_i32(QREG_CC_N, QREG_CC_X, z, z, QREG_CC_N, QREG_CC_X);
    tcg_temp_free(z);
    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);

    tcg_gen_andi_i32(QREG_CC_X, QREG_CC_X, 1);

    /* Compute signed-overflow for negation.  The normal formula for
     * subtraction is (res ^ src) & (src ^ dest), but with dest==0
     * this simplies to res & src.
     */

    tcg_gen_and_i32(QREG_CC_V, QREG_CC_N, src);

    /* Copy the rest of the results into place. */
    tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
    tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);

    set_cc_op(s, CC_OP_FLAGS);

    /* result is in QREG_CC_N */

    DEST_EA(env, insn, opsize, QREG_CC_N, &addr);
}

/* LEA: load the effective address into An. */
DISAS_INSN(lea)
{
    TCGv reg;
    TCGv tmp;

    reg = AREG(insn, 9);
    tmp = gen_lea(env, s, insn, OS_LONG);
    if (IS_NULL_QREG(tmp)) {
        gen_addr_fault(s);
        return;
    }
    tcg_gen_mov_i32(reg, tmp);
}

/* CLR: store zero to the destination and set flags accordingly. */
DISAS_INSN(clr)
{
    int opsize;
    TCGv zero;

    zero = tcg_const_i32(0);

    opsize = insn_opsize(insn);
    DEST_EA(env, insn, opsize, zero, NULL);
    gen_logic_cc(s, zero, opsize);
    tcg_temp_free(zero);
}

/* MOVE from CCR. */
DISAS_INSN(move_from_ccr)
{
    TCGv ccr;

    ccr = gen_get_ccr(s);
    DEST_EA(env, insn, OS_WORD, ccr, NULL);
}

/* NEG: two's-complement negate of the destination operand. */
DISAS_INSN(neg)
{
    TCGv src1;
    TCGv dest;
    TCGv addr;
    int opsize;

    opsize = insn_opsize(insn);
    SRC_EA(env, src1, opsize, 1, &addr);
    dest = tcg_temp_new();
    tcg_gen_neg_i32(dest, src1);
    set_cc_op(s, CC_OP_SUBB + opsize);
    gen_update_cc_add(dest, src1, opsize);
    /* X/C set iff the result is non-zero.  */
    tcg_gen_setcondi_i32(TCG_COND_NE, QREG_CC_X, dest, 0);
    DEST_EA(env, insn, opsize, dest, &addr);
    tcg_temp_free(dest);
}

/* MOVE to CCR. */
DISAS_INSN(move_to_ccr)
{
    gen_move_to_sr(env, s, insn, true);
}

/* NOT: bitwise complement of the destination operand. */
DISAS_INSN(not)
{
    TCGv src1;
    TCGv dest;
    TCGv addr;
    int opsize;

    opsize = insn_opsize(insn);
    SRC_EA(env, src1, opsize, 1, &addr);
    dest = tcg_temp_new();
    tcg_gen_not_i32(dest, src1);
    DEST_EA(env, insn, opsize, dest, &addr);
    gen_logic_cc(s, dest, opsize);
}

/* SWAP: exchange the halves of Dn. */
DISAS_INSN(swap)
{
    TCGv src1;
    TCGv src2;
    TCGv reg;

    src1 = tcg_temp_new();
    src2 = tcg_temp_new();
    reg = DREG(insn, 0);
    tcg_gen_shli_i32(src1, reg, 16);
tcg_gen_shri_i32(src2, reg, 16); 2717 tcg_gen_or_i32(reg, src1, src2); 2718 tcg_temp_free(src2); 2719 tcg_temp_free(src1); 2720 gen_logic_cc(s, reg, OS_LONG); 2721 } 2722 2723 DISAS_INSN(bkpt) 2724 { 2725 gen_exception(s, s->insn_pc, EXCP_DEBUG); 2726 } 2727 2728 DISAS_INSN(pea) 2729 { 2730 TCGv tmp; 2731 2732 tmp = gen_lea(env, s, insn, OS_LONG); 2733 if (IS_NULL_QREG(tmp)) { 2734 gen_addr_fault(s); 2735 return; 2736 } 2737 gen_push(s, tmp); 2738 } 2739 2740 DISAS_INSN(ext) 2741 { 2742 int op; 2743 TCGv reg; 2744 TCGv tmp; 2745 2746 reg = DREG(insn, 0); 2747 op = (insn >> 6) & 7; 2748 tmp = tcg_temp_new(); 2749 if (op == 3) 2750 tcg_gen_ext16s_i32(tmp, reg); 2751 else 2752 tcg_gen_ext8s_i32(tmp, reg); 2753 if (op == 2) 2754 gen_partset_reg(OS_WORD, reg, tmp); 2755 else 2756 tcg_gen_mov_i32(reg, tmp); 2757 gen_logic_cc(s, tmp, OS_LONG); 2758 tcg_temp_free(tmp); 2759 } 2760 2761 DISAS_INSN(tst) 2762 { 2763 int opsize; 2764 TCGv tmp; 2765 2766 opsize = insn_opsize(insn); 2767 SRC_EA(env, tmp, opsize, 1, NULL); 2768 gen_logic_cc(s, tmp, opsize); 2769 } 2770 2771 DISAS_INSN(pulse) 2772 { 2773 /* Implemented as a NOP. */ 2774 } 2775 2776 DISAS_INSN(illegal) 2777 { 2778 gen_exception(s, s->insn_pc, EXCP_ILLEGAL); 2779 } 2780 2781 /* ??? This should be atomic. 
*/ 2782 DISAS_INSN(tas) 2783 { 2784 TCGv dest; 2785 TCGv src1; 2786 TCGv addr; 2787 2788 dest = tcg_temp_new(); 2789 SRC_EA(env, src1, OS_BYTE, 1, &addr); 2790 gen_logic_cc(s, src1, OS_BYTE); 2791 tcg_gen_ori_i32(dest, src1, 0x80); 2792 DEST_EA(env, insn, OS_BYTE, dest, &addr); 2793 tcg_temp_free(dest); 2794 } 2795 2796 DISAS_INSN(mull) 2797 { 2798 uint16_t ext; 2799 TCGv src1; 2800 int sign; 2801 2802 ext = read_im16(env, s); 2803 2804 sign = ext & 0x800; 2805 2806 if (ext & 0x400) { 2807 if (!m68k_feature(s->env, M68K_FEATURE_QUAD_MULDIV)) { 2808 gen_exception(s, s->insn_pc, EXCP_UNSUPPORTED); 2809 return; 2810 } 2811 2812 SRC_EA(env, src1, OS_LONG, 0, NULL); 2813 2814 if (sign) { 2815 tcg_gen_muls2_i32(QREG_CC_Z, QREG_CC_N, src1, DREG(ext, 12)); 2816 } else { 2817 tcg_gen_mulu2_i32(QREG_CC_Z, QREG_CC_N, src1, DREG(ext, 12)); 2818 } 2819 /* if Dl == Dh, 68040 returns low word */ 2820 tcg_gen_mov_i32(DREG(ext, 0), QREG_CC_N); 2821 tcg_gen_mov_i32(DREG(ext, 12), QREG_CC_Z); 2822 tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); 2823 2824 tcg_gen_movi_i32(QREG_CC_V, 0); 2825 tcg_gen_movi_i32(QREG_CC_C, 0); 2826 2827 set_cc_op(s, CC_OP_FLAGS); 2828 return; 2829 } 2830 SRC_EA(env, src1, OS_LONG, 0, NULL); 2831 if (m68k_feature(s->env, M68K_FEATURE_M68000)) { 2832 tcg_gen_movi_i32(QREG_CC_C, 0); 2833 if (sign) { 2834 tcg_gen_muls2_i32(QREG_CC_N, QREG_CC_V, src1, DREG(ext, 12)); 2835 /* QREG_CC_V is -(QREG_CC_V != (QREG_CC_N >> 31)) */ 2836 tcg_gen_sari_i32(QREG_CC_Z, QREG_CC_N, 31); 2837 tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, QREG_CC_V, QREG_CC_Z); 2838 } else { 2839 tcg_gen_mulu2_i32(QREG_CC_N, QREG_CC_V, src1, DREG(ext, 12)); 2840 /* QREG_CC_V is -(QREG_CC_V != 0), use QREG_CC_C as 0 */ 2841 tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, QREG_CC_V, QREG_CC_C); 2842 } 2843 tcg_gen_neg_i32(QREG_CC_V, QREG_CC_V); 2844 tcg_gen_mov_i32(DREG(ext, 12), QREG_CC_N); 2845 2846 tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N); 2847 2848 set_cc_op(s, CC_OP_FLAGS); 2849 } else { 2850 /* 
The upper 32 bits of the product are discarded, so 2851 muls.l and mulu.l are functionally equivalent. */ 2852 tcg_gen_mul_i32(DREG(ext, 12), src1, DREG(ext, 12)); 2853 gen_logic_cc(s, DREG(ext, 12), OS_LONG); 2854 } 2855 } 2856 2857 static void gen_link(DisasContext *s, uint16_t insn, int32_t offset) 2858 { 2859 TCGv reg; 2860 TCGv tmp; 2861 2862 reg = AREG(insn, 0); 2863 tmp = tcg_temp_new(); 2864 tcg_gen_subi_i32(tmp, QREG_SP, 4); 2865 gen_store(s, OS_LONG, tmp, reg, IS_USER(s)); 2866 if ((insn & 7) != 7) { 2867 tcg_gen_mov_i32(reg, tmp); 2868 } 2869 tcg_gen_addi_i32(QREG_SP, tmp, offset); 2870 tcg_temp_free(tmp); 2871 } 2872 2873 DISAS_INSN(link) 2874 { 2875 int16_t offset; 2876 2877 offset = read_im16(env, s); 2878 gen_link(s, insn, offset); 2879 } 2880 2881 DISAS_INSN(linkl) 2882 { 2883 int32_t offset; 2884 2885 offset = read_im32(env, s); 2886 gen_link(s, insn, offset); 2887 } 2888 2889 DISAS_INSN(unlk) 2890 { 2891 TCGv src; 2892 TCGv reg; 2893 TCGv tmp; 2894 2895 src = tcg_temp_new(); 2896 reg = AREG(insn, 0); 2897 tcg_gen_mov_i32(src, reg); 2898 tmp = gen_load(s, OS_LONG, src, 0, IS_USER(s)); 2899 tcg_gen_mov_i32(reg, tmp); 2900 tcg_gen_addi_i32(QREG_SP, src, 4); 2901 tcg_temp_free(src); 2902 tcg_temp_free(tmp); 2903 } 2904 2905 #if defined(CONFIG_SOFTMMU) 2906 DISAS_INSN(reset) 2907 { 2908 if (IS_USER(s)) { 2909 gen_exception(s, s->insn_pc, EXCP_PRIVILEGE); 2910 return; 2911 } 2912 2913 gen_helper_reset(cpu_env); 2914 } 2915 #endif 2916 2917 DISAS_INSN(nop) 2918 { 2919 } 2920 2921 DISAS_INSN(rtd) 2922 { 2923 TCGv tmp; 2924 int16_t offset = read_im16(env, s); 2925 2926 tmp = gen_load(s, OS_LONG, QREG_SP, 0, IS_USER(s)); 2927 tcg_gen_addi_i32(QREG_SP, QREG_SP, offset + 4); 2928 gen_jmp(s, tmp); 2929 } 2930 2931 DISAS_INSN(rts) 2932 { 2933 TCGv tmp; 2934 2935 tmp = gen_load(s, OS_LONG, QREG_SP, 0, IS_USER(s)); 2936 tcg_gen_addi_i32(QREG_SP, QREG_SP, 4); 2937 gen_jmp(s, tmp); 2938 } 2939 2940 DISAS_INSN(jump) 2941 { 2942 TCGv tmp; 2943 2944 /* Load the target 
address first to ensure correct exception 2945 behavior. */ 2946 tmp = gen_lea(env, s, insn, OS_LONG); 2947 if (IS_NULL_QREG(tmp)) { 2948 gen_addr_fault(s); 2949 return; 2950 } 2951 if ((insn & 0x40) == 0) { 2952 /* jsr */ 2953 gen_push(s, tcg_const_i32(s->pc)); 2954 } 2955 gen_jmp(s, tmp); 2956 } 2957 2958 DISAS_INSN(addsubq) 2959 { 2960 TCGv src; 2961 TCGv dest; 2962 TCGv val; 2963 int imm; 2964 TCGv addr; 2965 int opsize; 2966 2967 if ((insn & 070) == 010) { 2968 /* Operation on address register is always long. */ 2969 opsize = OS_LONG; 2970 } else { 2971 opsize = insn_opsize(insn); 2972 } 2973 SRC_EA(env, src, opsize, 1, &addr); 2974 imm = (insn >> 9) & 7; 2975 if (imm == 0) { 2976 imm = 8; 2977 } 2978 val = tcg_const_i32(imm); 2979 dest = tcg_temp_new(); 2980 tcg_gen_mov_i32(dest, src); 2981 if ((insn & 0x38) == 0x08) { 2982 /* Don't update condition codes if the destination is an 2983 address register. */ 2984 if (insn & 0x0100) { 2985 tcg_gen_sub_i32(dest, dest, val); 2986 } else { 2987 tcg_gen_add_i32(dest, dest, val); 2988 } 2989 } else { 2990 if (insn & 0x0100) { 2991 tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, val); 2992 tcg_gen_sub_i32(dest, dest, val); 2993 set_cc_op(s, CC_OP_SUBB + opsize); 2994 } else { 2995 tcg_gen_add_i32(dest, dest, val); 2996 tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, val); 2997 set_cc_op(s, CC_OP_ADDB + opsize); 2998 } 2999 gen_update_cc_add(dest, val, opsize); 3000 } 3001 tcg_temp_free(val); 3002 DEST_EA(env, insn, opsize, dest, &addr); 3003 tcg_temp_free(dest); 3004 } 3005 3006 DISAS_INSN(tpf) 3007 { 3008 switch (insn & 7) { 3009 case 2: /* One extension word. */ 3010 s->pc += 2; 3011 break; 3012 case 3: /* Two extension words. */ 3013 s->pc += 4; 3014 break; 3015 case 4: /* No extension words. 
*/ 3016 break; 3017 default: 3018 disas_undef(env, s, insn); 3019 } 3020 } 3021 3022 DISAS_INSN(branch) 3023 { 3024 int32_t offset; 3025 uint32_t base; 3026 int op; 3027 TCGLabel *l1; 3028 3029 base = s->pc; 3030 op = (insn >> 8) & 0xf; 3031 offset = (int8_t)insn; 3032 if (offset == 0) { 3033 offset = (int16_t)read_im16(env, s); 3034 } else if (offset == -1) { 3035 offset = read_im32(env, s); 3036 } 3037 if (op == 1) { 3038 /* bsr */ 3039 gen_push(s, tcg_const_i32(s->pc)); 3040 } 3041 if (op > 1) { 3042 /* Bcc */ 3043 l1 = gen_new_label(); 3044 gen_jmpcc(s, ((insn >> 8) & 0xf) ^ 1, l1); 3045 gen_jmp_tb(s, 1, base + offset); 3046 gen_set_label(l1); 3047 gen_jmp_tb(s, 0, s->pc); 3048 } else { 3049 /* Unconditional branch. */ 3050 update_cc_op(s); 3051 gen_jmp_tb(s, 0, base + offset); 3052 } 3053 } 3054 3055 DISAS_INSN(moveq) 3056 { 3057 tcg_gen_movi_i32(DREG(insn, 9), (int8_t)insn); 3058 gen_logic_cc(s, DREG(insn, 9), OS_LONG); 3059 } 3060 3061 DISAS_INSN(mvzs) 3062 { 3063 int opsize; 3064 TCGv src; 3065 TCGv reg; 3066 3067 if (insn & 0x40) 3068 opsize = OS_WORD; 3069 else 3070 opsize = OS_BYTE; 3071 SRC_EA(env, src, opsize, (insn & 0x80) == 0, NULL); 3072 reg = DREG(insn, 9); 3073 tcg_gen_mov_i32(reg, src); 3074 gen_logic_cc(s, src, opsize); 3075 } 3076 3077 DISAS_INSN(or) 3078 { 3079 TCGv reg; 3080 TCGv dest; 3081 TCGv src; 3082 TCGv addr; 3083 int opsize; 3084 3085 opsize = insn_opsize(insn); 3086 reg = gen_extend(s, DREG(insn, 9), opsize, 0); 3087 dest = tcg_temp_new(); 3088 if (insn & 0x100) { 3089 SRC_EA(env, src, opsize, 0, &addr); 3090 tcg_gen_or_i32(dest, src, reg); 3091 DEST_EA(env, insn, opsize, dest, &addr); 3092 } else { 3093 SRC_EA(env, src, opsize, 0, NULL); 3094 tcg_gen_or_i32(dest, src, reg); 3095 gen_partset_reg(opsize, DREG(insn, 9), dest); 3096 } 3097 gen_logic_cc(s, dest, opsize); 3098 tcg_temp_free(dest); 3099 } 3100 3101 DISAS_INSN(suba) 3102 { 3103 TCGv src; 3104 TCGv reg; 3105 3106 SRC_EA(env, src, (insn & 0x100) ? 
OS_LONG : OS_WORD, 1, NULL); 3107 reg = AREG(insn, 9); 3108 tcg_gen_sub_i32(reg, reg, src); 3109 } 3110 3111 static inline void gen_subx(DisasContext *s, TCGv src, TCGv dest, int opsize) 3112 { 3113 TCGv tmp; 3114 3115 gen_flush_flags(s); /* compute old Z */ 3116 3117 /* Perform substract with borrow. 3118 * (X, N) = dest - (src + X); 3119 */ 3120 3121 tmp = tcg_const_i32(0); 3122 tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, src, tmp, QREG_CC_X, tmp); 3123 tcg_gen_sub2_i32(QREG_CC_N, QREG_CC_X, dest, tmp, QREG_CC_N, QREG_CC_X); 3124 gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1); 3125 tcg_gen_andi_i32(QREG_CC_X, QREG_CC_X, 1); 3126 3127 /* Compute signed-overflow for substract. */ 3128 3129 tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, dest); 3130 tcg_gen_xor_i32(tmp, dest, src); 3131 tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, tmp); 3132 tcg_temp_free(tmp); 3133 3134 /* Copy the rest of the results into place. */ 3135 tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */ 3136 tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X); 3137 3138 set_cc_op(s, CC_OP_FLAGS); 3139 3140 /* result is in QREG_CC_N */ 3141 } 3142 3143 DISAS_INSN(subx_reg) 3144 { 3145 TCGv dest; 3146 TCGv src; 3147 int opsize; 3148 3149 opsize = insn_opsize(insn); 3150 3151 src = gen_extend(s, DREG(insn, 0), opsize, 1); 3152 dest = gen_extend(s, DREG(insn, 9), opsize, 1); 3153 3154 gen_subx(s, src, dest, opsize); 3155 3156 gen_partset_reg(opsize, DREG(insn, 9), QREG_CC_N); 3157 } 3158 3159 DISAS_INSN(subx_mem) 3160 { 3161 TCGv src; 3162 TCGv addr_src; 3163 TCGv dest; 3164 TCGv addr_dest; 3165 int opsize; 3166 3167 opsize = insn_opsize(insn); 3168 3169 addr_src = AREG(insn, 0); 3170 tcg_gen_subi_i32(addr_src, addr_src, opsize_bytes(opsize)); 3171 src = gen_load(s, opsize, addr_src, 1, IS_USER(s)); 3172 3173 addr_dest = AREG(insn, 9); 3174 tcg_gen_subi_i32(addr_dest, addr_dest, opsize_bytes(opsize)); 3175 dest = gen_load(s, opsize, addr_dest, 1, IS_USER(s)); 3176 3177 gen_subx(s, src, dest, opsize); 3178 3179 gen_store(s, 
opsize, addr_dest, QREG_CC_N, IS_USER(s));

    tcg_temp_free(dest);
    tcg_temp_free(src);
}

/* MOV3Q: move a 3-bit immediate to <ea>; the encoding 0 means -1. */
DISAS_INSN(mov3q)
{
    TCGv src;
    int val;

    val = (insn >> 9) & 7;
    if (val == 0)
        val = -1;
    src = tcg_const_i32(val);
    gen_logic_cc(s, src, OS_LONG);
    DEST_EA(env, insn, OS_LONG, src, NULL);
    tcg_temp_free(src);
}

/* CMP: compare <ea> with Dn at the instruction's operand size. */
DISAS_INSN(cmp)
{
    TCGv src;
    TCGv reg;
    int opsize;

    opsize = insn_opsize(insn);
    SRC_EA(env, src, opsize, 1, NULL);
    reg = gen_extend(s, DREG(insn, 9), opsize, 1);
    gen_update_cc_cmp(s, reg, src, opsize);
}

/* CMPA: compare <ea> with An.  The word-sized source is sign-extended
 * by SRC_EA, so the comparison itself is always done at long size.
 */
DISAS_INSN(cmpa)
{
    int opsize;
    TCGv src;
    TCGv reg;

    if (insn & 0x100) {
        opsize = OS_LONG;
    } else {
        opsize = OS_WORD;
    }
    SRC_EA(env, src, opsize, 1, NULL);
    reg = AREG(insn, 9);
    gen_update_cc_cmp(s, reg, src, OS_LONG);
}

/* CMPM: compare (Ay)+ with (Ax)+; both operands are post-increment loads. */
DISAS_INSN(cmpm)
{
    int opsize = insn_opsize(insn);
    TCGv src, dst;

    /* Post-increment load (mode 3) from Ay.  */
    src = gen_ea_mode(env, s, 3, REG(insn, 0), opsize,
                      NULL_QREG, NULL, EA_LOADS, IS_USER(s));
    /* Post-increment load (mode 3) from Ax.  */
    dst = gen_ea_mode(env, s, 3, REG(insn, 9), opsize,
                      NULL_QREG, NULL, EA_LOADS, IS_USER(s));

    gen_update_cc_cmp(s, dst, src, opsize);
}

/* EOR: exclusive-or Dn into <ea>, logic flags from the result. */
DISAS_INSN(eor)
{
    TCGv src;
    TCGv dest;
    TCGv addr;
    int opsize;

    opsize = insn_opsize(insn);

    SRC_EA(env, src, opsize, 0, &addr);
    dest = tcg_temp_new();
    tcg_gen_xor_i32(dest, src, DREG(insn, 9));
    gen_logic_cc(s, dest, opsize);
    DEST_EA(env, insn, opsize, dest, &addr);
    tcg_temp_free(dest);
}

/* Swap the contents of two registers via a temporary. */
static void do_exg(TCGv reg1, TCGv reg2)
{
    TCGv temp = tcg_temp_new();
    tcg_gen_mov_i32(temp, reg1);
    tcg_gen_mov_i32(reg1, reg2);
    tcg_gen_mov_i32(reg2, temp);
    tcg_temp_free(temp);
}

DISAS_INSN(exg_dd)
{
    /* exchange Dx and Dy */
    do_exg(DREG(insn, 9), DREG(insn, 0));
}

DISAS_INSN(exg_aa)
{
    /* exchange Ax and Ay */
    do_exg(AREG(insn, 9), AREG(insn, 0));
}

DISAS_INSN(exg_da)
{
    /* exchange Dx and Ay */
    do_exg(DREG(insn, 9), AREG(insn, 0));
}

/* AND: bit 8 selects direction — set: Dn & <ea> -> <ea>;
 * clear: <ea> & Dn -> Dn (partial register write at opsize).
 */
DISAS_INSN(and)
{
    TCGv src;
    TCGv reg;
    TCGv dest;
    TCGv addr;
    int opsize;

    dest = tcg_temp_new();

    opsize = insn_opsize(insn);
    reg = DREG(insn, 9);
    if (insn & 0x100) {
        SRC_EA(env, src, opsize, 0, &addr);
        tcg_gen_and_i32(dest, src, reg);
        DEST_EA(env, insn, opsize, dest, &addr);
    } else {
        SRC_EA(env, src, opsize, 0, NULL);
        tcg_gen_and_i32(dest, src, reg);
        gen_partset_reg(opsize, reg, dest);
    }
    gen_logic_cc(s, dest, opsize);
    tcg_temp_free(dest);
}

/* ADDA: add <ea> to An; flags are not affected. */
DISAS_INSN(adda)
{
    TCGv src;
    TCGv reg;

    SRC_EA(env, src, (insn & 0x100) ?
OS_LONG : OS_WORD, 1, NULL); 3317 reg = AREG(insn, 9); 3318 tcg_gen_add_i32(reg, reg, src); 3319 } 3320 3321 static inline void gen_addx(DisasContext *s, TCGv src, TCGv dest, int opsize) 3322 { 3323 TCGv tmp; 3324 3325 gen_flush_flags(s); /* compute old Z */ 3326 3327 /* Perform addition with carry. 3328 * (X, N) = src + dest + X; 3329 */ 3330 3331 tmp = tcg_const_i32(0); 3332 tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, QREG_CC_X, tmp, dest, tmp); 3333 tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, QREG_CC_N, QREG_CC_X, src, tmp); 3334 gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1); 3335 3336 /* Compute signed-overflow for addition. */ 3337 3338 tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, src); 3339 tcg_gen_xor_i32(tmp, dest, src); 3340 tcg_gen_andc_i32(QREG_CC_V, QREG_CC_V, tmp); 3341 tcg_temp_free(tmp); 3342 3343 /* Copy the rest of the results into place. */ 3344 tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */ 3345 tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X); 3346 3347 set_cc_op(s, CC_OP_FLAGS); 3348 3349 /* result is in QREG_CC_N */ 3350 } 3351 3352 DISAS_INSN(addx_reg) 3353 { 3354 TCGv dest; 3355 TCGv src; 3356 int opsize; 3357 3358 opsize = insn_opsize(insn); 3359 3360 dest = gen_extend(s, DREG(insn, 9), opsize, 1); 3361 src = gen_extend(s, DREG(insn, 0), opsize, 1); 3362 3363 gen_addx(s, src, dest, opsize); 3364 3365 gen_partset_reg(opsize, DREG(insn, 9), QREG_CC_N); 3366 } 3367 3368 DISAS_INSN(addx_mem) 3369 { 3370 TCGv src; 3371 TCGv addr_src; 3372 TCGv dest; 3373 TCGv addr_dest; 3374 int opsize; 3375 3376 opsize = insn_opsize(insn); 3377 3378 addr_src = AREG(insn, 0); 3379 tcg_gen_subi_i32(addr_src, addr_src, opsize_bytes(opsize)); 3380 src = gen_load(s, opsize, addr_src, 1, IS_USER(s)); 3381 3382 addr_dest = AREG(insn, 9); 3383 tcg_gen_subi_i32(addr_dest, addr_dest, opsize_bytes(opsize)); 3384 dest = gen_load(s, opsize, addr_dest, 1, IS_USER(s)); 3385 3386 gen_addx(s, src, dest, opsize); 3387 3388 gen_store(s, opsize, addr_dest, QREG_CC_N, IS_USER(s)); 3389 3390 
tcg_temp_free(dest);
    tcg_temp_free(src);
}

/* Immediate-count shift: asl/asr/lsl/lsr #imm,Dx.
 * The 3-bit count field encodes 1..8 (0 means 8), so the count is
 * always strictly less than 32 and host shifts are well defined.
 */
static inline void shift_im(DisasContext *s, uint16_t insn, int opsize)
{
    int count = (insn >> 9) & 7;
    int logical = insn & 8;          /* lsl/lsr vs asl/asr */
    int left = insn & 0x100;
    int bits = opsize_bytes(opsize) * 8;
    TCGv reg = gen_extend(s, DREG(insn, 0), opsize, !logical);

    if (count == 0) {
        count = 8;
    }

    tcg_gen_movi_i32(QREG_CC_V, 0);
    if (left) {
        /* C/X = last bit shifted out. */
        tcg_gen_shri_i32(QREG_CC_C, reg, bits - count);
        tcg_gen_shli_i32(QREG_CC_N, reg, count);

        /* Note that ColdFire always clears V (done above),
           while M68000 sets if the most significant bit is changed at
           any time during the shift operation */
        if (!logical && m68k_feature(s->env, M68K_FEATURE_M68000)) {
            /* if shift count >= bits, V is (reg != 0) */
            if (count >= bits) {
                /* QREG_CC_V currently holds 0, used here as a zero source. */
                tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, reg, QREG_CC_V);
            } else {
                /* V != 0 iff the sign bit differs from any of the
                   count bits below it that get shifted through it. */
                TCGv t0 = tcg_temp_new();
                tcg_gen_sari_i32(QREG_CC_V, reg, bits - 1);
                tcg_gen_sari_i32(t0, reg, bits - count - 1);
                tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, QREG_CC_V, t0);
                tcg_temp_free(t0);
            }
            tcg_gen_neg_i32(QREG_CC_V, QREG_CC_V);
        }
    } else {
        /* C/X = last bit shifted out (bit count-1). */
        tcg_gen_shri_i32(QREG_CC_C, reg, count - 1);
        if (logical) {
            tcg_gen_shri_i32(QREG_CC_N, reg, count);
        } else {
            tcg_gen_sari_i32(QREG_CC_N, reg, count);
        }
    }

    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
    tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);
    tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
    tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C);

    gen_partset_reg(opsize, DREG(insn, 0), QREG_CC_N);
    set_cc_op(s, CC_OP_FLAGS);
}

/* Register-count shift: asl/asr/lsl/lsr Dy,Dx.  The count is taken
 * modulo 64, so the shift is performed at 64 bits to keep the host
 * shift in range and to make the carry bit easy to recover.
 */
static inline void shift_reg(DisasContext *s, uint16_t insn, int opsize)
{
    int logical = insn & 8;
    int left = insn & 0x100;
    int bits = opsize_bytes(opsize) * 8;
    TCGv reg = gen_extend(s, DREG(insn, 0), opsize, !logical);
    TCGv s32;
    TCGv_i64 t64, s64;

    t64 = tcg_temp_new_i64();
    s64 = tcg_temp_new_i64();
    s32 = tcg_temp_new();

    /* Note that m68k truncates the shift count modulo 64, not 32.
       In addition, a 64-bit shift makes it easy to find "the last
       bit shifted out", for the carry flag. */
    tcg_gen_andi_i32(s32, DREG(insn, 9), 63);
    tcg_gen_extu_i32_i64(s64, s32);
    tcg_gen_extu_i32_i64(t64, reg);

    /* Optimistically set V=0.  Also used as a zero source below.  */
    tcg_gen_movi_i32(QREG_CC_V, 0);
    if (left) {
        tcg_gen_shl_i64(t64, t64, s64);

        if (opsize == OS_LONG) {
            /* High half of the 64-bit result is the carry chain. */
            tcg_gen_extr_i64_i32(QREG_CC_N, QREG_CC_C, t64);
            /* Note that C=0 if shift count is 0, and we get that for free. */
        } else {
            /* For byte/word, the bit just above the operand is C,
               but force C=0 when the count is zero. */
            TCGv zero = tcg_const_i32(0);
            tcg_gen_extrl_i64_i32(QREG_CC_N, t64);
            tcg_gen_shri_i32(QREG_CC_C, QREG_CC_N, bits);
            tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
                                s32, zero, zero, QREG_CC_C);
            tcg_temp_free(zero);
        }
        tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);

        /* X = C, but only if the shift count was non-zero.  */
        tcg_gen_movcond_i32(TCG_COND_NE, QREG_CC_X, s32, QREG_CC_V,
                            QREG_CC_C, QREG_CC_X);

        /* M68000 sets V if the most significant bit is changed at
         * any time during the shift operation.  Do this via creating
         * an extension of the sign bit, comparing, and discarding
         * the bits below the sign bit.  I.e.
         *     int64_t s = (intN_t)reg;
         *     int64_t t = (int64_t)(intN_t)reg << count;
         *     V = ((s ^ t) & (-1 << (bits - 1))) != 0
         */
        if (!logical && m68k_feature(s->env, M68K_FEATURE_M68000)) {
            TCGv_i64 tt = tcg_const_i64(32);
            /* if shift is greater than 32, use 32 */
            tcg_gen_movcond_i64(TCG_COND_GT, s64, s64, tt, tt, s64);
            tcg_temp_free_i64(tt);
            /* Sign extend the input to 64 bits; re-do the shift.  */
            tcg_gen_ext_i32_i64(t64, reg);
            tcg_gen_shl_i64(s64, t64, s64);
            /* Clear all bits that are unchanged.  */
            tcg_gen_xor_i64(t64, t64, s64);
            /* Ignore the bits below the sign bit.  */
            tcg_gen_andi_i64(t64, t64, -1ULL << (bits - 1));
            /* If any bits remain set, we have overflow.  */
            tcg_gen_setcondi_i64(TCG_COND_NE, t64, t64, 0);
            tcg_gen_extrl_i64_i32(QREG_CC_V, t64);
            tcg_gen_neg_i32(QREG_CC_V, QREG_CC_V);
        }
    } else {
        /* Right shift: pre-shift the operand into the high half so the
           last bit shifted out lands in the low half (the C position). */
        tcg_gen_shli_i64(t64, t64, 32);
        if (logical) {
            tcg_gen_shr_i64(t64, t64, s64);
        } else {
            tcg_gen_sar_i64(t64, t64, s64);
        }
        tcg_gen_extr_i64_i32(QREG_CC_C, QREG_CC_N, t64);

        /* Note that C=0 if shift count is 0, and we get that for free.  */
        tcg_gen_shri_i32(QREG_CC_C, QREG_CC_C, 31);

        /* X = C, but only if the shift count was non-zero.  */
        tcg_gen_movcond_i32(TCG_COND_NE, QREG_CC_X, s32, QREG_CC_V,
                            QREG_CC_C, QREG_CC_X);
    }
    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
    tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);

    tcg_temp_free(s32);
    tcg_temp_free_i64(s64);
    tcg_temp_free_i64(t64);

    /* Write back the result.
*/ 3536 gen_partset_reg(opsize, DREG(insn, 0), QREG_CC_N); 3537 set_cc_op(s, CC_OP_FLAGS); 3538 } 3539 3540 DISAS_INSN(shift8_im) 3541 { 3542 shift_im(s, insn, OS_BYTE); 3543 } 3544 3545 DISAS_INSN(shift16_im) 3546 { 3547 shift_im(s, insn, OS_WORD); 3548 } 3549 3550 DISAS_INSN(shift_im) 3551 { 3552 shift_im(s, insn, OS_LONG); 3553 } 3554 3555 DISAS_INSN(shift8_reg) 3556 { 3557 shift_reg(s, insn, OS_BYTE); 3558 } 3559 3560 DISAS_INSN(shift16_reg) 3561 { 3562 shift_reg(s, insn, OS_WORD); 3563 } 3564 3565 DISAS_INSN(shift_reg) 3566 { 3567 shift_reg(s, insn, OS_LONG); 3568 } 3569 3570 DISAS_INSN(shift_mem) 3571 { 3572 int logical = insn & 8; 3573 int left = insn & 0x100; 3574 TCGv src; 3575 TCGv addr; 3576 3577 SRC_EA(env, src, OS_WORD, !logical, &addr); 3578 tcg_gen_movi_i32(QREG_CC_V, 0); 3579 if (left) { 3580 tcg_gen_shri_i32(QREG_CC_C, src, 15); 3581 tcg_gen_shli_i32(QREG_CC_N, src, 1); 3582 3583 /* Note that ColdFire always clears V, 3584 while M68000 sets if the most significant bit is changed at 3585 any time during the shift operation */ 3586 if (!logical && m68k_feature(s->env, M68K_FEATURE_M68000)) { 3587 src = gen_extend(s, src, OS_WORD, 1); 3588 tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, src); 3589 } 3590 } else { 3591 tcg_gen_mov_i32(QREG_CC_C, src); 3592 if (logical) { 3593 tcg_gen_shri_i32(QREG_CC_N, src, 1); 3594 } else { 3595 tcg_gen_sari_i32(QREG_CC_N, src, 1); 3596 } 3597 } 3598 3599 gen_ext(QREG_CC_N, QREG_CC_N, OS_WORD, 1); 3600 tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1); 3601 tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N); 3602 tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C); 3603 3604 DEST_EA(env, insn, OS_WORD, QREG_CC_N, &addr); 3605 set_cc_op(s, CC_OP_FLAGS); 3606 } 3607 3608 static void rotate(TCGv reg, TCGv shift, int left, int size) 3609 { 3610 switch (size) { 3611 case 8: 3612 /* Replicate the 8-bit input so that a 32-bit rotate works. 
*/ 3613 tcg_gen_ext8u_i32(reg, reg); 3614 tcg_gen_muli_i32(reg, reg, 0x01010101); 3615 goto do_long; 3616 case 16: 3617 /* Replicate the 16-bit input so that a 32-bit rotate works. */ 3618 tcg_gen_deposit_i32(reg, reg, reg, 16, 16); 3619 goto do_long; 3620 do_long: 3621 default: 3622 if (left) { 3623 tcg_gen_rotl_i32(reg, reg, shift); 3624 } else { 3625 tcg_gen_rotr_i32(reg, reg, shift); 3626 } 3627 } 3628 3629 /* compute flags */ 3630 3631 switch (size) { 3632 case 8: 3633 tcg_gen_ext8s_i32(reg, reg); 3634 break; 3635 case 16: 3636 tcg_gen_ext16s_i32(reg, reg); 3637 break; 3638 default: 3639 break; 3640 } 3641 3642 /* QREG_CC_X is not affected */ 3643 3644 tcg_gen_mov_i32(QREG_CC_N, reg); 3645 tcg_gen_mov_i32(QREG_CC_Z, reg); 3646 3647 if (left) { 3648 tcg_gen_andi_i32(QREG_CC_C, reg, 1); 3649 } else { 3650 tcg_gen_shri_i32(QREG_CC_C, reg, 31); 3651 } 3652 3653 tcg_gen_movi_i32(QREG_CC_V, 0); /* always cleared */ 3654 } 3655 3656 static void rotate_x_flags(TCGv reg, TCGv X, int size) 3657 { 3658 switch (size) { 3659 case 8: 3660 tcg_gen_ext8s_i32(reg, reg); 3661 break; 3662 case 16: 3663 tcg_gen_ext16s_i32(reg, reg); 3664 break; 3665 default: 3666 break; 3667 } 3668 tcg_gen_mov_i32(QREG_CC_N, reg); 3669 tcg_gen_mov_i32(QREG_CC_Z, reg); 3670 tcg_gen_mov_i32(QREG_CC_X, X); 3671 tcg_gen_mov_i32(QREG_CC_C, X); 3672 tcg_gen_movi_i32(QREG_CC_V, 0); 3673 } 3674 3675 /* Result of rotate_x() is valid if 0 <= shift <= size */ 3676 static TCGv rotate_x(TCGv reg, TCGv shift, int left, int size) 3677 { 3678 TCGv X, shl, shr, shx, sz, zero; 3679 3680 sz = tcg_const_i32(size); 3681 3682 shr = tcg_temp_new(); 3683 shl = tcg_temp_new(); 3684 shx = tcg_temp_new(); 3685 if (left) { 3686 tcg_gen_mov_i32(shl, shift); /* shl = shift */ 3687 tcg_gen_movi_i32(shr, size + 1); 3688 tcg_gen_sub_i32(shr, shr, shift); /* shr = size + 1 - shift */ 3689 tcg_gen_subi_i32(shx, shift, 1); /* shx = shift - 1 */ 3690 /* shx = shx < 0 ? 
size : shx; */ 3691 zero = tcg_const_i32(0); 3692 tcg_gen_movcond_i32(TCG_COND_LT, shx, shx, zero, sz, shx); 3693 tcg_temp_free(zero); 3694 } else { 3695 tcg_gen_mov_i32(shr, shift); /* shr = shift */ 3696 tcg_gen_movi_i32(shl, size + 1); 3697 tcg_gen_sub_i32(shl, shl, shift); /* shl = size + 1 - shift */ 3698 tcg_gen_sub_i32(shx, sz, shift); /* shx = size - shift */ 3699 } 3700 3701 /* reg = (reg << shl) | (reg >> shr) | (x << shx); */ 3702 3703 tcg_gen_shl_i32(shl, reg, shl); 3704 tcg_gen_shr_i32(shr, reg, shr); 3705 tcg_gen_or_i32(reg, shl, shr); 3706 tcg_temp_free(shl); 3707 tcg_temp_free(shr); 3708 tcg_gen_shl_i32(shx, QREG_CC_X, shx); 3709 tcg_gen_or_i32(reg, reg, shx); 3710 tcg_temp_free(shx); 3711 3712 /* X = (reg >> size) & 1 */ 3713 3714 X = tcg_temp_new(); 3715 tcg_gen_shr_i32(X, reg, sz); 3716 tcg_gen_andi_i32(X, X, 1); 3717 tcg_temp_free(sz); 3718 3719 return X; 3720 } 3721 3722 /* Result of rotate32_x() is valid if 0 <= shift < 33 */ 3723 static TCGv rotate32_x(TCGv reg, TCGv shift, int left) 3724 { 3725 TCGv_i64 t0, shift64; 3726 TCGv X, lo, hi, zero; 3727 3728 shift64 = tcg_temp_new_i64(); 3729 tcg_gen_extu_i32_i64(shift64, shift); 3730 3731 t0 = tcg_temp_new_i64(); 3732 3733 X = tcg_temp_new(); 3734 lo = tcg_temp_new(); 3735 hi = tcg_temp_new(); 3736 3737 if (left) { 3738 /* create [reg:X:..] 
*/ 3739 3740 tcg_gen_shli_i32(lo, QREG_CC_X, 31); 3741 tcg_gen_concat_i32_i64(t0, lo, reg); 3742 3743 /* rotate */ 3744 3745 tcg_gen_rotl_i64(t0, t0, shift64); 3746 tcg_temp_free_i64(shift64); 3747 3748 /* result is [reg:..:reg:X] */ 3749 3750 tcg_gen_extr_i64_i32(lo, hi, t0); 3751 tcg_gen_andi_i32(X, lo, 1); 3752 3753 tcg_gen_shri_i32(lo, lo, 1); 3754 } else { 3755 /* create [..:X:reg] */ 3756 3757 tcg_gen_concat_i32_i64(t0, reg, QREG_CC_X); 3758 3759 tcg_gen_rotr_i64(t0, t0, shift64); 3760 tcg_temp_free_i64(shift64); 3761 3762 /* result is value: [X:reg:..:reg] */ 3763 3764 tcg_gen_extr_i64_i32(lo, hi, t0); 3765 3766 /* extract X */ 3767 3768 tcg_gen_shri_i32(X, hi, 31); 3769 3770 /* extract result */ 3771 3772 tcg_gen_shli_i32(hi, hi, 1); 3773 } 3774 tcg_temp_free_i64(t0); 3775 tcg_gen_or_i32(lo, lo, hi); 3776 tcg_temp_free(hi); 3777 3778 /* if shift == 0, register and X are not affected */ 3779 3780 zero = tcg_const_i32(0); 3781 tcg_gen_movcond_i32(TCG_COND_EQ, X, shift, zero, QREG_CC_X, X); 3782 tcg_gen_movcond_i32(TCG_COND_EQ, reg, shift, zero, reg, lo); 3783 tcg_temp_free(zero); 3784 tcg_temp_free(lo); 3785 3786 return X; 3787 } 3788 3789 DISAS_INSN(rotate_im) 3790 { 3791 TCGv shift; 3792 int tmp; 3793 int left = (insn & 0x100); 3794 3795 tmp = (insn >> 9) & 7; 3796 if (tmp == 0) { 3797 tmp = 8; 3798 } 3799 3800 shift = tcg_const_i32(tmp); 3801 if (insn & 8) { 3802 rotate(DREG(insn, 0), shift, left, 32); 3803 } else { 3804 TCGv X = rotate32_x(DREG(insn, 0), shift, left); 3805 rotate_x_flags(DREG(insn, 0), X, 32); 3806 tcg_temp_free(X); 3807 } 3808 tcg_temp_free(shift); 3809 3810 set_cc_op(s, CC_OP_FLAGS); 3811 } 3812 3813 DISAS_INSN(rotate8_im) 3814 { 3815 int left = (insn & 0x100); 3816 TCGv reg; 3817 TCGv shift; 3818 int tmp; 3819 3820 reg = gen_extend(s, DREG(insn, 0), OS_BYTE, 0); 3821 3822 tmp = (insn >> 9) & 7; 3823 if (tmp == 0) { 3824 tmp = 8; 3825 } 3826 3827 shift = tcg_const_i32(tmp); 3828 if (insn & 8) { 3829 rotate(reg, shift, left, 8); 3830 } 
else {
        /* Rotate through X (extend): ROXL.B/ROXR.B #imm,Dn. */
        TCGv X = rotate_x(reg, shift, left, 8);
        rotate_x_flags(reg, X, 8);
        tcg_temp_free(X);
    }
    tcg_temp_free(shift);
    gen_partset_reg(OS_BYTE, DREG(insn, 0), reg);
    set_cc_op(s, CC_OP_FLAGS);
}

/* Word rotate with immediate count: ROL/ROR/ROXL/ROXR.W #imm,Dn.
   A zero count field encodes a count of 8.  */
DISAS_INSN(rotate16_im)
{
    int left = (insn & 0x100);
    TCGv reg;
    TCGv shift;
    int tmp;

    reg = gen_extend(s, DREG(insn, 0), OS_WORD, 0);
    tmp = (insn >> 9) & 7;
    if (tmp == 0) {
        tmp = 8;            /* count 0 means 8 */
    }

    shift = tcg_const_i32(tmp);
    if (insn & 8) {
        /* Plain rotate (ROL/ROR) */
        rotate(reg, shift, left, 16);
    } else {
        /* Rotate through X (ROXL/ROXR) */
        TCGv X = rotate_x(reg, shift, left, 16);
        rotate_x_flags(reg, X, 16);
        tcg_temp_free(X);
    }
    tcg_temp_free(shift);
    gen_partset_reg(OS_WORD, DREG(insn, 0), reg);
    set_cc_op(s, CC_OP_FLAGS);
}

/* Long rotate with count in a data register: ROL/ROR/ROXL/ROXR.L Dx,Dy. */
DISAS_INSN(rotate_reg)
{
    TCGv reg;
    TCGv src;
    TCGv t0, t1;
    int left = (insn & 0x100);

    reg = DREG(insn, 0);
    src = DREG(insn, 9);
    /* shift in [0..63] */
    t0 = tcg_temp_new();
    tcg_gen_andi_i32(t0, src, 63);
    t1 = tcg_temp_new_i32();
    if (insn & 8) {
        /* Plain rotate: effective count is mod 32 */
        tcg_gen_andi_i32(t1, src, 31);
        rotate(reg, t1, left, 32);
        /* if shift == 0, clear C */
        tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
                            t0, QREG_CC_V /* 0 */,
                            QREG_CC_V /* 0 */, QREG_CC_C);
    } else {
        TCGv X;
        /* modulo 33: rotate-through-X of a 32-bit value cycles
           over 33 bit positions (32 data bits plus X) */
        tcg_gen_movi_i32(t1, 33);
        tcg_gen_remu_i32(t1, t0, t1);
        X = rotate32_x(DREG(insn, 0), t1, left);
        rotate_x_flags(DREG(insn, 0), X, 32);
        tcg_temp_free(X);
    }
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    set_cc_op(s, CC_OP_FLAGS);
}

/* Byte rotate with count in a data register. */
DISAS_INSN(rotate8_reg)
{
    TCGv reg;
    TCGv src;
    TCGv t0, t1;
    int left = (insn & 0x100);

    reg = gen_extend(s, DREG(insn, 0), OS_BYTE, 0);
    src = DREG(insn, 9);
    /* shift in [0..63] */
    t0 = tcg_temp_new_i32();
    tcg_gen_andi_i32(t0, src, 63);
    t1 = tcg_temp_new_i32();
    if (insn & 8) {
        tcg_gen_andi_i32(t1, src, 7);
        rotate(reg, t1, left, 8);
        /* if shift == 0, clear C */
        tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
                            t0, QREG_CC_V /* 0 */,
                            QREG_CC_V /* 0 */, QREG_CC_C);
    } else {
        TCGv X;
        /* modulo 9: 8 data bits plus X */
        tcg_gen_movi_i32(t1, 9);
        tcg_gen_remu_i32(t1, t0, t1);
        X = rotate_x(reg, t1, left, 8);
        rotate_x_flags(reg, X, 8);
        tcg_temp_free(X);
    }
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    gen_partset_reg(OS_BYTE, DREG(insn, 0), reg);
    set_cc_op(s, CC_OP_FLAGS);
}

/* Word rotate with count in a data register. */
DISAS_INSN(rotate16_reg)
{
    TCGv reg;
    TCGv src;
    TCGv t0, t1;
    int left = (insn & 0x100);

    reg = gen_extend(s, DREG(insn, 0), OS_WORD, 0);
    src = DREG(insn, 9);
    /* shift in [0..63] */
    t0 = tcg_temp_new_i32();
    tcg_gen_andi_i32(t0, src, 63);
    t1 = tcg_temp_new_i32();
    if (insn & 8) {
        tcg_gen_andi_i32(t1, src, 15);
        rotate(reg, t1, left, 16);
        /* if shift == 0, clear C */
        tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
                            t0, QREG_CC_V /* 0 */,
                            QREG_CC_V /* 0 */, QREG_CC_C);
    } else {
        TCGv X;
        /* modulo 17: 16 data bits plus X */
        tcg_gen_movi_i32(t1, 17);
        tcg_gen_remu_i32(t1, t0, t1);
        X = rotate_x(reg, t1, left, 16);
        rotate_x_flags(reg, X, 16);
        tcg_temp_free(X);
    }
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    gen_partset_reg(OS_WORD, DREG(insn, 0), reg);
    set_cc_op(s, CC_OP_FLAGS);
}

/* Memory rotate: rotates a word in memory by exactly one bit. */
DISAS_INSN(rotate_mem)
{
    TCGv src;
    TCGv addr;
    TCGv shift;
    int left = (insn & 0x100);

    SRC_EA(env, src, OS_WORD, 0, &addr);

    /* Memory forms always rotate by one. */
    shift = tcg_const_i32(1);
    if (insn & 0x0200) {
        rotate(src, shift, left, 16);
    } else {
        TCGv X = rotate_x(src, shift, left, 16);
        rotate_x_flags(src, X, 16);
        tcg_temp_free(X);
    }
    tcg_temp_free(shift);
    DEST_EA(env, insn, OS_WORD, src, &addr);
    set_cc_op(s, CC_OP_FLAGS);
}
/* BFEXTU/BFEXTS Dn{ofs:len},Dm: extract an (un)signed bitfield from a
   data register.  Bit 9 of insn (0x200) selects the signed variant. */
DISAS_INSN(bfext_reg)
{
    int ext = read_im16(env, s);
    int is_sign = insn & 0x200;
    TCGv src = DREG(insn, 0);
    TCGv dst = DREG(ext, 12);
    int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;   /* 0 encodes 32 */
    int ofs = extract32(ext, 6, 5);  /* big bit-endian */
    int pos = 32 - ofs - len;        /* little bit-endian */
    TCGv tmp = tcg_temp_new();
    TCGv shift;

    /* In general, we're going to rotate the field so that it's at the
       top of the word and then right-shift by the complement of the
       width to extend the field.  */
    if (ext & 0x20) {
        /* Variable width.  */
        if (ext & 0x800) {
            /* Variable offset.  */
            tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
            tcg_gen_rotl_i32(tmp, src, tmp);
        } else {
            tcg_gen_rotli_i32(tmp, src, ofs);
        }

        /* Width w shifts right by (32 - w) mod 32 == (-w) & 31. */
        shift = tcg_temp_new();
        tcg_gen_neg_i32(shift, DREG(ext, 0));
        tcg_gen_andi_i32(shift, shift, 31);
        /* N flag gets the sign-extended field either way. */
        tcg_gen_sar_i32(QREG_CC_N, tmp, shift);
        if (is_sign) {
            tcg_gen_mov_i32(dst, QREG_CC_N);
        } else {
            tcg_gen_shr_i32(dst, tmp, shift);
        }
        tcg_temp_free(shift);
    } else {
        /* Immediate width.  */
        if (ext & 0x800) {
            /* Variable offset */
            tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
            tcg_gen_rotl_i32(tmp, src, tmp);
            src = tmp;
            pos = 32 - len;
        } else {
            /* Immediate offset.  If the field doesn't wrap around the
               end of the word, rely on (s)extract completely.  */
            if (pos < 0) {
                /* Field wraps: rotate it into place first. */
                tcg_gen_rotli_i32(tmp, src, ofs);
                src = tmp;
                pos = 32 - len;
            }
        }

        tcg_gen_sextract_i32(QREG_CC_N, src, pos, len);
        if (is_sign) {
            tcg_gen_mov_i32(dst, QREG_CC_N);
        } else {
            tcg_gen_extract_i32(dst, src, pos, len);
        }
    }

    tcg_temp_free(tmp);
    set_cc_op(s, CC_OP_LOGIC);
}

/* BFEXTU/BFEXTS with a memory operand: defer to helpers, which return
   the extracted field (and, for the unsigned case, the sign-extended
   value for N packed in a 64-bit pair). */
DISAS_INSN(bfext_mem)
{
    int ext = read_im16(env, s);
    int is_sign = insn & 0x200;
    TCGv dest = DREG(ext, 12);
    TCGv addr, len, ofs;

    addr = gen_lea(env, s, insn, OS_UNSIZED);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    if (ext & 0x20) {
        len = DREG(ext, 0);
    } else {
        len = tcg_const_i32(extract32(ext, 0, 5));
    }
    if (ext & 0x800) {
        ofs = DREG(ext, 6);
    } else {
        ofs = tcg_const_i32(extract32(ext, 6, 5));
    }

    if (is_sign) {
        gen_helper_bfexts_mem(dest, cpu_env, addr, ofs, len);
        tcg_gen_mov_i32(QREG_CC_N, dest);
    } else {
        TCGv_i64 tmp = tcg_temp_new_i64();
        gen_helper_bfextu_mem(tmp, cpu_env, addr, ofs, len);
        /* Low half -> destination, high half -> N flag. */
        tcg_gen_extr_i64_i32(dest, QREG_CC_N, tmp);
        tcg_temp_free_i64(tmp);
    }
    set_cc_op(s, CC_OP_LOGIC);

    /* Only free the temporaries we allocated; register TCGvs are global. */
    if (!(ext & 0x20)) {
        tcg_temp_free(len);
    }
    if (!(ext & 0x800)) {
        tcg_temp_free(ofs);
    }
}

/* BFCHG/BFCLR/BFSET/BFTST/BFFFO on a data register.  Computes the field
   value into CC_N for the flags and a mask used to update the register. */
DISAS_INSN(bfop_reg)
{
    int ext = read_im16(env, s);
    TCGv src = DREG(insn, 0);
    int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;   /* 0 encodes 32 */
    int ofs = extract32(ext, 6, 5);  /* big bit-endian */
    TCGv mask, tofs, tlen;

    tofs = NULL;
    tlen = NULL;
    if ((insn & 0x0f00) == 0x0d00) { /* bfffo */
        /* bfffo also needs the runtime offset and length. */
        tofs = tcg_temp_new();
        tlen = tcg_temp_new();
    }

    if ((ext & 0x820) == 0) {
        /* Immediate width and offset.
*/
        uint32_t maski = 0x7fffffffu >> (len - 1);
        if (ofs + len <= 32) {
            tcg_gen_shli_i32(QREG_CC_N, src, ofs);
        } else {
            /* Field wraps around the end of the word. */
            tcg_gen_rotli_i32(QREG_CC_N, src, ofs);
        }
        tcg_gen_andi_i32(QREG_CC_N, QREG_CC_N, ~maski);
        /* Mask has 0s over the field, 1s elsewhere. */
        mask = tcg_const_i32(ror32(maski, ofs));
        if (tofs) {
            tcg_gen_movi_i32(tofs, ofs);
            tcg_gen_movi_i32(tlen, len);
        }
    } else {
        TCGv tmp = tcg_temp_new();
        if (ext & 0x20) {
            /* Variable width */
            tcg_gen_subi_i32(tmp, DREG(ext, 0), 1);
            tcg_gen_andi_i32(tmp, tmp, 31);
            mask = tcg_const_i32(0x7fffffffu);
            tcg_gen_shr_i32(mask, mask, tmp);
            if (tlen) {
                tcg_gen_addi_i32(tlen, tmp, 1);
            }
        } else {
            /* Immediate width */
            mask = tcg_const_i32(0x7fffffffu >> (len - 1));
            if (tlen) {
                tcg_gen_movi_i32(tlen, len);
            }
        }
        if (ext & 0x800) {
            /* Variable offset */
            tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
            tcg_gen_rotl_i32(QREG_CC_N, src, tmp);
            tcg_gen_andc_i32(QREG_CC_N, QREG_CC_N, mask);
            tcg_gen_rotr_i32(mask, mask, tmp);
            if (tofs) {
                tcg_gen_mov_i32(tofs, tmp);
            }
        } else {
            /* Immediate offset (and variable width) */
            tcg_gen_rotli_i32(QREG_CC_N, src, ofs);
            tcg_gen_andc_i32(QREG_CC_N, QREG_CC_N, mask);
            tcg_gen_rotri_i32(mask, mask, ofs);
            if (tofs) {
                tcg_gen_movi_i32(tofs, ofs);
            }
        }
        tcg_temp_free(tmp);
    }
    set_cc_op(s, CC_OP_LOGIC);

    /* Now apply the operation to the register using the field mask
       (mask is 0 over the field, 1 elsewhere). */
    switch (insn & 0x0f00) {
    case 0x0a00: /* bfchg */
        tcg_gen_eqv_i32(src, src, mask);
        break;
    case 0x0c00: /* bfclr */
        tcg_gen_and_i32(src, src, mask);
        break;
    case 0x0d00: /* bfffo */
        gen_helper_bfffo_reg(DREG(ext, 12), QREG_CC_N, tofs, tlen);
        tcg_temp_free(tlen);
        tcg_temp_free(tofs);
        break;
    case 0x0e00: /* bfset */
        tcg_gen_orc_i32(src, src, mask);
        break;
    case 0x0800: /* bftst */
        /* flags already set; no other work to do.  */
        break;
    default:
        g_assert_not_reached();
    }
    tcg_temp_free(mask);
}

/* BFCHG/BFCLR/BFSET/BFTST/BFFFO with a memory operand: all the work is
   done by per-op helpers, which return the field value for the flags. */
DISAS_INSN(bfop_mem)
{
    int ext = read_im16(env, s);
    TCGv addr, len, ofs;
    TCGv_i64 t64;

    addr = gen_lea(env, s, insn, OS_UNSIZED);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    if (ext & 0x20) {
        len = DREG(ext, 0);
    } else {
        len = tcg_const_i32(extract32(ext, 0, 5));
    }
    if (ext & 0x800) {
        ofs = DREG(ext, 6);
    } else {
        ofs = tcg_const_i32(extract32(ext, 6, 5));
    }

    switch (insn & 0x0f00) {
    case 0x0a00: /* bfchg */
        gen_helper_bfchg_mem(QREG_CC_N, cpu_env, addr, ofs, len);
        break;
    case 0x0c00: /* bfclr */
        gen_helper_bfclr_mem(QREG_CC_N, cpu_env, addr, ofs, len);
        break;
    case 0x0d00: /* bfffo */
        /* Helper returns {found offset, field value} as a 64-bit pair. */
        t64 = tcg_temp_new_i64();
        gen_helper_bfffo_mem(t64, cpu_env, addr, ofs, len);
        tcg_gen_extr_i64_i32(DREG(ext, 12), QREG_CC_N, t64);
        tcg_temp_free_i64(t64);
        break;
    case 0x0e00: /* bfset */
        gen_helper_bfset_mem(QREG_CC_N, cpu_env, addr, ofs, len);
        break;
    case 0x0800: /* bftst */
        gen_helper_bfexts_mem(QREG_CC_N, cpu_env, addr, ofs, len);
        break;
    default:
        g_assert_not_reached();
    }
    set_cc_op(s, CC_OP_LOGIC);

    /* Only free the temporaries we allocated; register TCGvs are global. */
    if (!(ext & 0x20)) {
        tcg_temp_free(len);
    }
    if (!(ext & 0x800)) {
        tcg_temp_free(ofs);
    }
}

/* BFINS Dn,Dm{ofs:len}: insert a bitfield into a data register. */
DISAS_INSN(bfins_reg)
{
    int ext = read_im16(env, s);
    TCGv dst = DREG(insn, 0);
    TCGv src = DREG(ext, 12);
    int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;   /* 0 encodes 32 */
    int ofs = extract32(ext, 6, 5);  /* big bit-endian */
    int pos = 32 - ofs - len;        /* little bit-endian */
    TCGv tmp;

    tmp = tcg_temp_new();

    /* Set N to the left-justified inserted value for the flags. */
    if (ext & 0x20) {
        /* Variable width */
        tcg_gen_neg_i32(tmp, DREG(ext, 0));
        tcg_gen_andi_i32(tmp, tmp, 31);
        tcg_gen_shl_i32(QREG_CC_N, src, tmp);
    }
else {
        /* Immediate width */
        tcg_gen_shli_i32(QREG_CC_N, src, 32 - len);
    }
    set_cc_op(s, CC_OP_LOGIC);

    /* Immediate width and offset */
    if ((ext & 0x820) == 0) {
        /* Check for suitability for deposit.  */
        if (pos >= 0) {
            /* Field doesn't wrap: a plain deposit does it. */
            tcg_gen_deposit_i32(dst, dst, src, pos, len);
        } else {
            /* Wrapping field: mask, rotate into position, merge. */
            uint32_t maski = -2U << (len - 1);
            uint32_t roti = (ofs + len) & 31;
            tcg_gen_andi_i32(tmp, src, ~maski);
            tcg_gen_rotri_i32(tmp, tmp, roti);
            tcg_gen_andi_i32(dst, dst, ror32(maski, roti));
            tcg_gen_or_i32(dst, dst, tmp);
        }
    } else {
        TCGv mask = tcg_temp_new();
        TCGv rot = tcg_temp_new();

        if (ext & 0x20) {
            /* Variable width */
            tcg_gen_subi_i32(rot, DREG(ext, 0), 1);
            tcg_gen_andi_i32(rot, rot, 31);
            /* mask = -2 << (width - 1): 0s over the field, 1s above. */
            tcg_gen_movi_i32(mask, -2);
            tcg_gen_shl_i32(mask, mask, rot);
            tcg_gen_mov_i32(rot, DREG(ext, 0));
            tcg_gen_andc_i32(tmp, src, mask);
        } else {
            /* Immediate width (variable offset) */
            uint32_t maski = -2U << (len - 1);
            tcg_gen_andi_i32(tmp, src, ~maski);
            tcg_gen_movi_i32(mask, maski);
            tcg_gen_movi_i32(rot, len & 31);
        }
        if (ext & 0x800) {
            /* Variable offset */
            tcg_gen_add_i32(rot, rot, DREG(ext, 6));
        } else {
            /* Immediate offset (variable width) */
            tcg_gen_addi_i32(rot, rot, ofs);
        }
        tcg_gen_andi_i32(rot, rot, 31);
        /* Rotate mask and value to the field position, then merge. */
        tcg_gen_rotr_i32(mask, mask, rot);
        tcg_gen_rotr_i32(tmp, tmp, rot);
        tcg_gen_and_i32(dst, dst, mask);
        tcg_gen_or_i32(dst, dst, tmp);

        tcg_temp_free(rot);
        tcg_temp_free(mask);
    }
    tcg_temp_free(tmp);
}

/* BFINS with a memory operand: all the work is done by the helper. */
DISAS_INSN(bfins_mem)
{
    int ext = read_im16(env, s);
    TCGv src = DREG(ext, 12);
    TCGv addr, len, ofs;

    addr = gen_lea(env, s, insn, OS_UNSIZED);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    if (ext & 0x20) {
        len = DREG(ext, 0);
    } else {
        len = tcg_const_i32(extract32(ext, 0, 5));
    }
    if (ext & 0x800) {
        ofs = DREG(ext, 6);
    } else {
        ofs = tcg_const_i32(extract32(ext, 6, 5));
    }

    gen_helper_bfins_mem(QREG_CC_N, cpu_env, addr, src, ofs, len);
    set_cc_op(s, CC_OP_LOGIC);

    /* Only free the temporaries we allocated; register TCGvs are global. */
    if (!(ext & 0x20)) {
        tcg_temp_free(len);
    }
    if (!(ext & 0x800)) {
        tcg_temp_free(ofs);
    }
}

/* FF1 Dn (ColdFire): find first one; flags are set on the source value. */
DISAS_INSN(ff1)
{
    TCGv reg;
    reg = DREG(insn, 0);
    gen_logic_cc(s, reg, OS_LONG);
    gen_helper_ff1(reg, reg);
}

/* CHK Dn,<ea>: trap if the register is out of [0, bound]. */
DISAS_INSN(chk)
{
    TCGv src, reg;
    int opsize;

    switch ((insn >> 7) & 3) {
    case 3:
        opsize = OS_WORD;
        break;
    case 2:
        /* Long form only exists on CPUs with CHK2. */
        if (m68k_feature(env, M68K_FEATURE_CHK2)) {
            opsize = OS_LONG;
            break;
        }
        /* fallthru */
    default:
        gen_exception(s, s->insn_pc, EXCP_ILLEGAL);
        return;
    }
    SRC_EA(env, src, opsize, 1, NULL);
    reg = gen_extend(s, DREG(insn, 9), opsize, 1);

    /* Helper may raise an exception, so flags must be up to date. */
    gen_flush_flags(s);
    gen_helper_chk(cpu_env, reg, src);
}

/* CHK2/CMP2 <ea>,Rn: bounds pair is loaded from memory at <ea>. */
DISAS_INSN(chk2)
{
    uint16_t ext;
    TCGv addr1, addr2, bound1, bound2, reg;
    int opsize;

    switch ((insn >> 9) & 3) {
    case 0:
        opsize = OS_BYTE;
        break;
    case 1:
        opsize = OS_WORD;
        break;
    case 2:
        opsize = OS_LONG;
        break;
    default:
        gen_exception(s, s->insn_pc, EXCP_ILLEGAL);
        return;
    }

    ext = read_im16(env, s);
    if ((ext & 0x0800) == 0) {
        gen_exception(s, s->insn_pc, EXCP_ILLEGAL);
        return;
    }

    /* Lower bound at <ea>, upper bound immediately after it. */
    addr1 = gen_lea(env, s, insn, OS_UNSIZED);
    addr2 = tcg_temp_new();
    tcg_gen_addi_i32(addr2, addr1, opsize_bytes(opsize));

    bound1 = gen_load(s, opsize, addr1, 1, IS_USER(s));
    tcg_temp_free(addr1);
    bound2 = gen_load(s, opsize, addr2, 1, IS_USER(s));
    tcg_temp_free(addr2);

    reg = tcg_temp_new();
    if (ext & 0x8000) {
        /* Address register: compared at full width. */
        tcg_gen_mov_i32(reg, AREG(ext,
12));
    } else {
        /* Data register: sign-extended to the operation size. */
        gen_ext(reg, DREG(ext, 12), opsize, 1);
    }

    /* Helper may raise an exception, so flags must be up to date. */
    gen_flush_flags(s);
    gen_helper_chk2(cpu_env, reg, bound1, bound2);
    tcg_temp_free(reg);
    tcg_temp_free(bound1);
    tcg_temp_free(bound2);
}

/* Copy one 16-byte cache line from *src to *dst (both rounded down to a
   16-byte boundary), as two 8-byte loads followed by two 8-byte stores.
   index is the memory-access mmu index. */
static void m68k_copy_line(TCGv dst, TCGv src, int index)
{
    TCGv addr;
    TCGv_i64 t0, t1;

    addr = tcg_temp_new();

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    tcg_gen_andi_i32(addr, src, ~15);
    tcg_gen_qemu_ld64(t0, addr, index);
    tcg_gen_addi_i32(addr, addr, 8);
    tcg_gen_qemu_ld64(t1, addr, index);

    tcg_gen_andi_i32(addr, dst, ~15);
    tcg_gen_qemu_st64(t0, addr, index);
    tcg_gen_addi_i32(addr, addr, 8);
    tcg_gen_qemu_st64(t1, addr, index);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
    tcg_temp_free(addr);
}

/* MOVE16 (Ay)+,(Ax)+: copy a cache line between two postincremented
   address registers. */
DISAS_INSN(move16_reg)
{
    int index = IS_USER(s);
    TCGv tmp;
    uint16_t ext;

    ext = read_im16(env, s);
    if ((ext & (1 << 15)) == 0) {
        gen_exception(s, s->insn_pc, EXCP_ILLEGAL);
    }

    m68k_copy_line(AREG(ext, 12), AREG(insn, 0), index);

    /* Ax can be Ay, so save Ay before incrementing Ax */
    tmp = tcg_temp_new();
    tcg_gen_mov_i32(tmp, AREG(ext, 12));
    tcg_gen_addi_i32(AREG(insn, 0), AREG(insn, 0), 16);
    tcg_gen_addi_i32(AREG(ext, 12), tmp, 16);
    tcg_temp_free(tmp);
}

/* MOVE16 between (Ay)/(Ay)+ and an absolute long address. */
DISAS_INSN(move16_mem)
{
    int index = IS_USER(s);
    TCGv reg, addr;

    reg = AREG(insn, 0);
    addr = tcg_const_i32(read_im32(env, s));

    if ((insn >> 3) & 1) {
        /* MOVE16 (xxx).L, (Ay) */
        m68k_copy_line(reg, addr, index);
    } else {
        /* MOVE16 (Ay), (xxx).L */
        m68k_copy_line(addr, reg, index);
    }

    tcg_temp_free(addr);

    if (((insn >> 3) & 2) == 0) {
        /* (Ay)+ */
        tcg_gen_addi_i32(reg, reg, 16);
    }
}

/* ColdFire STRLDSR: push SR, then load SR from an immediate.  Only the
   exact two-word encoding 0x40E7 0x46FC is accepted. */
DISAS_INSN(strldsr)
{
    uint16_t ext;
    uint32_t addr;

    addr = s->pc - 2;
    ext = read_im16(env, s);
    if (ext != 0x46FC) {
        gen_exception(s, addr, EXCP_UNSUPPORTED);
        return;
    }
    ext = read_im16(env, s);
    if (IS_USER(s) || (ext & SR_S) == 0) {
        gen_exception(s, addr, EXCP_PRIVILEGE);
        return;
    }
    gen_push(s, gen_get_sr(s));
    gen_set_sr_im(s, ext, 0);
}

/* MOVE from SR.  Privileged except on the original 68000. */
DISAS_INSN(move_from_sr)
{
    TCGv sr;

    if (IS_USER(s) && !m68k_feature(env, M68K_FEATURE_M68000)) {
        gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
        return;
    }
    sr = gen_get_sr(s);
    DEST_EA(env, insn, OS_WORD, sr, NULL);
}

#if defined(CONFIG_SOFTMMU)
/* MOVES: move to/from address space selected by SFC/DFC (privileged). */
DISAS_INSN(moves)
{
    int opsize;
    uint16_t ext;
    TCGv reg;
    TCGv addr;
    int extend;

    if (IS_USER(s)) {
        gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
        return;
    }

    ext = read_im16(env, s);

    opsize = insn_opsize(insn);

    if (ext & 0x8000) {
        /* address register */
        reg = AREG(ext, 12);
        extend = 1;
    } else {
        /* data register */
        reg = DREG(ext, 12);
        extend = 0;
    }

    addr = gen_lea(env, s, insn, opsize);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    if (ext & 0x0800) {
        /* from reg to ea */
        gen_store(s, opsize, addr, reg, DFC_INDEX(s));
    } else {
        /* from ea to reg */
        TCGv tmp = gen_load(s, opsize, addr, 0, SFC_INDEX(s));
        if (extend) {
            gen_ext(reg, tmp, opsize, 1);
        } else {
            gen_partset_reg(opsize, reg, tmp);
        }
        tcg_temp_free(tmp);
    }
    /* Update An for the auto-increment/decrement addressing modes. */
    switch (extract32(insn, 3, 3)) {
    case 3: /* Indirect postincrement.  */
        tcg_gen_addi_i32(AREG(insn, 0), addr,
                         REG(insn, 0) == 7 && opsize == OS_BYTE
                         ? 2
                         : opsize_bytes(opsize));
        break;
    case 4: /* Indirect predecrement.
*/
        tcg_gen_mov_i32(AREG(insn, 0), addr);
        break;
    }
}

/* MOVE to SR (privileged); SR change may affect translation, so
   force a TB lookup afterwards. */
DISAS_INSN(move_to_sr)
{
    if (IS_USER(s)) {
        gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
        return;
    }
    gen_move_to_sr(env, s, insn, false);
    gen_lookup_tb(s);
}

/* MOVE USP,An (privileged). */
DISAS_INSN(move_from_usp)
{
    if (IS_USER(s)) {
        gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
        return;
    }
    tcg_gen_ld_i32(AREG(insn, 0), cpu_env,
                   offsetof(CPUM68KState, sp[M68K_USP]));
}

/* MOVE An,USP (privileged). */
DISAS_INSN(move_to_usp)
{
    if (IS_USER(s)) {
        gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
        return;
    }
    tcg_gen_st_i32(AREG(insn, 0), cpu_env,
                   offsetof(CPUM68KState, sp[M68K_USP]));
}

/* HALT (privileged): raise the halt exception at the next insn. */
DISAS_INSN(halt)
{
    if (IS_USER(s)) {
        gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
        return;
    }

    gen_exception(s, s->pc, EXCP_HALT_INSN);
}

/* STOP #imm (privileged): load SR from the immediate and halt the CPU. */
DISAS_INSN(stop)
{
    uint16_t ext;

    if (IS_USER(s)) {
        gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
        return;
    }

    ext = read_im16(env, s);

    gen_set_sr_im(s, ext, 0);
    tcg_gen_movi_i32(cpu_halted, 1);
    gen_exception(s, s->pc, EXCP_HLT);
}

/* RTE (privileged): handled entirely by the exception path. */
DISAS_INSN(rte)
{
    if (IS_USER(s)) {
        gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
        return;
    }
    gen_exception(s, s->insn_pc, EXCP_RTE);
}

/* ColdFire MOVEC Rn,Cn (privileged, write-only on ColdFire). */
DISAS_INSN(cf_movec)
{
    uint16_t ext;
    TCGv reg;

    if (IS_USER(s)) {
        gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
        return;
    }

    ext = read_im16(env, s);

    if (ext & 0x8000) {
        reg = AREG(ext, 12);
    } else {
        reg = DREG(ext, 12);
    }
    gen_helper_cf_movec_to(cpu_env, tcg_const_i32(ext & 0xfff), reg);
    /* Control register change may affect translation. */
    gen_lookup_tb(s);
}

/* 680x0 MOVEC: bit 0 of the opcode selects direction. */
DISAS_INSN(m68k_movec)
{
    uint16_t ext;
    TCGv reg;

    if (IS_USER(s)) {
        gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
        return;
    }

    ext = read_im16(env, s);

    if (ext & 0x8000) {
        reg = AREG(ext, 12);
    } else {
        reg = DREG(ext, 12);
    }
    if (insn & 1) {
        gen_helper_m68k_movec_to(cpu_env, tcg_const_i32(ext & 0xfff), reg);
    } else {
        gen_helper_m68k_movec_from(reg, cpu_env, tcg_const_i32(ext & 0xfff));
    }
    /* Control register change may affect translation. */
    gen_lookup_tb(s);
}

/* INTOUCH (privileged). */
DISAS_INSN(intouch)
{
    if (IS_USER(s)) {
        gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
        return;
    }
    /* ICache fetch.  Implement as no-op.  */
}

/* CPUSHL (privileged). */
DISAS_INSN(cpushl)
{
    if (IS_USER(s)) {
        gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
        return;
    }
    /* Cache push/invalidate.  Implement as no-op.  */
}

/* CPUSH (privileged). */
DISAS_INSN(cpush)
{
    if (IS_USER(s)) {
        gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
        return;
    }
    /* Cache push/invalidate.  Implement as no-op.  */
}

/* CINV (privileged). */
DISAS_INSN(cinv)
{
    if (IS_USER(s)) {
        gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
        return;
    }
    /* Invalidate cache line.  Implement as no-op.
*/
}

#if defined(CONFIG_SOFTMMU)
/* PFLUSH (privileged): flush ATC entries via helper; the opmode field
   selects the flush variant. */
DISAS_INSN(pflush)
{
    TCGv opmode;

    if (IS_USER(s)) {
        gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
        return;
    }

    opmode = tcg_const_i32((insn >> 3) & 3);
    gen_helper_pflush(cpu_env, AREG(insn, 0), opmode);
    tcg_temp_free(opmode);
}

/* PTEST (privileged): probe an address translation via helper. */
DISAS_INSN(ptest)
{
    TCGv is_read;

    if (IS_USER(s)) {
        gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
        return;
    }
    is_read = tcg_const_i32((insn >> 5) & 1);
    gen_helper_ptest(cpu_env, AREG(insn, 0), is_read);
    tcg_temp_free(is_read);
}
#endif

/* WDDATA: always raises a privilege violation here. */
DISAS_INSN(wddata)
{
    gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
}

/* WDEBUG (privileged): not implemented; aborts emulation. */
DISAS_INSN(wdebug)
{
    M68kCPU *cpu = m68k_env_get_cpu(env);

    if (IS_USER(s)) {
        gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
        return;
    }
    /* TODO: Implement wdebug.  */
    cpu_abort(CPU(cpu), "WDEBUG not implemented");
}
#endif

/* TRAP #n: vector number is in the low 4 bits of the opcode. */
DISAS_INSN(trap)
{
    gen_exception(s, s->insn_pc, EXCP_TRAP0 + (insn & 0xf));
}

/* Read an FPU control register (FPIAR/FPSR/FPCR) into res.
   FPIAR is not modelled and always reads as zero. */
static void gen_load_fcr(DisasContext *s, TCGv res, int reg)
{
    switch (reg) {
    case M68K_FPIAR:
        tcg_gen_movi_i32(res, 0);
        break;
    case M68K_FPSR:
        tcg_gen_ld_i32(res, cpu_env, offsetof(CPUM68KState, fpsr));
        break;
    case M68K_FPCR:
        tcg_gen_ld_i32(res, cpu_env, offsetof(CPUM68KState, fpcr));
        break;
    }
}

/* Write an FPU control register from val.  FPIAR writes are ignored;
   FPCR goes through a helper (it affects rounding/precision state). */
static void gen_store_fcr(DisasContext *s, TCGv val, int reg)
{
    switch (reg) {
    case M68K_FPIAR:
        break;
    case M68K_FPSR:
        tcg_gen_st_i32(val, cpu_env, offsetof(CPUM68KState, fpsr));
        break;
    case M68K_FPCR:
        gen_helper_set_fpcr(cpu_env, val);
        break;
    }
}

/* Store an FPU control register to memory at addr. */
static void gen_qemu_store_fcr(DisasContext *s, TCGv addr, int reg)
{
    int index = IS_USER(s);
    TCGv tmp;

    tmp = tcg_temp_new();
    gen_load_fcr(s, tmp, reg);
    tcg_gen_qemu_st32(tmp, addr, index);
    tcg_temp_free(tmp);
}

/* Load an FPU control register from memory at addr. */
static void gen_qemu_load_fcr(DisasContext *s, TCGv addr, int reg)
{
    int index = IS_USER(s);
    TCGv tmp;

    tmp = tcg_temp_new();
    tcg_gen_qemu_ld32u(tmp, addr, index);
    gen_store_fcr(s, tmp, reg);
    tcg_temp_free(tmp);
}


/* FMOVE/FMOVEM to or from the FPU control registers (FPCR/FPSR/FPIAR).
   mask selects which registers take part; is_write is the direction
   (1 = registers to <ea>). */
static void gen_op_fmove_fcr(CPUM68KState *env, DisasContext *s,
                             uint32_t insn, uint32_t ext)
{
    int mask = (ext >> 10) & 7;
    int is_write = (ext >> 13) & 1;
    int mode = extract32(insn, 3, 3);
    int i;
    TCGv addr, tmp;

    switch (mode) {
    case 0: /* Dn */
        /* Only a single control register may go through a data register. */
        if (mask != M68K_FPIAR && mask != M68K_FPSR && mask != M68K_FPCR) {
            gen_exception(s, s->insn_pc, EXCP_ILLEGAL);
            return;
        }
        if (is_write) {
            gen_load_fcr(s, DREG(insn, 0), mask);
        } else {
            gen_store_fcr(s, DREG(insn, 0), mask);
        }
        return;
    case 1: /* An, only with FPIAR */
        if (mask != M68K_FPIAR) {
            gen_exception(s, s->insn_pc, EXCP_ILLEGAL);
            return;
        }
        if (is_write) {
            gen_load_fcr(s, AREG(insn, 0), mask);
        } else {
            gen_store_fcr(s, AREG(insn, 0), mask);
        }
        return;
    default:
        break;
    }

    tmp = gen_lea(env, s, insn, OS_LONG);
    if (IS_NULL_QREG(tmp)) {
        gen_addr_fault(s);
        return;
    }

    addr = tcg_temp_new();
    tcg_gen_mov_i32(addr, tmp);

    /* mask:
     *
     * 0b100 Floating-Point Control Register
     * 0b010 Floating-Point Status Register
     * 0b001 Floating-Point Instruction Address Register
     *
     */

    if (is_write && mode == 4) {
        /* Predecrement: store highest-numbered register first,
           walking the address downwards. */
        for (i = 2; i >= 0; i--, mask >>= 1) {
            if (mask & 1) {
                gen_qemu_store_fcr(s, addr, 1 << i);
                if (mask != 1) {
                    tcg_gen_subi_i32(addr, addr, opsize_bytes(OS_LONG));
                }
            }
        }
        tcg_gen_mov_i32(AREG(insn, 0), addr);
    } else {
        for (i = 0; i < 3; i++, mask >>= 1) {
            if (mask &
1) {
                if (is_write) {
                    gen_qemu_store_fcr(s, addr, 1 << i);
                } else {
                    gen_qemu_load_fcr(s, addr, 1 << i);
                }
                /* Advance except after the last register, unless mode 3
                   (postincrement) needs the final address for An. */
                if (mask != 1 || mode == 3) {
                    tcg_gen_addi_i32(addr, addr, opsize_bytes(OS_LONG));
                }
            }
        }
        if (mode == 3) {
            tcg_gen_mov_i32(AREG(insn, 0), addr);
        }
    }
    tcg_temp_free_i32(addr);
}

/* FMOVEM: move multiple FP data registers to/from memory.  Uses
   extended precision when the CPU has a real FPU, else double. */
static void gen_op_fmovem(CPUM68KState *env, DisasContext *s,
                          uint32_t insn, uint32_t ext)
{
    int opsize;
    TCGv addr, tmp;
    int mode = (ext >> 11) & 0x3;
    int is_load = ((ext & 0x2000) == 0);

    if (m68k_feature(s->env, M68K_FEATURE_FPU)) {
        opsize = OS_EXTENDED;
    } else {
        opsize = OS_DOUBLE;  /* FIXME */
    }

    addr = gen_lea(env, s, insn, opsize);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    /* tmp holds the register mask; helpers return the updated address
       in it as well. */
    tmp = tcg_temp_new();
    if (mode & 0x1) {
        /* Dynamic register list */
        tcg_gen_ext8u_i32(tmp, DREG(ext, 4));
    } else {
        /* Static register list */
        tcg_gen_movi_i32(tmp, ext & 0xff);
    }

    if (!is_load && (mode & 2) == 0) {
        /* predecrement addressing mode
         * only available to store register to memory
         */
        if (opsize == OS_EXTENDED) {
            gen_helper_fmovemx_st_predec(tmp, cpu_env, addr, tmp);
        } else {
            gen_helper_fmovemd_st_predec(tmp, cpu_env, addr, tmp);
        }
    } else {
        /* postincrement addressing mode */
        if (opsize == OS_EXTENDED) {
            if (is_load) {
                gen_helper_fmovemx_ld_postinc(tmp, cpu_env, addr, tmp);
            } else {
                gen_helper_fmovemx_st_postinc(tmp, cpu_env, addr, tmp);
            }
        } else {
            if (is_load) {
                gen_helper_fmovemd_ld_postinc(tmp, cpu_env, addr, tmp);
            } else {
                gen_helper_fmovemd_st_postinc(tmp, cpu_env, addr, tmp);
            }
        }
    }
    /* Writeback for (An)+ / -(An) addressing modes. */
    if ((insn & 070) == 030 || (insn & 070) == 040) {
        tcg_gen_mov_i32(AREG(insn, 0), tmp);
    }
    tcg_temp_free(tmp);
}

/* ??? FP exceptions are not implemented.  Most exceptions are deferred until
   immediately before the next FP instruction is executed.  */
DISAS_INSN(fpu)
{
    uint16_t ext;
    int opmode;
    int opsize;
    TCGv_ptr cpu_src, cpu_dest;

    ext = read_im16(env, s);
    opmode = ext & 0x7f;
    switch ((ext >> 13) & 7) {
    case 0:
        break;
    case 1:
        goto undef;
    case 2:
        if (insn == 0xf200 && (ext & 0xfc00) == 0x5c00) {
            /* fmovecr */
            TCGv rom_offset = tcg_const_i32(opmode);
            cpu_dest = gen_fp_ptr(REG(ext, 7));
            gen_helper_fconst(cpu_env, cpu_dest, rom_offset);
            tcg_temp_free_ptr(cpu_dest);
            tcg_temp_free(rom_offset);
            return;
        }
        break;
    case 3: /* fmove out */
        cpu_src = gen_fp_ptr(REG(ext, 7));
        opsize = ext_opsize(ext, 10);
        if (gen_ea_fp(env, s, insn, opsize, cpu_src,
                      EA_STORE, IS_USER(s)) == -1) {
            gen_addr_fault(s);
        }
        gen_helper_ftst(cpu_env, cpu_src);
        tcg_temp_free_ptr(cpu_src);
        return;
    case 4: /* fmove to control register.  */
    case 5: /* fmove from control register.  */
        gen_op_fmove_fcr(env, s, insn, ext);
        return;
    case 6: /* fmovem */
    case 7:
        if ((ext & 0x1000) == 0 && !m68k_feature(s->env, M68K_FEATURE_FPU)) {
            goto undef;
        }
        gen_op_fmovem(env, s, insn, ext);
        return;
    }
    if (ext & (1 << 14)) {
        /* Source effective address.  */
        opsize = ext_opsize(ext, 10);
        cpu_src = gen_fp_result_ptr();
        if (gen_ea_fp(env, s, insn, opsize, cpu_src,
                      EA_LOADS, IS_USER(s)) == -1) {
            gen_addr_fault(s);
            return;
        }
    } else {
        /* Source register.  */
        opsize = OS_EXTENDED;
        cpu_src = gen_fp_ptr(REG(ext, 10));
    }
    cpu_dest = gen_fp_ptr(REG(ext, 7));
    /* Dispatch on the arithmetic opmode; the 0x40/0x44 offsets select the
       single/double rounded variants of an operation. */
    switch (opmode) {
    case 0: /* fmove */
        gen_fp_move(cpu_dest, cpu_src);
        break;
    case 0x40: /* fsmove */
        gen_helper_fsround(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x44: /* fdmove */
        gen_helper_fdround(cpu_env, cpu_dest, cpu_src);
        break;
    case 1: /* fint */
        gen_helper_firound(cpu_env, cpu_dest, cpu_src);
        break;
    case 2: /* fsinh */
        gen_helper_fsinh(cpu_env, cpu_dest, cpu_src);
        break;
    case 3: /* fintrz */
        gen_helper_fitrunc(cpu_env, cpu_dest, cpu_src);
        break;
    case 4: /* fsqrt */
        gen_helper_fsqrt(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x41: /* fssqrt */
        gen_helper_fssqrt(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x45: /* fdsqrt */
        gen_helper_fdsqrt(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x06: /* flognp1 */
        gen_helper_flognp1(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x09: /* ftanh */
        gen_helper_ftanh(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x0a: /* fatan */
        gen_helper_fatan(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x0c: /* fasin */
        gen_helper_fasin(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x0d: /* fatanh */
        gen_helper_fatanh(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x0e: /* fsin */
        gen_helper_fsin(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x0f: /* ftan */
        gen_helper_ftan(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x10: /* fetox */
        gen_helper_fetox(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x11: /* ftwotox */
        gen_helper_ftwotox(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x12: /* ftentox */
        gen_helper_ftentox(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x14: /* flogn */
        gen_helper_flogn(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x15: /* flog10 */
        gen_helper_flog10(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x16: /* flog2 */
        gen_helper_flog2(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x18: /* fabs */
        gen_helper_fabs(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x58: /* fsabs */
        gen_helper_fsabs(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x5c: /* fdabs */
        gen_helper_fdabs(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x19: /* fcosh */
        gen_helper_fcosh(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x1a: /* fneg */
        gen_helper_fneg(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x5a: /* fsneg */
        gen_helper_fsneg(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x5e: /* fdneg */
        gen_helper_fdneg(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x1c: /* facos */
        gen_helper_facos(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x1d: /* fcos */
        gen_helper_fcos(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x1e: /* fgetexp */
        gen_helper_fgetexp(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x1f: /* fgetman */
        gen_helper_fgetman(cpu_env, cpu_dest, cpu_src);
        break;
    case 0x20: /* fdiv */
        gen_helper_fdiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x60: /* fsdiv */
        gen_helper_fsdiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x64: /* fddiv */
        gen_helper_fddiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x21: /* fmod */
        gen_helper_fmod(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x22: /* fadd */
        gen_helper_fadd(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x62: /* fsadd */
        gen_helper_fsadd(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x66: /* fdadd */
        gen_helper_fdadd(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x23: /* fmul */
        gen_helper_fmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x63: /* fsmul */
        gen_helper_fsmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x67: /* fdmul */
        gen_helper_fdmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x24: /* fsgldiv */
        gen_helper_fsgldiv(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x25: /* frem */
        gen_helper_frem(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x26: /* fscale */
        gen_helper_fscale(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x27: /* fsglmul */
        gen_helper_fsglmul(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x28: /* fsub */
        gen_helper_fsub(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x68: /* fssub */
        gen_helper_fssub(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x6c: /* fdsub */
        gen_helper_fdsub(cpu_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x30: case 0x31: case 0x32:
    case 0x33: case 0x34: case 0x35:
    case 0x36: case 0x37: {
            /* fsincos: low three bits of opmode select the cosine
               destination register. */
            TCGv_ptr cpu_dest2 = gen_fp_ptr(REG(ext, 0));
            gen_helper_fsincos(cpu_env, cpu_dest, cpu_dest2, cpu_src);
            tcg_temp_free_ptr(cpu_dest2);
        }
        break;
    case 0x38: /* fcmp */
        gen_helper_fcmp(cpu_env, cpu_src, cpu_dest);
        return;
    case 0x3a: /* ftst */
        gen_helper_ftst(cpu_env, cpu_src);
        return;
    default:
        goto undef;
    }
    tcg_temp_free_ptr(cpu_src);
    /* Arithmetic results update the FP condition codes. */
    gen_helper_ftst(cpu_env, cpu_dest);
    tcg_temp_free_ptr(cpu_dest);
    return;
undef:
    /* FIXME: Is this right for offset addressing modes?  */
    s->pc -= 2;
    disas_undef_fpu(env, s, insn);
}

/* Build a DisasCompare for FPU condition code 'cond' (FBcc/FScc/FDBcc),
   evaluated from the FPSR condition-code bits. */
static void gen_fcc_cond(DisasCompare *c, DisasContext *s, int cond)
{
    TCGv fpsr;

    c->g1 = 1;
    c->v2 = tcg_const_i32(0);
    c->g2 = 0;
    /* TODO: Raise BSUN exception.
*/ 5246 fpsr = tcg_temp_new(); 5247 gen_load_fcr(s, fpsr, M68K_FPSR); 5248 switch (cond) { 5249 case 0: /* False */ 5250 case 16: /* Signaling False */ 5251 c->v1 = c->v2; 5252 c->tcond = TCG_COND_NEVER; 5253 break; 5254 case 1: /* EQual Z */ 5255 case 17: /* Signaling EQual Z */ 5256 c->v1 = tcg_temp_new(); 5257 c->g1 = 0; 5258 tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z); 5259 c->tcond = TCG_COND_NE; 5260 break; 5261 case 2: /* Ordered Greater Than !(A || Z || N) */ 5262 case 18: /* Greater Than !(A || Z || N) */ 5263 c->v1 = tcg_temp_new(); 5264 c->g1 = 0; 5265 tcg_gen_andi_i32(c->v1, fpsr, 5266 FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N); 5267 c->tcond = TCG_COND_EQ; 5268 break; 5269 case 3: /* Ordered Greater than or Equal Z || !(A || N) */ 5270 case 19: /* Greater than or Equal Z || !(A || N) */ 5271 c->v1 = tcg_temp_new(); 5272 c->g1 = 0; 5273 tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A); 5274 tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_A)); 5275 tcg_gen_andi_i32(fpsr, fpsr, FPSR_CC_Z | FPSR_CC_N); 5276 tcg_gen_or_i32(c->v1, c->v1, fpsr); 5277 tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N); 5278 c->tcond = TCG_COND_NE; 5279 break; 5280 case 4: /* Ordered Less Than !(!N || A || Z); */ 5281 case 20: /* Less Than !(!N || A || Z); */ 5282 c->v1 = tcg_temp_new(); 5283 c->g1 = 0; 5284 tcg_gen_xori_i32(c->v1, fpsr, FPSR_CC_N); 5285 tcg_gen_andi_i32(c->v1, c->v1, FPSR_CC_N | FPSR_CC_A | FPSR_CC_Z); 5286 c->tcond = TCG_COND_EQ; 5287 break; 5288 case 5: /* Ordered Less than or Equal Z || (N && !A) */ 5289 case 21: /* Less than or Equal Z || (N && !A) */ 5290 c->v1 = tcg_temp_new(); 5291 c->g1 = 0; 5292 tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A); 5293 tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_A)); 5294 tcg_gen_andc_i32(c->v1, fpsr, c->v1); 5295 tcg_gen_andi_i32(c->v1, c->v1, FPSR_CC_Z | FPSR_CC_N); 5296 c->tcond = TCG_COND_NE; 5297 break; 5298 case 6: /* Ordered Greater or Less than !(A || Z) */ 5299 case 22: /* Greater or Less than !(A || Z) */ 5300 
c->v1 = tcg_temp_new(); 5301 c->g1 = 0; 5302 tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z); 5303 c->tcond = TCG_COND_EQ; 5304 break; 5305 case 7: /* Ordered !A */ 5306 case 23: /* Greater, Less or Equal !A */ 5307 c->v1 = tcg_temp_new(); 5308 c->g1 = 0; 5309 tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A); 5310 c->tcond = TCG_COND_EQ; 5311 break; 5312 case 8: /* Unordered A */ 5313 case 24: /* Not Greater, Less or Equal A */ 5314 c->v1 = tcg_temp_new(); 5315 c->g1 = 0; 5316 tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A); 5317 c->tcond = TCG_COND_NE; 5318 break; 5319 case 9: /* Unordered or Equal A || Z */ 5320 case 25: /* Not Greater or Less then A || Z */ 5321 c->v1 = tcg_temp_new(); 5322 c->g1 = 0; 5323 tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z); 5324 c->tcond = TCG_COND_NE; 5325 break; 5326 case 10: /* Unordered or Greater Than A || !(N || Z)) */ 5327 case 26: /* Not Less or Equal A || !(N || Z)) */ 5328 c->v1 = tcg_temp_new(); 5329 c->g1 = 0; 5330 tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z); 5331 tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_Z)); 5332 tcg_gen_andi_i32(fpsr, fpsr, FPSR_CC_A | FPSR_CC_N); 5333 tcg_gen_or_i32(c->v1, c->v1, fpsr); 5334 tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N); 5335 c->tcond = TCG_COND_NE; 5336 break; 5337 case 11: /* Unordered or Greater or Equal A || Z || !N */ 5338 case 27: /* Not Less Than A || Z || !N */ 5339 c->v1 = tcg_temp_new(); 5340 c->g1 = 0; 5341 tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N); 5342 tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N); 5343 c->tcond = TCG_COND_NE; 5344 break; 5345 case 12: /* Unordered or Less Than A || (N && !Z) */ 5346 case 28: /* Not Greater than or Equal A || (N && !Z) */ 5347 c->v1 = tcg_temp_new(); 5348 c->g1 = 0; 5349 tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z); 5350 tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_Z)); 5351 tcg_gen_andc_i32(c->v1, fpsr, c->v1); 5352 tcg_gen_andi_i32(c->v1, c->v1, FPSR_CC_A | FPSR_CC_N); 5353 c->tcond = 
TCG_COND_NE; 5354 break; 5355 case 13: /* Unordered or Less or Equal A || Z || N */ 5356 case 29: /* Not Greater Than A || Z || N */ 5357 c->v1 = tcg_temp_new(); 5358 c->g1 = 0; 5359 tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N); 5360 c->tcond = TCG_COND_NE; 5361 break; 5362 case 14: /* Not Equal !Z */ 5363 case 30: /* Signaling Not Equal !Z */ 5364 c->v1 = tcg_temp_new(); 5365 c->g1 = 0; 5366 tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z); 5367 c->tcond = TCG_COND_EQ; 5368 break; 5369 case 15: /* True */ 5370 case 31: /* Signaling True */ 5371 c->v1 = c->v2; 5372 c->tcond = TCG_COND_ALWAYS; 5373 break; 5374 } 5375 tcg_temp_free(fpsr); 5376 } 5377 5378 static void gen_fjmpcc(DisasContext *s, int cond, TCGLabel *l1) 5379 { 5380 DisasCompare c; 5381 5382 gen_fcc_cond(&c, s, cond); 5383 update_cc_op(s); 5384 tcg_gen_brcond_i32(c.tcond, c.v1, c.v2, l1); 5385 free_cond(&c); 5386 } 5387 5388 DISAS_INSN(fbcc) 5389 { 5390 uint32_t offset; 5391 uint32_t base; 5392 TCGLabel *l1; 5393 5394 base = s->pc; 5395 offset = (int16_t)read_im16(env, s); 5396 if (insn & (1 << 6)) { 5397 offset = (offset << 16) | read_im16(env, s); 5398 } 5399 5400 l1 = gen_new_label(); 5401 update_cc_op(s); 5402 gen_fjmpcc(s, insn & 0x3f, l1); 5403 gen_jmp_tb(s, 0, s->pc); 5404 gen_set_label(l1); 5405 gen_jmp_tb(s, 1, base + offset); 5406 } 5407 5408 DISAS_INSN(fscc) 5409 { 5410 DisasCompare c; 5411 int cond; 5412 TCGv tmp; 5413 uint16_t ext; 5414 5415 ext = read_im16(env, s); 5416 cond = ext & 0x3f; 5417 gen_fcc_cond(&c, s, cond); 5418 5419 tmp = tcg_temp_new(); 5420 tcg_gen_setcond_i32(c.tcond, tmp, c.v1, c.v2); 5421 free_cond(&c); 5422 5423 tcg_gen_neg_i32(tmp, tmp); 5424 DEST_EA(env, insn, OS_BYTE, tmp, NULL); 5425 tcg_temp_free(tmp); 5426 } 5427 5428 #if defined(CONFIG_SOFTMMU) 5429 DISAS_INSN(frestore) 5430 { 5431 TCGv addr; 5432 5433 if (IS_USER(s)) { 5434 gen_exception(s, s->insn_pc, EXCP_PRIVILEGE); 5435 return; 5436 } 5437 if (m68k_feature(s->env, M68K_FEATURE_M68040)) { 5438 
        SRC_EA(env, addr, OS_LONG, 0, NULL);
        /* FIXME: check the state frame */
    } else {
        disas_undef(env, s, insn);
    }
}

/* FSAVE: privileged; store an FPU state frame.  The 68040 emulation
 * always writes an IDLE frame (format word 0x41000000).  */
DISAS_INSN(fsave)
{
    if (IS_USER(s)) {
        gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
        return;
    }

    if (m68k_feature(s->env, M68K_FEATURE_M68040)) {
        /* always write IDLE */
        TCGv idle = tcg_const_i32(0x41000000);
        DEST_EA(env, insn, OS_LONG, idle, NULL);
        tcg_temp_free(idle);
    } else {
        disas_undef(env, s, insn);
    }
}
#endif

/* Extract one 16-bit MAC operand from VAL, selected by UPPER, into a new
 * temporary.  The extension depends on the MACSR operating mode:
 * fractional (FI) keeps the value in the high half, signed (SU) sign
 * extends, otherwise zero extend.  Caller frees the returned temp.  */
static inline TCGv gen_mac_extract_word(DisasContext *s, TCGv val, int upper)
{
    TCGv tmp = tcg_temp_new();
    if (s->env->macsr & MACSR_FI) {
        if (upper)
            tcg_gen_andi_i32(tmp, val, 0xffff0000);
        else
            tcg_gen_shli_i32(tmp, val, 16);
    } else if (s->env->macsr & MACSR_SU) {
        if (upper)
            tcg_gen_sari_i32(tmp, val, 16);
        else
            tcg_gen_ext16s_i32(tmp, val);
    } else {
        if (upper)
            tcg_gen_shri_i32(tmp, val, 16);
        else
            tcg_gen_ext16u_i32(tmp, val);
    }
    return tmp;
}

/* Clear the per-operation MACSR result flags (V, Z, N, EV).  */
static void gen_mac_clear_flags(void)
{
    tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR,
                     ~(MACSR_V | MACSR_Z | MACSR_N | MACSR_EV));
}

/* ColdFire EMAC multiply-accumulate, optionally with a parallel load
 * (insn bits 4-5 nonzero) and optionally dual-accumulate (EMAC_B).
 * Layout notes grounded in the decode below: acc index comes from insn
 * bit 7 and ext bit 4; register fields switch between insn and ext
 * encodings depending on the load form.  */
DISAS_INSN(mac)
{
    TCGv rx;
    TCGv ry;
    uint16_t ext;
    int acc;
    TCGv tmp;
    TCGv addr;
    TCGv loadval;
    int dual;
    TCGv saved_flags;

    if (!s->done_mac) {
        /* Lazily allocate the 64-bit product temp once per TB.  */
        s->mactmp = tcg_temp_new_i64();
        s->done_mac = 1;
    }

    ext = read_im16(env, s);

    acc = ((insn >> 7) & 1) | ((ext >> 3) & 2);
    dual = ((insn & 0x30) != 0 && (ext & 3) != 0);
    if (dual && !m68k_feature(s->env, M68K_FEATURE_CF_EMAC_B)) {
        disas_undef(env, s, insn);
        return;
    }
    if (insn & 0x30) {
        /* MAC with load.  */
        tmp = gen_lea(env, s, insn, OS_LONG);
        addr = tcg_temp_new();
        tcg_gen_and_i32(addr, tmp, QREG_MAC_MASK);
        /* Load the value now to ensure correct exception behavior.
           Perform writeback after reading the MAC inputs.  */
        loadval = gen_load(s, OS_LONG, addr, 0, IS_USER(s));

        acc ^= 1;
        rx = (ext & 0x8000) ? AREG(ext, 12) : DREG(insn, 12);
        ry = (ext & 8) ? AREG(ext, 0) : DREG(ext, 0);
    } else {
        loadval = addr = NULL_QREG;
        rx = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
        ry = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
    }

    gen_mac_clear_flags();
#if 0
    l1 = -1;
    /* Disabled because conditional branches clobber temporary vars.  */
    if ((s->env->macsr & MACSR_OMC) != 0 && !dual) {
        /* Skip the multiply if we know we will ignore it.  */
        l1 = gen_new_label();
        tmp = tcg_temp_new();
        tcg_gen_andi_i32(tmp, QREG_MACSR, 1 << (acc + 8));
        gen_op_jmp_nz32(tmp, l1);
    }
#endif

    if ((ext & 0x0800) == 0) {
        /* Word.  */
        rx = gen_mac_extract_word(s, rx, (ext & 0x80) != 0);
        ry = gen_mac_extract_word(s, ry, (ext & 0x40) != 0);
    }
    if (s->env->macsr & MACSR_FI) {
        gen_helper_macmulf(s->mactmp, cpu_env, rx, ry);
    } else {
        if (s->env->macsr & MACSR_SU)
            gen_helper_macmuls(s->mactmp, cpu_env, rx, ry);
        else
            gen_helper_macmulu(s->mactmp, cpu_env, rx, ry);
        /* Scale factor from ext bits 9-10: 1 = shift left, 3 = right.  */
        switch ((ext >> 9) & 3) {
        case 1:
            tcg_gen_shli_i64(s->mactmp, s->mactmp, 1);
            break;
        case 3:
            tcg_gen_shri_i64(s->mactmp, s->mactmp, 1);
            break;
        }
    }

    if (dual) {
        /* Save the overflow flag from the multiply.  */
        saved_flags = tcg_temp_new();
        tcg_gen_mov_i32(saved_flags, QREG_MACSR);
    } else {
        saved_flags = NULL_QREG;
    }

#if 0
    /* Disabled because conditional branches clobber temporary vars.  */
    if ((s->env->macsr & MACSR_OMC) != 0 && dual) {
        /* Skip the accumulate if the value is already saturated.  */
        l1 = gen_new_label();
        tmp = tcg_temp_new();
        gen_op_and32(tmp, QREG_MACSR, tcg_const_i32(MACSR_PAV0 << acc));
        gen_op_jmp_nz32(tmp, l1);
    }
#endif

    if (insn & 0x100)
        tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
    else
        tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);

    if (s->env->macsr & MACSR_FI)
        gen_helper_macsatf(cpu_env, tcg_const_i32(acc));
    else if (s->env->macsr & MACSR_SU)
        gen_helper_macsats(cpu_env, tcg_const_i32(acc));
    else
        gen_helper_macsatu(cpu_env, tcg_const_i32(acc));

#if 0
    /* Disabled because conditional branches clobber temporary vars.  */
    if (l1 != -1)
        gen_set_label(l1);
#endif

    if (dual) {
        /* Dual accumulate variant.  */
        acc = (ext >> 2) & 3;
        /* Restore the overflow flag from the multiplier.  */
        tcg_gen_mov_i32(QREG_MACSR, saved_flags);
#if 0
        /* Disabled because conditional branches clobber temporary vars.  */
        if ((s->env->macsr & MACSR_OMC) != 0) {
            /* Skip the accumulate if the value is already saturated.  */
            l1 = gen_new_label();
            tmp = tcg_temp_new();
            gen_op_and32(tmp, QREG_MACSR, tcg_const_i32(MACSR_PAV0 << acc));
            gen_op_jmp_nz32(tmp, l1);
        }
#endif
        if (ext & 2)
            tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
        else
            tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);
        if (s->env->macsr & MACSR_FI)
            gen_helper_macsatf(cpu_env, tcg_const_i32(acc));
        else if (s->env->macsr & MACSR_SU)
            gen_helper_macsats(cpu_env, tcg_const_i32(acc));
        else
            gen_helper_macsatu(cpu_env, tcg_const_i32(acc));
#if 0
        if (l1 != -1)
            gen_set_label(l1);
#endif
    }
    gen_helper_mac_set_flags(cpu_env, tcg_const_i32(acc));

    if (insn & 0x30) {
        /* Deferred writeback of the parallel-load value.  */
        TCGv rw;
        rw = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
        tcg_gen_mov_i32(rw, loadval);
        /* FIXME: Should address writeback happen with the masked or
           unmasked value?  */
        switch ((insn >> 3) & 7) {
        case 3: /* Post-increment.  */
            tcg_gen_addi_i32(AREG(insn, 0), addr, 4);
            break;
        case 4: /* Pre-decrement.  */
            tcg_gen_mov_i32(AREG(insn, 0), addr);
        }
        tcg_temp_free(loadval);
    }
}

/* Move from a MAC accumulator to Rx, converting per MACSR mode; bit 6
 * additionally clears the accumulator and its overflow flag.  */
DISAS_INSN(from_mac)
{
    TCGv rx;
    TCGv_i64 acc;
    int accnum;

    rx = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
    accnum = (insn >> 9) & 3;
    acc = MACREG(accnum);
    if (s->env->macsr & MACSR_FI) {
        gen_helper_get_macf(rx, cpu_env, acc);
    } else if ((s->env->macsr & MACSR_OMC) == 0) {
        tcg_gen_extrl_i64_i32(rx, acc);
    } else if (s->env->macsr & MACSR_SU) {
        gen_helper_get_macs(rx, acc);
    } else {
        gen_helper_get_macu(rx, acc);
    }
    if (insn & 0x40) {
        tcg_gen_movi_i64(acc, 0);
        tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
    }
}

/* Copy one MAC accumulator to another, then recompute flags.  */
DISAS_INSN(move_mac)
{
    /* FIXME: This can be done without a helper.  */
    int src;
    TCGv dest;
    src = insn & 3;
    dest = tcg_const_i32((insn >> 9) & 3);
    gen_helper_mac_move(cpu_env, dest, tcg_const_i32(src));
    gen_mac_clear_flags();
    gen_helper_mac_set_flags(cpu_env, dest);
}

/* Move MACSR to a data/address register.  */
DISAS_INSN(from_macsr)
{
    TCGv reg;

    reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
    tcg_gen_mov_i32(reg, QREG_MACSR);
}

/* Move the MAC address mask register to a data/address register.  */
DISAS_INSN(from_mask)
{
    TCGv reg;
    reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
    tcg_gen_mov_i32(reg, QREG_MAC_MASK);
}

/* Move a MAC accumulator extension word pair to a register; insn bit 10
 * selects ACC2/3 vs ACC0/1, mode (FI vs integer) picks the format.  */
DISAS_INSN(from_mext)
{
    TCGv reg;
    TCGv acc;
    reg = (insn & 8) ?
AREG(insn, 0) : DREG(insn, 0); 5712 acc = tcg_const_i32((insn & 0x400) ? 2 : 0); 5713 if (s->env->macsr & MACSR_FI) 5714 gen_helper_get_mac_extf(reg, cpu_env, acc); 5715 else 5716 gen_helper_get_mac_exti(reg, cpu_env, acc); 5717 } 5718 5719 DISAS_INSN(macsr_to_ccr) 5720 { 5721 TCGv tmp = tcg_temp_new(); 5722 tcg_gen_andi_i32(tmp, QREG_MACSR, 0xf); 5723 gen_helper_set_sr(cpu_env, tmp); 5724 tcg_temp_free(tmp); 5725 set_cc_op(s, CC_OP_FLAGS); 5726 } 5727 5728 DISAS_INSN(to_mac) 5729 { 5730 TCGv_i64 acc; 5731 TCGv val; 5732 int accnum; 5733 accnum = (insn >> 9) & 3; 5734 acc = MACREG(accnum); 5735 SRC_EA(env, val, OS_LONG, 0, NULL); 5736 if (s->env->macsr & MACSR_FI) { 5737 tcg_gen_ext_i32_i64(acc, val); 5738 tcg_gen_shli_i64(acc, acc, 8); 5739 } else if (s->env->macsr & MACSR_SU) { 5740 tcg_gen_ext_i32_i64(acc, val); 5741 } else { 5742 tcg_gen_extu_i32_i64(acc, val); 5743 } 5744 tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum)); 5745 gen_mac_clear_flags(); 5746 gen_helper_mac_set_flags(cpu_env, tcg_const_i32(accnum)); 5747 } 5748 5749 DISAS_INSN(to_macsr) 5750 { 5751 TCGv val; 5752 SRC_EA(env, val, OS_LONG, 0, NULL); 5753 gen_helper_set_macsr(cpu_env, val); 5754 gen_lookup_tb(s); 5755 } 5756 5757 DISAS_INSN(to_mask) 5758 { 5759 TCGv val; 5760 SRC_EA(env, val, OS_LONG, 0, NULL); 5761 tcg_gen_ori_i32(QREG_MAC_MASK, val, 0xffff0000); 5762 } 5763 5764 DISAS_INSN(to_mext) 5765 { 5766 TCGv val; 5767 TCGv acc; 5768 SRC_EA(env, val, OS_LONG, 0, NULL); 5769 acc = tcg_const_i32((insn & 0x400) ? 2 : 0); 5770 if (s->env->macsr & MACSR_FI) 5771 gen_helper_set_mac_extf(cpu_env, val, acc); 5772 else if (s->env->macsr & MACSR_SU) 5773 gen_helper_set_mac_exts(cpu_env, val, acc); 5774 else 5775 gen_helper_set_mac_extu(cpu_env, val, acc); 5776 } 5777 5778 static disas_proc opcode_table[65536]; 5779 5780 static void 5781 register_opcode (disas_proc proc, uint16_t opcode, uint16_t mask) 5782 { 5783 int i; 5784 int from; 5785 int to; 5786 5787 /* Sanity check. 
       All set bits must be included in the mask.  */
    if (opcode & ~mask) {
        fprintf(stderr,
                "qemu internal error: bogus opcode definition %04x/%04x\n",
                opcode, mask);
        abort();
    }
    /* This could probably be cleverer.  For now just optimize the case where
       the top bits are known.  */
    /* Find the first zero bit in the mask.  */
    i = 0x8000;
    while ((i & mask) != 0)
        i >>= 1;
    /* Iterate over all combinations of this and lower bits.  */
    if (i == 0)
        i = 1;
    else
        i <<= 1;
    from = opcode & ~(i - 1);
    to = from + i;
    for (i = from; i < to; i++) {
        if ((i & mask) == opcode)
            opcode_table[i] = proc;
    }
}

/* Register m68k opcode handlers.  Order is important.
   Later insn override earlier ones.  */
void register_m68k_insns (CPUM68KState *env)
{
    /* Build the opcode table only once to avoid
       multithreading issues.  */
    if (opcode_table[0] != NULL) {
        return;
    }

    /* use BASE() for instruction available
     * for CF_ISA_A and M68000.
     */
#define BASE(name, opcode, mask) \
    register_opcode(disas_##name, 0x##opcode, 0x##mask)
#define INSN(name, opcode, mask, feature) do { \
    if (m68k_feature(env, M68K_FEATURE_##feature)) \
        BASE(name, opcode, mask); \
    } while(0)
    BASE(undef, 0000, 0000);
    INSN(arith_im, 0080, fff8, CF_ISA_A);
    INSN(arith_im, 0000, ff00, M68000);
    INSN(chk2, 00c0, f9c0, CHK2);
    INSN(bitrev, 00c0, fff8, CF_ISA_APLUSC);
    BASE(bitop_reg, 0100, f1c0);
    BASE(bitop_reg, 0140, f1c0);
    BASE(bitop_reg, 0180, f1c0);
    BASE(bitop_reg, 01c0, f1c0);
    INSN(movep, 0108, f138, MOVEP);
    INSN(arith_im, 0280, fff8, CF_ISA_A);
    INSN(arith_im, 0200, ff00, M68000);
    INSN(undef, 02c0, ffc0, M68000);
    INSN(byterev, 02c0, fff8, CF_ISA_APLUSC);
    INSN(arith_im, 0480, fff8, CF_ISA_A);
    INSN(arith_im, 0400, ff00, M68000);
    INSN(undef, 04c0, ffc0, M68000);
    INSN(arith_im, 0600, ff00, M68000);
    INSN(undef, 06c0, ffc0, M68000);
    INSN(ff1, 04c0, fff8, CF_ISA_APLUSC);
    INSN(arith_im, 0680, fff8, CF_ISA_A);
    INSN(arith_im, 0c00, ff38, CF_ISA_A);
    INSN(arith_im, 0c00, ff00, M68000);
    BASE(bitop_im, 0800, ffc0);
    BASE(bitop_im, 0840, ffc0);
    BASE(bitop_im, 0880, ffc0);
    BASE(bitop_im, 08c0, ffc0);
    INSN(arith_im, 0a80, fff8, CF_ISA_A);
    INSN(arith_im, 0a00, ff00, M68000);
#if defined(CONFIG_SOFTMMU)
    INSN(moves, 0e00, ff00, M68000);
#endif
    INSN(cas, 0ac0, ffc0, CAS);
    INSN(cas, 0cc0, ffc0, CAS);
    INSN(cas, 0ec0, ffc0, CAS);
    INSN(cas2w, 0cfc, ffff, CAS);
    INSN(cas2l, 0efc, ffff, CAS);
    BASE(move, 1000, f000);
    BASE(move, 2000, f000);
    BASE(move, 3000, f000);
    INSN(chk, 4000, f040, M68000);
    INSN(strldsr, 40e7, ffff, CF_ISA_APLUSC);
    INSN(negx, 4080, fff8, CF_ISA_A);
    INSN(negx, 4000, ff00, M68000);
    INSN(undef, 40c0, ffc0, M68000);
    INSN(move_from_sr, 40c0, fff8, CF_ISA_A);
    INSN(move_from_sr, 40c0, ffc0, M68000);
    BASE(lea, 41c0, f1c0);
    BASE(clr, 4200, ff00);
    BASE(undef, 42c0, ffc0);
    INSN(move_from_ccr, 42c0, fff8, CF_ISA_A);
    INSN(move_from_ccr, 42c0, ffc0, M68000);
    INSN(neg, 4480, fff8, CF_ISA_A);
    INSN(neg, 4400, ff00, M68000);
    INSN(undef, 44c0, ffc0, M68000);
    BASE(move_to_ccr, 44c0, ffc0);
    INSN(not, 4680, fff8, CF_ISA_A);
    INSN(not, 4600, ff00, M68000);
#if defined(CONFIG_SOFTMMU)
    BASE(move_to_sr, 46c0, ffc0);
#endif
    INSN(nbcd, 4800, ffc0, M68000);
    INSN(linkl, 4808, fff8, M68000);
    BASE(pea, 4840, ffc0);
    BASE(swap, 4840, fff8);
    INSN(bkpt, 4848, fff8, BKPT);
    INSN(movem, 48d0, fbf8, CF_ISA_A);
    INSN(movem, 48e8, fbf8, CF_ISA_A);
    INSN(movem, 4880, fb80, M68000);
    BASE(ext, 4880, fff8);
    BASE(ext, 48c0, fff8);
    BASE(ext, 49c0, fff8);
    BASE(tst, 4a00, ff00);
    INSN(tas, 4ac0, ffc0, CF_ISA_B);
    INSN(tas, 4ac0, ffc0, M68000);
#if defined(CONFIG_SOFTMMU)
    INSN(halt, 4ac8, ffff, CF_ISA_A);
#endif
    INSN(pulse, 4acc, ffff, CF_ISA_A);
    BASE(illegal, 4afc, ffff);
    INSN(mull, 4c00, ffc0, CF_ISA_A);
    INSN(mull, 4c00, ffc0, LONG_MULDIV);
    INSN(divl, 4c40, ffc0, CF_ISA_A);
    INSN(divl, 4c40, ffc0, LONG_MULDIV);
    INSN(sats, 4c80, fff8, CF_ISA_B);
    BASE(trap, 4e40, fff0);
    BASE(link, 4e50, fff8);
    BASE(unlk, 4e58, fff8);
#if defined(CONFIG_SOFTMMU)
    INSN(move_to_usp, 4e60, fff8, USP);
    INSN(move_from_usp, 4e68, fff8, USP);
    INSN(reset, 4e70, ffff, M68000);
    BASE(stop, 4e72, ffff);
    BASE(rte, 4e73, ffff);
    INSN(cf_movec, 4e7b, ffff, CF_ISA_A);
    INSN(m68k_movec, 4e7a, fffe, M68000);
#endif
    BASE(nop, 4e71, ffff);
    INSN(rtd, 4e74, ffff, RTD);
    BASE(rts, 4e75, ffff);
    BASE(jump, 4e80, ffc0);
    BASE(jump, 4ec0, ffc0);
    INSN(addsubq, 5000, f080, M68000);
    BASE(addsubq, 5080, f0c0);
    INSN(scc, 50c0, f0f8, CF_ISA_A); /* Scc.B Dx */
    INSN(scc, 50c0, f0c0, M68000);   /* Scc.B <EA> */
    INSN(dbcc, 50c8, f0f8, M68000);
    INSN(tpf, 51f8, fff8, CF_ISA_A);

    /* Branch instructions.  */
    BASE(branch, 6000, f000);
    /* Disable long branch instructions, then add back the ones we want.  */
    BASE(undef, 60ff, f0ff); /* All long branches.  */
    INSN(branch, 60ff, f0ff, CF_ISA_B);
    INSN(undef, 60ff, ffff, CF_ISA_B); /* bra.l */
    INSN(branch, 60ff, ffff, BRAL);
    INSN(branch, 60ff, f0ff, BCCL);

    BASE(moveq, 7000, f100);
    INSN(mvzs, 7100, f100, CF_ISA_B);
    BASE(or, 8000, f000);
    BASE(divw, 80c0, f0c0);
    INSN(sbcd_reg, 8100, f1f8, M68000);
    INSN(sbcd_mem, 8108, f1f8, M68000);
    BASE(addsub, 9000, f000);
    INSN(undef, 90c0, f0c0, CF_ISA_A);
    INSN(subx_reg, 9180, f1f8, CF_ISA_A);
    INSN(subx_reg, 9100, f138, M68000);
    INSN(subx_mem, 9108, f138, M68000);
    INSN(suba, 91c0, f1c0, CF_ISA_A);
    INSN(suba, 90c0, f0c0, M68000);

    BASE(undef_mac, a000, f000);
    INSN(mac, a000, f100, CF_EMAC);
    INSN(from_mac, a180, f9b0, CF_EMAC);
    INSN(move_mac, a110, f9fc, CF_EMAC);
    INSN(from_macsr,a980, f9f0, CF_EMAC);
    INSN(from_mask, ad80, fff0, CF_EMAC);
    INSN(from_mext, ab80, fbf0, CF_EMAC);
    INSN(macsr_to_ccr, a9c0, ffff, CF_EMAC);
    INSN(to_mac, a100, f9c0, CF_EMAC);
    INSN(to_macsr, a900, ffc0, CF_EMAC);
    INSN(to_mext, ab00, fbc0, CF_EMAC);
    INSN(to_mask, ad00, ffc0, CF_EMAC);

    INSN(mov3q, a140, f1c0, CF_ISA_B);
    INSN(cmp, b000, f1c0, CF_ISA_B); /* cmp.b */
    INSN(cmp, b040, f1c0, CF_ISA_B); /* cmp.w */
    INSN(cmpa, b0c0, f1c0, CF_ISA_B); /* cmpa.w */
    INSN(cmp, b080, f1c0, CF_ISA_A);
    INSN(cmpa, b1c0, f1c0, CF_ISA_A);
    INSN(cmp, b000, f100, M68000);
    INSN(eor, b100, f100, M68000);
    INSN(cmpm, b108, f138, M68000);
    INSN(cmpa, b0c0, f0c0, M68000);
    INSN(eor, b180, f1c0, CF_ISA_A);
    BASE(and, c000, f000);
    INSN(exg_dd, c140, f1f8, M68000);
    INSN(exg_aa, c148, f1f8, M68000);
    INSN(exg_da, c188, f1f8, M68000);
    BASE(mulw, c0c0, f0c0);
    INSN(abcd_reg, c100, f1f8, M68000);
    INSN(abcd_mem, c108, f1f8, M68000);
    BASE(addsub, d000, f000);
    INSN(undef, d0c0, f0c0, CF_ISA_A);
    INSN(addx_reg, d180, f1f8, CF_ISA_A);
    INSN(addx_reg, d100, f138, M68000);
    INSN(addx_mem, d108, f138, M68000);
    INSN(adda, d1c0, f1c0, CF_ISA_A);
    INSN(adda, d0c0, f0c0, M68000);
    INSN(shift_im, e080, f0f0, CF_ISA_A);
    INSN(shift_reg, e0a0, f0f0, CF_ISA_A);
    INSN(shift8_im, e000, f0f0, M68000);
    INSN(shift16_im, e040, f0f0, M68000);
    INSN(shift_im, e080, f0f0, M68000);
    INSN(shift8_reg, e020, f0f0, M68000);
    INSN(shift16_reg, e060, f0f0, M68000);
    INSN(shift_reg, e0a0, f0f0, M68000);
    INSN(shift_mem, e0c0, fcc0, M68000);
    INSN(rotate_im, e090, f0f0, M68000);
    INSN(rotate8_im, e010, f0f0, M68000);
    INSN(rotate16_im, e050, f0f0, M68000);
    INSN(rotate_reg, e0b0, f0f0, M68000);
    INSN(rotate8_reg, e030, f0f0, M68000);
    INSN(rotate16_reg, e070, f0f0, M68000);
    INSN(rotate_mem, e4c0, fcc0, M68000);
    INSN(bfext_mem, e9c0, fdc0, BITFIELD);  /* bfextu & bfexts */
    INSN(bfext_reg, e9c0, fdf8, BITFIELD);
    INSN(bfins_mem, efc0, ffc0, BITFIELD);
    INSN(bfins_reg, efc0, fff8, BITFIELD);
    INSN(bfop_mem, eac0, ffc0, BITFIELD);   /* bfchg */
    INSN(bfop_reg, eac0, fff8, BITFIELD);   /* bfchg */
    INSN(bfop_mem, ecc0, ffc0, BITFIELD);   /* bfclr */
    INSN(bfop_reg, ecc0, fff8, BITFIELD);   /* bfclr */
    INSN(bfop_mem, edc0, ffc0, BITFIELD);   /* bfffo */
    INSN(bfop_reg, edc0, fff8, BITFIELD);   /* bfffo */
    INSN(bfop_mem, eec0, ffc0, BITFIELD);   /* bfset */
    INSN(bfop_reg, eec0, fff8, BITFIELD);   /* bfset */
    INSN(bfop_mem, e8c0, ffc0, BITFIELD);   /* bftst */
    INSN(bfop_reg, e8c0, fff8, BITFIELD);   /* bftst */
    BASE(undef_fpu, f000, f000);
    INSN(fpu, f200, ffc0, CF_FPU);
    INSN(fbcc, f280, ffc0, CF_FPU);
    INSN(fpu, f200, ffc0, FPU);
    INSN(fscc, f240, ffc0, FPU);
    INSN(fbcc, f280, ff80, FPU);
#if defined(CONFIG_SOFTMMU)
    INSN(frestore, f340, ffc0, CF_FPU);
    INSN(fsave, f300, ffc0, CF_FPU);
    INSN(frestore, f340, ffc0, FPU);
    INSN(fsave, f300, ffc0, FPU);
    INSN(intouch, f340, ffc0, CF_ISA_A);
    INSN(cpushl, f428, ff38, CF_ISA_A);
    INSN(cpush, f420, ff20, M68040);
    INSN(cinv, f400, ff20, M68040);
    INSN(pflush, f500, ffe0, M68040);
    INSN(ptest, f548, ffd8, M68040);
    INSN(wddata, fb00, ff00, CF_ISA_A);
    INSN(wdebug, fbc0, ffc0, CF_ISA_A);
#endif
    INSN(move16_mem, f600, ffe0, M68040);
    INSN(move16_reg, f620, fff8, M68040);
#undef INSN
}

/* ??? Some of this implementation is not exception safe.  We should always
   write back the result to memory before setting the condition codes.  */
/* Decode and translate one instruction at s->pc, then flush any pending
   EA writebacks and release deferred temporaries.  */
static void disas_m68k_insn(CPUM68KState * env, DisasContext *s)
{
    uint16_t insn = read_im16(env, s);
    opcode_table[insn](env, s, insn);
    do_writebacks(s);
    do_release(s);
}

/* generate intermediate code for basic block 'tb'.
 */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
{
    CPUM68KState *env = cs->env_ptr;
    DisasContext dc1, *dc = &dc1;
    target_ulong pc_start;
    int pc_offset;
    int num_insns;
    int max_insns;

    /* generate intermediate code */
    pc_start = tb->pc;

    dc->tb = tb;

    dc->env = env;
    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->cc_op = CC_OP_DYNAMIC;   /* CC state unknown at TB entry */
    dc->cc_op_synced = 1;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->done_mac = 0;
    dc->writeback_mask = 0;
    num_insns = 0;
    max_insns = tb_cflags(tb) & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    init_release_array(dc);

    gen_tb_start(tb);
    do {
        pc_offset = dc->pc - pc_start;
        tcg_gen_insn_start(dc->pc, dc->cc_op);
        num_insns++;

        if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
            gen_exception(dc, dc->pc, EXCP_DEBUG);
            dc->is_jmp = DISAS_JUMP;
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order to for it to be
               properly cleared -- thus we increment the PC here so that
               the logic setting tb->size below does the right thing.  */
            dc->pc += 2;
            break;
        }

        if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
            gen_io_start();
        }

        dc->insn_pc = dc->pc;
        disas_m68k_insn(env, dc);
    } while (!dc->is_jmp && !tcg_op_buf_full() &&
             !cs->singlestep_enabled &&
             !singlestep &&
             (pc_offset) < (TARGET_PAGE_SIZE - 32) &&
             num_insns < max_insns);

    if (tb_cflags(tb) & CF_LAST_IO)
        gen_io_end();
    if (unlikely(cs->singlestep_enabled)) {
        /* Make sure the pc is updated, and raise a debug exception.  */
        if (!dc->is_jmp) {
            update_cc_op(dc);
            tcg_gen_movi_i32(QREG_PC, dc->pc);
        }
        gen_helper_raise_exception(cpu_env, tcg_const_i32(EXCP_DEBUG));
    } else {
        switch(dc->is_jmp) {
        case DISAS_NEXT:
            update_cc_op(dc);
            gen_jmp_tb(dc, 0, dc->pc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            update_cc_op(dc);
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        }
    }
    gen_tb_end(tb, num_insns);

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(pc_start)) {
        qemu_log_lock();
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(cs, pc_start, dc->pc - pc_start);
        qemu_log("\n");
        qemu_log_unlock();
    }
#endif
    tb->size = dc->pc - pc_start;
    tb->icount = num_insns;
}

/* Convert an 80-bit extended-precision register image (HIGH:LOW) to a
 * host double for display, using the softfloat conversion so rounding
 * follows env->fp_status.  The union avoids a strict-aliasing cast.  */
static double floatx80_to_double(CPUM68KState *env, uint16_t high, uint64_t low)
{
    floatx80 a = { .high = high, .low = low };
    union {
        float64 f64;
        double d;
    } u;

    u.f64 = floatx80_to_float64(a, &env->fp_status);
    return u.d;
}

/* Dump CPU state (registers, SR/CCR, FPU and MMU state) for the
 * monitor/debug log.  */
void m68k_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                         int flags)
{
    M68kCPU *cpu = M68K_CPU(cs);
    CPUM68KState *env = &cpu->env;
    int i;
    uint16_t sr;
    for (i = 0; i < 8; i++) {
        cpu_fprintf(f, "D%d = %08x   A%d = %08x   "
                    "F%d = %04x %016"PRIx64"  (%12g)\n",
                    i, env->dregs[i], i, env->aregs[i],
                    i, env->fregs[i].l.upper, env->fregs[i].l.lower,
                    floatx80_to_double(env, env->fregs[i].l.upper,
                                       env->fregs[i].l.lower));
    }
    cpu_fprintf (f, "PC = %08x   ", env->pc);
    sr = env->sr | cpu_m68k_get_ccr(env);
    cpu_fprintf(f, "SR = %04x T:%x I:%x %c%c %c%c%c%c%c\n",
                sr, (sr & SR_T) >> SR_T_SHIFT, (sr & SR_I) >> SR_I_SHIFT,
                (sr & SR_S) ? 'S' : 'U', (sr & SR_M) ? '%' : 'I',
                (sr & CCF_X) ? 'X' : '-', (sr & CCF_N) ? 'N' : '-',
                (sr & CCF_Z) ? 'Z' : '-', (sr & CCF_V) ? 'V' : '-',
                (sr & CCF_C) ? 'C' : '-');
    cpu_fprintf(f, "FPSR = %08x %c%c%c%c ", env->fpsr,
                (env->fpsr & FPSR_CC_A) ? 'A' : '-',
                (env->fpsr & FPSR_CC_I) ? 'I' : '-',
                (env->fpsr & FPSR_CC_Z) ? 'Z' : '-',
                (env->fpsr & FPSR_CC_N) ? 'N' : '-');
    cpu_fprintf(f, "\n                                "
                   "FPCR =     %04x ", env->fpcr);
    switch (env->fpcr & FPCR_PREC_MASK) {
    case FPCR_PREC_X:
        cpu_fprintf(f, "X ");
        break;
    case FPCR_PREC_S:
        cpu_fprintf(f, "S ");
        break;
    case FPCR_PREC_D:
        cpu_fprintf(f, "D ");
        break;
    }
    switch (env->fpcr & FPCR_RND_MASK) {
    case FPCR_RND_N:
        cpu_fprintf(f, "RN ");
        break;
    case FPCR_RND_Z:
        cpu_fprintf(f, "RZ ");
        break;
    case FPCR_RND_M:
        cpu_fprintf(f, "RM ");
        break;
    case FPCR_RND_P:
        cpu_fprintf(f, "RP ");
        break;
    }
    cpu_fprintf(f, "\n");
#ifdef CONFIG_SOFTMMU
    cpu_fprintf(f, "%sA7(MSP) = %08x %sA7(USP) = %08x %sA7(ISP) = %08x\n",
                env->current_sp == M68K_SSP ? "->" : "  ", env->sp[M68K_SSP],
                env->current_sp == M68K_USP ? "->" : "  ", env->sp[M68K_USP],
                env->current_sp == M68K_ISP ? "->" : "  ", env->sp[M68K_ISP]);
    cpu_fprintf(f, "VBR = 0x%08x\n", env->vbr);
    cpu_fprintf(f, "SFC = %x DFC %x\n", env->sfc, env->dfc);
    cpu_fprintf(f, "SSW %08x TCR %08x URP %08x SRP %08x\n",
                env->mmu.ssw, env->mmu.tcr, env->mmu.urp, env->mmu.srp);
    cpu_fprintf(f, "DTTR0/1: %08x/%08x ITTR0/1: %08x/%08x\n",
                env->mmu.ttr[M68K_DTTR0], env->mmu.ttr[M68K_DTTR1],
                env->mmu.ttr[M68K_ITTR0], env->mmu.ttr[M68K_ITTR1]);
    cpu_fprintf(f, "MMUSR %08x, fault at %08x\n",
                env->mmu.mmusr, env->mmu.ar);
#endif
}

/* Restore CPU state from the insn_start data recorded at translation
 * time: data[0] is the PC, data[1] the cc_op (skipped when DYNAMIC,
 * meaning the runtime value is already correct).  */
void restore_state_to_opc(CPUM68KState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    int cc_op = data[1];
    env->pc = data[0];
    if (cc_op != CC_OP_DYNAMIC) {
        env->cc_op = cc_op;
    }
}