1 /* 2 * Xilinx MicroBlaze emulation for qemu: main translation routines. 3 * 4 * Copyright (c) 2009 Edgar E. Iglesias. 5 * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd. 6 * 7 * This library is free software; you can redistribute it and/or 8 * modify it under the terms of the GNU Lesser General Public 9 * License as published by the Free Software Foundation; either 10 * version 2 of the License, or (at your option) any later version. 11 * 12 * This library is distributed in the hope that it will be useful, 13 * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 15 * Lesser General Public License for more details. 16 * 17 * You should have received a copy of the GNU Lesser General Public 18 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 19 */ 20 21 #include "qemu/osdep.h" 22 #include "cpu.h" 23 #include "disas/disas.h" 24 #include "exec/exec-all.h" 25 #include "tcg-op.h" 26 #include "exec/helper-proto.h" 27 #include "microblaze-decode.h" 28 #include "exec/cpu_ldst.h" 29 #include "exec/helper-gen.h" 30 #include "exec/translator.h" 31 32 #include "trace-tcg.h" 33 #include "exec/log.h" 34 35 36 #define SIM_COMPAT 0 37 #define DISAS_GNU 1 38 #define DISAS_MB 1 39 #if DISAS_MB && !SIM_COMPAT 40 # define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__) 41 #else 42 # define LOG_DIS(...) do { } while (0) 43 #endif 44 45 #define D(x) 46 47 #define EXTRACT_FIELD(src, start, end) \ 48 (((src) >> start) & ((1 << (end - start + 1)) - 1)) 49 50 /* is_jmp field values */ 51 #define DISAS_JUMP DISAS_TARGET_0 /* only pc was modified dynamically */ 52 #define DISAS_UPDATE DISAS_TARGET_1 /* cpu state was modified dynamically */ 53 #define DISAS_TB_JUMP DISAS_TARGET_2 /* only pc was modified statically */ 54 55 static TCGv env_debug; 56 static TCGv cpu_R[32]; 57 static TCGv cpu_SR[18]; 58 static TCGv env_imm; 59 static TCGv env_btaken; 60 static TCGv env_btarget; 61 static TCGv env_iflags; 62 static TCGv env_res_addr; 63 static TCGv env_res_val; 64 65 #include "exec/gen-icount.h" 66 67 /* This is the state at translation time. */ 68 typedef struct DisasContext { 69 MicroBlazeCPU *cpu; 70 target_ulong pc; 71 72 /* Decoder. */ 73 int type_b; 74 uint32_t ir; 75 uint8_t opcode; 76 uint8_t rd, ra, rb; 77 uint16_t imm; 78 79 unsigned int cpustate_changed; 80 unsigned int delayed_branch; 81 unsigned int tb_flags, synced_flags; /* tb dependent flags. */ 82 unsigned int clear_imm; 83 int is_jmp; 84 85 #define JMP_NOJMP 0 86 #define JMP_DIRECT 1 87 #define JMP_DIRECT_CC 2 88 #define JMP_INDIRECT 3 89 unsigned int jmp; 90 uint32_t jmp_pc; 91 92 int abort_at_next_insn; 93 int nr_nops; 94 struct TranslationBlock *tb; 95 int singlestep_enabled; 96 } DisasContext; 97 98 static const char *regnames[] = 99 { 100 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", 101 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", 102 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23", 103 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31", 104 }; 105 106 static const char *special_regnames[] = 107 { 108 "rpc", "rmsr", "sr2", "sr3", "sr4", "sr5", "sr6", "sr7", 109 "sr8", "sr9", "sr10", "sr11", "sr12", "sr13", "sr14", "sr15", 110 "sr16", "sr17", "sr18" 111 }; 112 113 static inline void t_sync_flags(DisasContext *dc) 114 { 115 /* Synch the tb dependent flags between translator and runtime. */ 116 if (dc->tb_flags != dc->synced_flags) { 117 tcg_gen_movi_tl(env_iflags, dc->tb_flags); 118 dc->synced_flags = dc->tb_flags; 119 } 120 } 121 122 static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index) 123 { 124 TCGv_i32 tmp = tcg_const_i32(index); 125 126 t_sync_flags(dc); 127 tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc); 128 gen_helper_raise_exception(cpu_env, tmp); 129 tcg_temp_free_i32(tmp); 130 dc->is_jmp = DISAS_UPDATE; 131 } 132 133 static inline bool use_goto_tb(DisasContext *dc, target_ulong dest) 134 { 135 #ifndef CONFIG_USER_ONLY 136 return (dc->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK); 137 #else 138 return true; 139 #endif 140 } 141 142 static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest) 143 { 144 if (use_goto_tb(dc, dest)) { 145 tcg_gen_goto_tb(n); 146 tcg_gen_movi_tl(cpu_SR[SR_PC], dest); 147 tcg_gen_exit_tb((uintptr_t)dc->tb + n); 148 } else { 149 tcg_gen_movi_tl(cpu_SR[SR_PC], dest); 150 tcg_gen_exit_tb(0); 151 } 152 } 153 154 static void read_carry(DisasContext *dc, TCGv d) 155 { 156 tcg_gen_shri_tl(d, cpu_SR[SR_MSR], 31); 157 } 158 159 /* 160 * write_carry sets the carry bits in MSR based on bit 0 of v. 161 * v[31:1] are ignored. 162 */ 163 static void write_carry(DisasContext *dc, TCGv v) 164 { 165 TCGv t0 = tcg_temp_new(); 166 tcg_gen_shli_tl(t0, v, 31); 167 tcg_gen_sari_tl(t0, t0, 31); 168 tcg_gen_andi_tl(t0, t0, (MSR_C | MSR_CC)); 169 tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], 170 ~(MSR_C | MSR_CC)); 171 tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0); 172 tcg_temp_free(t0); 173 } 174 175 static void write_carryi(DisasContext *dc, bool carry) 176 { 177 TCGv t0 = tcg_temp_new(); 178 tcg_gen_movi_tl(t0, carry); 179 write_carry(dc, t0); 180 tcg_temp_free(t0); 181 } 182 183 /* True if ALU operand b is a small immediate that may deserve 184 faster treatment. */ 185 static inline int dec_alu_op_b_is_small_imm(DisasContext *dc) 186 { 187 /* Immediate insn without the imm prefix ? */ 188 return dc->type_b && !(dc->tb_flags & IMM_FLAG); 189 } 190 191 static inline TCGv *dec_alu_op_b(DisasContext *dc) 192 { 193 if (dc->type_b) { 194 if (dc->tb_flags & IMM_FLAG) 195 tcg_gen_ori_tl(env_imm, env_imm, dc->imm); 196 else 197 tcg_gen_movi_tl(env_imm, (int32_t)((int16_t)dc->imm)); 198 return &env_imm; 199 } else 200 return &cpu_R[dc->rb]; 201 } 202 203 static void dec_add(DisasContext *dc) 204 { 205 unsigned int k, c; 206 TCGv cf; 207 208 k = dc->opcode & 4; 209 c = dc->opcode & 2; 210 211 LOG_DIS("add%s%s%s r%d r%d r%d\n", 212 dc->type_b ? "i" : "", k ? "k" : "", c ? "c" : "", 213 dc->rd, dc->ra, dc->rb); 214 215 /* Take care of the easy cases first. */ 216 if (k) { 217 /* k - keep carry, no need to update MSR. */ 218 /* If rd == r0, it's a nop. */ 219 if (dc->rd) { 220 tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc))); 221 222 if (c) { 223 /* c - Add carry into the result. */ 224 cf = tcg_temp_new(); 225 226 read_carry(dc, cf); 227 tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf); 228 tcg_temp_free(cf); 229 } 230 } 231 return; 232 } 233 234 /* From now on, we can assume k is zero. So we need to update MSR. */ 235 /* Extract carry. */ 236 cf = tcg_temp_new(); 237 if (c) { 238 read_carry(dc, cf); 239 } else { 240 tcg_gen_movi_tl(cf, 0); 241 } 242 243 if (dc->rd) { 244 TCGv ncf = tcg_temp_new(); 245 gen_helper_carry(ncf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf); 246 tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc))); 247 tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf); 248 write_carry(dc, ncf); 249 tcg_temp_free(ncf); 250 } else { 251 gen_helper_carry(cf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf); 252 write_carry(dc, cf); 253 } 254 tcg_temp_free(cf); 255 } 256 257 static void dec_sub(DisasContext *dc) 258 { 259 unsigned int u, cmp, k, c; 260 TCGv cf, na; 261 262 u = dc->imm & 2; 263 k = dc->opcode & 4; 264 c = dc->opcode & 2; 265 cmp = (dc->imm & 1) && (!dc->type_b) && k; 266 267 if (cmp) { 268 LOG_DIS("cmp%s r%d, r%d ir=%x\n", u ? "u" : "", dc->rd, dc->ra, dc->ir); 269 if (dc->rd) { 270 if (u) 271 gen_helper_cmpu(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]); 272 else 273 gen_helper_cmp(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]); 274 } 275 return; 276 } 277 278 LOG_DIS("sub%s%s r%d, r%d r%d\n", 279 k ? "k" : "", c ? "c" : "", dc->rd, dc->ra, dc->rb); 280 281 /* Take care of the easy cases first. */ 282 if (k) { 283 /* k - keep carry, no need to update MSR. */ 284 /* If rd == r0, it's a nop. */ 285 if (dc->rd) { 286 tcg_gen_sub_tl(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]); 287 288 if (c) { 289 /* c - Add carry into the result. */ 290 cf = tcg_temp_new(); 291 292 read_carry(dc, cf); 293 tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf); 294 tcg_temp_free(cf); 295 } 296 } 297 return; 298 } 299 300 /* From now on, we can assume k is zero. So we need to update MSR. */ 301 /* Extract carry. And complement a into na. */ 302 cf = tcg_temp_new(); 303 na = tcg_temp_new(); 304 if (c) { 305 read_carry(dc, cf); 306 } else { 307 tcg_gen_movi_tl(cf, 1); 308 } 309 310 /* d = b + ~a + c. carry defaults to 1. */ 311 tcg_gen_not_tl(na, cpu_R[dc->ra]); 312 313 if (dc->rd) { 314 TCGv ncf = tcg_temp_new(); 315 gen_helper_carry(ncf, na, *(dec_alu_op_b(dc)), cf); 316 tcg_gen_add_tl(cpu_R[dc->rd], na, *(dec_alu_op_b(dc))); 317 tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf); 318 write_carry(dc, ncf); 319 tcg_temp_free(ncf); 320 } else { 321 gen_helper_carry(cf, na, *(dec_alu_op_b(dc)), cf); 322 write_carry(dc, cf); 323 } 324 tcg_temp_free(cf); 325 tcg_temp_free(na); 326 } 327 328 static void dec_pattern(DisasContext *dc) 329 { 330 unsigned int mode; 331 332 if ((dc->tb_flags & MSR_EE_FLAG) 333 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK) 334 && !dc->cpu->cfg.use_pcmp_instr) { 335 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP); 336 t_gen_raise_exception(dc, EXCP_HW_EXCP); 337 } 338 339 mode = dc->opcode & 3; 340 switch (mode) { 341 case 0: 342 /* pcmpbf. */ 343 LOG_DIS("pcmpbf r%d r%d r%d\n", dc->rd, dc->ra, dc->rb); 344 if (dc->rd) 345 gen_helper_pcmpbf(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]); 346 break; 347 case 2: 348 LOG_DIS("pcmpeq r%d r%d r%d\n", dc->rd, dc->ra, dc->rb); 349 if (dc->rd) { 350 tcg_gen_setcond_tl(TCG_COND_EQ, cpu_R[dc->rd], 351 cpu_R[dc->ra], cpu_R[dc->rb]); 352 } 353 break; 354 case 3: 355 LOG_DIS("pcmpne r%d r%d r%d\n", dc->rd, dc->ra, dc->rb); 356 if (dc->rd) { 357 tcg_gen_setcond_tl(TCG_COND_NE, cpu_R[dc->rd], 358 cpu_R[dc->ra], cpu_R[dc->rb]); 359 } 360 break; 361 default: 362 cpu_abort(CPU(dc->cpu), 363 "unsupported pattern insn opcode=%x\n", dc->opcode); 364 break; 365 } 366 } 367 368 static void dec_and(DisasContext *dc) 369 { 370 unsigned int not; 371 372 if (!dc->type_b && (dc->imm & (1 << 10))) { 373 dec_pattern(dc); 374 return; 375 } 376 377 not = dc->opcode & (1 << 1); 378 LOG_DIS("and%s\n", not ? "n" : ""); 379 380 if (!dc->rd) 381 return; 382 383 if (not) { 384 tcg_gen_andc_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc))); 385 } else 386 tcg_gen_and_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc))); 387 } 388 389 static void dec_or(DisasContext *dc) 390 { 391 if (!dc->type_b && (dc->imm & (1 << 10))) { 392 dec_pattern(dc); 393 return; 394 } 395 396 LOG_DIS("or r%d r%d r%d imm=%x\n", dc->rd, dc->ra, dc->rb, dc->imm); 397 if (dc->rd) 398 tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc))); 399 } 400 401 static void dec_xor(DisasContext *dc) 402 { 403 if (!dc->type_b && (dc->imm & (1 << 10))) { 404 dec_pattern(dc); 405 return; 406 } 407 408 LOG_DIS("xor r%d\n", dc->rd); 409 if (dc->rd) 410 tcg_gen_xor_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc))); 411 } 412 413 static inline void msr_read(DisasContext *dc, TCGv d) 414 { 415 tcg_gen_mov_tl(d, cpu_SR[SR_MSR]); 416 } 417 418 static inline void msr_write(DisasContext *dc, TCGv v) 419 { 420 TCGv t; 421 422 t = tcg_temp_new(); 423 dc->cpustate_changed = 1; 424 /* PVR bit is not writable. */ 425 tcg_gen_andi_tl(t, v, ~MSR_PVR); 426 tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], MSR_PVR); 427 tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t); 428 tcg_temp_free(t); 429 } 430 431 static void dec_msr(DisasContext *dc) 432 { 433 CPUState *cs = CPU(dc->cpu); 434 TCGv t0, t1; 435 unsigned int sr, to, rn; 436 int mem_index = cpu_mmu_index(&dc->cpu->env, false); 437 438 sr = dc->imm & ((1 << 14) - 1); 439 to = dc->imm & (1 << 14); 440 dc->type_b = 1; 441 if (to) 442 dc->cpustate_changed = 1; 443 444 /* msrclr and msrset. */ 445 if (!(dc->imm & (1 << 15))) { 446 unsigned int clr = dc->ir & (1 << 16); 447 448 LOG_DIS("msr%s r%d imm=%x\n", clr ? "clr" : "set", 449 dc->rd, dc->imm); 450 451 if (!dc->cpu->cfg.use_msr_instr) { 452 /* nop??? */ 453 return; 454 } 455 456 if ((dc->tb_flags & MSR_EE_FLAG) 457 && mem_index == MMU_USER_IDX && (dc->imm != 4 && dc->imm != 0)) { 458 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN); 459 t_gen_raise_exception(dc, EXCP_HW_EXCP); 460 return; 461 } 462 463 if (dc->rd) 464 msr_read(dc, cpu_R[dc->rd]); 465 466 t0 = tcg_temp_new(); 467 t1 = tcg_temp_new(); 468 msr_read(dc, t0); 469 tcg_gen_mov_tl(t1, *(dec_alu_op_b(dc))); 470 471 if (clr) { 472 tcg_gen_not_tl(t1, t1); 473 tcg_gen_and_tl(t0, t0, t1); 474 } else 475 tcg_gen_or_tl(t0, t0, t1); 476 msr_write(dc, t0); 477 tcg_temp_free(t0); 478 tcg_temp_free(t1); 479 tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4); 480 dc->is_jmp = DISAS_UPDATE; 481 return; 482 } 483 484 if (to) { 485 if ((dc->tb_flags & MSR_EE_FLAG) 486 && mem_index == MMU_USER_IDX) { 487 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN); 488 t_gen_raise_exception(dc, EXCP_HW_EXCP); 489 return; 490 } 491 } 492 493 #if !defined(CONFIG_USER_ONLY) 494 /* Catch read/writes to the mmu block. */ 495 if ((sr & ~0xff) == 0x1000) { 496 sr &= 7; 497 LOG_DIS("m%ss sr%d r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm); 498 if (to) 499 gen_helper_mmu_write(cpu_env, tcg_const_tl(sr), cpu_R[dc->ra]); 500 else 501 gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tcg_const_tl(sr)); 502 return; 503 } 504 #endif 505 506 if (to) { 507 LOG_DIS("m%ss sr%x r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm); 508 switch (sr) { 509 case 0: 510 break; 511 case 1: 512 msr_write(dc, cpu_R[dc->ra]); 513 break; 514 case 0x3: 515 tcg_gen_mov_tl(cpu_SR[SR_EAR], cpu_R[dc->ra]); 516 break; 517 case 0x5: 518 tcg_gen_mov_tl(cpu_SR[SR_ESR], cpu_R[dc->ra]); 519 break; 520 case 0x7: 521 tcg_gen_andi_tl(cpu_SR[SR_FSR], cpu_R[dc->ra], 31); 522 break; 523 case 0x800: 524 tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, slr)); 525 break; 526 case 0x802: 527 tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, shr)); 528 break; 529 default: 530 cpu_abort(CPU(dc->cpu), "unknown mts reg %x\n", sr); 531 break; 532 } 533 } else { 534 LOG_DIS("m%ss r%d sr%x imm=%x\n", to ? "t" : "f", dc->rd, sr, dc->imm); 535 536 switch (sr) { 537 case 0: 538 tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc); 539 break; 540 case 1: 541 msr_read(dc, cpu_R[dc->rd]); 542 break; 543 case 0x3: 544 tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_EAR]); 545 break; 546 case 0x5: 547 tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_ESR]); 548 break; 549 case 0x7: 550 tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_FSR]); 551 break; 552 case 0xb: 553 tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_BTR]); 554 break; 555 case 0x800: 556 tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, slr)); 557 break; 558 case 0x802: 559 tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, shr)); 560 break; 561 case 0x2000: 562 case 0x2001: 563 case 0x2002: 564 case 0x2003: 565 case 0x2004: 566 case 0x2005: 567 case 0x2006: 568 case 0x2007: 569 case 0x2008: 570 case 0x2009: 571 case 0x200a: 572 case 0x200b: 573 case 0x200c: 574 rn = sr & 0xf; 575 tcg_gen_ld_tl(cpu_R[dc->rd], 576 cpu_env, offsetof(CPUMBState, pvr.regs[rn])); 577 break; 578 default: 579 cpu_abort(cs, "unknown mfs reg %x\n", sr); 580 break; 581 } 582 } 583 584 if (dc->rd == 0) { 585 tcg_gen_movi_tl(cpu_R[0], 0); 586 } 587 } 588 589 /* Multiplier unit. */ 590 static void dec_mul(DisasContext *dc) 591 { 592 TCGv tmp; 593 unsigned int subcode; 594 595 if ((dc->tb_flags & MSR_EE_FLAG) 596 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK) 597 && !dc->cpu->cfg.use_hw_mul) { 598 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP); 599 t_gen_raise_exception(dc, EXCP_HW_EXCP); 600 return; 601 } 602 603 subcode = dc->imm & 3; 604 605 if (dc->type_b) { 606 LOG_DIS("muli r%d r%d %x\n", dc->rd, dc->ra, dc->imm); 607 tcg_gen_mul_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc))); 608 return; 609 } 610 611 /* mulh, mulhsu and mulhu are not available if C_USE_HW_MUL is < 2. */ 612 if (subcode >= 1 && subcode <= 3 && dc->cpu->cfg.use_hw_mul < 2) { 613 /* nop??? */ 614 } 615 616 tmp = tcg_temp_new(); 617 switch (subcode) { 618 case 0: 619 LOG_DIS("mul r%d r%d r%d\n", dc->rd, dc->ra, dc->rb); 620 tcg_gen_mul_tl(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]); 621 break; 622 case 1: 623 LOG_DIS("mulh r%d r%d r%d\n", dc->rd, dc->ra, dc->rb); 624 tcg_gen_muls2_tl(tmp, cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]); 625 break; 626 case 2: 627 LOG_DIS("mulhsu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb); 628 tcg_gen_mulsu2_tl(tmp, cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]); 629 break; 630 case 3: 631 LOG_DIS("mulhu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb); 632 tcg_gen_mulu2_tl(tmp, cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]); 633 break; 634 default: 635 cpu_abort(CPU(dc->cpu), "unknown MUL insn %x\n", subcode); 636 break; 637 } 638 tcg_temp_free(tmp); 639 } 640 641 /* Div unit. */ 642 static void dec_div(DisasContext *dc) 643 { 644 unsigned int u; 645 646 u = dc->imm & 2; 647 LOG_DIS("div\n"); 648 649 if ((dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK) 650 && !dc->cpu->cfg.use_div) { 651 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP); 652 t_gen_raise_exception(dc, EXCP_HW_EXCP); 653 } 654 655 if (u) 656 gen_helper_divu(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)), 657 cpu_R[dc->ra]); 658 else 659 gen_helper_divs(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)), 660 cpu_R[dc->ra]); 661 if (!dc->rd) 662 tcg_gen_movi_tl(cpu_R[dc->rd], 0); 663 } 664 665 static void dec_barrel(DisasContext *dc) 666 { 667 TCGv t0; 668 unsigned int imm_w, imm_s; 669 bool s, t, e = false, i = false; 670 671 if ((dc->tb_flags & MSR_EE_FLAG) 672 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK) 673 && !dc->cpu->cfg.use_barrel) { 674 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP); 675 t_gen_raise_exception(dc, EXCP_HW_EXCP); 676 return; 677 } 678 679 if (dc->type_b) { 680 /* Insert and extract are only available in immediate mode. */ 681 i = extract32(dc->imm, 15, 1); 682 e = extract32(dc->imm, 14, 1); 683 } 684 s = extract32(dc->imm, 10, 1); 685 t = extract32(dc->imm, 9, 1); 686 imm_w = extract32(dc->imm, 6, 5); 687 imm_s = extract32(dc->imm, 0, 5); 688 689 LOG_DIS("bs%s%s%s r%d r%d r%d\n", 690 e ? "e" : "", 691 s ? "l" : "r", t ? "a" : "l", dc->rd, dc->ra, dc->rb); 692 693 if (e) { 694 if (imm_w + imm_s > 32 || imm_w == 0) { 695 /* These inputs have an undefined behavior. */ 696 qemu_log_mask(LOG_GUEST_ERROR, "bsefi: Bad input w=%d s=%d\n", 697 imm_w, imm_s); 698 } else { 699 tcg_gen_extract_i32(cpu_R[dc->rd], cpu_R[dc->ra], imm_s, imm_w); 700 } 701 } else if (i) { 702 int width = imm_w - imm_s + 1; 703 704 if (imm_w < imm_s) { 705 /* These inputs have an undefined behavior. */ 706 qemu_log_mask(LOG_GUEST_ERROR, "bsifi: Bad input w=%d s=%d\n", 707 imm_w, imm_s); 708 } else { 709 tcg_gen_deposit_i32(cpu_R[dc->rd], cpu_R[dc->rd], cpu_R[dc->ra], 710 imm_s, width); 711 } 712 } else { 713 t0 = tcg_temp_new(); 714 715 tcg_gen_mov_tl(t0, *(dec_alu_op_b(dc))); 716 tcg_gen_andi_tl(t0, t0, 31); 717 718 if (s) { 719 tcg_gen_shl_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0); 720 } else { 721 if (t) { 722 tcg_gen_sar_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0); 723 } else { 724 tcg_gen_shr_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0); 725 } 726 } 727 tcg_temp_free(t0); 728 } 729 } 730 731 static void dec_bit(DisasContext *dc) 732 { 733 CPUState *cs = CPU(dc->cpu); 734 TCGv t0; 735 unsigned int op; 736 int mem_index = cpu_mmu_index(&dc->cpu->env, false); 737 738 op = dc->ir & ((1 << 9) - 1); 739 switch (op) { 740 case 0x21: 741 /* src. */ 742 t0 = tcg_temp_new(); 743 744 LOG_DIS("src r%d r%d\n", dc->rd, dc->ra); 745 tcg_gen_andi_tl(t0, cpu_SR[SR_MSR], MSR_CC); 746 write_carry(dc, cpu_R[dc->ra]); 747 if (dc->rd) { 748 tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1); 749 tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->rd], t0); 750 } 751 tcg_temp_free(t0); 752 break; 753 754 case 0x1: 755 case 0x41: 756 /* srl. */ 757 LOG_DIS("srl r%d r%d\n", dc->rd, dc->ra); 758 759 /* Update carry. Note that write carry only looks at the LSB. */ 760 write_carry(dc, cpu_R[dc->ra]); 761 if (dc->rd) { 762 if (op == 0x41) 763 tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1); 764 else 765 tcg_gen_sari_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1); 766 } 767 break; 768 case 0x60: 769 LOG_DIS("ext8s r%d r%d\n", dc->rd, dc->ra); 770 tcg_gen_ext8s_i32(cpu_R[dc->rd], cpu_R[dc->ra]); 771 break; 772 case 0x61: 773 LOG_DIS("ext16s r%d r%d\n", dc->rd, dc->ra); 774 tcg_gen_ext16s_i32(cpu_R[dc->rd], cpu_R[dc->ra]); 775 break; 776 case 0x64: 777 case 0x66: 778 case 0x74: 779 case 0x76: 780 /* wdc. */ 781 LOG_DIS("wdc r%d\n", dc->ra); 782 if ((dc->tb_flags & MSR_EE_FLAG) 783 && mem_index == MMU_USER_IDX) { 784 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN); 785 t_gen_raise_exception(dc, EXCP_HW_EXCP); 786 return; 787 } 788 break; 789 case 0x68: 790 /* wic. */ 791 LOG_DIS("wic r%d\n", dc->ra); 792 if ((dc->tb_flags & MSR_EE_FLAG) 793 && mem_index == MMU_USER_IDX) { 794 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN); 795 t_gen_raise_exception(dc, EXCP_HW_EXCP); 796 return; 797 } 798 break; 799 case 0xe0: 800 if ((dc->tb_flags & MSR_EE_FLAG) 801 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK) 802 && !dc->cpu->cfg.use_pcmp_instr) { 803 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP); 804 t_gen_raise_exception(dc, EXCP_HW_EXCP); 805 } 806 if (dc->cpu->cfg.use_pcmp_instr) { 807 tcg_gen_clzi_i32(cpu_R[dc->rd], cpu_R[dc->ra], 32); 808 } 809 break; 810 case 0x1e0: 811 /* swapb */ 812 LOG_DIS("swapb r%d r%d\n", dc->rd, dc->ra); 813 tcg_gen_bswap32_i32(cpu_R[dc->rd], cpu_R[dc->ra]); 814 break; 815 case 0x1e2: 816 /*swaph */ 817 LOG_DIS("swaph r%d r%d\n", dc->rd, dc->ra); 818 tcg_gen_rotri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 16); 819 break; 820 default: 821 cpu_abort(cs, "unknown bit oc=%x op=%x rd=%d ra=%d rb=%d\n", 822 dc->pc, op, dc->rd, dc->ra, dc->rb); 823 break; 824 } 825 } 826 827 static inline void sync_jmpstate(DisasContext *dc) 828 { 829 if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) { 830 if (dc->jmp == JMP_DIRECT) { 831 tcg_gen_movi_tl(env_btaken, 1); 832 } 833 dc->jmp = JMP_INDIRECT; 834 tcg_gen_movi_tl(env_btarget, dc->jmp_pc); 835 } 836 } 837 838 static void dec_imm(DisasContext *dc) 839 { 840 LOG_DIS("imm %x\n", dc->imm << 16); 841 tcg_gen_movi_tl(env_imm, (dc->imm << 16)); 842 dc->tb_flags |= IMM_FLAG; 843 dc->clear_imm = 0; 844 } 845 846 static inline TCGv *compute_ldst_addr(DisasContext *dc, TCGv *t) 847 { 848 unsigned int extimm = dc->tb_flags & IMM_FLAG; 849 /* Should be set to one if r1 is used by loadstores. */ 850 int stackprot = 0; 851 852 /* All load/stores use ra. */ 853 if (dc->ra == 1 && dc->cpu->cfg.stackprot) { 854 stackprot = 1; 855 } 856 857 /* Treat the common cases first. */ 858 if (!dc->type_b) { 859 /* If any of the regs is r0, return a ptr to the other. */ 860 if (dc->ra == 0) { 861 return &cpu_R[dc->rb]; 862 } else if (dc->rb == 0) { 863 return &cpu_R[dc->ra]; 864 } 865 866 if (dc->rb == 1 && dc->cpu->cfg.stackprot) { 867 stackprot = 1; 868 } 869 870 *t = tcg_temp_new(); 871 tcg_gen_add_tl(*t, cpu_R[dc->ra], cpu_R[dc->rb]); 872 873 if (stackprot) { 874 gen_helper_stackprot(cpu_env, *t); 875 } 876 return t; 877 } 878 /* Immediate. */ 879 if (!extimm) { 880 if (dc->imm == 0) { 881 return &cpu_R[dc->ra]; 882 } 883 *t = tcg_temp_new(); 884 tcg_gen_movi_tl(*t, (int32_t)((int16_t)dc->imm)); 885 tcg_gen_add_tl(*t, cpu_R[dc->ra], *t); 886 } else { 887 *t = tcg_temp_new(); 888 tcg_gen_add_tl(*t, cpu_R[dc->ra], *(dec_alu_op_b(dc))); 889 } 890 891 if (stackprot) { 892 gen_helper_stackprot(cpu_env, *t); 893 } 894 return t; 895 } 896 897 static void dec_load(DisasContext *dc) 898 { 899 TCGv t, v, *addr; 900 unsigned int size; 901 bool rev = false, ex = false; 902 TCGMemOp mop; 903 904 mop = dc->opcode & 3; 905 size = 1 << mop; 906 if (!dc->type_b) { 907 rev = extract32(dc->ir, 9, 1); 908 ex = extract32(dc->ir, 10, 1); 909 } 910 mop |= MO_TE; 911 if (rev) { 912 mop ^= MO_BSWAP; 913 } 914 915 if (size > 4 && (dc->tb_flags & MSR_EE_FLAG) 916 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) { 917 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP); 918 t_gen_raise_exception(dc, EXCP_HW_EXCP); 919 return; 920 } 921 922 LOG_DIS("l%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "", 923 ex ? "x" : ""); 924 925 t_sync_flags(dc); 926 addr = compute_ldst_addr(dc, &t); 927 928 /* 929 * When doing reverse accesses we need to do two things. 930 * 931 * 1. Reverse the address wrt endianness. 932 * 2. Byteswap the data lanes on the way back into the CPU core. 933 */ 934 if (rev && size != 4) { 935 /* Endian reverse the address. t is addr. */ 936 switch (size) { 937 case 1: 938 { 939 /* 00 -> 11 940 01 -> 10 941 10 -> 10 942 11 -> 00 */ 943 TCGv low = tcg_temp_new(); 944 945 /* Force addr into the temp. */ 946 if (addr != &t) { 947 t = tcg_temp_new(); 948 tcg_gen_mov_tl(t, *addr); 949 addr = &t; 950 } 951 952 tcg_gen_andi_tl(low, t, 3); 953 tcg_gen_sub_tl(low, tcg_const_tl(3), low); 954 tcg_gen_andi_tl(t, t, ~3); 955 tcg_gen_or_tl(t, t, low); 956 tcg_temp_free(low); 957 break; 958 } 959 960 case 2: 961 /* 00 -> 10 962 10 -> 00. */ 963 /* Force addr into the temp. */ 964 if (addr != &t) { 965 t = tcg_temp_new(); 966 tcg_gen_xori_tl(t, *addr, 2); 967 addr = &t; 968 } else { 969 tcg_gen_xori_tl(t, t, 2); 970 } 971 break; 972 default: 973 cpu_abort(CPU(dc->cpu), "Invalid reverse size\n"); 974 break; 975 } 976 } 977 978 /* lwx does not throw unaligned access errors, so force alignment */ 979 if (ex) { 980 /* Force addr into the temp. */ 981 if (addr != &t) { 982 t = tcg_temp_new(); 983 tcg_gen_mov_tl(t, *addr); 984 addr = &t; 985 } 986 tcg_gen_andi_tl(t, t, ~3); 987 } 988 989 /* If we get a fault on a dslot, the jmpstate better be in sync. */ 990 sync_jmpstate(dc); 991 992 /* Verify alignment if needed. */ 993 /* 994 * Microblaze gives MMU faults priority over faults due to 995 * unaligned addresses. That's why we speculatively do the load 996 * into v. If the load succeeds, we verify alignment of the 997 * address and if that succeeds we write into the destination reg. 998 */ 999 v = tcg_temp_new(); 1000 tcg_gen_qemu_ld_tl(v, *addr, cpu_mmu_index(&dc->cpu->env, false), mop); 1001 1002 if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) { 1003 tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc); 1004 gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd), 1005 tcg_const_tl(0), tcg_const_tl(size - 1)); 1006 } 1007 1008 if (ex) { 1009 tcg_gen_mov_tl(env_res_addr, *addr); 1010 tcg_gen_mov_tl(env_res_val, v); 1011 } 1012 if (dc->rd) { 1013 tcg_gen_mov_tl(cpu_R[dc->rd], v); 1014 } 1015 tcg_temp_free(v); 1016 1017 if (ex) { /* lwx */ 1018 /* no support for AXI exclusive so always clear C */ 1019 write_carryi(dc, 0); 1020 } 1021 1022 if (addr == &t) 1023 tcg_temp_free(t); 1024 } 1025 1026 static void dec_store(DisasContext *dc) 1027 { 1028 TCGv t, *addr, swx_addr; 1029 TCGLabel *swx_skip = NULL; 1030 unsigned int size, rev = 0, ex = 0; 1031 TCGMemOp mop; 1032 1033 mop = dc->opcode & 3; 1034 size = 1 << mop; 1035 if (!dc->type_b) { 1036 rev = (dc->ir >> 9) & 1; 1037 ex = (dc->ir >> 10) & 1; 1038 } 1039 mop |= MO_TE; 1040 if (rev) { 1041 mop ^= MO_BSWAP; 1042 } 1043 1044 if (size > 4 && (dc->tb_flags & MSR_EE_FLAG) 1045 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) { 1046 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP); 1047 t_gen_raise_exception(dc, EXCP_HW_EXCP); 1048 return; 1049 } 1050 1051 LOG_DIS("s%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "", 1052 ex ? "x" : ""); 1053 t_sync_flags(dc); 1054 /* If we get a fault on a dslot, the jmpstate better be in sync. */ 1055 sync_jmpstate(dc); 1056 addr = compute_ldst_addr(dc, &t); 1057 1058 swx_addr = tcg_temp_local_new(); 1059 if (ex) { /* swx */ 1060 TCGv tval; 1061 1062 /* Force addr into the swx_addr. */ 1063 tcg_gen_mov_tl(swx_addr, *addr); 1064 addr = &swx_addr; 1065 /* swx does not throw unaligned access errors, so force alignment */ 1066 tcg_gen_andi_tl(swx_addr, swx_addr, ~3); 1067 1068 write_carryi(dc, 1); 1069 swx_skip = gen_new_label(); 1070 tcg_gen_brcond_tl(TCG_COND_NE, env_res_addr, swx_addr, swx_skip); 1071 1072 /* Compare the value loaded at lwx with current contents of 1073 the reserved location. 1074 FIXME: This only works for system emulation where we can expect 1075 this compare and the following write to be atomic. For user 1076 emulation we need to add atomicity between threads. */ 1077 tval = tcg_temp_new(); 1078 tcg_gen_qemu_ld_tl(tval, swx_addr, cpu_mmu_index(&dc->cpu->env, false), 1079 MO_TEUL); 1080 tcg_gen_brcond_tl(TCG_COND_NE, env_res_val, tval, swx_skip); 1081 write_carryi(dc, 0); 1082 tcg_temp_free(tval); 1083 } 1084 1085 if (rev && size != 4) { 1086 /* Endian reverse the address. t is addr. */ 1087 switch (size) { 1088 case 1: 1089 { 1090 /* 00 -> 11 1091 01 -> 10 1092 10 -> 10 1093 11 -> 00 */ 1094 TCGv low = tcg_temp_new(); 1095 1096 /* Force addr into the temp. */ 1097 if (addr != &t) { 1098 t = tcg_temp_new(); 1099 tcg_gen_mov_tl(t, *addr); 1100 addr = &t; 1101 } 1102 1103 tcg_gen_andi_tl(low, t, 3); 1104 tcg_gen_sub_tl(low, tcg_const_tl(3), low); 1105 tcg_gen_andi_tl(t, t, ~3); 1106 tcg_gen_or_tl(t, t, low); 1107 tcg_temp_free(low); 1108 break; 1109 } 1110 1111 case 2: 1112 /* 00 -> 10 1113 10 -> 00. */ 1114 /* Force addr into the temp. */ 1115 if (addr != &t) { 1116 t = tcg_temp_new(); 1117 tcg_gen_xori_tl(t, *addr, 2); 1118 addr = &t; 1119 } else { 1120 tcg_gen_xori_tl(t, t, 2); 1121 } 1122 break; 1123 default: 1124 cpu_abort(CPU(dc->cpu), "Invalid reverse size\n"); 1125 break; 1126 } 1127 } 1128 tcg_gen_qemu_st_tl(cpu_R[dc->rd], *addr, cpu_mmu_index(&dc->cpu->env, false), mop); 1129 1130 /* Verify alignment if needed. */ 1131 if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) { 1132 tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc); 1133 /* FIXME: if the alignment is wrong, we should restore the value 1134 * in memory. One possible way to achieve this is to probe 1135 * the MMU prior to the memaccess, thay way we could put 1136 * the alignment checks in between the probe and the mem 1137 * access. 1138 */ 1139 gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd), 1140 tcg_const_tl(1), tcg_const_tl(size - 1)); 1141 } 1142 1143 if (ex) { 1144 gen_set_label(swx_skip); 1145 } 1146 tcg_temp_free(swx_addr); 1147 1148 if (addr == &t) 1149 tcg_temp_free(t); 1150 } 1151 1152 static inline void eval_cc(DisasContext *dc, unsigned int cc, 1153 TCGv d, TCGv a, TCGv b) 1154 { 1155 switch (cc) { 1156 case CC_EQ: 1157 tcg_gen_setcond_tl(TCG_COND_EQ, d, a, b); 1158 break; 1159 case CC_NE: 1160 tcg_gen_setcond_tl(TCG_COND_NE, d, a, b); 1161 break; 1162 case CC_LT: 1163 tcg_gen_setcond_tl(TCG_COND_LT, d, a, b); 1164 break; 1165 case CC_LE: 1166 tcg_gen_setcond_tl(TCG_COND_LE, d, a, b); 1167 break; 1168 case CC_GE: 1169 tcg_gen_setcond_tl(TCG_COND_GE, d, a, b); 1170 break; 1171 case CC_GT: 1172 tcg_gen_setcond_tl(TCG_COND_GT, d, a, b); 1173 break; 1174 default: 1175 cpu_abort(CPU(dc->cpu), "Unknown condition code %x.\n", cc); 1176 break; 1177 } 1178 } 1179 1180 static void eval_cond_jmp(DisasContext *dc, TCGv pc_true, TCGv pc_false) 1181 { 1182 TCGLabel *l1 = gen_new_label(); 1183 /* Conditional jmp. */ 1184 tcg_gen_mov_tl(cpu_SR[SR_PC], pc_false); 1185 tcg_gen_brcondi_tl(TCG_COND_EQ, env_btaken, 0, l1); 1186 tcg_gen_mov_tl(cpu_SR[SR_PC], pc_true); 1187 gen_set_label(l1); 1188 } 1189 1190 static void dec_bcc(DisasContext *dc) 1191 { 1192 unsigned int cc; 1193 unsigned int dslot; 1194 1195 cc = EXTRACT_FIELD(dc->ir, 21, 23); 1196 dslot = dc->ir & (1 << 25); 1197 LOG_DIS("bcc%s r%d %x\n", dslot ? "d" : "", dc->ra, dc->imm); 1198 1199 dc->delayed_branch = 1; 1200 if (dslot) { 1201 dc->delayed_branch = 2; 1202 dc->tb_flags |= D_FLAG; 1203 tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)), 1204 cpu_env, offsetof(CPUMBState, bimm)); 1205 } 1206 1207 if (dec_alu_op_b_is_small_imm(dc)) { 1208 int32_t offset = (int32_t)((int16_t)dc->imm); /* sign-extend. */ 1209 1210 tcg_gen_movi_tl(env_btarget, dc->pc + offset); 1211 dc->jmp = JMP_DIRECT_CC; 1212 dc->jmp_pc = dc->pc + offset; 1213 } else { 1214 dc->jmp = JMP_INDIRECT; 1215 tcg_gen_movi_tl(env_btarget, dc->pc); 1216 tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc))); 1217 } 1218 eval_cc(dc, cc, env_btaken, cpu_R[dc->ra], tcg_const_tl(0)); 1219 } 1220 1221 static void dec_br(DisasContext *dc) 1222 { 1223 unsigned int dslot, link, abs, mbar; 1224 int mem_index = cpu_mmu_index(&dc->cpu->env, false); 1225 1226 dslot = dc->ir & (1 << 20); 1227 abs = dc->ir & (1 << 19); 1228 link = dc->ir & (1 << 18); 1229 1230 /* Memory barrier. */ 1231 mbar = (dc->ir >> 16) & 31; 1232 if (mbar == 2 && dc->imm == 4) { 1233 /* mbar IMM & 16 decodes to sleep. */ 1234 if (dc->rd & 16) { 1235 TCGv_i32 tmp_hlt = tcg_const_i32(EXCP_HLT); 1236 TCGv_i32 tmp_1 = tcg_const_i32(1); 1237 1238 LOG_DIS("sleep\n"); 1239 1240 t_sync_flags(dc); 1241 tcg_gen_st_i32(tmp_1, cpu_env, 1242 -offsetof(MicroBlazeCPU, env) 1243 +offsetof(CPUState, halted)); 1244 tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4); 1245 gen_helper_raise_exception(cpu_env, tmp_hlt); 1246 tcg_temp_free_i32(tmp_hlt); 1247 tcg_temp_free_i32(tmp_1); 1248 return; 1249 } 1250 LOG_DIS("mbar %d\n", dc->rd); 1251 /* Break the TB. */ 1252 dc->cpustate_changed = 1; 1253 return; 1254 } 1255 1256 LOG_DIS("br%s%s%s%s imm=%x\n", 1257 abs ? "a" : "", link ? "l" : "", 1258 dc->type_b ? "i" : "", dslot ? "d" : "", 1259 dc->imm); 1260 1261 dc->delayed_branch = 1; 1262 if (dslot) { 1263 dc->delayed_branch = 2; 1264 dc->tb_flags |= D_FLAG; 1265 tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)), 1266 cpu_env, offsetof(CPUMBState, bimm)); 1267 } 1268 if (link && dc->rd) 1269 tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc); 1270 1271 dc->jmp = JMP_INDIRECT; 1272 if (abs) { 1273 tcg_gen_movi_tl(env_btaken, 1); 1274 tcg_gen_mov_tl(env_btarget, *(dec_alu_op_b(dc))); 1275 if (link && !dslot) { 1276 if (!(dc->tb_flags & IMM_FLAG) && (dc->imm == 8 || dc->imm == 0x18)) 1277 t_gen_raise_exception(dc, EXCP_BREAK); 1278 if (dc->imm == 0) { 1279 if ((dc->tb_flags & MSR_EE_FLAG) && mem_index == MMU_USER_IDX) { 1280 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN); 1281 t_gen_raise_exception(dc, EXCP_HW_EXCP); 1282 return; 1283 } 1284 1285 t_gen_raise_exception(dc, EXCP_DEBUG); 1286 } 1287 } 1288 } else { 1289 if (dec_alu_op_b_is_small_imm(dc)) { 1290 dc->jmp = JMP_DIRECT; 1291 dc->jmp_pc = dc->pc + (int32_t)((int16_t)dc->imm); 1292 } else { 1293 tcg_gen_movi_tl(env_btaken, 1); 1294 tcg_gen_movi_tl(env_btarget, dc->pc); 1295 tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc))); 1296 } 1297 } 1298 } 1299 1300 static inline void do_rti(DisasContext *dc) 1301 { 1302 TCGv t0, t1; 1303 t0 = tcg_temp_new(); 1304 t1 = tcg_temp_new(); 1305 tcg_gen_shri_tl(t0, cpu_SR[SR_MSR], 1); 1306 tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_IE); 1307 tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM)); 1308 1309 tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM)); 1310 tcg_gen_or_tl(t1, t1, t0); 1311 msr_write(dc, t1); 1312 tcg_temp_free(t1); 1313 tcg_temp_free(t0); 1314 dc->tb_flags &= ~DRTI_FLAG; 1315 } 1316 1317 static inline void do_rtb(DisasContext *dc) 1318 { 1319 TCGv t0, t1; 1320 t0 = tcg_temp_new(); 1321 t1 = tcg_temp_new(); 1322 tcg_gen_andi_tl(t1, cpu_SR[SR_MSR], ~MSR_BIP); 1323 tcg_gen_shri_tl(t0, t1, 1); 1324 tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM)); 1325 1326 tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM)); 1327 tcg_gen_or_tl(t1, t1, t0); 1328 msr_write(dc, t1); 1329 tcg_temp_free(t1); 1330 tcg_temp_free(t0); 1331 dc->tb_flags &= ~DRTB_FLAG; 1332 } 1333 1334 static inline void do_rte(DisasContext *dc) 1335 { 1336 TCGv t0, t1; 1337 t0 = tcg_temp_new(); 1338 t1 = tcg_temp_new(); 1339 1340 tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_EE); 1341 tcg_gen_andi_tl(t1, t1, ~MSR_EIP); 1342 tcg_gen_shri_tl(t0, t1, 1); 1343 tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM)); 1344 1345 tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM)); 1346 tcg_gen_or_tl(t1, t1, t0); 1347 msr_write(dc, t1); 1348 tcg_temp_free(t1); 1349 tcg_temp_free(t0); 1350 dc->tb_flags &= ~DRTE_FLAG; 1351 } 1352 1353 static void dec_rts(DisasContext *dc) 1354 { 1355 unsigned int b_bit, i_bit, e_bit; 1356 int mem_index = cpu_mmu_index(&dc->cpu->env, false); 1357 1358 i_bit = dc->ir & (1 << 21); 1359 b_bit = dc->ir & (1 << 22); 1360 e_bit = dc->ir & (1 << 23); 1361 1362 dc->delayed_branch = 2; 1363 dc->tb_flags |= D_FLAG; 1364 tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)), 1365 cpu_env, offsetof(CPUMBState, bimm)); 1366 1367 if (i_bit) { 1368 LOG_DIS("rtid ir=%x\n", dc->ir); 1369 if ((dc->tb_flags & MSR_EE_FLAG) 1370 && mem_index == MMU_USER_IDX) { 1371 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN); 1372 t_gen_raise_exception(dc, EXCP_HW_EXCP); 1373 } 1374 dc->tb_flags |= DRTI_FLAG; 1375 } else if (b_bit) { 1376 LOG_DIS("rtbd ir=%x\n", dc->ir); 1377 if ((dc->tb_flags & MSR_EE_FLAG) 1378 && mem_index == MMU_USER_IDX) { 1379 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN); 1380 t_gen_raise_exception(dc, EXCP_HW_EXCP); 1381 } 1382 dc->tb_flags |= DRTB_FLAG; 1383 } else if (e_bit) { 1384 LOG_DIS("rted ir=%x\n", dc->ir); 1385 if ((dc->tb_flags & MSR_EE_FLAG) 1386 && mem_index == MMU_USER_IDX) { 1387 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN); 1388 t_gen_raise_exception(dc, EXCP_HW_EXCP); 1389 } 1390 dc->tb_flags |= DRTE_FLAG; 1391 } else 1392 LOG_DIS("rts ir=%x\n", dc->ir); 1393 1394 dc->jmp = JMP_INDIRECT; 1395 tcg_gen_movi_tl(env_btaken, 1); 1396 tcg_gen_add_tl(env_btarget, cpu_R[dc->ra], *(dec_alu_op_b(dc))); 1397 } 1398 1399 static int dec_check_fpuv2(DisasContext *dc) 1400 { 1401 if ((dc->cpu->cfg.use_fpu != 2) && (dc->tb_flags & MSR_EE_FLAG)) { 1402 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_FPU); 1403 t_gen_raise_exception(dc, EXCP_HW_EXCP); 1404 } 1405 return (dc->cpu->cfg.use_fpu == 2) ? 0 : PVR2_USE_FPU2_MASK; 1406 } 1407 1408 static void dec_fpu(DisasContext *dc) 1409 { 1410 unsigned int fpu_insn; 1411 1412 if ((dc->tb_flags & MSR_EE_FLAG) 1413 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK) 1414 && !dc->cpu->cfg.use_fpu) { 1415 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP); 1416 t_gen_raise_exception(dc, EXCP_HW_EXCP); 1417 return; 1418 } 1419 1420 fpu_insn = (dc->ir >> 7) & 7; 1421 1422 switch (fpu_insn) { 1423 case 0: 1424 gen_helper_fadd(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra], 1425 cpu_R[dc->rb]); 1426 break; 1427 1428 case 1: 1429 gen_helper_frsub(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra], 1430 cpu_R[dc->rb]); 1431 break; 1432 1433 case 2: 1434 gen_helper_fmul(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra], 1435 cpu_R[dc->rb]); 1436 break; 1437 1438 case 3: 1439 gen_helper_fdiv(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra], 1440 cpu_R[dc->rb]); 1441 break; 1442 1443 case 4: 1444 switch ((dc->ir >> 4) & 7) { 1445 case 0: 1446 gen_helper_fcmp_un(cpu_R[dc->rd], cpu_env, 1447 cpu_R[dc->ra], cpu_R[dc->rb]); 1448 break; 1449 case 1: 1450 gen_helper_fcmp_lt(cpu_R[dc->rd], cpu_env, 1451 cpu_R[dc->ra], cpu_R[dc->rb]); 1452 break; 1453 case 2: 1454 gen_helper_fcmp_eq(cpu_R[dc->rd], cpu_env, 1455 cpu_R[dc->ra], cpu_R[dc->rb]); 1456 break; 1457 case 3: 1458 gen_helper_fcmp_le(cpu_R[dc->rd], cpu_env, 1459 cpu_R[dc->ra], cpu_R[dc->rb]); 1460 break; 1461 case 4: 1462 gen_helper_fcmp_gt(cpu_R[dc->rd], cpu_env, 1463 cpu_R[dc->ra], cpu_R[dc->rb]); 1464 break; 1465 case 5: 1466 gen_helper_fcmp_ne(cpu_R[dc->rd], cpu_env, 1467 cpu_R[dc->ra], cpu_R[dc->rb]); 1468 break; 1469 case 6: 1470 gen_helper_fcmp_ge(cpu_R[dc->rd], cpu_env, 1471 cpu_R[dc->ra], cpu_R[dc->rb]); 1472 break; 1473 default: 1474 qemu_log_mask(LOG_UNIMP, 1475 "unimplemented fcmp fpu_insn=%x pc=%x" 1476 " opc=%x\n", 1477 fpu_insn, dc->pc, dc->opcode); 1478 dc->abort_at_next_insn = 1; 1479 break; 1480 } 1481 break; 1482 1483 case 5: 1484 if (!dec_check_fpuv2(dc)) { 1485 return; 1486 } 1487 gen_helper_flt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]); 1488 break; 1489 1490 case 6: 1491 if (!dec_check_fpuv2(dc)) { 1492 return; 1493 } 1494 gen_helper_fint(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]); 1495 break; 1496 1497 case 7: 1498 if (!dec_check_fpuv2(dc)) { 1499 return; 1500 } 1501 gen_helper_fsqrt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]); 1502 break; 1503 1504 default: 1505 qemu_log_mask(LOG_UNIMP, "unimplemented FPU insn fpu_insn=%x pc=%x" 1506 " opc=%x\n", 1507 fpu_insn, dc->pc, dc->opcode); 1508 dc->abort_at_next_insn = 1; 1509 break; 1510 } 1511 } 1512 1513 static void dec_null(DisasContext *dc) 1514 { 1515 if ((dc->tb_flags & MSR_EE_FLAG) 1516 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) { 1517 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP); 1518 t_gen_raise_exception(dc, EXCP_HW_EXCP); 1519 return; 1520 } 1521 qemu_log_mask(LOG_GUEST_ERROR, "unknown insn pc=%x opc=%x\n", dc->pc, dc->opcode); 1522 dc->abort_at_next_insn = 1; 1523 } 1524 1525 /* Insns connected to FSL or AXI stream attached devices. */ 1526 static void dec_stream(DisasContext *dc) 1527 { 1528 int mem_index = cpu_mmu_index(&dc->cpu->env, false); 1529 TCGv_i32 t_id, t_ctrl; 1530 int ctrl; 1531 1532 LOG_DIS("%s%s imm=%x\n", dc->rd ? "get" : "put", 1533 dc->type_b ? "" : "d", dc->imm); 1534 1535 if ((dc->tb_flags & MSR_EE_FLAG) && (mem_index == MMU_USER_IDX)) { 1536 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN); 1537 t_gen_raise_exception(dc, EXCP_HW_EXCP); 1538 return; 1539 } 1540 1541 t_id = tcg_temp_new(); 1542 if (dc->type_b) { 1543 tcg_gen_movi_tl(t_id, dc->imm & 0xf); 1544 ctrl = dc->imm >> 10; 1545 } else { 1546 tcg_gen_andi_tl(t_id, cpu_R[dc->rb], 0xf); 1547 ctrl = dc->imm >> 5; 1548 } 1549 1550 t_ctrl = tcg_const_tl(ctrl); 1551 1552 if (dc->rd == 0) { 1553 gen_helper_put(t_id, t_ctrl, cpu_R[dc->ra]); 1554 } else { 1555 gen_helper_get(cpu_R[dc->rd], t_id, t_ctrl); 1556 } 1557 tcg_temp_free(t_id); 1558 tcg_temp_free(t_ctrl); 1559 } 1560 1561 static struct decoder_info { 1562 struct { 1563 uint32_t bits; 1564 uint32_t mask; 1565 }; 1566 void (*dec)(DisasContext *dc); 1567 } decinfo[] = { 1568 {DEC_ADD, dec_add}, 1569 {DEC_SUB, dec_sub}, 1570 {DEC_AND, dec_and}, 1571 {DEC_XOR, dec_xor}, 1572 {DEC_OR, dec_or}, 1573 {DEC_BIT, dec_bit}, 1574 {DEC_BARREL, dec_barrel}, 1575 {DEC_LD, dec_load}, 1576 {DEC_ST, dec_store}, 1577 {DEC_IMM, dec_imm}, 1578 {DEC_BR, dec_br}, 1579 {DEC_BCC, dec_bcc}, 1580 {DEC_RTS, dec_rts}, 1581 {DEC_FPU, dec_fpu}, 1582 {DEC_MUL, dec_mul}, 1583 {DEC_DIV, dec_div}, 1584 {DEC_MSR, dec_msr}, 1585 {DEC_STREAM, dec_stream}, 1586 {{0, 0}, dec_null} 1587 }; 1588 1589 static inline void decode(DisasContext *dc, uint32_t ir) 1590 { 1591 int i; 1592 1593 dc->ir = ir; 1594 LOG_DIS("%8.8x\t", dc->ir); 1595 1596 if (dc->ir) 1597 dc->nr_nops = 0; 1598 else { 1599 if ((dc->tb_flags & MSR_EE_FLAG) 1600 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK) 1601 && (dc->cpu->env.pvr.regs[2] & PVR2_OPCODE_0x0_ILL_MASK)) { 1602 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP); 1603 t_gen_raise_exception(dc, EXCP_HW_EXCP); 1604 return; 1605 } 1606 1607 LOG_DIS("nr_nops=%d\t", dc->nr_nops); 1608 dc->nr_nops++; 1609 if (dc->nr_nops > 4) { 1610 cpu_abort(CPU(dc->cpu), "fetching nop sequence\n"); 1611 } 1612 } 1613 /* bit 2 seems to indicate insn type. */ 1614 dc->type_b = ir & (1 << 29); 1615 1616 dc->opcode = EXTRACT_FIELD(ir, 26, 31); 1617 dc->rd = EXTRACT_FIELD(ir, 21, 25); 1618 dc->ra = EXTRACT_FIELD(ir, 16, 20); 1619 dc->rb = EXTRACT_FIELD(ir, 11, 15); 1620 dc->imm = EXTRACT_FIELD(ir, 0, 15); 1621 1622 /* Large switch for all insns. */ 1623 for (i = 0; i < ARRAY_SIZE(decinfo); i++) { 1624 if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) { 1625 decinfo[i].dec(dc); 1626 break; 1627 } 1628 } 1629 } 1630 1631 /* generate intermediate code for basic block 'tb'. */ 1632 void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb) 1633 { 1634 CPUMBState *env = cs->env_ptr; 1635 MicroBlazeCPU *cpu = mb_env_get_cpu(env); 1636 uint32_t pc_start; 1637 struct DisasContext ctx; 1638 struct DisasContext *dc = &ctx; 1639 uint32_t page_start, org_flags; 1640 target_ulong npc; 1641 int num_insns; 1642 int max_insns; 1643 1644 pc_start = tb->pc; 1645 dc->cpu = cpu; 1646 dc->tb = tb; 1647 org_flags = dc->synced_flags = dc->tb_flags = tb->flags; 1648 1649 dc->is_jmp = DISAS_NEXT; 1650 dc->jmp = 0; 1651 dc->delayed_branch = !!(dc->tb_flags & D_FLAG); 1652 if (dc->delayed_branch) { 1653 dc->jmp = JMP_INDIRECT; 1654 } 1655 dc->pc = pc_start; 1656 dc->singlestep_enabled = cs->singlestep_enabled; 1657 dc->cpustate_changed = 0; 1658 dc->abort_at_next_insn = 0; 1659 dc->nr_nops = 0; 1660 1661 if (pc_start & 3) { 1662 cpu_abort(cs, "Microblaze: unaligned PC=%x\n", pc_start); 1663 } 1664 1665 page_start = pc_start & TARGET_PAGE_MASK; 1666 num_insns = 0; 1667 max_insns = tb_cflags(tb) & CF_COUNT_MASK; 1668 if (max_insns == 0) { 1669 max_insns = CF_COUNT_MASK; 1670 } 1671 if (max_insns > TCG_MAX_INSNS) { 1672 max_insns = TCG_MAX_INSNS; 1673 } 1674 1675 gen_tb_start(tb); 1676 do 1677 { 1678 tcg_gen_insn_start(dc->pc); 1679 num_insns++; 1680 1681 #if SIM_COMPAT 1682 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) { 1683 tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc); 1684 gen_helper_debug(); 1685 } 1686 #endif 1687 1688 if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) { 1689 t_gen_raise_exception(dc, EXCP_DEBUG); 1690 dc->is_jmp = DISAS_UPDATE; 1691 /* The address covered by the breakpoint must be included in 1692 [tb->pc, tb->pc + tb->size) in order to for it to be 1693 properly cleared -- thus we increment the PC here so that 1694 the logic setting tb->size below does the right thing. */ 1695 dc->pc += 4; 1696 break; 1697 } 1698 1699 /* Pretty disas. */ 1700 LOG_DIS("%8.8x:\t", dc->pc); 1701 1702 if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) { 1703 gen_io_start(); 1704 } 1705 1706 dc->clear_imm = 1; 1707 decode(dc, cpu_ldl_code(env, dc->pc)); 1708 if (dc->clear_imm) 1709 dc->tb_flags &= ~IMM_FLAG; 1710 dc->pc += 4; 1711 1712 if (dc->delayed_branch) { 1713 dc->delayed_branch--; 1714 if (!dc->delayed_branch) { 1715 if (dc->tb_flags & DRTI_FLAG) 1716 do_rti(dc); 1717 if (dc->tb_flags & DRTB_FLAG) 1718 do_rtb(dc); 1719 if (dc->tb_flags & DRTE_FLAG) 1720 do_rte(dc); 1721 /* Clear the delay slot flag. */ 1722 dc->tb_flags &= ~D_FLAG; 1723 /* If it is a direct jump, try direct chaining. */ 1724 if (dc->jmp == JMP_INDIRECT) { 1725 eval_cond_jmp(dc, env_btarget, tcg_const_tl(dc->pc)); 1726 dc->is_jmp = DISAS_JUMP; 1727 } else if (dc->jmp == JMP_DIRECT) { 1728 t_sync_flags(dc); 1729 gen_goto_tb(dc, 0, dc->jmp_pc); 1730 dc->is_jmp = DISAS_TB_JUMP; 1731 } else if (dc->jmp == JMP_DIRECT_CC) { 1732 TCGLabel *l1 = gen_new_label(); 1733 t_sync_flags(dc); 1734 /* Conditional jmp. */ 1735 tcg_gen_brcondi_tl(TCG_COND_NE, env_btaken, 0, l1); 1736 gen_goto_tb(dc, 1, dc->pc); 1737 gen_set_label(l1); 1738 gen_goto_tb(dc, 0, dc->jmp_pc); 1739 1740 dc->is_jmp = DISAS_TB_JUMP; 1741 } 1742 break; 1743 } 1744 } 1745 if (cs->singlestep_enabled) { 1746 break; 1747 } 1748 } while (!dc->is_jmp && !dc->cpustate_changed 1749 && !tcg_op_buf_full() 1750 && !singlestep 1751 && (dc->pc - page_start < TARGET_PAGE_SIZE) 1752 && num_insns < max_insns); 1753 1754 npc = dc->pc; 1755 if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) { 1756 if (dc->tb_flags & D_FLAG) { 1757 dc->is_jmp = DISAS_UPDATE; 1758 tcg_gen_movi_tl(cpu_SR[SR_PC], npc); 1759 sync_jmpstate(dc); 1760 } else 1761 npc = dc->jmp_pc; 1762 } 1763 1764 if (tb_cflags(tb) & CF_LAST_IO) 1765 gen_io_end(); 1766 /* Force an update if the per-tb cpu state has changed. */ 1767 if (dc->is_jmp == DISAS_NEXT 1768 && (dc->cpustate_changed || org_flags != dc->tb_flags)) { 1769 dc->is_jmp = DISAS_UPDATE; 1770 tcg_gen_movi_tl(cpu_SR[SR_PC], npc); 1771 } 1772 t_sync_flags(dc); 1773 1774 if (unlikely(cs->singlestep_enabled)) { 1775 TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG); 1776 1777 if (dc->is_jmp != DISAS_JUMP) { 1778 tcg_gen_movi_tl(cpu_SR[SR_PC], npc); 1779 } 1780 gen_helper_raise_exception(cpu_env, tmp); 1781 tcg_temp_free_i32(tmp); 1782 } else { 1783 switch(dc->is_jmp) { 1784 case DISAS_NEXT: 1785 gen_goto_tb(dc, 1, npc); 1786 break; 1787 default: 1788 case DISAS_JUMP: 1789 case DISAS_UPDATE: 1790 /* indicate that the hash table must be used 1791 to find the next TB */ 1792 tcg_gen_exit_tb(0); 1793 break; 1794 case DISAS_TB_JUMP: 1795 /* nothing more to generate */ 1796 break; 1797 } 1798 } 1799 gen_tb_end(tb, num_insns); 1800 1801 tb->size = dc->pc - pc_start; 1802 tb->icount = num_insns; 1803 1804 #ifdef DEBUG_DISAS 1805 #if !SIM_COMPAT 1806 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM) 1807 && qemu_log_in_addr_range(pc_start)) { 1808 qemu_log_lock(); 1809 qemu_log("--------------\n"); 1810 log_target_disas(cs, pc_start, dc->pc - pc_start); 1811 qemu_log_unlock(); 1812 } 1813 #endif 1814 #endif 1815 assert(!dc->abort_at_next_insn); 1816 } 1817 1818 void mb_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf, 1819 int flags) 1820 { 1821 MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs); 1822 CPUMBState *env = &cpu->env; 1823 int i; 1824 1825 if (!env || !f) 1826 return; 1827 1828 cpu_fprintf(f, "IN: PC=%x %s\n", 1829 env->sregs[SR_PC], lookup_symbol(env->sregs[SR_PC])); 1830 cpu_fprintf(f, "rmsr=%x resr=%x rear=%x debug=%x imm=%x iflags=%x fsr=%x\n", 1831 env->sregs[SR_MSR], env->sregs[SR_ESR], env->sregs[SR_EAR], 1832 env->debug, env->imm, env->iflags, env->sregs[SR_FSR]); 1833 cpu_fprintf(f, "btaken=%d btarget=%x mode=%s(saved=%s) eip=%d ie=%d\n", 1834 env->btaken, env->btarget, 1835 (env->sregs[SR_MSR] & MSR_UM) ? "user" : "kernel", 1836 (env->sregs[SR_MSR] & MSR_UMS) ? "user" : "kernel", 1837 (env->sregs[SR_MSR] & MSR_EIP), 1838 (env->sregs[SR_MSR] & MSR_IE)); 1839 1840 for (i = 0; i < 32; i++) { 1841 cpu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]); 1842 if ((i + 1) % 4 == 0) 1843 cpu_fprintf(f, "\n"); 1844 } 1845 cpu_fprintf(f, "\n\n"); 1846 } 1847 1848 void mb_tcg_init(void) 1849 { 1850 int i; 1851 1852 env_debug = tcg_global_mem_new(cpu_env, 1853 offsetof(CPUMBState, debug), 1854 "debug0"); 1855 env_iflags = tcg_global_mem_new(cpu_env, 1856 offsetof(CPUMBState, iflags), 1857 "iflags"); 1858 env_imm = tcg_global_mem_new(cpu_env, 1859 offsetof(CPUMBState, imm), 1860 "imm"); 1861 env_btarget = tcg_global_mem_new(cpu_env, 1862 offsetof(CPUMBState, btarget), 1863 "btarget"); 1864 env_btaken = tcg_global_mem_new(cpu_env, 1865 offsetof(CPUMBState, btaken), 1866 "btaken"); 1867 env_res_addr = tcg_global_mem_new(cpu_env, 1868 offsetof(CPUMBState, res_addr), 1869 "res_addr"); 1870 env_res_val = tcg_global_mem_new(cpu_env, 1871 offsetof(CPUMBState, res_val), 1872 "res_val"); 1873 for (i = 0; i < ARRAY_SIZE(cpu_R); i++) { 1874 cpu_R[i] = tcg_global_mem_new(cpu_env, 1875 offsetof(CPUMBState, regs[i]), 1876 regnames[i]); 1877 } 1878 for (i = 0; i < ARRAY_SIZE(cpu_SR); i++) { 1879 cpu_SR[i] = tcg_global_mem_new(cpu_env, 1880 offsetof(CPUMBState, sregs[i]), 1881 special_regnames[i]); 1882 } 1883 } 1884 1885 void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb, 1886 target_ulong *data) 1887 { 1888 env->sregs[SR_PC] = data[0]; 1889 } 1890