/*
 * Xilinx MicroBlaze emulation for qemu: main translation routines.
 *
 * Copyright (c) 2009 Edgar E. Iglesias.
 * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "qemu/qemu-print.h"

#include "exec/log.h"

/* Extract the inclusive bit field [start, end] from src. */
#define EXTRACT_FIELD(src, start, end) \
    (((src) >> start) & ((1 << (end - start + 1)) - 1))

/* is_jmp field values */
#define DISAS_JUMP      DISAS_TARGET_0 /* only pc was modified dynamically */
#define DISAS_EXIT      DISAS_TARGET_1 /* all cpu state modified dynamically */

/* cpu state besides pc was modified dynamically; update pc to next */
#define DISAS_EXIT_NEXT DISAS_TARGET_2
/* cpu state besides pc was modified dynamically; update pc to btarget */
#define DISAS_EXIT_JUMP DISAS_TARGET_3

/* TCG globals holding the guest CPU state. */
static TCGv_i32 cpu_R[32];
static TCGv_i32 cpu_pc;
static TCGv_i32 cpu_msr;
static TCGv_i32 cpu_msr_c;
static TCGv_i32 cpu_imm;
static TCGv_i32 cpu_bvalue;
static TCGv_i32 cpu_btarget;
static TCGv_i32 cpu_iflags;
/* Load-exclusive reservation: address and loaded value (see lwx/swx). */
static TCGv cpu_res_addr;
static TCGv_i32 cpu_res_val;

#include "exec/gen-icount.h"

/* This is the state at translation time.  */
typedef struct DisasContext {
    DisasContextBase base;
    const MicroBlazeCPUConfig *cfg;

    /* TCG op of the current insn_start.  */
    TCGOp *insn_start;

    /* Lazily-created temp standing in for r0; see reg_for_read/write. */
    TCGv_i32 r0;
    bool r0_set;

    /* Decoder.  */
    uint32_t ext_imm;
    unsigned int tb_flags;
    unsigned int tb_flags_to_set;
    int mem_index;

    /* Condition under which to jump, including NEVER and ALWAYS. */
    TCGCond jmp_cond;

    /* Immediate branch-taken destination, or -1 for indirect. */
    uint32_t jmp_dest;
} DisasContext;

/* Combine a Type-B 16-bit immediate with a preceding IMM prefix, if any. */
static int typeb_imm(DisasContext *dc, int x)
{
    if (dc->tb_flags & IMM_FLAG) {
        return deposit32(dc->ext_imm, 0, 16, x);
    }
    return x;
}

/* Include the auto-generated decoder.  */
#include "decode-insns.c.inc"

static void t_sync_flags(DisasContext *dc)
{
    /* Synch the tb dependent flags between translator and runtime.  */
    if ((dc->tb_flags ^ dc->base.tb->flags) & IFLAGS_TB_MASK) {
        tcg_gen_movi_i32(cpu_iflags, dc->tb_flags & IFLAGS_TB_MASK);
    }
}

/* Raise exception 'index'; the TB ends (DISAS_NORETURN). */
static void gen_raise_exception(DisasContext *dc, uint32_t index)
{
    TCGv_i32 tmp = tcg_const_i32(index);

    gen_helper_raise_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    dc->base.is_jmp = DISAS_NORETURN;
}

/* As gen_raise_exception, but first sync iflags and pc to this insn. */
static void gen_raise_exception_sync(DisasContext *dc, uint32_t index)
{
    t_sync_flags(dc);
    tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
    gen_raise_exception(dc, index);
}

/* Raise a hardware exception, storing the error code into env->esr. */
static void gen_raise_hw_excp(DisasContext *dc, uint32_t esr_ec)
{
    TCGv_i32 tmp = tcg_const_i32(esr_ec);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUMBState, esr));
    tcg_temp_free_i32(tmp);

    gen_raise_exception_sync(dc, EXCP_HW_EXCP);
}

/* End the TB with a jump to 'dest', chaining TBs when possible. */
static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    if (translator_use_goto_tb(&dc->base, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_exit_tb(dc->base.tb, n);
    } else {
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_lookup_and_goto_ptr();
    }
    dc->base.is_jmp = DISAS_NORETURN;
}

/*
 * Returns true if the insn is an illegal operation.
 * If exceptions are enabled, an exception is raised.
 */
static bool trap_illegal(DisasContext *dc, bool cond)
{
    if (cond && (dc->tb_flags & MSR_EE)
        && dc->cfg->illegal_opcode_exception) {
        gen_raise_hw_excp(dc, ESR_EC_ILLEGAL_OP);
    }
    return cond;
}

/*
 * Returns true if the insn is illegal in userspace.
 * If exceptions are enabled, an exception is raised.
 */
static bool trap_userspace(DisasContext *dc, bool cond)
{
    bool cond_user = cond && dc->mem_index == MMU_USER_IDX;

    if (cond_user && (dc->tb_flags & MSR_EE)) {
        gen_raise_hw_excp(dc, ESR_EC_PRIVINSN);
    }
    return cond_user;
}

/*
 * Return true, and log an error, if the current insn is
 * within a delay slot.
 */
static bool invalid_delay_slot(DisasContext *dc, const char *insn_type)
{
    if (dc->tb_flags & D_FLAG) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Invalid insn in delay slot: %s at %08x\n",
                      insn_type, (uint32_t)dc->base.pc_next);
        return true;
    }
    return false;
}

/* Value to use when reading register 'reg'; r0 reads as constant zero. */
static TCGv_i32 reg_for_read(DisasContext *dc, int reg)
{
    if (likely(reg != 0)) {
        return cpu_R[reg];
    }
    if (!dc->r0_set) {
        if (dc->r0 == NULL) {
            dc->r0 = tcg_temp_new_i32();
        }
        tcg_gen_movi_i32(dc->r0, 0);
        dc->r0_set = true;
    }
    return dc->r0;
}

/* Value to use when writing register 'reg'; writes to r0 go to a scratch. */
static TCGv_i32 reg_for_write(DisasContext *dc, int reg)
{
    if (likely(reg != 0)) {
        return cpu_R[reg];
    }
    if (dc->r0 == NULL) {
        dc->r0 = tcg_temp_new_i32();
    }
    return dc->r0;
}

/* Common expander for Type-A (register-register) insns. */
static bool do_typea(DisasContext *dc, arg_typea *arg, bool side_effects,
                     void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 rd, ra, rb;

    /* Skip codegen entirely when the result is discarded harmlessly. */
    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    rb = reg_for_read(dc, arg->rb);
    fn(rd, ra, rb);
    return true;
}

/* As do_typea, for insns with a single source register. */
static bool do_typea0(DisasContext *dc, arg_typea0 *arg, bool side_effects,
                      void (*fn)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 rd, ra;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    fn(rd, ra);
    return true;
}

/* Common expander for Type-B insns taking the immediate as a C value. */
static bool do_typeb_imm(DisasContext *dc, arg_typeb *arg, bool side_effects,
                         void (*fni)(TCGv_i32, TCGv_i32, int32_t))
{
    TCGv_i32 rd, ra;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    fni(rd, ra, arg->imm);
    return true;
}

/* Common expander for Type-B insns, materializing the immediate in TCG. */
static bool do_typeb_val(DisasContext *dc, arg_typeb *arg, bool side_effects,
                         void (*fn)(TCGv_i32,
                                    TCGv_i32, TCGv_i32))
{
    TCGv_i32 rd, ra, imm;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    imm = tcg_const_i32(arg->imm);

    fn(rd, ra, imm);

    tcg_temp_free_i32(imm);
    return true;
}

/* Boilerplate wiring decodetree trans_* hooks to the expanders above. */
#define DO_TYPEA(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea *a) \
    { return do_typea(dc, a, SE, FN); }

#define DO_TYPEA_CFG(NAME, CFG, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea *a) \
    { return dc->cfg->CFG && do_typea(dc, a, SE, FN); }

#define DO_TYPEA0(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea0 *a) \
    { return do_typea0(dc, a, SE, FN); }

#define DO_TYPEA0_CFG(NAME, CFG, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea0 *a) \
    { return dc->cfg->CFG && do_typea0(dc, a, SE, FN); }

#define DO_TYPEBI(NAME, SE, FNI) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return do_typeb_imm(dc, a, SE, FNI); }

#define DO_TYPEBI_CFG(NAME, CFG, SE, FNI) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return dc->cfg->CFG && do_typeb_imm(dc, a, SE, FNI); }

#define DO_TYPEBV(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return do_typeb_val(dc, a, SE, FN); }

/* Adapt a cpu_env-taking helper to the plain (out, in...) expander shape. */
#define ENV_WRAPPER2(NAME, HELPER) \
    static void NAME(TCGv_i32 out, TCGv_i32 ina) \
    { HELPER(out, cpu_env, ina); }

#define ENV_WRAPPER3(NAME, HELPER) \
    static void NAME(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb) \
    { HELPER(out, cpu_env, ina, inb); }

/* No input carry, but output carry.  */
static void gen_add(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_const_i32(0);

    tcg_gen_add2_i32(out, cpu_msr_c, ina, zero, inb, zero);

    tcg_temp_free_i32(zero);
}

/* Input and output carry.  */
static void gen_addc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_const_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();

    /* Two-step 64-bit add: ina + C first, then + inb, carry in msr_c. */
    tcg_gen_add2_i32(tmp, cpu_msr_c, ina, zero, cpu_msr_c, zero);
    tcg_gen_add2_i32(out, cpu_msr_c, tmp, cpu_msr_c, inb, zero);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(zero);
}

/* Input carry, but no output carry.  */
static void gen_addkc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_add_i32(out, ina, inb);
    tcg_gen_add_i32(out, out, cpu_msr_c);
}

DO_TYPEA(add, true, gen_add)
DO_TYPEA(addc, true, gen_addc)
DO_TYPEA(addk, false, tcg_gen_add_i32)
DO_TYPEA(addkc, true, gen_addkc)

DO_TYPEBV(addi, true, gen_add)
DO_TYPEBV(addic, true, gen_addc)
DO_TYPEBI(addik, false, tcg_gen_addi_i32)
DO_TYPEBV(addikc, true, gen_addkc)

/* out = ina & ~imm */
static void gen_andni(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    tcg_gen_andi_i32(out, ina, ~imm);
}

DO_TYPEA(and, false, tcg_gen_and_i32)
DO_TYPEBI(andi, false, tcg_gen_andi_i32)
DO_TYPEA(andn, false, tcg_gen_andc_i32)
DO_TYPEBI(andni, false, gen_andni)

/* Barrel shift right arithmetic; shift count taken modulo 32. */
static void gen_bsra(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, inb, 31);
    tcg_gen_sar_i32(out, ina, tmp);
    tcg_temp_free_i32(tmp);
}

/* Barrel shift right logical; shift count taken modulo 32. */
static void gen_bsrl(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, inb, 31);
    tcg_gen_shr_i32(out, ina, tmp);
    tcg_temp_free_i32(tmp);
}

/* Barrel shift left; shift count taken modulo 32. */
static void gen_bsll(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, inb, 31);
    tcg_gen_shl_i32(out, ina, tmp);
    tcg_temp_free_i32(tmp);
}

/* Bit-field extract. */
static void gen_bsefi(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    /* Note that decodetree has extracted and reassembled imm_w/imm_s.
     */
    int imm_w = extract32(imm, 5, 5);
    int imm_s = extract32(imm, 0, 5);

    if (imm_w + imm_s > 32 || imm_w == 0) {
        /* These inputs have an undefined behavior.  */
        qemu_log_mask(LOG_GUEST_ERROR, "bsefi: Bad input w=%d s=%d\n",
                      imm_w, imm_s);
    } else {
        tcg_gen_extract_i32(out, ina, imm_s, imm_w);
    }
}

/* Bit-field insert: deposit 'ina' into out at [imm_s, imm_w]. */
static void gen_bsifi(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    /* Note that decodetree has extracted and reassembled imm_w/imm_s.  */
    int imm_w = extract32(imm, 5, 5);
    int imm_s = extract32(imm, 0, 5);
    int width = imm_w - imm_s + 1;

    if (imm_w < imm_s) {
        /* These inputs have an undefined behavior.  */
        qemu_log_mask(LOG_GUEST_ERROR, "bsifi: Bad input w=%d s=%d\n",
                      imm_w, imm_s);
    } else {
        tcg_gen_deposit_i32(out, out, ina, imm_s, width);
    }
}

DO_TYPEA_CFG(bsra, use_barrel, false, gen_bsra)
DO_TYPEA_CFG(bsrl, use_barrel, false, gen_bsrl)
DO_TYPEA_CFG(bsll, use_barrel, false, gen_bsll)

DO_TYPEBI_CFG(bsrai, use_barrel, false, tcg_gen_sari_i32)
DO_TYPEBI_CFG(bsrli, use_barrel, false, tcg_gen_shri_i32)
DO_TYPEBI_CFG(bslli, use_barrel, false, tcg_gen_shli_i32)

DO_TYPEBI_CFG(bsefi, use_barrel, false, gen_bsefi)
DO_TYPEBI_CFG(bsifi, use_barrel, false, gen_bsifi)

/* Count leading zeros (32 for a zero input). */
static void gen_clz(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_clzi_i32(out, ina, 32);
}

DO_TYPEA0_CFG(clz, use_pcmp_instr, false, gen_clz)

/* Signed compare: out = inb - ina, with bit 31 forced to (inb < ina). */
static void gen_cmp(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 lt = tcg_temp_new_i32();

    tcg_gen_setcond_i32(TCG_COND_LT, lt, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
    tcg_gen_deposit_i32(out, out, lt, 31, 1);
    tcg_temp_free_i32(lt);
}

/* Unsigned compare; as gen_cmp but with an unsigned ordering. */
static void gen_cmpu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 lt = tcg_temp_new_i32();

    tcg_gen_setcond_i32(TCG_COND_LTU, lt, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
    tcg_gen_deposit_i32(out, out, lt, 31, 1);
    tcg_temp_free_i32(lt);
}

DO_TYPEA(cmp, false, gen_cmp)
DO_TYPEA(cmpu, false, gen_cmpu)

ENV_WRAPPER3(gen_fadd, gen_helper_fadd)
ENV_WRAPPER3(gen_frsub, gen_helper_frsub)
ENV_WRAPPER3(gen_fmul, gen_helper_fmul)
ENV_WRAPPER3(gen_fdiv, gen_helper_fdiv)
ENV_WRAPPER3(gen_fcmp_un, gen_helper_fcmp_un)
ENV_WRAPPER3(gen_fcmp_lt, gen_helper_fcmp_lt)
ENV_WRAPPER3(gen_fcmp_eq, gen_helper_fcmp_eq)
ENV_WRAPPER3(gen_fcmp_le, gen_helper_fcmp_le)
ENV_WRAPPER3(gen_fcmp_gt, gen_helper_fcmp_gt)
ENV_WRAPPER3(gen_fcmp_ne, gen_helper_fcmp_ne)
ENV_WRAPPER3(gen_fcmp_ge, gen_helper_fcmp_ge)

DO_TYPEA_CFG(fadd, use_fpu, true, gen_fadd)
DO_TYPEA_CFG(frsub, use_fpu, true, gen_frsub)
DO_TYPEA_CFG(fmul, use_fpu, true, gen_fmul)
DO_TYPEA_CFG(fdiv, use_fpu, true, gen_fdiv)
DO_TYPEA_CFG(fcmp_un, use_fpu, true, gen_fcmp_un)
DO_TYPEA_CFG(fcmp_lt, use_fpu, true, gen_fcmp_lt)
DO_TYPEA_CFG(fcmp_eq, use_fpu, true, gen_fcmp_eq)
DO_TYPEA_CFG(fcmp_le, use_fpu, true, gen_fcmp_le)
DO_TYPEA_CFG(fcmp_gt, use_fpu, true, gen_fcmp_gt)
DO_TYPEA_CFG(fcmp_ne, use_fpu, true, gen_fcmp_ne)
DO_TYPEA_CFG(fcmp_ge, use_fpu, true, gen_fcmp_ge)

ENV_WRAPPER2(gen_flt, gen_helper_flt)
ENV_WRAPPER2(gen_fint, gen_helper_fint)
ENV_WRAPPER2(gen_fsqrt, gen_helper_fsqrt)

DO_TYPEA0_CFG(flt, use_fpu >= 2, true, gen_flt)
DO_TYPEA0_CFG(fint, use_fpu >= 2, true, gen_fint)
DO_TYPEA0_CFG(fsqrt, use_fpu >= 2, true, gen_fsqrt)

/* Does not use ENV_WRAPPER3, because arguments are swapped as well.
 */
static void gen_idiv(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    gen_helper_divs(out, cpu_env, inb, ina);
}

static void gen_idivu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    gen_helper_divu(out, cpu_env, inb, ina);
}

DO_TYPEA_CFG(idiv, use_div, true, gen_idiv)
DO_TYPEA_CFG(idivu, use_div, true, gen_idivu)

/* IMM prefix: latch the high 16 bits for the following Type-B insn. */
static bool trans_imm(DisasContext *dc, arg_imm *arg)
{
    if (invalid_delay_slot(dc, "imm")) {
        return true;
    }
    dc->ext_imm = arg->imm << 16;
    tcg_gen_movi_i32(cpu_imm, dc->ext_imm);
    dc->tb_flags_to_set = IMM_FLAG;
    return true;
}

/* High half of a signed 32x32 multiply. */
static void gen_mulh(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_muls2_i32(tmp, out, ina, inb);
    tcg_temp_free_i32(tmp);
}

/* High half of an unsigned 32x32 multiply. */
static void gen_mulhu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_mulu2_i32(tmp, out, ina, inb);
    tcg_temp_free_i32(tmp);
}

/* High half of a signed x unsigned 32x32 multiply. */
static void gen_mulhsu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_mulsu2_i32(tmp, out, ina, inb);
    tcg_temp_free_i32(tmp);
}

DO_TYPEA_CFG(mul, use_hw_mul, false, tcg_gen_mul_i32)
DO_TYPEA_CFG(mulh, use_hw_mul >= 2, false, gen_mulh)
DO_TYPEA_CFG(mulhu, use_hw_mul >= 2, false, gen_mulhu)
DO_TYPEA_CFG(mulhsu, use_hw_mul >= 2, false, gen_mulhsu)
DO_TYPEBI_CFG(muli, use_hw_mul, false, tcg_gen_muli_i32)

DO_TYPEA(or, false, tcg_gen_or_i32)
DO_TYPEBI(ori, false, tcg_gen_ori_i32)

static void gen_pcmpeq(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_EQ, out, ina, inb);
}

static void gen_pcmpne(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_NE, out, ina, inb);
}

DO_TYPEA_CFG(pcmpbf, use_pcmp_instr, false, gen_helper_pcmpbf)
DO_TYPEA_CFG(pcmpeq, use_pcmp_instr, false, gen_pcmpeq)
DO_TYPEA_CFG(pcmpne, use_pcmp_instr, false, gen_pcmpne)

/* No input carry, but output carry.  */
static void gen_rsub(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_msr_c, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
}

/* Input and output carry.  */
static void gen_rsubc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_const_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();

    /* out = inb + ~ina + C, with the carry chain threaded through msr_c. */
    tcg_gen_not_i32(tmp, ina);
    tcg_gen_add2_i32(tmp, cpu_msr_c, tmp, zero, cpu_msr_c, zero);
    tcg_gen_add2_i32(out, cpu_msr_c, tmp, cpu_msr_c, inb, zero);

    tcg_temp_free_i32(zero);
    tcg_temp_free_i32(tmp);
}

/* No input or output carry.  */
static void gen_rsubk(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_sub_i32(out, inb, ina);
}

/* Input carry, no output carry.  */
static void gen_rsubkc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 nota = tcg_temp_new_i32();

    tcg_gen_not_i32(nota, ina);
    tcg_gen_add_i32(out, inb, nota);
    tcg_gen_add_i32(out, out, cpu_msr_c);

    tcg_temp_free_i32(nota);
}

DO_TYPEA(rsub, true, gen_rsub)
DO_TYPEA(rsubc, true, gen_rsubc)
DO_TYPEA(rsubk, false, gen_rsubk)
DO_TYPEA(rsubkc, true, gen_rsubkc)

DO_TYPEBV(rsubi, true, gen_rsub)
DO_TYPEBV(rsubic, true, gen_rsubc)
DO_TYPEBV(rsubik, false, gen_rsubk)
DO_TYPEBV(rsubikc, true, gen_rsubkc)

DO_TYPEA0(sext8, false, tcg_gen_ext8s_i32)
DO_TYPEA0(sext16, false, tcg_gen_ext16s_i32)

/* Arithmetic shift right by one; the shifted-out bit lands in MSR[C]. */
static void gen_sra(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_sari_i32(out, ina, 1);
}

/* Shift right by one through MSR[C]: old carry enters at bit 31. */
static void gen_src(TCGv_i32 out, TCGv_i32 ina)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_mov_i32(tmp, cpu_msr_c);
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_extract2_i32(out, ina, tmp, 1);
    tcg_temp_free_i32(tmp);
}

/* Logical shift right by one; the shifted-out bit lands in MSR[C]. */
static void gen_srl(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_shri_i32(out, ina, 1);
}

DO_TYPEA0(sra, false, gen_sra)
DO_TYPEA0(src, false, gen_src)
DO_TYPEA0(srl, false, gen_srl)

/* Swap the two 16-bit halfwords. */
static void gen_swaph(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_rotri_i32(out, ina, 16);
}

DO_TYPEA0(swapb, false, tcg_gen_bswap32_i32)
DO_TYPEA0(swaph, false, gen_swaph)

static bool trans_wdic(DisasContext *dc, arg_wdic *a)
{
    /* Cache operations are nops: only check for supervisor mode.  */
    trap_userspace(dc, true);
    return true;
}

DO_TYPEA(xor, false, tcg_gen_xor_i32)
DO_TYPEBI(xori, false, tcg_gen_xori_i32)

/* Compute ra + rb as a load/store address, with stack-protection check. */
static TCGv compute_ldst_addr_typea(DisasContext *dc, int ra, int rb)
{
    TCGv ret = tcg_temp_new();

    /* If any of the regs is r0, set t to the value of the other reg.  */
    if (ra && rb) {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_add_i32(tmp, cpu_R[ra], cpu_R[rb]);
        tcg_gen_extu_i32_tl(ret, tmp);
        tcg_temp_free_i32(tmp);
    } else if (ra) {
        tcg_gen_extu_i32_tl(ret, cpu_R[ra]);
    } else if (rb) {
        tcg_gen_extu_i32_tl(ret, cpu_R[rb]);
    } else {
        tcg_gen_movi_tl(ret, 0);
    }

    /* Accesses via r1 (ABI stack pointer) are range-checked if configured. */
    if ((ra == 1 || rb == 1) && dc->cfg->stackprot) {
        gen_helper_stackprot(cpu_env, ret);
    }
    return ret;
}

/* Compute ra + imm as a load/store address, with stack-protection check. */
static TCGv compute_ldst_addr_typeb(DisasContext *dc, int ra, int imm)
{
    TCGv ret = tcg_temp_new();

    /* If any of the regs is r0, set t to the value of the other reg.  */
    if (ra) {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_addi_i32(tmp, cpu_R[ra], imm);
        tcg_gen_extu_i32_tl(ret, tmp);
        tcg_temp_free_i32(tmp);
    } else {
        tcg_gen_movi_tl(ret, (uint32_t)imm);
    }

    if (ra == 1 && dc->cfg->stackprot) {
        gen_helper_stackprot(cpu_env, ret);
    }
    return ret;
}

#ifndef CONFIG_USER_ONLY
/* Compute the extended address rb:ra for the *ea insns (up to 64 bits). */
static TCGv compute_ldst_addr_ea(DisasContext *dc, int ra, int rb)
{
    int addr_size = dc->cfg->addr_size;
    TCGv ret = tcg_temp_new();

    if (addr_size == 32 || ra == 0) {
        if (rb) {
            tcg_gen_extu_i32_tl(ret, cpu_R[rb]);
        } else {
            tcg_gen_movi_tl(ret, 0);
        }
    } else {
        if (rb) {
            /* ra supplies the high 32 bits, rb the low 32 bits. */
            tcg_gen_concat_i32_i64(ret, cpu_R[rb], cpu_R[ra]);
        } else {
            tcg_gen_extu_i32_tl(ret, cpu_R[ra]);
            tcg_gen_shli_tl(ret, ret, 32);
        }
        if (addr_size < 64) {
            /* Mask off out of range bits.  */
            tcg_gen_andi_i64(ret, ret, MAKE_64BIT_MASK(0, addr_size));
        }
    }
    return ret;
}
#endif

/*
 * Record the details of an unaligned sub-word access in the iflags of
 * the current insn_start, for use by the unaligned-access exception path.
 */
static void record_unaligned_ess(DisasContext *dc, int rd,
                                 MemOp size, bool store)
{
    uint32_t iflags = tcg_get_insn_start_param(dc->insn_start, 1);

    iflags |= ESR_ESS_FLAG;
    iflags |= rd << 5;
    iflags |= store * ESR_S;
    iflags |= (size == MO_32) * ESR_W;

    tcg_set_insn_start_param(dc->insn_start, 1, iflags);
}

/* Common code for all load insns; consumes (frees) 'addr'. */
static bool do_load(DisasContext *dc, int rd, TCGv addr, MemOp mop,
                    int mem_index, bool rev)
{
    MemOp size = mop & MO_SIZE;

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev) {
        if (size > MO_8) {
            mop ^= MO_BSWAP;
        }
        if (size < MO_32) {
            tcg_gen_xori_tl(addr, addr, 3 - size);
        }
    }

    /* Enable alignment checking (and ESR reporting) when configured. */
    if (size > MO_8 &&
        (dc->tb_flags & MSR_EE) &&
        dc->cfg->unaligned_exceptions) {
        record_unaligned_ess(dc, rd, size, false);
        mop |= MO_ALIGN;
    }

    tcg_gen_qemu_ld_i32(reg_for_write(dc, rd), addr, mem_index, mop);

    tcg_temp_free(addr);
    return true;
}

static bool trans_lbu(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_lbur(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, true);
}

static bool trans_lbuea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_lbui(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_lhu(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}

static bool trans_lhur(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, true);
}

static bool trans_lhuea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUW, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_lhui(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}

static bool trans_lw(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}

static bool trans_lwr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, true);
}

static bool trans_lwea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUL, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_lwi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}

/* Load-word-exclusive: record the reservation in cpu_res_addr/val. */
static bool trans_lwx(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);

    /* lwx does not throw unaligned access errors, so force alignment */
    tcg_gen_andi_tl(addr, addr, ~3);

    tcg_gen_qemu_ld_i32(cpu_res_val, addr, dc->mem_index, MO_TEUL);
    tcg_gen_mov_tl(cpu_res_addr, addr);
    tcg_temp_free(addr);

    if (arg->rd) {
        tcg_gen_mov_i32(cpu_R[arg->rd], cpu_res_val);
    }

    /* No support for AXI exclusive so always clear C */
    tcg_gen_movi_i32(cpu_msr_c, 0);
    return true;
}

/* Common code for all store insns; consumes (frees) 'addr'. */
static bool do_store(DisasContext *dc, int rd, TCGv addr, MemOp mop,
                     int
                         mem_index, bool rev)
{
    MemOp size = mop & MO_SIZE;

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev) {
        if (size > MO_8) {
            mop ^= MO_BSWAP;
        }
        if (size < MO_32) {
            tcg_gen_xori_tl(addr, addr, 3 - size);
        }
    }

    /* Enable alignment checking (and ESR reporting) when configured. */
    if (size > MO_8 &&
        (dc->tb_flags & MSR_EE) &&
        dc->cfg->unaligned_exceptions) {
        record_unaligned_ess(dc, rd, size, true);
        mop |= MO_ALIGN;
    }

    tcg_gen_qemu_st_i32(reg_for_read(dc, rd), addr, mem_index, mop);

    tcg_temp_free(addr);
    return true;
}

static bool trans_sb(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_sbr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, true);
}

static bool trans_sbea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_sbi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_sh(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}

static bool trans_shr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, true);
}

static bool trans_shea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUW, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_shi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}

static bool trans_sw(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}

static bool trans_swr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, true);
}

static bool trans_swea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUL, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_swi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}

/* Store-word-exclusive: succeeds only if the lwx reservation still holds. */
static bool trans_swx(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    TCGLabel *swx_done = gen_new_label();
    TCGLabel *swx_fail = gen_new_label();
    TCGv_i32 tval;

    /* swx does not throw unaligned access errors, so force alignment */
    tcg_gen_andi_tl(addr, addr, ~3);

    /*
     * Compare the address vs the one we used
during lwx. 1022 * On mismatch, the operation fails. On match, addr dies at the 1023 * branch, but we know we can use the equal version in the global. 1024 * In either case, addr is no longer needed. 1025 */ 1026 tcg_gen_brcond_tl(TCG_COND_NE, cpu_res_addr, addr, swx_fail); 1027 tcg_temp_free(addr); 1028 1029 /* 1030 * Compare the value loaded during lwx with current contents of 1031 * the reserved location. 1032 */ 1033 tval = tcg_temp_new_i32(); 1034 1035 tcg_gen_atomic_cmpxchg_i32(tval, cpu_res_addr, cpu_res_val, 1036 reg_for_write(dc, arg->rd), 1037 dc->mem_index, MO_TEUL); 1038 1039 tcg_gen_brcond_i32(TCG_COND_NE, cpu_res_val, tval, swx_fail); 1040 tcg_temp_free_i32(tval); 1041 1042 /* Success */ 1043 tcg_gen_movi_i32(cpu_msr_c, 0); 1044 tcg_gen_br(swx_done); 1045 1046 /* Failure */ 1047 gen_set_label(swx_fail); 1048 tcg_gen_movi_i32(cpu_msr_c, 1); 1049 1050 gen_set_label(swx_done); 1051 1052 /* 1053 * Prevent the saved address from working again without another ldx. 1054 * Akin to the pseudocode setting reservation = 0. 1055 */ 1056 tcg_gen_movi_tl(cpu_res_addr, -1); 1057 return true; 1058 } 1059 1060 static void setup_dslot(DisasContext *dc, bool type_b) 1061 { 1062 dc->tb_flags_to_set |= D_FLAG; 1063 if (type_b && (dc->tb_flags & IMM_FLAG)) { 1064 dc->tb_flags_to_set |= BIMM_FLAG; 1065 } 1066 } 1067 1068 static bool do_branch(DisasContext *dc, int dest_rb, int dest_imm, 1069 bool delay, bool abs, int link) 1070 { 1071 uint32_t add_pc; 1072 1073 if (invalid_delay_slot(dc, "branch")) { 1074 return true; 1075 } 1076 if (delay) { 1077 setup_dslot(dc, dest_rb < 0); 1078 } 1079 1080 if (link) { 1081 tcg_gen_movi_i32(cpu_R[link], dc->base.pc_next); 1082 } 1083 1084 /* Store the branch taken destination into btarget. */ 1085 add_pc = abs ? 
0 : dc->base.pc_next; 1086 if (dest_rb > 0) { 1087 dc->jmp_dest = -1; 1088 tcg_gen_addi_i32(cpu_btarget, cpu_R[dest_rb], add_pc); 1089 } else { 1090 dc->jmp_dest = add_pc + dest_imm; 1091 tcg_gen_movi_i32(cpu_btarget, dc->jmp_dest); 1092 } 1093 dc->jmp_cond = TCG_COND_ALWAYS; 1094 return true; 1095 } 1096 1097 #define DO_BR(NAME, NAMEI, DELAY, ABS, LINK) \ 1098 static bool trans_##NAME(DisasContext *dc, arg_typea_br *arg) \ 1099 { return do_branch(dc, arg->rb, 0, DELAY, ABS, LINK ? arg->rd : 0); } \ 1100 static bool trans_##NAMEI(DisasContext *dc, arg_typeb_br *arg) \ 1101 { return do_branch(dc, -1, arg->imm, DELAY, ABS, LINK ? arg->rd : 0); } 1102 1103 DO_BR(br, bri, false, false, false) 1104 DO_BR(bra, brai, false, true, false) 1105 DO_BR(brd, brid, true, false, false) 1106 DO_BR(brad, braid, true, true, false) 1107 DO_BR(brld, brlid, true, false, true) 1108 DO_BR(brald, bralid, true, true, true) 1109 1110 static bool do_bcc(DisasContext *dc, int dest_rb, int dest_imm, 1111 TCGCond cond, int ra, bool delay) 1112 { 1113 TCGv_i32 zero, next; 1114 1115 if (invalid_delay_slot(dc, "bcc")) { 1116 return true; 1117 } 1118 if (delay) { 1119 setup_dslot(dc, dest_rb < 0); 1120 } 1121 1122 dc->jmp_cond = cond; 1123 1124 /* Cache the condition register in cpu_bvalue across any delay slot. */ 1125 tcg_gen_mov_i32(cpu_bvalue, reg_for_read(dc, ra)); 1126 1127 /* Store the branch taken destination into btarget. */ 1128 if (dest_rb > 0) { 1129 dc->jmp_dest = -1; 1130 tcg_gen_addi_i32(cpu_btarget, cpu_R[dest_rb], dc->base.pc_next); 1131 } else { 1132 dc->jmp_dest = dc->base.pc_next + dest_imm; 1133 tcg_gen_movi_i32(cpu_btarget, dc->jmp_dest); 1134 } 1135 1136 /* Compute the final destination into btarget. 
     */
    zero = tcg_const_i32(0);
    /* Fall-through address: past the branch and any delay slot. */
    next = tcg_const_i32(dc->base.pc_next + (delay + 1) * 4);
    tcg_gen_movcond_i32(dc->jmp_cond, cpu_btarget,
                        reg_for_read(dc, ra), zero,
                        cpu_btarget, next);
    tcg_temp_free_i32(zero);
    tcg_temp_free_i32(next);

    return true;
}

#define DO_BCC(NAME, COND)                                              \
    static bool trans_##NAME(DisasContext *dc, arg_typea_bc *arg)       \
    { return do_bcc(dc, arg->rb, 0, COND, arg->ra, false); }            \
    static bool trans_##NAME##d(DisasContext *dc, arg_typea_bc *arg)    \
    { return do_bcc(dc, arg->rb, 0, COND, arg->ra, true); }             \
    static bool trans_##NAME##i(DisasContext *dc, arg_typeb_bc *arg)    \
    { return do_bcc(dc, -1, arg->imm, COND, arg->ra, false); }          \
    static bool trans_##NAME##id(DisasContext *dc, arg_typeb_bc *arg)   \
    { return do_bcc(dc, -1, arg->imm, COND, arg->ra, true); }

DO_BCC(beq, TCG_COND_EQ)
DO_BCC(bge, TCG_COND_GE)
DO_BCC(bgt, TCG_COND_GT)
DO_BCC(ble, TCG_COND_LE)
DO_BCC(blt, TCG_COND_LT)
DO_BCC(bne, TCG_COND_NE)

/* brk: break to the address in rb; privileged. */
static bool trans_brk(DisasContext *dc, arg_typea_br *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
    if (invalid_delay_slot(dc, "brk")) {
        return true;
    }

    tcg_gen_mov_i32(cpu_pc, reg_for_read(dc, arg->rb));
    if (arg->rd) {
        tcg_gen_movi_i32(cpu_R[arg->rd], dc->base.pc_next);
    }
    /* Enter break-in-progress mode and drop any lwx reservation. */
    tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_BIP);
    tcg_gen_movi_tl(cpu_res_addr, -1);

    dc->base.is_jmp = DISAS_EXIT;
    return true;
}

/* brki: break to the fixed vector imm. */
static bool trans_brki(DisasContext *dc, arg_typeb_br *arg)
{
    uint32_t imm = arg->imm;

    /* Userspace may only use the 0x8 (syscall) and 0x18 (debug) vectors. */
    if (trap_userspace(dc, imm != 0x8 && imm != 0x18)) {
        return true;
    }
    if (invalid_delay_slot(dc, "brki")) {
        return true;
    }

    tcg_gen_movi_i32(cpu_pc, imm);
    if (arg->rd) {
        tcg_gen_movi_i32(cpu_R[arg->rd], dc->base.pc_next);
    }
    /* Drop any lwx reservation. */
    tcg_gen_movi_tl(cpu_res_addr, -1);

#ifdef CONFIG_USER_ONLY
    /*
     * User-only: deliver the trap to the main loop directly.
     * gen_raise_exception_sync rewrites cpu_pc to pc_next for the
     * main-loop handler.
     */
    switch (imm) {
    case 0x8:  /* syscall trap */
        gen_raise_exception_sync(dc, EXCP_SYSCALL);
        break;
    case 0x18: /* debug trap */
        gen_raise_exception_sync(dc, EXCP_DEBUG);
        break;
    default:   /* eliminated with trap_userspace check */
        g_assert_not_reached();
    }
#else
    uint32_t msr_to_set = 0;

    if (imm != 0x18) {
        msr_to_set |= MSR_BIP;
    }
    if (imm == 0x8 || imm == 0x18) {
        /* MSR_UM and MSR_VM are in tb_flags, so we know their value. */
        msr_to_set |= (dc->tb_flags & (MSR_UM | MSR_VM)) << 1;
        tcg_gen_andi_i32(cpu_msr, cpu_msr,
                         ~(MSR_VMS | MSR_UMS | MSR_VM | MSR_UM));
    }
    tcg_gen_ori_i32(cpu_msr, cpu_msr, msr_to_set);
    dc->base.is_jmp = DISAS_EXIT;
#endif

    return true;
}

/* mbar: memory barrier; with imm bit 4 set, also enter sleep. */
static bool trans_mbar(DisasContext *dc, arg_mbar *arg)
{
    int mbar_imm = arg->imm;

    /* Note that mbar is a specialized branch instruction. */
    if (invalid_delay_slot(dc, "mbar")) {
        return true;
    }

    /* Data access memory barrier. */
    if ((mbar_imm & 2) == 0) {
        tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
    }

    /* Sleep. */
    if (mbar_imm & 16) {
        TCGv_i32 tmp_1;

        if (trap_userspace(dc, true)) {
            /* Sleep is a privileged instruction. */
            return true;
        }

        t_sync_flags(dc);

        /* Mark the cpu halted; env sits inside MicroBlazeCPU/CPUState. */
        tmp_1 = tcg_const_i32(1);
        tcg_gen_st_i32(tmp_1, cpu_env,
                       -offsetof(MicroBlazeCPU, env)
                       +offsetof(CPUState, halted));
        tcg_temp_free_i32(tmp_1);

        /* Resume after the mbar once un-halted. */
        tcg_gen_movi_i32(cpu_pc, dc->base.pc_next + 4);

        gen_raise_exception(dc, EXCP_HLT);
    }

    /*
     * If !(mbar_imm & 1), this is an instruction access memory barrier
     * and we need to end the TB so that we recognize self-modified
     * code immediately.
1272 * 1273 * However, there are some data mbars that need the TB break 1274 * (and return to main loop) to recognize interrupts right away. 1275 * E.g. recognizing a change to an interrupt controller register. 1276 * 1277 * Therefore, choose to end the TB always. 1278 */ 1279 dc->base.is_jmp = DISAS_EXIT_NEXT; 1280 return true; 1281 } 1282 1283 static bool do_rts(DisasContext *dc, arg_typeb_bc *arg, int to_set) 1284 { 1285 if (trap_userspace(dc, to_set)) { 1286 return true; 1287 } 1288 if (invalid_delay_slot(dc, "rts")) { 1289 return true; 1290 } 1291 1292 dc->tb_flags_to_set |= to_set; 1293 setup_dslot(dc, true); 1294 1295 dc->jmp_cond = TCG_COND_ALWAYS; 1296 dc->jmp_dest = -1; 1297 tcg_gen_addi_i32(cpu_btarget, reg_for_read(dc, arg->ra), arg->imm); 1298 return true; 1299 } 1300 1301 #define DO_RTS(NAME, IFLAG) \ 1302 static bool trans_##NAME(DisasContext *dc, arg_typeb_bc *arg) \ 1303 { return do_rts(dc, arg, IFLAG); } 1304 1305 DO_RTS(rtbd, DRTB_FLAG) 1306 DO_RTS(rtid, DRTI_FLAG) 1307 DO_RTS(rted, DRTE_FLAG) 1308 DO_RTS(rtsd, 0) 1309 1310 static bool trans_zero(DisasContext *dc, arg_zero *arg) 1311 { 1312 /* If opcode_0_illegal, trap. */ 1313 if (dc->cfg->opcode_0_illegal) { 1314 trap_illegal(dc, true); 1315 return true; 1316 } 1317 /* 1318 * Otherwise, this is "add r0, r0, r0". 1319 * Continue to trans_add so that MSR[C] gets cleared. 1320 */ 1321 return false; 1322 } 1323 1324 static void msr_read(DisasContext *dc, TCGv_i32 d) 1325 { 1326 TCGv_i32 t; 1327 1328 /* Replicate the cpu_msr_c boolean into the proper bit and the copy. 
*/ 1329 t = tcg_temp_new_i32(); 1330 tcg_gen_muli_i32(t, cpu_msr_c, MSR_C | MSR_CC); 1331 tcg_gen_or_i32(d, cpu_msr, t); 1332 tcg_temp_free_i32(t); 1333 } 1334 1335 static bool do_msrclrset(DisasContext *dc, arg_type_msr *arg, bool set) 1336 { 1337 uint32_t imm = arg->imm; 1338 1339 if (trap_userspace(dc, imm != MSR_C)) { 1340 return true; 1341 } 1342 1343 if (arg->rd) { 1344 msr_read(dc, cpu_R[arg->rd]); 1345 } 1346 1347 /* 1348 * Handle the carry bit separately. 1349 * This is the only bit that userspace can modify. 1350 */ 1351 if (imm & MSR_C) { 1352 tcg_gen_movi_i32(cpu_msr_c, set); 1353 } 1354 1355 /* 1356 * MSR_C and MSR_CC set above. 1357 * MSR_PVR is not writable, and is always clear. 1358 */ 1359 imm &= ~(MSR_C | MSR_CC | MSR_PVR); 1360 1361 if (imm != 0) { 1362 if (set) { 1363 tcg_gen_ori_i32(cpu_msr, cpu_msr, imm); 1364 } else { 1365 tcg_gen_andi_i32(cpu_msr, cpu_msr, ~imm); 1366 } 1367 dc->base.is_jmp = DISAS_EXIT_NEXT; 1368 } 1369 return true; 1370 } 1371 1372 static bool trans_msrclr(DisasContext *dc, arg_type_msr *arg) 1373 { 1374 return do_msrclrset(dc, arg, false); 1375 } 1376 1377 static bool trans_msrset(DisasContext *dc, arg_type_msr *arg) 1378 { 1379 return do_msrclrset(dc, arg, true); 1380 } 1381 1382 static bool trans_mts(DisasContext *dc, arg_mts *arg) 1383 { 1384 if (trap_userspace(dc, true)) { 1385 return true; 1386 } 1387 1388 #ifdef CONFIG_USER_ONLY 1389 g_assert_not_reached(); 1390 #else 1391 if (arg->e && arg->rs != 0x1003) { 1392 qemu_log_mask(LOG_GUEST_ERROR, 1393 "Invalid extended mts reg 0x%x\n", arg->rs); 1394 return true; 1395 } 1396 1397 TCGv_i32 src = reg_for_read(dc, arg->ra); 1398 switch (arg->rs) { 1399 case SR_MSR: 1400 /* Install MSR_C. */ 1401 tcg_gen_extract_i32(cpu_msr_c, src, 2, 1); 1402 /* 1403 * Clear MSR_C and MSR_CC; 1404 * MSR_PVR is not writable, and is always clear. 
1405 */ 1406 tcg_gen_andi_i32(cpu_msr, src, ~(MSR_C | MSR_CC | MSR_PVR)); 1407 break; 1408 case SR_FSR: 1409 tcg_gen_st_i32(src, cpu_env, offsetof(CPUMBState, fsr)); 1410 break; 1411 case 0x800: 1412 tcg_gen_st_i32(src, cpu_env, offsetof(CPUMBState, slr)); 1413 break; 1414 case 0x802: 1415 tcg_gen_st_i32(src, cpu_env, offsetof(CPUMBState, shr)); 1416 break; 1417 1418 case 0x1000: /* PID */ 1419 case 0x1001: /* ZPR */ 1420 case 0x1002: /* TLBX */ 1421 case 0x1003: /* TLBLO */ 1422 case 0x1004: /* TLBHI */ 1423 case 0x1005: /* TLBSX */ 1424 { 1425 TCGv_i32 tmp_ext = tcg_const_i32(arg->e); 1426 TCGv_i32 tmp_reg = tcg_const_i32(arg->rs & 7); 1427 1428 gen_helper_mmu_write(cpu_env, tmp_ext, tmp_reg, src); 1429 tcg_temp_free_i32(tmp_reg); 1430 tcg_temp_free_i32(tmp_ext); 1431 } 1432 break; 1433 1434 default: 1435 qemu_log_mask(LOG_GUEST_ERROR, "Invalid mts reg 0x%x\n", arg->rs); 1436 return true; 1437 } 1438 dc->base.is_jmp = DISAS_EXIT_NEXT; 1439 return true; 1440 #endif 1441 } 1442 1443 static bool trans_mfs(DisasContext *dc, arg_mfs *arg) 1444 { 1445 TCGv_i32 dest = reg_for_write(dc, arg->rd); 1446 1447 if (arg->e) { 1448 switch (arg->rs) { 1449 case SR_EAR: 1450 { 1451 TCGv_i64 t64 = tcg_temp_new_i64(); 1452 tcg_gen_ld_i64(t64, cpu_env, offsetof(CPUMBState, ear)); 1453 tcg_gen_extrh_i64_i32(dest, t64); 1454 tcg_temp_free_i64(t64); 1455 } 1456 return true; 1457 #ifndef CONFIG_USER_ONLY 1458 case 0x1003: /* TLBLO */ 1459 /* Handled below. */ 1460 break; 1461 #endif 1462 case 0x2006 ... 0x2009: 1463 /* High bits of PVR6-9 not implemented. 
*/ 1464 tcg_gen_movi_i32(dest, 0); 1465 return true; 1466 default: 1467 qemu_log_mask(LOG_GUEST_ERROR, 1468 "Invalid extended mfs reg 0x%x\n", arg->rs); 1469 return true; 1470 } 1471 } 1472 1473 switch (arg->rs) { 1474 case SR_PC: 1475 tcg_gen_movi_i32(dest, dc->base.pc_next); 1476 break; 1477 case SR_MSR: 1478 msr_read(dc, dest); 1479 break; 1480 case SR_EAR: 1481 { 1482 TCGv_i64 t64 = tcg_temp_new_i64(); 1483 tcg_gen_ld_i64(t64, cpu_env, offsetof(CPUMBState, ear)); 1484 tcg_gen_extrl_i64_i32(dest, t64); 1485 tcg_temp_free_i64(t64); 1486 } 1487 break; 1488 case SR_ESR: 1489 tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, esr)); 1490 break; 1491 case SR_FSR: 1492 tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, fsr)); 1493 break; 1494 case SR_BTR: 1495 tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, btr)); 1496 break; 1497 case SR_EDR: 1498 tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, edr)); 1499 break; 1500 case 0x800: 1501 tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, slr)); 1502 break; 1503 case 0x802: 1504 tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, shr)); 1505 break; 1506 1507 #ifndef CONFIG_USER_ONLY 1508 case 0x1000: /* PID */ 1509 case 0x1001: /* ZPR */ 1510 case 0x1002: /* TLBX */ 1511 case 0x1003: /* TLBLO */ 1512 case 0x1004: /* TLBHI */ 1513 case 0x1005: /* TLBSX */ 1514 { 1515 TCGv_i32 tmp_ext = tcg_const_i32(arg->e); 1516 TCGv_i32 tmp_reg = tcg_const_i32(arg->rs & 7); 1517 1518 gen_helper_mmu_read(dest, cpu_env, tmp_ext, tmp_reg); 1519 tcg_temp_free_i32(tmp_reg); 1520 tcg_temp_free_i32(tmp_ext); 1521 } 1522 break; 1523 #endif 1524 1525 case 0x2000 ... 
0x200c: 1526 tcg_gen_ld_i32(dest, cpu_env, 1527 offsetof(MicroBlazeCPU, cfg.pvr_regs[arg->rs - 0x2000]) 1528 - offsetof(MicroBlazeCPU, env)); 1529 break; 1530 default: 1531 qemu_log_mask(LOG_GUEST_ERROR, "Invalid mfs reg 0x%x\n", arg->rs); 1532 break; 1533 } 1534 return true; 1535 } 1536 1537 static void do_rti(DisasContext *dc) 1538 { 1539 TCGv_i32 tmp = tcg_temp_new_i32(); 1540 1541 tcg_gen_shri_i32(tmp, cpu_msr, 1); 1542 tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_IE); 1543 tcg_gen_andi_i32(tmp, tmp, MSR_VM | MSR_UM); 1544 tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM)); 1545 tcg_gen_or_i32(cpu_msr, cpu_msr, tmp); 1546 1547 tcg_temp_free_i32(tmp); 1548 } 1549 1550 static void do_rtb(DisasContext *dc) 1551 { 1552 TCGv_i32 tmp = tcg_temp_new_i32(); 1553 1554 tcg_gen_shri_i32(tmp, cpu_msr, 1); 1555 tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM | MSR_BIP)); 1556 tcg_gen_andi_i32(tmp, tmp, (MSR_VM | MSR_UM)); 1557 tcg_gen_or_i32(cpu_msr, cpu_msr, tmp); 1558 1559 tcg_temp_free_i32(tmp); 1560 } 1561 1562 static void do_rte(DisasContext *dc) 1563 { 1564 TCGv_i32 tmp = tcg_temp_new_i32(); 1565 1566 tcg_gen_shri_i32(tmp, cpu_msr, 1); 1567 tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_EE); 1568 tcg_gen_andi_i32(tmp, tmp, (MSR_VM | MSR_UM)); 1569 tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM | MSR_EIP)); 1570 tcg_gen_or_i32(cpu_msr, cpu_msr, tmp); 1571 1572 tcg_temp_free_i32(tmp); 1573 } 1574 1575 /* Insns connected to FSL or AXI stream attached devices. 
 */
/* get/getd: read one word from stream interface (rb ? R[rb]&0xf : imm). */
static bool do_get(DisasContext *dc, int rd, int rb, int imm, int ctrl)
{
    TCGv_i32 t_id, t_ctrl;

    if (trap_userspace(dc, true)) {
        return true;
    }

    /* Stream id: low 4 bits of rb when given, else the immediate. */
    t_id = tcg_temp_new_i32();
    if (rb) {
        tcg_gen_andi_i32(t_id, cpu_R[rb], 0xf);
    } else {
        tcg_gen_movi_i32(t_id, imm);
    }

    t_ctrl = tcg_const_i32(ctrl);
    gen_helper_get(reg_for_write(dc, rd), t_id, t_ctrl);
    tcg_temp_free_i32(t_id);
    tcg_temp_free_i32(t_ctrl);
    return true;
}

static bool trans_get(DisasContext *dc, arg_get *arg)
{
    return do_get(dc, arg->rd, 0, arg->imm, arg->ctrl);
}

static bool trans_getd(DisasContext *dc, arg_getd *arg)
{
    return do_get(dc, arg->rd, arg->rb, 0, arg->ctrl);
}

/* put/putd: write one word to stream interface (rb ? R[rb]&0xf : imm). */
static bool do_put(DisasContext *dc, int ra, int rb, int imm, int ctrl)
{
    TCGv_i32 t_id, t_ctrl;

    if (trap_userspace(dc, true)) {
        return true;
    }

    /* Stream id: low 4 bits of rb when given, else the immediate. */
    t_id = tcg_temp_new_i32();
    if (rb) {
        tcg_gen_andi_i32(t_id, cpu_R[rb], 0xf);
    } else {
        tcg_gen_movi_i32(t_id, imm);
    }

    t_ctrl = tcg_const_i32(ctrl);
    gen_helper_put(t_id, t_ctrl, reg_for_read(dc, ra));
    tcg_temp_free_i32(t_id);
    tcg_temp_free_i32(t_ctrl);
    return true;
}

static bool trans_put(DisasContext *dc, arg_put *arg)
{
    return do_put(dc, arg->ra, 0, arg->imm, arg->ctrl);
}

static bool trans_putd(DisasContext *dc, arg_putd *arg)
{
    return do_put(dc, arg->ra, arg->rb, 0, arg->ctrl);
}

/* TranslatorOps hook: initialize per-TB translation state. */
static void mb_tr_init_disas_context(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    int bound;

    dc->cfg = &cpu->cfg;
    dc->tb_flags = dc->base.tb->flags;
    dc->ext_imm = dc->base.tb->cs_base;
    dc->r0 = NULL;
    dc->r0_set = false;
    dc->mem_index = cpu_mmu_index(&cpu->env, false);
    /* A TB beginning in a delay slot must complete the pending branch. */
    dc->jmp_cond = dc->tb_flags & D_FLAG ? TCG_COND_ALWAYS : TCG_COND_NEVER;
    dc->jmp_dest = -1;

    /* Do not translate past the end of the page: 4 bytes per insn. */
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
}

static void mb_tr_tb_start(DisasContextBase *dcb, CPUState *cs)
{
}

/* TranslatorOps hook: record pc and iflags for restore_state_to_opc. */
static void mb_tr_insn_start(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);

    tcg_gen_insn_start(dc->base.pc_next, dc->tb_flags & ~MSR_TB_MASK);
    dc->insn_start = tcg_last_op();
}

/* TranslatorOps hook: translate one insn, resolving any pending branch. */
static void mb_tr_translate_insn(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);
    CPUMBState *env = cs->env_ptr;
    uint32_t ir;

    /* TODO: This should raise an exception, not terminate qemu. */
    if (dc->base.pc_next & 3) {
        cpu_abort(cs, "Microblaze: unaligned PC=%x\n",
                  (uint32_t)dc->base.pc_next);
    }

    dc->tb_flags_to_set = 0;

    ir = cpu_ldl_code(env, dc->base.pc_next);
    if (!decode(dc, ir)) {
        trap_illegal(dc, true);
    }

    if (dc->r0) {
        tcg_temp_free_i32(dc->r0);
        dc->r0 = NULL;
        dc->r0_set = false;
    }

    /* Discard the imm global when its contents cannot be used. */
    if ((dc->tb_flags & ~dc->tb_flags_to_set) & IMM_FLAG) {
        tcg_gen_discard_i32(cpu_imm);
    }

    /* One-insn flags expire; flags requested by this insn take effect. */
    dc->tb_flags &= ~(IMM_FLAG | BIMM_FLAG | D_FLAG);
    dc->tb_flags |= dc->tb_flags_to_set;
    dc->base.pc_next += 4;

    /* A branch is pending and its delay slot (if any) has completed. */
    if (dc->jmp_cond != TCG_COND_NEVER && !(dc->tb_flags & D_FLAG)) {
        /*
         * Finish any return-from branch.
1708 */ 1709 uint32_t rt_ibe = dc->tb_flags & (DRTI_FLAG | DRTB_FLAG | DRTE_FLAG); 1710 if (unlikely(rt_ibe != 0)) { 1711 dc->tb_flags &= ~(DRTI_FLAG | DRTB_FLAG | DRTE_FLAG); 1712 if (rt_ibe & DRTI_FLAG) { 1713 do_rti(dc); 1714 } else if (rt_ibe & DRTB_FLAG) { 1715 do_rtb(dc); 1716 } else { 1717 do_rte(dc); 1718 } 1719 } 1720 1721 /* Complete the branch, ending the TB. */ 1722 switch (dc->base.is_jmp) { 1723 case DISAS_NORETURN: 1724 /* 1725 * E.g. illegal insn in a delay slot. We've already exited 1726 * and will handle D_FLAG in mb_cpu_do_interrupt. 1727 */ 1728 break; 1729 case DISAS_NEXT: 1730 /* 1731 * Normal insn a delay slot. 1732 * However, the return-from-exception type insns should 1733 * return to the main loop, as they have adjusted MSR. 1734 */ 1735 dc->base.is_jmp = (rt_ibe ? DISAS_EXIT_JUMP : DISAS_JUMP); 1736 break; 1737 case DISAS_EXIT_NEXT: 1738 /* 1739 * E.g. mts insn in a delay slot. Continue with btarget, 1740 * but still return to the main loop. 1741 */ 1742 dc->base.is_jmp = DISAS_EXIT_JUMP; 1743 break; 1744 default: 1745 g_assert_not_reached(); 1746 } 1747 } 1748 } 1749 1750 static void mb_tr_tb_stop(DisasContextBase *dcb, CPUState *cs) 1751 { 1752 DisasContext *dc = container_of(dcb, DisasContext, base); 1753 1754 if (dc->base.is_jmp == DISAS_NORETURN) { 1755 /* We have already exited the TB. */ 1756 return; 1757 } 1758 1759 t_sync_flags(dc); 1760 1761 switch (dc->base.is_jmp) { 1762 case DISAS_TOO_MANY: 1763 gen_goto_tb(dc, 0, dc->base.pc_next); 1764 return; 1765 1766 case DISAS_EXIT: 1767 break; 1768 case DISAS_EXIT_NEXT: 1769 tcg_gen_movi_i32(cpu_pc, dc->base.pc_next); 1770 break; 1771 case DISAS_EXIT_JUMP: 1772 tcg_gen_mov_i32(cpu_pc, cpu_btarget); 1773 tcg_gen_discard_i32(cpu_btarget); 1774 break; 1775 1776 case DISAS_JUMP: 1777 if (dc->jmp_dest != -1 && !(tb_cflags(dc->base.tb) & CF_NO_GOTO_TB)) { 1778 /* Direct jump. 
*/ 1779 tcg_gen_discard_i32(cpu_btarget); 1780 1781 if (dc->jmp_cond != TCG_COND_ALWAYS) { 1782 /* Conditional direct jump. */ 1783 TCGLabel *taken = gen_new_label(); 1784 TCGv_i32 tmp = tcg_temp_new_i32(); 1785 1786 /* 1787 * Copy bvalue to a temp now, so we can discard bvalue. 1788 * This can avoid writing bvalue to memory when the 1789 * delay slot cannot raise an exception. 1790 */ 1791 tcg_gen_mov_i32(tmp, cpu_bvalue); 1792 tcg_gen_discard_i32(cpu_bvalue); 1793 1794 tcg_gen_brcondi_i32(dc->jmp_cond, tmp, 0, taken); 1795 gen_goto_tb(dc, 1, dc->base.pc_next); 1796 gen_set_label(taken); 1797 } 1798 gen_goto_tb(dc, 0, dc->jmp_dest); 1799 return; 1800 } 1801 1802 /* Indirect jump (or direct jump w/ goto_tb disabled) */ 1803 tcg_gen_mov_i32(cpu_pc, cpu_btarget); 1804 tcg_gen_discard_i32(cpu_btarget); 1805 tcg_gen_lookup_and_goto_ptr(); 1806 return; 1807 1808 default: 1809 g_assert_not_reached(); 1810 } 1811 1812 /* Finish DISAS_EXIT_* */ 1813 if (unlikely(cs->singlestep_enabled)) { 1814 gen_raise_exception(dc, EXCP_DEBUG); 1815 } else { 1816 tcg_gen_exit_tb(NULL, 0); 1817 } 1818 } 1819 1820 static void mb_tr_disas_log(const DisasContextBase *dcb, CPUState *cs) 1821 { 1822 qemu_log("IN: %s\n", lookup_symbol(dcb->pc_first)); 1823 log_target_disas(cs, dcb->pc_first, dcb->tb->size); 1824 } 1825 1826 static const TranslatorOps mb_tr_ops = { 1827 .init_disas_context = mb_tr_init_disas_context, 1828 .tb_start = mb_tr_tb_start, 1829 .insn_start = mb_tr_insn_start, 1830 .translate_insn = mb_tr_translate_insn, 1831 .tb_stop = mb_tr_tb_stop, 1832 .disas_log = mb_tr_disas_log, 1833 }; 1834 1835 void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns) 1836 { 1837 DisasContext dc; 1838 translator_loop(&mb_tr_ops, &dc.base, cpu, tb, max_insns); 1839 } 1840 1841 void mb_cpu_dump_state(CPUState *cs, FILE *f, int flags) 1842 { 1843 MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs); 1844 CPUMBState *env = &cpu->env; 1845 uint32_t iflags; 1846 int i; 1847 1848 
    qemu_fprintf(f, "pc=0x%08x msr=0x%05x mode=%s(saved=%s) eip=%d ie=%d\n",
                 env->pc, env->msr,
                 (env->msr & MSR_UM) ? "user" : "kernel",
                 (env->msr & MSR_UMS) ? "user" : "kernel",
                 (bool)(env->msr & MSR_EIP),
                 (bool)(env->msr & MSR_IE));

    iflags = env->iflags;
    qemu_fprintf(f, "iflags: 0x%08x", iflags);
    if (iflags & IMM_FLAG) {
        qemu_fprintf(f, " IMM(0x%08x)", env->imm);
    }
    if (iflags & BIMM_FLAG) {
        qemu_fprintf(f, " BIMM");
    }
    if (iflags & D_FLAG) {
        qemu_fprintf(f, " D(btarget=0x%08x)", env->btarget);
    }
    if (iflags & DRTI_FLAG) {
        qemu_fprintf(f, " DRTI");
    }
    if (iflags & DRTE_FLAG) {
        qemu_fprintf(f, " DRTE");
    }
    if (iflags & DRTB_FLAG) {
        qemu_fprintf(f, " DRTB");
    }
    if (iflags & ESR_ESS_FLAG) {
        qemu_fprintf(f, " ESR_ESS(0x%04x)", iflags & ESR_ESS_MASK);
    }

    qemu_fprintf(f, "\nesr=0x%04x fsr=0x%02x btr=0x%08x edr=0x%x\n"
                 "ear=0x" TARGET_FMT_lx " slr=0x%x shr=0x%x\n",
                 env->esr, env->fsr, env->btr, env->edr,
                 env->ear, env->slr, env->shr);

    for (i = 0; i < 32; i++) {
        qemu_fprintf(f, "r%2.2d=%08x%c",
                     i, env->regs[i], i % 4 == 3 ? '\n' : ' ');
    }
    qemu_fprintf(f, "\n");
}

/* Allocate the TCG globals backing the architectural register state. */
void mb_tcg_init(void)
{
/* R(n): a GPR; SP(x): a special-register field of CPUMBState. */
#define R(X)  { &cpu_R[X], offsetof(CPUMBState, regs[X]), "r" #X }
#define SP(X) { &cpu_##X, offsetof(CPUMBState, X), #X }

    static const struct {
        TCGv_i32 *var; int ofs; char name[8];
    } i32s[] = {
        /*
         * Note that r0 is handled specially in reg_for_read
         * and reg_for_write.  Nothing should touch cpu_R[0].
         * Leave that element NULL, which will assert quickly
         * inside the tcg generator functions.
         */
               R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
        R(8),  R(9),  R(10), R(11), R(12), R(13), R(14), R(15),
        R(16), R(17), R(18), R(19), R(20), R(21), R(22), R(23),
        R(24), R(25), R(26), R(27), R(28), R(29), R(30), R(31),

        SP(pc),
        SP(msr),
        SP(msr_c),
        SP(imm),
        SP(iflags),
        SP(bvalue),
        SP(btarget),
        SP(res_val),
    };

#undef R
#undef SP

    for (int i = 0; i < ARRAY_SIZE(i32s); ++i) {
        *i32s[i].var =
            tcg_global_mem_new_i32(cpu_env, i32s[i].ofs, i32s[i].name);
    }

    /* res_addr is target_ulong sized, hence a plain TCGv. */
    cpu_res_addr =
        tcg_global_mem_new(cpu_env, offsetof(CPUMBState, res_addr), "res_addr");
}

/* Restore env from the (pc, iflags) pair recorded by mb_tr_insn_start. */
void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->pc = data[0];
    env->iflags = data[1];
}