/*
 * Xilinx MicroBlaze emulation for qemu: main translation routines.
 *
 * Copyright (c) 2009 Edgar E. Iglesias.
 * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "accel/tcg/cpu-ldst.h"
#include "tcg/tcg-op.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "exec/translation-block.h"
#include "exec/target_page.h"
#include "qemu/qemu-print.h"

#include "exec/log.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H

#define EXTRACT_FIELD(src, start, end) \
    (((src) >> start) & ((1 << (end - start + 1)) - 1))

/* is_jmp field values */
#define DISAS_JUMP  DISAS_TARGET_0 /* only pc was modified dynamically */
#define DISAS_EXIT  DISAS_TARGET_1 /* all cpu state modified dynamically */

/* cpu state besides pc was modified dynamically; update pc to next */
#define DISAS_EXIT_NEXT DISAS_TARGET_2
/* cpu state besides pc was modified dynamically; update pc to btarget */
#define DISAS_EXIT_JUMP DISAS_TARGET_3

static TCGv_i32 cpu_R[32];
static TCGv_i32 cpu_pc;
static TCGv_i32 cpu_msr;
static TCGv_i32 cpu_msr_c;
static TCGv_i32 cpu_imm;
static TCGv_i32 cpu_bvalue;
static TCGv_i32 cpu_btarget;
static TCGv_i32 cpu_iflags;
static TCGv cpu_res_addr;
static TCGv_i32 cpu_res_val;

/* This is the state at translation time. */
typedef struct DisasContext {
    DisasContextBase base;
    const MicroBlazeCPUConfig *cfg;

    TCGv_i32 r0;
    bool r0_set;

    /* Decoder. */
    uint32_t ext_imm;
    unsigned int tb_flags;
    unsigned int tb_flags_to_set;
    int mem_index;

    /* Condition under which to jump, including NEVER and ALWAYS. */
    TCGCond jmp_cond;

    /* Immediate branch-taken destination, or -1 for indirect. */
    uint32_t jmp_dest;
} DisasContext;

static int typeb_imm(DisasContext *dc, int x)
{
    if (dc->tb_flags & IMM_FLAG) {
        return deposit32(dc->ext_imm, 0, 16, x);
    }
    return x;
}
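
/*
 * Worked example (illustrative): a preceding "imm 0x1234" sets
 * ext_imm to 0x12340000 with IMM_FLAG, so a following
 * "addik rD, rA, 0x5678" sees typeb_imm() == 0x12345678; without
 * the prefix, the 16-bit immediate is used sign-extended as-is.
 */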
/* Include the auto-generated decoder. */
#include "decode-insns.c.inc"

static void t_sync_flags(DisasContext *dc)
{
    /* Sync the tb dependent flags between translator and runtime. */
    if ((dc->tb_flags ^ dc->base.tb->flags) & IFLAGS_TB_MASK) {
        tcg_gen_movi_i32(cpu_iflags, dc->tb_flags & IFLAGS_TB_MASK);
    }
}

static void gen_raise_exception(DisasContext *dc, uint32_t index)
{
    gen_helper_raise_exception(tcg_env, tcg_constant_i32(index));
    dc->base.is_jmp = DISAS_NORETURN;
}

static void gen_raise_exception_sync(DisasContext *dc, uint32_t index)
{
    t_sync_flags(dc);
    tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
    gen_raise_exception(dc, index);
}

static void gen_raise_hw_excp(DisasContext *dc, uint32_t esr_ec)
{
    TCGv_i32 tmp = tcg_constant_i32(esr_ec);
    tcg_gen_st_i32(tmp, tcg_env, offsetof(CPUMBState, esr));

    gen_raise_exception_sync(dc, EXCP_HW_EXCP);
}

static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    if (translator_use_goto_tb(&dc->base, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_exit_tb(dc->base.tb, n);
    } else {
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_lookup_and_goto_ptr();
    }
    dc->base.is_jmp = DISAS_NORETURN;
}

/*
 * Returns true if the insn is an illegal operation.
 * If exceptions are enabled, an exception is raised.
 */
static bool trap_illegal(DisasContext *dc, bool cond)
{
    if (cond && (dc->tb_flags & MSR_EE)
        && dc->cfg->illegal_opcode_exception) {
        gen_raise_hw_excp(dc, ESR_EC_ILLEGAL_OP);
    }
    return cond;
}

/*
 * Returns true if the insn is illegal in userspace.
 * If exceptions are enabled, an exception is raised.
 */
static bool trap_userspace(DisasContext *dc, bool cond)
{
    bool cond_user = cond && dc->mem_index == MMU_USER_IDX;

    if (cond_user && (dc->tb_flags & MSR_EE)) {
        gen_raise_hw_excp(dc, ESR_EC_PRIVINSN);
    }
    return cond_user;
}
/*
 * Return true, and log an error, if the current insn is
 * within a delay slot.
 */
static bool invalid_delay_slot(DisasContext *dc, const char *insn_type)
{
    if (dc->tb_flags & D_FLAG) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Invalid insn in delay slot: %s at %08x\n",
                      insn_type, (uint32_t)dc->base.pc_next);
        return true;
    }
    return false;
}

static TCGv_i32 reg_for_read(DisasContext *dc, int reg)
{
    if (likely(reg != 0)) {
        return cpu_R[reg];
    }
    if (!dc->r0_set) {
        if (dc->r0 == NULL) {
            dc->r0 = tcg_temp_new_i32();
        }
        tcg_gen_movi_i32(dc->r0, 0);
        dc->r0_set = true;
    }
    return dc->r0;
}

static TCGv_i32 reg_for_write(DisasContext *dc, int reg)
{
    if (likely(reg != 0)) {
        return cpu_R[reg];
    }
    if (dc->r0 == NULL) {
        dc->r0 = tcg_temp_new_i32();
    }
    return dc->r0;
}

static bool do_typea(DisasContext *dc, arg_typea *arg, bool side_effects,
                     void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 rd, ra, rb;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    rb = reg_for_read(dc, arg->rb);
    fn(rd, ra, rb);
    return true;
}

static bool do_typea0(DisasContext *dc, arg_typea0 *arg, bool side_effects,
                      void (*fn)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 rd, ra;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    fn(rd, ra);
    return true;
}

static bool do_typeb_imm(DisasContext *dc, arg_typeb *arg, bool side_effects,
                         void (*fni)(TCGv_i32, TCGv_i32, int32_t))
{
    TCGv_i32 rd, ra;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    fni(rd, ra, arg->imm);
    return true;
}

static bool do_typeb_val(DisasContext *dc, arg_typeb *arg, bool side_effects,
                         void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 rd, ra, imm;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    imm = tcg_constant_i32(arg->imm);

    fn(rd, ra, imm);
    return true;
}

#define DO_TYPEA(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea *a) \
    { return do_typea(dc, a, SE, FN); }

#define DO_TYPEA_CFG(NAME, CFG, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea *a) \
    { return dc->cfg->CFG && do_typea(dc, a, SE, FN); }

#define DO_TYPEA0(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea0 *a) \
    { return do_typea0(dc, a, SE, FN); }

#define DO_TYPEA0_CFG(NAME, CFG, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea0 *a) \
    { return dc->cfg->CFG && do_typea0(dc, a, SE, FN); }

#define DO_TYPEBI(NAME, SE, FNI) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return do_typeb_imm(dc, a, SE, FNI); }

#define DO_TYPEBI_CFG(NAME, CFG, SE, FNI) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return dc->cfg->CFG && do_typeb_imm(dc, a, SE, FNI); }

#define DO_TYPEBV(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return do_typeb_val(dc, a, SE, FN); }
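
/*
 * For reference, DO_TYPEA(add, true, gen_add) below expands to:
 *
 *   static bool trans_add(DisasContext *dc, arg_typea *a)
 *   { return do_typea(dc, a, true, gen_add); }
 *
 * i.e. each macro instantiates the trans_* hook that the generated
 * decoder (decode-insns.c.inc) invokes for the matching insn.
 */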
#define ENV_WRAPPER2(NAME, HELPER) \
    static void NAME(TCGv_i32 out, TCGv_i32 ina) \
    { HELPER(out, tcg_env, ina); }

#define ENV_WRAPPER3(NAME, HELPER) \
    static void NAME(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb) \
    { HELPER(out, tcg_env, ina, inb); }

/* No input carry, but output carry. */
static void gen_add(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_constant_i32(0);

    tcg_gen_add2_i32(out, cpu_msr_c, ina, zero, inb, zero);
}

/* Input and output carry. */
static void gen_addc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_constant_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_add2_i32(tmp, cpu_msr_c, ina, zero, cpu_msr_c, zero);
    tcg_gen_add2_i32(out, cpu_msr_c, tmp, cpu_msr_c, inb, zero);
}

/* Input carry, but no output carry. */
static void gen_addkc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_add_i32(out, ina, inb);
    tcg_gen_add_i32(out, out, cpu_msr_c);
}

DO_TYPEA(add, true, gen_add)
DO_TYPEA(addc, true, gen_addc)
DO_TYPEA(addk, false, tcg_gen_add_i32)
DO_TYPEA(addkc, true, gen_addkc)

DO_TYPEBV(addi, true, gen_add)
DO_TYPEBV(addic, true, gen_addc)
DO_TYPEBI(addik, false, tcg_gen_addi_i32)
DO_TYPEBV(addikc, true, gen_addkc)

static void gen_andni(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    tcg_gen_andi_i32(out, ina, ~imm);
}

DO_TYPEA(and, false, tcg_gen_and_i32)
DO_TYPEBI(andi, false, tcg_gen_andi_i32)
DO_TYPEA(andn, false, tcg_gen_andc_i32)
DO_TYPEBI(andni, false, gen_andni)

static void gen_bsra(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, inb, 31);
    tcg_gen_sar_i32(out, ina, tmp);
}

static void gen_bsrl(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, inb, 31);
    tcg_gen_shr_i32(out, ina, tmp);
}

static void gen_bsll(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, inb, 31);
    tcg_gen_shl_i32(out, ina, tmp);
}

static void gen_bsefi(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    /* Note that decodetree has extracted and reassembled imm_w/imm_s. */
    int imm_w = extract32(imm, 5, 5);
    int imm_s = extract32(imm, 0, 5);

    if (imm_w + imm_s > 32 || imm_w == 0) {
        /* These inputs have an undefined behavior. */
        qemu_log_mask(LOG_GUEST_ERROR, "bsefi: Bad input w=%d s=%d\n",
                      imm_w, imm_s);
    } else {
        tcg_gen_extract_i32(out, ina, imm_s, imm_w);
    }
}
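
/*
 * Example (illustrative): bsefi with s=4, w=8 copies bits [11:4] of
 * rA into rD zero-extended, i.e. tcg_gen_extract_i32(out, ina, 4, 8).
 */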
static void gen_bsifi(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    /* Note that decodetree has extracted and reassembled imm_w/imm_s. */
    int imm_w = extract32(imm, 5, 5);
    int imm_s = extract32(imm, 0, 5);
    int width = imm_w - imm_s + 1;

    if (imm_w < imm_s) {
        /* These inputs have an undefined behavior. */
        qemu_log_mask(LOG_GUEST_ERROR, "bsifi: Bad input w=%d s=%d\n",
                      imm_w, imm_s);
    } else {
        tcg_gen_deposit_i32(out, out, ina, imm_s, width);
    }
}

DO_TYPEA_CFG(bsra, use_barrel, false, gen_bsra)
DO_TYPEA_CFG(bsrl, use_barrel, false, gen_bsrl)
DO_TYPEA_CFG(bsll, use_barrel, false, gen_bsll)

DO_TYPEBI_CFG(bsrai, use_barrel, false, tcg_gen_sari_i32)
DO_TYPEBI_CFG(bsrli, use_barrel, false, tcg_gen_shri_i32)
DO_TYPEBI_CFG(bslli, use_barrel, false, tcg_gen_shli_i32)

DO_TYPEBI_CFG(bsefi, use_barrel, false, gen_bsefi)
DO_TYPEBI_CFG(bsifi, use_barrel, false, gen_bsifi)

static void gen_clz(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_clzi_i32(out, ina, 32);
}

DO_TYPEA0_CFG(clz, use_pcmp_instr, false, gen_clz)

static void gen_cmp(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 lt = tcg_temp_new_i32();

    tcg_gen_setcond_i32(TCG_COND_LT, lt, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
    tcg_gen_deposit_i32(out, out, lt, 31, 1);
}

static void gen_cmpu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 lt = tcg_temp_new_i32();

    tcg_gen_setcond_i32(TCG_COND_LTU, lt, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
    tcg_gen_deposit_i32(out, out, lt, 31, 1);
}

DO_TYPEA(cmp, false, gen_cmp)
DO_TYPEA(cmpu, false, gen_cmpu)

ENV_WRAPPER3(gen_fadd, gen_helper_fadd)
ENV_WRAPPER3(gen_frsub, gen_helper_frsub)
ENV_WRAPPER3(gen_fmul, gen_helper_fmul)
ENV_WRAPPER3(gen_fdiv, gen_helper_fdiv)
ENV_WRAPPER3(gen_fcmp_un, gen_helper_fcmp_un)
ENV_WRAPPER3(gen_fcmp_lt, gen_helper_fcmp_lt)
ENV_WRAPPER3(gen_fcmp_eq, gen_helper_fcmp_eq)
ENV_WRAPPER3(gen_fcmp_le, gen_helper_fcmp_le)
ENV_WRAPPER3(gen_fcmp_gt, gen_helper_fcmp_gt)
ENV_WRAPPER3(gen_fcmp_ne, gen_helper_fcmp_ne)
ENV_WRAPPER3(gen_fcmp_ge, gen_helper_fcmp_ge)

DO_TYPEA_CFG(fadd, use_fpu, true, gen_fadd)
DO_TYPEA_CFG(frsub, use_fpu, true, gen_frsub)
DO_TYPEA_CFG(fmul, use_fpu, true, gen_fmul)
DO_TYPEA_CFG(fdiv, use_fpu, true, gen_fdiv)
DO_TYPEA_CFG(fcmp_un, use_fpu, true, gen_fcmp_un)
DO_TYPEA_CFG(fcmp_lt, use_fpu, true, gen_fcmp_lt)
DO_TYPEA_CFG(fcmp_eq, use_fpu, true, gen_fcmp_eq)
DO_TYPEA_CFG(fcmp_le, use_fpu, true, gen_fcmp_le)
DO_TYPEA_CFG(fcmp_gt, use_fpu, true, gen_fcmp_gt)
DO_TYPEA_CFG(fcmp_ne, use_fpu, true, gen_fcmp_ne)
DO_TYPEA_CFG(fcmp_ge, use_fpu, true, gen_fcmp_ge)

ENV_WRAPPER2(gen_flt, gen_helper_flt)
ENV_WRAPPER2(gen_fint, gen_helper_fint)
ENV_WRAPPER2(gen_fsqrt, gen_helper_fsqrt)

DO_TYPEA0_CFG(flt, use_fpu >= 2, true, gen_flt)
DO_TYPEA0_CFG(fint, use_fpu >= 2, true, gen_fint)
DO_TYPEA0_CFG(fsqrt, use_fpu >= 2, true, gen_fsqrt)
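
/*
 * Per the MicroBlaze ISA, division takes its divisor from rA and its
 * dividend from rB (rD = rB / rA); that is why the helper calls in
 * gen_idiv/gen_idivu below pass (inb, ina) rather than (ina, inb).
 */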
/* Does not use ENV_WRAPPER3, because arguments are swapped as well. */
static void gen_idiv(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    gen_helper_divs(out, tcg_env, inb, ina);
}

static void gen_idivu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    gen_helper_divu(out, tcg_env, inb, ina);
}

DO_TYPEA_CFG(idiv, use_div, true, gen_idiv)
DO_TYPEA_CFG(idivu, use_div, true, gen_idivu)

static bool trans_imm(DisasContext *dc, arg_imm *arg)
{
    if (invalid_delay_slot(dc, "imm")) {
        return true;
    }
    dc->ext_imm = arg->imm << 16;
    tcg_gen_movi_i32(cpu_imm, dc->ext_imm);
    dc->tb_flags_to_set = IMM_FLAG;
    return true;
}

static void gen_mulh(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_muls2_i32(tmp, out, ina, inb);
}

static void gen_mulhu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_mulu2_i32(tmp, out, ina, inb);
}

static void gen_mulhsu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_mulsu2_i32(tmp, out, ina, inb);
}

DO_TYPEA_CFG(mul, use_hw_mul, false, tcg_gen_mul_i32)
DO_TYPEA_CFG(mulh, use_hw_mul >= 2, false, gen_mulh)
DO_TYPEA_CFG(mulhu, use_hw_mul >= 2, false, gen_mulhu)
DO_TYPEA_CFG(mulhsu, use_hw_mul >= 2, false, gen_mulhsu)
DO_TYPEBI_CFG(muli, use_hw_mul, false, tcg_gen_muli_i32)

DO_TYPEA(or, false, tcg_gen_or_i32)
DO_TYPEBI(ori, false, tcg_gen_ori_i32)

static void gen_pcmpeq(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_EQ, out, ina, inb);
}

static void gen_pcmpne(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_NE, out, ina, inb);
}

DO_TYPEA_CFG(pcmpbf, use_pcmp_instr, false, gen_helper_pcmpbf)
DO_TYPEA_CFG(pcmpeq, use_pcmp_instr, false, gen_pcmpeq)
DO_TYPEA_CFG(pcmpne, use_pcmp_instr, false, gen_pcmpne)

/* No input carry, but output carry. */
static void gen_rsub(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_msr_c, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
}

/* Input and output carry. */
static void gen_rsubc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_constant_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_not_i32(tmp, ina);
    tcg_gen_add2_i32(tmp, cpu_msr_c, tmp, zero, cpu_msr_c, zero);
    tcg_gen_add2_i32(out, cpu_msr_c, tmp, cpu_msr_c, inb, zero);
}

/* No input or output carry. */
static void gen_rsubk(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_sub_i32(out, inb, ina);
}
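
/*
 * Throughout the rsub family MSR[C] behaves as "not borrow":
 * rB + ~rA + 1 produces a carry-out exactly when rB >= rA unsigned,
 * which is what the TCG_COND_GEU setcond in gen_rsub computes.
 */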
/* Input carry, no output carry. */
static void gen_rsubkc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 nota = tcg_temp_new_i32();

    tcg_gen_not_i32(nota, ina);
    tcg_gen_add_i32(out, inb, nota);
    tcg_gen_add_i32(out, out, cpu_msr_c);
}

DO_TYPEA(rsub, true, gen_rsub)
DO_TYPEA(rsubc, true, gen_rsubc)
DO_TYPEA(rsubk, false, gen_rsubk)
DO_TYPEA(rsubkc, true, gen_rsubkc)

DO_TYPEBV(rsubi, true, gen_rsub)
DO_TYPEBV(rsubic, true, gen_rsubc)
DO_TYPEBV(rsubik, false, gen_rsubk)
DO_TYPEBV(rsubikc, true, gen_rsubkc)

DO_TYPEA0(sext8, false, tcg_gen_ext8s_i32)
DO_TYPEA0(sext16, false, tcg_gen_ext16s_i32)

static void gen_sra(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_sari_i32(out, ina, 1);
}

static void gen_src(TCGv_i32 out, TCGv_i32 ina)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_mov_i32(tmp, cpu_msr_c);
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_extract2_i32(out, ina, tmp, 1);
}

static void gen_srl(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_shri_i32(out, ina, 1);
}

DO_TYPEA0(sra, false, gen_sra)
DO_TYPEA0(src, false, gen_src)
DO_TYPEA0(srl, false, gen_srl)

static void gen_swaph(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_rotri_i32(out, ina, 16);
}

DO_TYPEA0(swapb, false, tcg_gen_bswap32_i32)
DO_TYPEA0(swaph, false, gen_swaph)

static bool trans_wdic(DisasContext *dc, arg_wdic *a)
{
    /* Cache operations are nops: only check for supervisor mode. */
    trap_userspace(dc, true);
    return true;
}

DO_TYPEA(xor, false, tcg_gen_xor_i32)
DO_TYPEBI(xori, false, tcg_gen_xori_i32)

static TCGv compute_ldst_addr_typea(DisasContext *dc, int ra, int rb)
{
    TCGv ret = tcg_temp_new();

    /* If any of the regs is r0, set t to the value of the other reg. */
    if (ra && rb) {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_add_i32(tmp, cpu_R[ra], cpu_R[rb]);
        tcg_gen_extu_i32_tl(ret, tmp);
    } else if (ra) {
        tcg_gen_extu_i32_tl(ret, cpu_R[ra]);
    } else if (rb) {
        tcg_gen_extu_i32_tl(ret, cpu_R[rb]);
    } else {
        tcg_gen_movi_tl(ret, 0);
    }

    if ((ra == 1 || rb == 1) && dc->cfg->stackprot) {
        gen_helper_stackprot(tcg_env, ret);
    }
    return ret;
}

static TCGv compute_ldst_addr_typeb(DisasContext *dc, int ra, int imm)
{
    TCGv ret = tcg_temp_new();

    /* If ra is r0, the address is just the immediate. */
    if (ra) {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_addi_i32(tmp, cpu_R[ra], imm);
        tcg_gen_extu_i32_tl(ret, tmp);
    } else {
        tcg_gen_movi_tl(ret, (uint32_t)imm);
    }

    if (ra == 1 && dc->cfg->stackprot) {
        gen_helper_stackprot(tcg_env, ret);
    }
    return ret;
}
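
/*
 * r1 is the ABI stack pointer, so addresses formed from r1 are
 * range-checked by gen_helper_stackprot() against the stack
 * protection bounds (the SLR/SHR special registers) when the
 * feature is configured.
 */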
#ifndef CONFIG_USER_ONLY
static TCGv compute_ldst_addr_ea(DisasContext *dc, int ra, int rb)
{
    int addr_size = dc->cfg->addr_size;
    TCGv ret = tcg_temp_new();

    if (addr_size == 32 || ra == 0) {
        if (rb) {
            tcg_gen_extu_i32_tl(ret, cpu_R[rb]);
        } else {
            tcg_gen_movi_tl(ret, 0);
        }
    } else {
        if (rb) {
            tcg_gen_concat_i32_i64(ret, cpu_R[rb], cpu_R[ra]);
        } else {
            tcg_gen_extu_i32_tl(ret, cpu_R[ra]);
            tcg_gen_shli_tl(ret, ret, 32);
        }
        if (addr_size < 64) {
            /* Mask off out of range bits. */
            tcg_gen_andi_i64(ret, ret, MAKE_64BIT_MASK(0, addr_size));
        }
    }
    return ret;
}
#endif

#ifndef CONFIG_USER_ONLY
static void record_unaligned_ess(DisasContext *dc, int rd,
                                 MemOp size, bool store)
{
    uint32_t iflags = tcg_get_insn_start_param(dc->base.insn_start, 1);

    iflags |= ESR_ESS_FLAG;
    iflags |= rd << 5;
    iflags |= store * ESR_S;
    iflags |= (size == MO_32) * ESR_W;

    tcg_set_insn_start_param(dc->base.insn_start, 1, iflags);
}
#endif

static inline MemOp mo_endian(DisasContext *dc)
{
    return dc->cfg->endi ? MO_LE : MO_BE;
}

static bool do_load(DisasContext *dc, int rd, TCGv addr, MemOp mop,
                    int mem_index, bool rev)
{
    MemOp size = mop & MO_SIZE;

    mop |= mo_endian(dc);

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev) {
        if (size > MO_8) {
            mop ^= MO_BSWAP;
        }
        if (size < MO_32) {
            tcg_gen_xori_tl(addr, addr, 3 - size);
        }
    }

    /*
     * For system mode, enforce alignment if the cpu configuration
     * requires it.  For user-mode, the Linux kernel will have fixed up
     * any unaligned access, so emulate that by *not* setting MO_ALIGN.
     */
#ifndef CONFIG_USER_ONLY
    if (size > MO_8 &&
        (dc->tb_flags & MSR_EE) &&
        dc->cfg->unaligned_exceptions) {
        record_unaligned_ess(dc, rd, size, false);
        mop |= MO_ALIGN;
    }
#endif

    tcg_gen_qemu_ld_i32(reg_for_write(dc, rd), addr, mem_index, mop);
    return true;
}
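
/*
 * Worked example (illustrative) for the reversed accesses above:
 * lhur at address 0x1000 xors the address with 2 (3 - MO_16) and
 * toggles MO_BSWAP, so it reads the other halfword of the aligned
 * 32-bit word with its bytes swapped -- the opposite-endian view
 * of the same word.
 */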
static bool trans_lbu(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_lbur(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, true);
}

static bool trans_lbuea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_lbui(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_lhu(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UW, dc->mem_index, false);
}

static bool trans_lhur(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UW, dc->mem_index, true);
}

static bool trans_lhuea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UW, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_lhui(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_UW, dc->mem_index, false);
}

static bool trans_lw(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UL, dc->mem_index, false);
}

static bool trans_lwr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UL, dc->mem_index, true);
}

static bool trans_lwea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UL, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_lwi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_UL, dc->mem_index, false);
}

static bool trans_lwx(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);

    /* lwx does not throw unaligned access errors, so force alignment */
    tcg_gen_andi_tl(addr, addr, ~3);

    tcg_gen_qemu_ld_i32(cpu_res_val, addr, dc->mem_index,
                        mo_endian(dc) | MO_UL);
    tcg_gen_mov_tl(cpu_res_addr, addr);

    if (arg->rd) {
        tcg_gen_mov_i32(cpu_R[arg->rd], cpu_res_val);
    }

    /* No support for AXI exclusive so always clear C */
    tcg_gen_movi_i32(cpu_msr_c, 0);
    return true;
}
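
/*
 * lwx and swx form a load-linked/store-conditional pair: lwx records
 * the address and loaded value in cpu_res_addr/cpu_res_val, and swx
 * (below) succeeds only while both still match, clearing MSR[C] on
 * success and setting it on failure.
 */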
static bool do_store(DisasContext *dc, int rd, TCGv addr, MemOp mop,
                     int mem_index, bool rev)
{
    MemOp size = mop & MO_SIZE;

    mop |= mo_endian(dc);

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way out of the CPU core.
     */
    if (rev) {
        if (size > MO_8) {
            mop ^= MO_BSWAP;
        }
        if (size < MO_32) {
            tcg_gen_xori_tl(addr, addr, 3 - size);
        }
    }

    /*
     * For system mode, enforce alignment if the cpu configuration
     * requires it.  For user-mode, the Linux kernel will have fixed up
     * any unaligned access, so emulate that by *not* setting MO_ALIGN.
     */
#ifndef CONFIG_USER_ONLY
    if (size > MO_8 &&
        (dc->tb_flags & MSR_EE) &&
        dc->cfg->unaligned_exceptions) {
        record_unaligned_ess(dc, rd, size, true);
        mop |= MO_ALIGN;
    }
#endif

    tcg_gen_qemu_st_i32(reg_for_read(dc, rd), addr, mem_index, mop);
    return true;
}

static bool trans_sb(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_sbr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, true);
}

static bool trans_sbea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_sbi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_sh(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UW, dc->mem_index, false);
}

static bool trans_shr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UW, dc->mem_index, true);
}

static bool trans_shea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UW, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_shi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_UW, dc->mem_index, false);
}

static bool trans_sw(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UL, dc->mem_index, false);
}

static bool trans_swr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UL, dc->mem_index, true);
}

static bool trans_swea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UL, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_swi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_UL, dc->mem_index, false);
}

static bool trans_swx(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    TCGLabel *swx_done = gen_new_label();
    TCGLabel *swx_fail = gen_new_label();
    TCGv_i32 tval;

    /* swx does not throw unaligned access errors, so force alignment */
    tcg_gen_andi_tl(addr, addr, ~3);

    /*
     * Compare the address vs the one we used during lwx.
     * On mismatch, the operation fails.  On match, addr dies at the
     * branch, but we know we can use the equal version in the global.
     * In either case, addr is no longer needed.
     */
    tcg_gen_brcond_tl(TCG_COND_NE, cpu_res_addr, addr, swx_fail);

    /*
     * Compare the value loaded during lwx with current contents of
     * the reserved location.
     */
    tval = tcg_temp_new_i32();

    tcg_gen_atomic_cmpxchg_i32(tval, cpu_res_addr, cpu_res_val,
                               reg_for_write(dc, arg->rd),
                               dc->mem_index, mo_endian(dc) | MO_UL);

    tcg_gen_brcond_i32(TCG_COND_NE, cpu_res_val, tval, swx_fail);

    /* Success */
    tcg_gen_movi_i32(cpu_msr_c, 0);
    tcg_gen_br(swx_done);

    /* Failure */
    gen_set_label(swx_fail);
    tcg_gen_movi_i32(cpu_msr_c, 1);

    gen_set_label(swx_done);

    /*
     * Prevent the saved address from working again without another ldx.
     * Akin to the pseudocode setting reservation = 0.
     */
    tcg_gen_movi_tl(cpu_res_addr, -1);
    return true;
}
static void setup_dslot(DisasContext *dc, bool type_b)
{
    dc->tb_flags_to_set |= D_FLAG;
    if (type_b && (dc->tb_flags & IMM_FLAG)) {
        dc->tb_flags_to_set |= BIMM_FLAG;
    }
}

static bool do_branch(DisasContext *dc, int dest_rb, int dest_imm,
                      bool delay, bool abs, int link)
{
    uint32_t add_pc;

    if (invalid_delay_slot(dc, "branch")) {
        return true;
    }
    if (delay) {
        setup_dslot(dc, dest_rb < 0);
    }

    if (link) {
        tcg_gen_movi_i32(cpu_R[link], dc->base.pc_next);
    }

    /* Store the branch taken destination into btarget. */
    add_pc = abs ? 0 : dc->base.pc_next;
    if (dest_rb > 0) {
        dc->jmp_dest = -1;
        tcg_gen_addi_i32(cpu_btarget, cpu_R[dest_rb], add_pc);
    } else {
        dc->jmp_dest = add_pc + dest_imm;
        tcg_gen_movi_i32(cpu_btarget, dc->jmp_dest);
    }
    dc->jmp_cond = TCG_COND_ALWAYS;
    return true;
}

#define DO_BR(NAME, NAMEI, DELAY, ABS, LINK) \
    static bool trans_##NAME(DisasContext *dc, arg_typea_br *arg) \
    { return do_branch(dc, arg->rb, 0, DELAY, ABS, LINK ? arg->rd : 0); } \
    static bool trans_##NAMEI(DisasContext *dc, arg_typeb_br *arg) \
    { return do_branch(dc, -1, arg->imm, DELAY, ABS, LINK ? arg->rd : 0); }

DO_BR(br, bri, false, false, false)
DO_BR(bra, brai, false, true, false)
DO_BR(brd, brid, true, false, false)
DO_BR(brad, braid, true, true, false)
DO_BR(brld, brlid, true, false, true)
DO_BR(brald, bralid, true, true, true)

static bool do_bcc(DisasContext *dc, int dest_rb, int dest_imm,
                   TCGCond cond, int ra, bool delay)
{
    TCGv_i32 zero, next;

    if (invalid_delay_slot(dc, "bcc")) {
        return true;
    }
    if (delay) {
        setup_dslot(dc, dest_rb < 0);
    }

    dc->jmp_cond = cond;

    /* Cache the condition register in cpu_bvalue across any delay slot. */
    tcg_gen_mov_i32(cpu_bvalue, reg_for_read(dc, ra));

    /* Store the branch taken destination into btarget. */
    if (dest_rb > 0) {
        dc->jmp_dest = -1;
        tcg_gen_addi_i32(cpu_btarget, cpu_R[dest_rb], dc->base.pc_next);
    } else {
        dc->jmp_dest = dc->base.pc_next + dest_imm;
        tcg_gen_movi_i32(cpu_btarget, dc->jmp_dest);
    }

    /* Compute the final destination into btarget. */
    zero = tcg_constant_i32(0);
    next = tcg_constant_i32(dc->base.pc_next + (delay + 1) * 4);
    tcg_gen_movcond_i32(dc->jmp_cond, cpu_btarget,
                        reg_for_read(dc, ra), zero,
                        cpu_btarget, next);

    return true;
}
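
/*
 * Note that do_bcc resolves the condition eagerly: the movcond above
 * replaces btarget with the fall-through address (pc_next + 4, or
 * pc_next + 8 when a delay slot follows) whenever the condition
 * fails, so tb_stop only ever has to jump to whatever btarget holds.
 */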
#define DO_BCC(NAME, COND) \
    static bool trans_##NAME(DisasContext *dc, arg_typea_bc *arg) \
    { return do_bcc(dc, arg->rb, 0, COND, arg->ra, false); } \
    static bool trans_##NAME##d(DisasContext *dc, arg_typea_bc *arg) \
    { return do_bcc(dc, arg->rb, 0, COND, arg->ra, true); } \
    static bool trans_##NAME##i(DisasContext *dc, arg_typeb_bc *arg) \
    { return do_bcc(dc, -1, arg->imm, COND, arg->ra, false); } \
    static bool trans_##NAME##id(DisasContext *dc, arg_typeb_bc *arg) \
    { return do_bcc(dc, -1, arg->imm, COND, arg->ra, true); }

DO_BCC(beq, TCG_COND_EQ)
DO_BCC(bge, TCG_COND_GE)
DO_BCC(bgt, TCG_COND_GT)
DO_BCC(ble, TCG_COND_LE)
DO_BCC(blt, TCG_COND_LT)
DO_BCC(bne, TCG_COND_NE)

static bool trans_brk(DisasContext *dc, arg_typea_br *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
    if (invalid_delay_slot(dc, "brk")) {
        return true;
    }

    tcg_gen_mov_i32(cpu_pc, reg_for_read(dc, arg->rb));
    if (arg->rd) {
        tcg_gen_movi_i32(cpu_R[arg->rd], dc->base.pc_next);
    }
    tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_BIP);
    tcg_gen_movi_tl(cpu_res_addr, -1);

    dc->base.is_jmp = DISAS_EXIT;
    return true;
}

static bool trans_brki(DisasContext *dc, arg_typeb_br *arg)
{
    uint32_t imm = arg->imm;

    if (trap_userspace(dc, imm != 0x8 && imm != 0x18)) {
        return true;
    }
    if (invalid_delay_slot(dc, "brki")) {
        return true;
    }

    tcg_gen_movi_i32(cpu_pc, imm);
    if (arg->rd) {
        tcg_gen_movi_i32(cpu_R[arg->rd], dc->base.pc_next);
    }
    tcg_gen_movi_tl(cpu_res_addr, -1);

#ifdef CONFIG_USER_ONLY
    switch (imm) {
    case 0x8:  /* syscall trap */
        gen_raise_exception_sync(dc, EXCP_SYSCALL);
        break;
    case 0x18: /* debug trap */
        gen_raise_exception_sync(dc, EXCP_DEBUG);
        break;
    default:   /* eliminated with trap_userspace check */
        g_assert_not_reached();
    }
#else
    uint32_t msr_to_set = 0;

    if (imm != 0x18) {
        msr_to_set |= MSR_BIP;
    }
    if (imm == 0x8 || imm == 0x18) {
        /* MSR_UM and MSR_VM are in tb_flags, so we know their value. */
        msr_to_set |= (dc->tb_flags & (MSR_UM | MSR_VM)) << 1;
        tcg_gen_andi_i32(cpu_msr, cpu_msr,
                         ~(MSR_VMS | MSR_UMS | MSR_VM | MSR_UM));
    }
    tcg_gen_ori_i32(cpu_msr, cpu_msr, msr_to_set);
    dc->base.is_jmp = DISAS_EXIT;
#endif

    return true;
}
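
/*
 * brki to vectors 0x8 (syscall) and 0x18 (debug) is permitted from
 * userspace; in system mode those two vectors additionally save the
 * current MSR_UM/MSR_VM into MSR_UMS/MSR_VMS and clear UM/VM,
 * entering privileged real mode as hardware exception entry does.
 */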
static bool trans_mbar(DisasContext *dc, arg_mbar *arg)
{
    int mbar_imm = arg->imm;

    /* Note that mbar is a specialized branch instruction. */
    if (invalid_delay_slot(dc, "mbar")) {
        return true;
    }

    /* Data access memory barrier. */
    if ((mbar_imm & 2) == 0) {
        tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
    }

    /* Sleep. */
    if (mbar_imm & 16) {
        if (trap_userspace(dc, true)) {
            /* Sleep is a privileged instruction. */
            return true;
        }

        t_sync_flags(dc);

        tcg_gen_st_i32(tcg_constant_i32(1), tcg_env,
                       -offsetof(MicroBlazeCPU, env)
                       + offsetof(CPUState, halted));

        tcg_gen_movi_i32(cpu_pc, dc->base.pc_next + 4);

        gen_raise_exception(dc, EXCP_HLT);
    }

    /*
     * If !(mbar_imm & 1), this is an instruction access memory barrier
     * and we need to end the TB so that we recognize self-modified
     * code immediately.
     *
     * However, there are some data mbars that need the TB break
     * (and return to main loop) to recognize interrupts right away.
     * E.g. recognizing a change to an interrupt controller register.
     *
     * Therefore, choose to end the TB always.
     */
    dc->base.is_jmp = DISAS_EXIT_NEXT;
    return true;
}

static bool do_rts(DisasContext *dc, arg_typeb_bc *arg, int to_set)
{
    if (trap_userspace(dc, to_set)) {
        return true;
    }
    if (invalid_delay_slot(dc, "rts")) {
        return true;
    }

    dc->tb_flags_to_set |= to_set;
    setup_dslot(dc, true);

    dc->jmp_cond = TCG_COND_ALWAYS;
    dc->jmp_dest = -1;
    tcg_gen_addi_i32(cpu_btarget, reg_for_read(dc, arg->ra), arg->imm);
    return true;
}

#define DO_RTS(NAME, IFLAG) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb_bc *arg) \
    { return do_rts(dc, arg, IFLAG); }

DO_RTS(rtbd, DRTB_FLAG)
DO_RTS(rtid, DRTI_FLAG)
DO_RTS(rted, DRTE_FLAG)
DO_RTS(rtsd, 0)

static bool trans_zero(DisasContext *dc, arg_zero *arg)
{
    /* If opcode_0_illegal, trap. */
    if (dc->cfg->opcode_0_illegal) {
        trap_illegal(dc, true);
        return true;
    }
    /*
     * Otherwise, this is "add r0, r0, r0".
     * Continue to trans_add so that MSR[C] gets cleared.
     */
    return false;
}

static void msr_read(DisasContext *dc, TCGv_i32 d)
{
    TCGv_i32 t;

    /* Replicate the cpu_msr_c boolean into the proper bit and the copy. */
    t = tcg_temp_new_i32();
    tcg_gen_muli_i32(t, cpu_msr_c, MSR_C | MSR_CC);
    tcg_gen_or_i32(d, cpu_msr, t);
}
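
/*
 * For msrclr/msrset the only mask userspace may pass is MSR_C, so
 * the common carry-clear/carry-set idioms never trap; every other
 * mask is privileged (checked in do_msrclrset below).
 */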
static bool do_msrclrset(DisasContext *dc, arg_type_msr *arg, bool set)
{
    uint32_t imm = arg->imm;

    if (trap_userspace(dc, imm != MSR_C)) {
        return true;
    }

    if (arg->rd) {
        msr_read(dc, cpu_R[arg->rd]);
    }

    /*
     * Handle the carry bit separately.
     * This is the only bit that userspace can modify.
     */
    if (imm & MSR_C) {
        tcg_gen_movi_i32(cpu_msr_c, set);
    }

    /*
     * MSR_C and MSR_CC set above.
     * MSR_PVR is not writable, and is always clear.
     */
    imm &= ~(MSR_C | MSR_CC | MSR_PVR);

    if (imm != 0) {
        if (set) {
            tcg_gen_ori_i32(cpu_msr, cpu_msr, imm);
        } else {
            tcg_gen_andi_i32(cpu_msr, cpu_msr, ~imm);
        }
        dc->base.is_jmp = DISAS_EXIT_NEXT;
    }
    return true;
}

static bool trans_msrclr(DisasContext *dc, arg_type_msr *arg)
{
    return do_msrclrset(dc, arg, false);
}

static bool trans_msrset(DisasContext *dc, arg_type_msr *arg)
{
    return do_msrclrset(dc, arg, true);
}

static bool trans_mts(DisasContext *dc, arg_mts *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }

#ifdef CONFIG_USER_ONLY
    g_assert_not_reached();
#else
    if (arg->e && arg->rs != 0x1003) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Invalid extended mts reg 0x%x\n", arg->rs);
        return true;
    }

    TCGv_i32 src = reg_for_read(dc, arg->ra);
    switch (arg->rs) {
    case SR_MSR:
        /* Install MSR_C. */
        tcg_gen_extract_i32(cpu_msr_c, src, 2, 1);
        /*
         * Clear MSR_C and MSR_CC;
         * MSR_PVR is not writable, and is always clear.
         */
        tcg_gen_andi_i32(cpu_msr, src, ~(MSR_C | MSR_CC | MSR_PVR));
        break;
    case SR_FSR:
        tcg_gen_st_i32(src, tcg_env, offsetof(CPUMBState, fsr));
        break;
    case 0x800:
        tcg_gen_st_i32(src, tcg_env, offsetof(CPUMBState, slr));
        break;
    case 0x802:
        tcg_gen_st_i32(src, tcg_env, offsetof(CPUMBState, shr));
        break;

    case 0x1000: /* PID */
    case 0x1001: /* ZPR */
    case 0x1002: /* TLBX */
    case 0x1003: /* TLBLO */
    case 0x1004: /* TLBHI */
    case 0x1005: /* TLBSX */
        {
            TCGv_i32 tmp_ext = tcg_constant_i32(arg->e);
            TCGv_i32 tmp_reg = tcg_constant_i32(arg->rs & 7);

            gen_helper_mmu_write(tcg_env, tmp_ext, tmp_reg, src);
        }
        break;

    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid mts reg 0x%x\n", arg->rs);
        return true;
    }
    dc->base.is_jmp = DISAS_EXIT_NEXT;
    return true;
#endif
}
static bool trans_mfs(DisasContext *dc, arg_mfs *arg)
{
    TCGv_i32 dest = reg_for_write(dc, arg->rd);

    if (arg->e) {
        switch (arg->rs) {
        case SR_EAR:
            {
                TCGv_i64 t64 = tcg_temp_new_i64();
                tcg_gen_ld_i64(t64, tcg_env, offsetof(CPUMBState, ear));
                tcg_gen_extrh_i64_i32(dest, t64);
            }
            return true;
#ifndef CONFIG_USER_ONLY
        case 0x1003: /* TLBLO */
            /* Handled below. */
            break;
#endif
        case 0x2006 ... 0x2009:
            /* High bits of PVR6-9 not implemented. */
            tcg_gen_movi_i32(dest, 0);
            return true;
        default:
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Invalid extended mfs reg 0x%x\n", arg->rs);
            return true;
        }
    }

    switch (arg->rs) {
    case SR_PC:
        tcg_gen_movi_i32(dest, dc->base.pc_next);
        break;
    case SR_MSR:
        msr_read(dc, dest);
        break;
    case SR_EAR:
        {
            TCGv_i64 t64 = tcg_temp_new_i64();
            tcg_gen_ld_i64(t64, tcg_env, offsetof(CPUMBState, ear));
            tcg_gen_extrl_i64_i32(dest, t64);
        }
        break;
    case SR_ESR:
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, esr));
        break;
    case SR_FSR:
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, fsr));
        break;
    case SR_BTR:
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, btr));
        break;
    case SR_EDR:
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, edr));
        break;
    case 0x800:
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, slr));
        break;
    case 0x802:
        tcg_gen_ld_i32(dest, tcg_env, offsetof(CPUMBState, shr));
        break;

#ifndef CONFIG_USER_ONLY
    case 0x1000: /* PID */
    case 0x1001: /* ZPR */
    case 0x1002: /* TLBX */
    case 0x1003: /* TLBLO */
    case 0x1004: /* TLBHI */
    case 0x1005: /* TLBSX */
        {
            TCGv_i32 tmp_ext = tcg_constant_i32(arg->e);
            TCGv_i32 tmp_reg = tcg_constant_i32(arg->rs & 7);

            gen_helper_mmu_read(dest, tcg_env, tmp_ext, tmp_reg);
        }
        break;
#endif

    case 0x2000 ... 0x200c:
        tcg_gen_ld_i32(dest, tcg_env,
                       offsetof(MicroBlazeCPU, cfg.pvr_regs[arg->rs - 0x2000])
                       - offsetof(MicroBlazeCPU, env));
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid mfs reg 0x%x\n", arg->rs);
        break;
    }
    return true;
}

static void do_rti(DisasContext *dc)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_shri_i32(tmp, cpu_msr, 1);
    tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_IE);
    tcg_gen_andi_i32(tmp, tmp, MSR_VM | MSR_UM);
    tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM));
    tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);
}

static void do_rtb(DisasContext *dc)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_shri_i32(tmp, cpu_msr, 1);
    tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM | MSR_BIP));
    tcg_gen_andi_i32(tmp, tmp, (MSR_VM | MSR_UM));
    tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);
}

static void do_rte(DisasContext *dc)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_shri_i32(tmp, cpu_msr, 1);
    tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_EE);
    tcg_gen_andi_i32(tmp, tmp, (MSR_VM | MSR_UM));
    tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM | MSR_EIP));
    tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);
}
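
/*
 * The three return-from-event sequences above restore MSR_UM/MSR_VM
 * from their saved copies (MSR_UMS/MSR_VMS, one bit to the left) and
 * re-arm the event class: rti re-enables IE, rtb clears BIP, and rte
 * re-enables EE while clearing EIP.
 */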
/* Insns connected to FSL or AXI stream attached devices. */
static bool do_get(DisasContext *dc, int rd, int rb, int imm, int ctrl)
{
    TCGv_i32 t_id, t_ctrl;

    if (trap_userspace(dc, true)) {
        return true;
    }

    t_id = tcg_temp_new_i32();
    if (rb) {
        tcg_gen_andi_i32(t_id, cpu_R[rb], 0xf);
    } else {
        tcg_gen_movi_i32(t_id, imm);
    }

    t_ctrl = tcg_constant_i32(ctrl);
    gen_helper_get(reg_for_write(dc, rd), t_id, t_ctrl);
    return true;
}

static bool trans_get(DisasContext *dc, arg_get *arg)
{
    return do_get(dc, arg->rd, 0, arg->imm, arg->ctrl);
}

static bool trans_getd(DisasContext *dc, arg_getd *arg)
{
    return do_get(dc, arg->rd, arg->rb, 0, arg->ctrl);
}

static bool do_put(DisasContext *dc, int ra, int rb, int imm, int ctrl)
{
    TCGv_i32 t_id, t_ctrl;

    if (trap_userspace(dc, true)) {
        return true;
    }

    t_id = tcg_temp_new_i32();
    if (rb) {
        tcg_gen_andi_i32(t_id, cpu_R[rb], 0xf);
    } else {
        tcg_gen_movi_i32(t_id, imm);
    }

    t_ctrl = tcg_constant_i32(ctrl);
    gen_helper_put(t_id, t_ctrl, reg_for_read(dc, ra));
    return true;
}

static bool trans_put(DisasContext *dc, arg_put *arg)
{
    return do_put(dc, arg->ra, 0, arg->imm, arg->ctrl);
}

static bool trans_putd(DisasContext *dc, arg_putd *arg)
{
    return do_put(dc, arg->ra, arg->rb, 0, arg->ctrl);
}

static void mb_tr_init_disas_context(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    int bound;

    dc->cfg = &cpu->cfg;
    dc->tb_flags = dc->base.tb->flags;
    dc->ext_imm = dc->base.tb->cs_base;
    dc->r0 = NULL;
    dc->r0_set = false;
    dc->mem_index = cpu_mmu_index(cs, false);
    dc->jmp_cond = dc->tb_flags & D_FLAG ? TCG_COND_ALWAYS : TCG_COND_NEVER;
    dc->jmp_dest = -1;

    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
}

static void mb_tr_tb_start(DisasContextBase *dcb, CPUState *cs)
{
}

static void mb_tr_insn_start(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);

    tcg_gen_insn_start(dc->base.pc_next, dc->tb_flags & ~MSR_TB_MASK);
}
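
/*
 * The second insn_start argument mirrors iflags so that the unwinder
 * can recover per-insn state after a fault; record_unaligned_ess()
 * updates that same slot in place to describe an unaligned access.
 */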
static void mb_tr_translate_insn(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);
    uint32_t ir;

    /* TODO: This should raise an exception, not terminate qemu. */
    if (dc->base.pc_next & 3) {
        cpu_abort(cs, "Microblaze: unaligned PC=%x\n",
                  (uint32_t)dc->base.pc_next);
    }

    dc->tb_flags_to_set = 0;

    ir = translator_ldl_swap(cpu_env(cs), &dc->base, dc->base.pc_next,
                             mb_cpu_is_big_endian(cs) != TARGET_BIG_ENDIAN);
    if (!decode(dc, ir)) {
        trap_illegal(dc, true);
    }

    if (dc->r0) {
        dc->r0 = NULL;
        dc->r0_set = false;
    }

    /* Discard the imm global when its contents cannot be used. */
    if ((dc->tb_flags & ~dc->tb_flags_to_set) & IMM_FLAG) {
        tcg_gen_discard_i32(cpu_imm);
    }

    dc->tb_flags &= ~(IMM_FLAG | BIMM_FLAG | D_FLAG);
    dc->tb_flags |= dc->tb_flags_to_set;
    dc->base.pc_next += 4;

    if (dc->jmp_cond != TCG_COND_NEVER && !(dc->tb_flags & D_FLAG)) {
        /* Finish any return-from branch. */
        uint32_t rt_ibe = dc->tb_flags & (DRTI_FLAG | DRTB_FLAG | DRTE_FLAG);
        if (unlikely(rt_ibe != 0)) {
            dc->tb_flags &= ~(DRTI_FLAG | DRTB_FLAG | DRTE_FLAG);
            if (rt_ibe & DRTI_FLAG) {
                do_rti(dc);
            } else if (rt_ibe & DRTB_FLAG) {
                do_rtb(dc);
            } else {
                do_rte(dc);
            }
        }

        /* Complete the branch, ending the TB. */
        switch (dc->base.is_jmp) {
        case DISAS_NORETURN:
            /*
             * E.g. illegal insn in a delay slot.  We've already exited
             * and will handle D_FLAG in mb_cpu_do_interrupt.
             */
            break;
        case DISAS_NEXT:
            /*
             * Normal insn in a delay slot.
             * However, the return-from-exception type insns should
             * return to the main loop, as they have adjusted MSR.
             */
            dc->base.is_jmp = (rt_ibe ? DISAS_EXIT_JUMP : DISAS_JUMP);
            break;
        case DISAS_EXIT_NEXT:
            /*
             * E.g. mts insn in a delay slot.  Continue with btarget,
             * but still return to the main loop.
             */
            dc->base.is_jmp = DISAS_EXIT_JUMP;
            break;
        default:
            g_assert_not_reached();
        }
    }
}
static void mb_tr_tb_stop(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);

    if (dc->base.is_jmp == DISAS_NORETURN) {
        /* We have already exited the TB. */
        return;
    }

    t_sync_flags(dc);

    switch (dc->base.is_jmp) {
    case DISAS_TOO_MANY:
        gen_goto_tb(dc, 0, dc->base.pc_next);
        return;

    case DISAS_EXIT:
        break;
    case DISAS_EXIT_NEXT:
        tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
        break;
    case DISAS_EXIT_JUMP:
        tcg_gen_mov_i32(cpu_pc, cpu_btarget);
        tcg_gen_discard_i32(cpu_btarget);
        break;

    case DISAS_JUMP:
        if (dc->jmp_dest != -1 && !(tb_cflags(dc->base.tb) & CF_NO_GOTO_TB)) {
            /* Direct jump. */
            tcg_gen_discard_i32(cpu_btarget);

            if (dc->jmp_cond != TCG_COND_ALWAYS) {
                /* Conditional direct jump. */
                TCGLabel *taken = gen_new_label();
                TCGv_i32 tmp = tcg_temp_new_i32();

                /*
                 * Copy bvalue to a temp now, so we can discard bvalue.
                 * This can avoid writing bvalue to memory when the
                 * delay slot cannot raise an exception.
                 */
                tcg_gen_mov_i32(tmp, cpu_bvalue);
                tcg_gen_discard_i32(cpu_bvalue);

                tcg_gen_brcondi_i32(dc->jmp_cond, tmp, 0, taken);
                gen_goto_tb(dc, 1, dc->base.pc_next);
                gen_set_label(taken);
            }
            gen_goto_tb(dc, 0, dc->jmp_dest);
            return;
        }

        /* Indirect jump (or direct jump w/ goto_tb disabled). */
        tcg_gen_mov_i32(cpu_pc, cpu_btarget);
        tcg_gen_discard_i32(cpu_btarget);
        tcg_gen_lookup_and_goto_ptr();
        return;

    default:
        g_assert_not_reached();
    }

    /* Finish DISAS_EXIT_* */
    if (unlikely(cs->singlestep_enabled)) {
        gen_raise_exception(dc, EXCP_DEBUG);
    } else {
        tcg_gen_exit_tb(NULL, 0);
    }
}

static const TranslatorOps mb_tr_ops = {
    .init_disas_context = mb_tr_init_disas_context,
    .tb_start           = mb_tr_tb_start,
    .insn_start         = mb_tr_insn_start,
    .translate_insn     = mb_tr_translate_insn,
    .tb_stop            = mb_tr_tb_stop,
};

void mb_translate_code(CPUState *cpu, TranslationBlock *tb,
                       int *max_insns, vaddr pc, void *host_pc)
{
    DisasContext dc;
    translator_loop(cpu, tb, max_insns, pc, host_pc, &mb_tr_ops, &dc.base);
}

void mb_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    CPUMBState *env = cpu_env(cs);
    uint32_t iflags;
    int i;

    qemu_fprintf(f, "pc=0x%08x msr=0x%05x mode=%s(saved=%s) eip=%d ie=%d\n",
                 env->pc, env->msr,
                 (env->msr & MSR_UM) ? "user" : "kernel",
                 (env->msr & MSR_UMS) ? "user" : "kernel",
                 (bool)(env->msr & MSR_EIP),
                 (bool)(env->msr & MSR_IE));

    iflags = env->iflags;
    qemu_fprintf(f, "iflags: 0x%08x", iflags);
    if (iflags & IMM_FLAG) {
        qemu_fprintf(f, " IMM(0x%08x)", env->imm);
    }
    if (iflags & BIMM_FLAG) {
        qemu_fprintf(f, " BIMM");
    }
    if (iflags & D_FLAG) {
        qemu_fprintf(f, " D(btarget=0x%08x)", env->btarget);
    }
    if (iflags & DRTI_FLAG) {
        qemu_fprintf(f, " DRTI");
    }
    if (iflags & DRTE_FLAG) {
        qemu_fprintf(f, " DRTE");
    }
    if (iflags & DRTB_FLAG) {
        qemu_fprintf(f, " DRTB");
    }
    if (iflags & ESR_ESS_FLAG) {
        qemu_fprintf(f, " ESR_ESS(0x%04x)", iflags & ESR_ESS_MASK);
    }

    qemu_fprintf(f, "\nesr=0x%04x fsr=0x%02x btr=0x%08x edr=0x%x\n"
                 "ear=0x" TARGET_FMT_lx " slr=0x%x shr=0x%x\n",
                 env->esr, env->fsr, env->btr, env->edr,
                 env->ear, env->slr, env->shr);

    for (i = 0; i < 32; i++) {
        qemu_fprintf(f, "r%2.2d=%08x%c",
                     i, env->regs[i], i % 4 == 3 ? '\n' : ' ');
    }
    qemu_fprintf(f, "\n");
}
void mb_tcg_init(void)
{
#define R(X)  { &cpu_R[X], offsetof(CPUMBState, regs[X]), "r" #X }
#define SP(X) { &cpu_##X, offsetof(CPUMBState, X), #X }

    static const struct {
        TCGv_i32 *var; int ofs; char name[8];
    } i32s[] = {
        /*
         * Note that r0 is handled specially in reg_for_read
         * and reg_for_write.  Nothing should touch cpu_R[0].
         * Leave that element NULL, which will assert quickly
         * inside the tcg generator functions.
         */
               R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
        R(8),  R(9),  R(10), R(11), R(12), R(13), R(14), R(15),
        R(16), R(17), R(18), R(19), R(20), R(21), R(22), R(23),
        R(24), R(25), R(26), R(27), R(28), R(29), R(30), R(31),

        SP(pc),
        SP(msr),
        SP(msr_c),
        SP(imm),
        SP(iflags),
        SP(bvalue),
        SP(btarget),
        SP(res_val),
    };

#undef R
#undef SP

    for (int i = 0; i < ARRAY_SIZE(i32s); ++i) {
        *i32s[i].var =
            tcg_global_mem_new_i32(tcg_env, i32s[i].ofs, i32s[i].name);
    }

    cpu_res_addr =
        tcg_global_mem_new(tcg_env, offsetof(CPUMBState, res_addr), "res_addr");
}