Lines Matching "no-unaligned-direct-access" in target/microblaze/translate.c (QEMU)

5  *  Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
23 #include "exec/exec-all.h"
25 #include "tcg/tcg-op.h"
26 #include "exec/helper-proto.h"
27 #include "exec/helper-gen.h"
29 #include "qemu/qemu-print.h"
34 #include "exec/helper-info.c.inc"
38 (((src) >> start) & ((1 << (end - start + 1)) - 1))
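The fragment above is the body of the decoder's field-extraction macro: shift the field down to bit 0, then mask off (end - start + 1) bits. A standalone sketch of the idiom (the wrapper name is hypothetical):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t extract_field(uint32_t src, int start, int end)
    {
        /* shift the field to bit 0, then keep (end - start + 1) bits */
        return (src >> start) & ((1u << (end - start + 1)) - 1);
    }

    int main(void)
    {
        uint32_t insn = 0xb000abcd;
        /* the low 16 bits of the word: prints "abcd" */
        printf("%04" PRIx32 "\n", extract_field(insn, 0, 15));
        return 0;
    }

Note the 1u: the shift must be done on an unsigned type, and the idiom only works for field widths up to 31 bits.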
77 /* Immediate branch-taken destination, or -1 for indirect. */
83 if (dc->tb_flags & IMM_FLAG) { in typeb_imm()
84 return deposit32(dc->ext_imm, 0, 16, x); in typeb_imm()
89 /* Include the auto-generated decoder. */
90 #include "decode-insns.c.inc"
95 if ((dc->tb_flags ^ dc->base.tb->flags) & IFLAGS_TB_MASK) { in t_sync_flags()
96 tcg_gen_movi_i32(cpu_iflags, dc->tb_flags & IFLAGS_TB_MASK); in t_sync_flags()
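t_sync_flags() is a lazy writeback: cpu_iflags is only stored when the flags tracked at translation time have diverged from their value at TB entry, so blocks that never change them emit no store at all. A minimal C model of the check (mask value illustrative):

    #include <stdint.h>

    #define IFLAGS_TB_MASK 0xffu        /* illustrative mask */

    static uint32_t cpu_iflags;         /* models the architectural copy */

    static void sync_flags_sketch(uint32_t tb_entry_flags, uint32_t cur_flags)
    {
        /* store only when the masked bits actually changed */
        if ((cur_flags ^ tb_entry_flags) & IFLAGS_TB_MASK) {
            cpu_iflags = cur_flags & IFLAGS_TB_MASK;
        }
    }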
103 dc->base.is_jmp = DISAS_NORETURN; in gen_raise_exception()
109 tcg_gen_movi_i32(cpu_pc, dc->base.pc_next); in gen_raise_exception_sync()
123 if (translator_use_goto_tb(&dc->base, dest)) { in gen_goto_tb()
126 tcg_gen_exit_tb(dc->base.tb, n); in gen_goto_tb()
131 dc->base.is_jmp = DISAS_NORETURN; in gen_goto_tb()
140 if (cond && (dc->tb_flags & MSR_EE) in trap_illegal()
141 && dc->cfg->illegal_opcode_exception) { in trap_illegal()
153 bool cond_user = cond && dc->mem_index == MMU_USER_IDX; in trap_userspace()
155 if (cond_user && (dc->tb_flags & MSR_EE)) { in trap_userspace()
167 if (dc->tb_flags & D_FLAG) { in invalid_delay_slot()
170 insn_type, (uint32_t)dc->base.pc_next); in invalid_delay_slot()
181 if (!dc->r0_set) { in reg_for_read()
182 if (dc->r0 == NULL) { in reg_for_read()
183 dc->r0 = tcg_temp_new_i32(); in reg_for_read()
185 tcg_gen_movi_i32(dc->r0, 0); in reg_for_read()
186 dc->r0_set = true; in reg_for_read()
188 return dc->r0; in reg_for_read()
196 if (dc->r0 == NULL) { in reg_for_write()
197 dc->r0 = tcg_temp_new_i32(); in reg_for_write()
199 return dc->r0; in reg_for_write()
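reg_for_read() and reg_for_write() encode the MicroBlaze rule that r0 is architecturally zero: a read of r0 materializes a scratch temporary holding 0, while a write to r0 lands in a temporary that is never copied back, so the result is dropped. A hedged C model of the observable behaviour (names illustrative, not the QEMU API):

    #include <stdint.h>

    static uint32_t regs[32];

    static uint32_t read_reg(int n)
    {
        return n == 0 ? 0 : regs[n];    /* r0 always reads as zero */
    }

    static void write_reg(int n, uint32_t val)
    {
        if (n != 0) {                   /* writes to r0 are discarded */
            regs[n] = val;
        }
    }

The rd == 0 && !side_effects early returns in the do_type* helpers below apply the same rule one level up: an instruction whose only effect would be writing r0 can be skipped entirely.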
207 if (arg->rd == 0 && !side_effects) { in do_typea()
211 rd = reg_for_write(dc, arg->rd); in do_typea()
212 ra = reg_for_read(dc, arg->ra); in do_typea()
213 rb = reg_for_read(dc, arg->rb); in do_typea()
223 if (arg->rd == 0 && !side_effects) { in do_typea0()
227 rd = reg_for_write(dc, arg->rd); in do_typea0()
228 ra = reg_for_read(dc, arg->ra); in do_typea0()
238 if (arg->rd == 0 && !side_effects) { in do_typeb_imm()
242 rd = reg_for_write(dc, arg->rd); in do_typeb_imm()
243 ra = reg_for_read(dc, arg->ra); in do_typeb_imm()
244 fni(rd, ra, arg->imm); in do_typeb_imm()
253 if (arg->rd == 0 && !side_effects) { in do_typeb_val()
257 rd = reg_for_write(dc, arg->rd); in do_typeb_val()
258 ra = reg_for_read(dc, arg->ra); in do_typeb_val()
259 imm = tcg_constant_i32(arg->imm); in do_typeb_val()
271 { return dc->cfg->CFG && do_typea(dc, a, SE, FN); }
279 { return dc->cfg->CFG && do_typea0(dc, a, SE, FN); }
287 { return dc->cfg->CFG && do_typeb_imm(dc, a, SE, FNI); }
301 /* No input carry, but output carry. */
319 /* Input carry, but no output carry. */
387 int width = imm_w - imm_s + 1; in gen_bsifi()
488 dc->ext_imm = arg->imm << 16; in DO_TYPEA_CFG()
489 tcg_gen_movi_i32(cpu_imm, dc->ext_imm); in DO_TYPEA_CFG()
490 dc->tb_flags_to_set = IMM_FLAG; in DO_TYPEA_CFG()
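The three lines above translate the imm prefix instruction: the 16-bit payload is staged into the high half of ext_imm, and IMM_FLAG arms the immediately following Type-B instruction, whose typeb_imm() (shown earlier) merges its own 16-bit immediate into the low half with deposit32(). A sketch of the merge, with deposit32's behaviour re-implemented for illustration:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* illustrative re-implementation of deposit32(base, pos, len, val):
     * replace len bits of base, starting at bit pos, with bits of val */
    static uint32_t deposit32_sketch(uint32_t base, int pos, int len,
                                     uint32_t val)
    {
        uint32_t mask = (len < 32 ? (1u << len) - 1 : ~0u) << pos;
        return (base & ~mask) | ((val << pos) & mask);
    }

    int main(void)
    {
        uint32_t ext_imm = 0x1234u << 16;   /* staged by "imm 0x1234" */
        uint32_t x = 0x5678;                /* next insn's 16-bit immediate */
        /* prints "12345678": the 32-bit immediate the next insn sees */
        printf("%08" PRIx32 "\n", deposit32_sketch(ext_imm, 0, 16, x));
        return 0;
    }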
535 /* No input carry, but output carry. */ in DO_TYPEA_CFG()
553 /* No input or output carry. */
559 /* Input carry, no output carry. */
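These carry comments, together with the matching pair shown earlier, mark the arithmetic helper variants: MicroBlaze add/rsub opcodes encode independently whether an operation consumes MSR[C] (the C forms) and whether it writes it back (the K, "keep", forms leave the flag untouched). A compact sketch of the four add flavours, with msr_c standing in for MSR[C]:

    #include <stdbool.h>
    #include <stdint.h>

    static bool msr_c;                  /* models MSR[C] */

    static uint32_t add_sketch(uint32_t a, uint32_t b,
                               bool use_carry, bool keep_carry)
    {
        uint64_t sum = (uint64_t)a + b + (use_carry && msr_c ? 1 : 0);
        if (!keep_carry) {
            msr_c = sum >> 32;          /* carry out of bit 31 */
        }
        return (uint32_t)sum;
    }

    /* add   = add_sketch(a, b, false, false)
     * addc  = add_sketch(a, b, true,  false)
     * addk  = add_sketch(a, b, false, true)
     * addkc = add_sketch(a, b, true,  true) */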
642 if ((ra == 1 || rb == 1) && dc->cfg->stackprot) { in DO_TYPEA()
661 if (ra == 1 && dc->cfg->stackprot) { in compute_ldst_addr_typeb()
670 int addr_size = dc->cfg->addr_size; in compute_ldst_addr_ea()
699 uint32_t iflags = tcg_get_insn_start_param(dc->base.insn_start, 1); in record_unaligned_ess()
706 tcg_set_insn_start_param(dc->base.insn_start, 1, iflags); in record_unaligned_ess()
726 tcg_gen_xori_tl(addr, addr, 3 - size); in do_load()
732 * requires it. For user-mode, the Linux kernel will have fixed up in do_load()
733 * any unaligned access, so emulate that by *not* setting MO_ALIGN. in do_load()
737 (dc->tb_flags & MSR_EE) && in do_load()
738 dc->cfg->unaligned_exceptions) { in do_load()
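Two details of do_load() (mirrored in do_store() further down) deserve unpacking. The xori with 3 - size implements the reversed l*r forms for sub-word accesses: flipping the low address bits makes a byte or half-word land at the opposite end of its 32-bit word, which combined with a byte-swapped memop yields the reversed view. And MO_ALIGN is requested only when the configured core actually raises hardware unaligned exceptions and MSR_EE is set; under user-mode emulation the kernel would have fixed unaligned accesses up, so the flag is deliberately left out. A small demonstration of the address flip:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* size is the MemOp log2 width: 0 = byte, 1 = half-word */
        for (uint32_t a = 0; a < 4; a++) {
            printf("byte @%u -> @%u\n", a, a ^ 3u);   /* 3 - size, size 0 */
        }
        for (uint32_t a = 0; a < 4; a += 2) {
            printf("half @%u -> @%u\n", a, a ^ 2u);   /* 3 - size, size 1 */
        }
        return 0;
    }

Word-sized reversed accesses need no address change; only the byte swap applies.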
750 TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb); in trans_lbu()
751 return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, false); in trans_lbu()
756 TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb); in trans_lbur()
757 return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, true); in trans_lbur()
768 TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb); in trans_lbuea()
769 return do_load(dc, arg->rd, addr, MO_UB, MMU_NOMMU_IDX, false); in trans_lbuea()
775 TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm); in trans_lbui()
776 return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, false); in trans_lbui()
781 TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb); in trans_lhu()
782 return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false); in trans_lhu()
787 TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb); in trans_lhur()
788 return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, true); in trans_lhur()
799 TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb); in trans_lhuea()
800 return do_load(dc, arg->rd, addr, MO_TEUW, MMU_NOMMU_IDX, false); in trans_lhuea()
806 TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm); in trans_lhui()
807 return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false); in trans_lhui()
812 TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb); in trans_lw()
813 return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false); in trans_lw()
818 TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb); in trans_lwr()
819 return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, true); in trans_lwr()
830 TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb); in trans_lwea()
831 return do_load(dc, arg->rd, addr, MO_TEUL, MMU_NOMMU_IDX, false); in trans_lwea()
837 TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm); in trans_lwi()
838 return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false); in trans_lwi()
843 TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb); in trans_lwx()
845 /* lwx does not throw unaligned access errors, so force alignment */ in trans_lwx()
848 tcg_gen_qemu_ld_i32(cpu_res_val, addr, dc->mem_index, MO_TEUL); in trans_lwx()
851 if (arg->rd) { in trans_lwx()
852 tcg_gen_mov_i32(cpu_R[arg->rd], cpu_res_val); in trans_lwx()
855 /* No support for AXI exclusive so always clear C */ in trans_lwx()
876 tcg_gen_xori_tl(addr, addr, 3 - size); in do_store()
882 * requires it. For user-mode, the Linux kernel will have fixed up in do_store()
883 * any unaligned access, so emulate that by *not* setting MO_ALIGN. in do_store()
887 (dc->tb_flags & MSR_EE) && in do_store()
888 dc->cfg->unaligned_exceptions) { in do_store()
900 TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb); in trans_sb()
901 return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, false); in trans_sb()
906 TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb); in trans_sbr()
907 return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, true); in trans_sbr()
918 TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb); in trans_sbea()
919 return do_store(dc, arg->rd, addr, MO_UB, MMU_NOMMU_IDX, false); in trans_sbea()
925 TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm); in trans_sbi()
926 return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, false); in trans_sbi()
931 TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb); in trans_sh()
932 return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false); in trans_sh()
937 TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb); in trans_shr()
938 return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, true); in trans_shr()
949 TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb); in trans_shea()
950 return do_store(dc, arg->rd, addr, MO_TEUW, MMU_NOMMU_IDX, false); in trans_shea()
956 TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm); in trans_shi()
957 return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false); in trans_shi()
962 TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb); in trans_sw()
963 return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false); in trans_sw()
968 TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb); in trans_swr()
969 return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, true); in trans_swr()
980 TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb); in trans_swea()
981 return do_store(dc, arg->rd, addr, MO_TEUL, MMU_NOMMU_IDX, false); in trans_swea()
987 TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm); in trans_swi()
988 return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false); in trans_swi()
993 TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb); in trans_swx()
998 /* swx does not throw unaligned access errors, so force alignment */ in trans_swx()
1005 * In either case, addr is no longer needed. in trans_swx()
1016 reg_for_write(dc, arg->rd), in trans_swx()
1017 dc->mem_index, MO_TEUL); in trans_swx()
1035 tcg_gen_movi_tl(cpu_res_addr, -1); in trans_swx()
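Taken together, trans_lwx() and trans_swx() implement MicroBlaze's exclusive pair: lwx records the word-aligned address in cpu_res_addr and the loaded value in cpu_res_val, and swx performs the store only if its address matches the reservation and memory still holds the value lwx observed (QEMU checks this with an atomic compare-and-swap); the reservation is then cleared by writing -1, as in the last line above. An illustrative single-threaded C model, all names hypothetical:

    #include <stdbool.h>
    #include <stdint.h>

    static uint32_t res_addr = -1u;     /* -1 means "no reservation" */
    static uint32_t res_val;

    static uint32_t lwx(const uint32_t *mem, uint32_t addr)
    {
        addr &= ~3u;                    /* lwx forces word alignment */
        res_addr = addr;
        res_val = mem[addr / 4];
        return res_val;
    }

    /* store only if the reservation is intact and memory is unchanged;
     * the return value models the success/failure the guest tests */
    static bool swx(uint32_t *mem, uint32_t addr, uint32_t val)
    {
        bool ok = false;
        addr &= ~3u;
        if (res_addr == addr && mem[addr / 4] == res_val) {
            mem[addr / 4] = val;
            ok = true;
        }
        res_addr = -1u;                 /* reservation consumed either way */
        return ok;
    }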
1041 dc->tb_flags_to_set |= D_FLAG; in setup_dslot()
1042 if (type_b && (dc->tb_flags & IMM_FLAG)) { in setup_dslot()
1043 dc->tb_flags_to_set |= BIMM_FLAG; in setup_dslot()
1060 tcg_gen_movi_i32(cpu_R[link], dc->base.pc_next); in do_branch()
1064 add_pc = abs ? 0 : dc->base.pc_next; in do_branch()
1066 dc->jmp_dest = -1; in do_branch()
1069 dc->jmp_dest = add_pc + dest_imm; in do_branch()
1070 tcg_gen_movi_i32(cpu_btarget, dc->jmp_dest); in do_branch()
1072 dc->jmp_cond = TCG_COND_ALWAYS; in do_branch()
1078 { return do_branch(dc, arg->rb, 0, DELAY, ABS, LINK ? arg->rd : 0); } \
1080 { return do_branch(dc, -1, arg->imm, DELAY, ABS, LINK ? arg->rd : 0); }
1101 dc->jmp_cond = cond; in DO_BR()
1108 dc->jmp_dest = -1; in DO_BR()
1109 tcg_gen_addi_i32(cpu_btarget, cpu_R[dest_rb], dc->base.pc_next); in DO_BR()
1111 dc->jmp_dest = dc->base.pc_next + dest_imm; in DO_BR()
1112 tcg_gen_movi_i32(cpu_btarget, dc->jmp_dest); in DO_BR()
1117 next = tcg_constant_i32(dc->base.pc_next + (delay + 1) * 4); in DO_BR()
1118 tcg_gen_movcond_i32(dc->jmp_cond, cpu_btarget, in DO_BR()
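The movcond at the end of do_bcc() resolves the comparison without emitting a branch: cpu_btarget always receives a value, either the taken destination or the fall-through PC, and the fall-through accounts for an optional delay slot by landing one instruction further on. A worked example of that arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t pc_next = 0x1000;      /* address of the branch insn */
        for (int delay = 0; delay <= 1; delay++) {
            /* pc_next + (delay + 1) * 4, as in the line above */
            printf("delay=%d -> not-taken path resumes at 0x%x\n",
                   delay, pc_next + (delay + 1) * 4);
        }
        return 0;                       /* prints 0x1004, then 0x1008 */
    }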
1127 { return do_bcc(dc, arg->rb, 0, COND, arg->ra, false); } \
1129 { return do_bcc(dc, arg->rb, 0, COND, arg->ra, true); } \
1131 { return do_bcc(dc, -1, arg->imm, COND, arg->ra, false); } \
1133 { return do_bcc(dc, -1, arg->imm, COND, arg->ra, true); }
1151 tcg_gen_mov_i32(cpu_pc, reg_for_read(dc, arg->rb)); in DO_BCC()
1152 if (arg->rd) { in DO_BCC()
1153 tcg_gen_movi_i32(cpu_R[arg->rd], dc->base.pc_next); in DO_BCC()
1156 tcg_gen_movi_tl(cpu_res_addr, -1); in DO_BCC()
1158 dc->base.is_jmp = DISAS_EXIT; in DO_BCC()
1164 uint32_t imm = arg->imm; in trans_brki()
1174 if (arg->rd) { in trans_brki()
1175 tcg_gen_movi_i32(cpu_R[arg->rd], dc->base.pc_next); in trans_brki()
1177 tcg_gen_movi_tl(cpu_res_addr, -1); in trans_brki()
1198 msr_to_set |= (dc->tb_flags & (MSR_UM | MSR_VM)) << 1; in trans_brki()
1203 dc->base.is_jmp = DISAS_EXIT; in trans_brki()
1211 int mbar_imm = arg->imm; in trans_mbar()
1218 /* Data access memory barrier. */ in trans_mbar()
1233 -offsetof(MicroBlazeCPU, env) in trans_mbar()
1236 tcg_gen_movi_i32(cpu_pc, dc->base.pc_next + 4); in trans_mbar()
1242 * If !(mbar_imm & 1), this is an instruction access memory barrier in trans_mbar()
1243 * and we need to end the TB so that we recognize self-modified in trans_mbar()
1252 dc->base.is_jmp = DISAS_EXIT_NEXT; in trans_mbar()
1265 dc->tb_flags_to_set |= to_set; in do_rts()
1268 dc->jmp_cond = TCG_COND_ALWAYS; in do_rts()
1269 dc->jmp_dest = -1; in do_rts()
1270 tcg_gen_addi_i32(cpu_btarget, reg_for_read(dc, arg->ra), arg->imm); in do_rts()
1286 if (dc->cfg->opcode_0_illegal) { in DO_RTS()
1309 uint32_t imm = arg->imm; in do_msrclrset()
1315 if (arg->rd) { in do_msrclrset()
1316 msr_read(dc, cpu_R[arg->rd]); in do_msrclrset()
1339 dc->base.is_jmp = DISAS_EXIT_NEXT; in do_msrclrset()
1363 if (arg->e && arg->rs != 0x1003) { in trans_mts()
1365 "Invalid extended mts reg 0x%x\n", arg->rs); in trans_mts()
1369 TCGv_i32 src = reg_for_read(dc, arg->ra); in trans_mts()
1370 switch (arg->rs) { in trans_mts()
1397 TCGv_i32 tmp_ext = tcg_constant_i32(arg->e); in trans_mts()
1398 TCGv_i32 tmp_reg = tcg_constant_i32(arg->rs & 7); in trans_mts()
1405 qemu_log_mask(LOG_GUEST_ERROR, "Invalid mts reg 0x%x\n", arg->rs); in trans_mts()
1408 dc->base.is_jmp = DISAS_EXIT_NEXT; in trans_mts()
1415 TCGv_i32 dest = reg_for_write(dc, arg->rd); in trans_mfs()
1417 if (arg->e) { in trans_mfs()
1418 switch (arg->rs) { in trans_mfs()
1432 /* High bits of PVR6-9 not implemented. */ in trans_mfs()
1437 "Invalid extended mfs reg 0x%x\n", arg->rs); in trans_mfs()
1442 switch (arg->rs) { in trans_mfs()
1444 tcg_gen_movi_i32(dest, dc->base.pc_next); in trans_mfs()
1483 TCGv_i32 tmp_ext = tcg_constant_i32(arg->e); in trans_mfs()
1484 TCGv_i32 tmp_reg = tcg_constant_i32(arg->rs & 7); in trans_mfs()
1493 offsetof(MicroBlazeCPU, cfg.pvr_regs[arg->rs - 0x2000]) in trans_mfs()
1494 - offsetof(MicroBlazeCPU, env)); in trans_mfs()
1497 qemu_log_mask(LOG_GUEST_ERROR, "Invalid mfs reg 0x%x\n", arg->rs); in trans_mfs()
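The offsetof() subtraction a few lines up is the usual QEMU idiom for reaching per-CPU state that lives outside CPUMBState: TCG accesses to CPU state are addressed relative to env, and env is itself embedded in MicroBlazeCPU, so cfg.pvr_regs[] is reachable at the signed delta offsetof(cpu, field) - offsetof(cpu, env). A toy layout demonstrating the arithmetic (all types illustrative):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct { uint32_t pc, msr; } EnvSketch;

    typedef struct {
        uint32_t parent[4];             /* stand-in for the QOM parent */
        EnvSketch env;
        uint32_t pvr_regs[13];
    } CpuSketch;

    int main(void)
    {
        /* the delta from &cpu->env that reaches pvr_regs[2] */
        ptrdiff_t delta = offsetof(CpuSketch, pvr_regs[2])
                        - offsetof(CpuSketch, env);
        printf("pvr_regs[2] sits %td bytes past env\n", delta);
        return 0;
    }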
1558 return do_get(dc, arg->rd, 0, arg->imm, arg->ctrl); in trans_get()
1563 return do_get(dc, arg->rd, arg->rb, 0, arg->ctrl); in trans_getd()
1588 return do_put(dc, arg->ra, 0, arg->imm, arg->ctrl); in trans_put()
1593 return do_put(dc, arg->ra, arg->rb, 0, arg->ctrl); in trans_putd()
1602 dc->cfg = &cpu->cfg; in mb_tr_init_disas_context()
1603 dc->tb_flags = dc->base.tb->flags; in mb_tr_init_disas_context()
1604 dc->ext_imm = dc->base.tb->cs_base; in mb_tr_init_disas_context()
1605 dc->r0 = NULL; in mb_tr_init_disas_context()
1606 dc->r0_set = false; in mb_tr_init_disas_context()
1607 dc->mem_index = cpu_mmu_index(cs, false); in mb_tr_init_disas_context()
1608 dc->jmp_cond = dc->tb_flags & D_FLAG ? TCG_COND_ALWAYS : TCG_COND_NEVER; in mb_tr_init_disas_context()
1609 dc->jmp_dest = -1; in mb_tr_init_disas_context()
1611 bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4; in mb_tr_init_disas_context()
1612 dc->base.max_insns = MIN(dc->base.max_insns, bound); in mb_tr_init_disas_context()
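The bound computation in mb_tr_init_disas_context() caps a translation block at the end of the current guest page: TARGET_PAGE_MASK has all high bits set, so OR-ing it into pc_first leaves only the in-page offset (with the high bits forced on), the negation of that is exactly the number of bytes left in the page, and dividing by 4 turns it into an instruction count. A worked example, assuming 4 KiB pages:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t page_mask = 0xfffff000u;   /* illustrative 4 KiB pages */
        uint32_t pc_first  = 0x00401ff8u;   /* 8 bytes before page end */

        /* 0x00401ff8 | 0xfffff000 = 0xfffffff8; -0xfffffff8 = 8; 8/4 = 2 */
        uint32_t bound = -(pc_first | page_mask) / 4;
        printf("at most %u insns before the page ends\n", bound);  /* 2 */
        return 0;
    }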
1623 tcg_gen_insn_start(dc->base.pc_next, dc->tb_flags & ~MSR_TB_MASK); in mb_tr_insn_start()
1632 if (dc->base.pc_next & 3) { in mb_tr_translate_insn()
1633 cpu_abort(cs, "Microblaze: unaligned PC=%x\n", in mb_tr_translate_insn()
1634 (uint32_t)dc->base.pc_next); in mb_tr_translate_insn()
1637 dc->tb_flags_to_set = 0; in mb_tr_translate_insn()
1639 ir = translator_ldl(cpu_env(cs), &dc->base, dc->base.pc_next); in mb_tr_translate_insn()
1644 if (dc->r0) { in mb_tr_translate_insn()
1645 dc->r0 = NULL; in mb_tr_translate_insn()
1646 dc->r0_set = false; in mb_tr_translate_insn()
1650 if ((dc->tb_flags & ~dc->tb_flags_to_set) & IMM_FLAG) { in mb_tr_translate_insn()
1654 dc->tb_flags &= ~(IMM_FLAG | BIMM_FLAG | D_FLAG); in mb_tr_translate_insn()
1655 dc->tb_flags |= dc->tb_flags_to_set; in mb_tr_translate_insn()
1656 dc->base.pc_next += 4; in mb_tr_translate_insn()
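The flag juggling above gives IMM_FLAG, BIMM_FLAG and D_FLAG a strict one-instruction lifetime: whatever the just-translated instruction staged through tb_flags_to_set (an imm prefix, a delay slot) becomes visible to the next instruction only, and the previous instruction's one-shot flags are cleared first. A minimal sketch of the hand-off (flag values illustrative):

    #include <stdint.h>
    #include <stdio.h>

    enum { IMM_FLAG = 1, BIMM_FLAG = 2, D_FLAG = 4 };  /* illustrative */

    int main(void)
    {
        uint32_t tb_flags = IMM_FLAG;   /* armed by the previous "imm" */
        uint32_t to_set   = D_FLAG;     /* staged by the current branch */

        tb_flags &= ~(IMM_FLAG | BIMM_FLAG | D_FLAG);  /* one-shots expire */
        tb_flags |= to_set;                            /* staged flags arm */
        printf("flags for next insn: %u\n", tb_flags); /* prints 4 */
        return 0;
    }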
1658 if (dc->jmp_cond != TCG_COND_NEVER && !(dc->tb_flags & D_FLAG)) { in mb_tr_translate_insn()
1660 * Finish any return-from branch. in mb_tr_translate_insn()
1662 uint32_t rt_ibe = dc->tb_flags & (DRTI_FLAG | DRTB_FLAG | DRTE_FLAG); in mb_tr_translate_insn()
1664 dc->tb_flags &= ~(DRTI_FLAG | DRTB_FLAG | DRTE_FLAG); in mb_tr_translate_insn()
1675 switch (dc->base.is_jmp) { in mb_tr_translate_insn()
1685 * However, the return-from-exception type insns should in mb_tr_translate_insn()
1688 dc->base.is_jmp = (rt_ibe ? DISAS_EXIT_JUMP : DISAS_JUMP); in mb_tr_translate_insn()
1695 dc->base.is_jmp = DISAS_EXIT_JUMP; in mb_tr_translate_insn()
1707 if (dc->base.is_jmp == DISAS_NORETURN) { in mb_tr_tb_stop()
1714 switch (dc->base.is_jmp) { in mb_tr_tb_stop()
1716 gen_goto_tb(dc, 0, dc->base.pc_next); in mb_tr_tb_stop()
1722 tcg_gen_movi_i32(cpu_pc, dc->base.pc_next); in mb_tr_tb_stop()
1730 if (dc->jmp_dest != -1 && !(tb_cflags(dc->base.tb) & CF_NO_GOTO_TB)) { in mb_tr_tb_stop()
1731 /* Direct jump. */ in mb_tr_tb_stop()
1734 if (dc->jmp_cond != TCG_COND_ALWAYS) { in mb_tr_tb_stop()
1735 /* Conditional direct jump. */ in mb_tr_tb_stop()
1747 tcg_gen_brcondi_i32(dc->jmp_cond, tmp, 0, taken); in mb_tr_tb_stop()
1748 gen_goto_tb(dc, 1, dc->base.pc_next); in mb_tr_tb_stop()
1751 gen_goto_tb(dc, 0, dc->jmp_dest); in mb_tr_tb_stop()
1755 /* Indirect jump (or direct jump w/ goto_tb disabled) */ in mb_tr_tb_stop()
1766 if (unlikely(cs->singlestep_enabled)) { in mb_tr_tb_stop()
1795 env->pc, env->msr, in mb_cpu_dump_state()
1796 (env->msr & MSR_UM) ? "user" : "kernel", in mb_cpu_dump_state()
1797 (env->msr & MSR_UMS) ? "user" : "kernel", in mb_cpu_dump_state()
1798 (bool)(env->msr & MSR_EIP), in mb_cpu_dump_state()
1799 (bool)(env->msr & MSR_IE)); in mb_cpu_dump_state()
1801 iflags = env->iflags; in mb_cpu_dump_state()
1804 qemu_fprintf(f, " IMM(0x%08x)", env->imm); in mb_cpu_dump_state()
1810 qemu_fprintf(f, " D(btarget=0x%08x)", env->btarget); in mb_cpu_dump_state()
1827 env->esr, env->fsr, env->btr, env->edr, in mb_cpu_dump_state()
1828 env->ear, env->slr, env->shr); in mb_cpu_dump_state()
1832 i, env->regs[i], i % 4 == 3 ? '\n' : ' '); in mb_cpu_dump_state()