/* Diff of translate.c between commit c148a0572130ff485cd2249fbdd1a3260d5e10a4 (old) and commit b414df757d73d0a1d37f14a866ff1338b93a4a27 (new). */
1/*
2 * Xilinx MicroBlaze emulation for qemu: main translation routines.
3 *
4 * Copyright (c) 2009 Edgar E. Iglesias.
5 * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public

--- 708 unchanged lines hidden (view full) ---

717 /* Mask off out of range bits. */
718 tcg_gen_andi_i64(ret, ret, MAKE_64BIT_MASK(0, addr_size));
719 }
720 }
721 return ret;
722}
723#endif
724
1/*
2 * Xilinx MicroBlaze emulation for qemu: main translation routines.
3 *
4 * Copyright (c) 2009 Edgar E. Iglesias.
5 * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public

--- 708 unchanged lines hidden (view full) ---

717 /* Mask off out of range bits. */
718 tcg_gen_andi_i64(ret, ret, MAKE_64BIT_MASK(0, addr_size));
719 }
720 }
721 return ret;
722}
723#endif
724
725#ifndef CONFIG_USER_ONLY
725static void record_unaligned_ess(DisasContext *dc, int rd,
726 MemOp size, bool store)
727{
728 uint32_t iflags = tcg_get_insn_start_param(dc->insn_start, 1);
729
730 iflags |= ESR_ESS_FLAG;
731 iflags |= rd << 5;
732 iflags |= store * ESR_S;
733 iflags |= (size == MO_32) * ESR_W;
734
735 tcg_set_insn_start_param(dc->insn_start, 1, iflags);
736}
726static void record_unaligned_ess(DisasContext *dc, int rd,
727 MemOp size, bool store)
728{
729 uint32_t iflags = tcg_get_insn_start_param(dc->insn_start, 1);
730
731 iflags |= ESR_ESS_FLAG;
732 iflags |= rd << 5;
733 iflags |= store * ESR_S;
734 iflags |= (size == MO_32) * ESR_W;
735
736 tcg_set_insn_start_param(dc->insn_start, 1, iflags);
737}
738#endif
737
738static bool do_load(DisasContext *dc, int rd, TCGv addr, MemOp mop,
739 int mem_index, bool rev)
740{
741 MemOp size = mop & MO_SIZE;
742
743 /*
744 * When doing reverse accesses we need to do two things.

--- 5 unchanged lines hidden (view full) ---

750 if (size > MO_8) {
751 mop ^= MO_BSWAP;
752 }
753 if (size < MO_32) {
754 tcg_gen_xori_tl(addr, addr, 3 - size);
755 }
756 }
757
739
740static bool do_load(DisasContext *dc, int rd, TCGv addr, MemOp mop,
741 int mem_index, bool rev)
742{
743 MemOp size = mop & MO_SIZE;
744
745 /*
746 * When doing reverse accesses we need to do two things.

--- 5 unchanged lines hidden (view full) ---

752 if (size > MO_8) {
753 mop ^= MO_BSWAP;
754 }
755 if (size < MO_32) {
756 tcg_gen_xori_tl(addr, addr, 3 - size);
757 }
758 }
759
760 /*
761 * For system mode, enforce alignment if the cpu configuration
762 * requires it. For user-mode, the Linux kernel will have fixed up
763 * any unaligned access, so emulate that by *not* setting MO_ALIGN.
764 */
765#ifndef CONFIG_USER_ONLY
758 if (size > MO_8 &&
759 (dc->tb_flags & MSR_EE) &&
760 dc->cfg->unaligned_exceptions) {
761 record_unaligned_ess(dc, rd, size, false);
762 mop |= MO_ALIGN;
763 }
766 if (size > MO_8 &&
767 (dc->tb_flags & MSR_EE) &&
768 dc->cfg->unaligned_exceptions) {
769 record_unaligned_ess(dc, rd, size, false);
770 mop |= MO_ALIGN;
771 }
772#endif
764
765 tcg_gen_qemu_ld_i32(reg_for_write(dc, rd), addr, mem_index, mop);
766
767 tcg_temp_free(addr);
768 return true;
769}
770
771static bool trans_lbu(DisasContext *dc, arg_typea *arg)

--- 124 unchanged lines hidden (view full) ---

896 if (size > MO_8) {
897 mop ^= MO_BSWAP;
898 }
899 if (size < MO_32) {
900 tcg_gen_xori_tl(addr, addr, 3 - size);
901 }
902 }
903
773
774 tcg_gen_qemu_ld_i32(reg_for_write(dc, rd), addr, mem_index, mop);
775
776 tcg_temp_free(addr);
777 return true;
778}
779
780static bool trans_lbu(DisasContext *dc, arg_typea *arg)

--- 124 unchanged lines hidden (view full) ---

905 if (size > MO_8) {
906 mop ^= MO_BSWAP;
907 }
908 if (size < MO_32) {
909 tcg_gen_xori_tl(addr, addr, 3 - size);
910 }
911 }
912
913 /*
914 * For system mode, enforce alignment if the cpu configuration
915 * requires it. For user-mode, the Linux kernel will have fixed up
916 * any unaligned access, so emulate that by *not* setting MO_ALIGN.
917 */
918#ifndef CONFIG_USER_ONLY
904 if (size > MO_8 &&
905 (dc->tb_flags & MSR_EE) &&
906 dc->cfg->unaligned_exceptions) {
907 record_unaligned_ess(dc, rd, size, true);
908 mop |= MO_ALIGN;
909 }
919 if (size > MO_8 &&
920 (dc->tb_flags & MSR_EE) &&
921 dc->cfg->unaligned_exceptions) {
922 record_unaligned_ess(dc, rd, size, true);
923 mop |= MO_ALIGN;
924 }
925#endif
910
911 tcg_gen_qemu_st_i32(reg_for_read(dc, rd), addr, mem_index, mop);
912
913 tcg_temp_free(addr);
914 return true;
915}
916
917static bool trans_sb(DisasContext *dc, arg_typea *arg)

--- 1020 unchanged lines hidden ---
926
927 tcg_gen_qemu_st_i32(reg_for_read(dc, rd), addr, mem_index, mop);
928
929 tcg_temp_free(addr);
930 return true;
931}
932
933static bool trans_sb(DisasContext *dc, arg_typea *arg)

--- 1020 unchanged lines hidden ---