--- translate.c (bc921866cefb3ec4031714aeb4569e0e7622dfba)
+++ translate.c (0d89cb7c29d9030d96c32ea4cdde7b4ee6f3dcf4)
 /*
  * HPPA emulation cpu translation for qemu.
  *
  * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
  *
  * This library is free software; you can redistribute it and/or
  * modify it under the terms of the GNU Lesser General Public
  * License as published by the Free Software Foundation; either
[... 31 unchanged lines hidden ...]
 typedef struct DisasCond {
     TCGCond c;
     TCGv_i64 a0, a1;
 } DisasCond;

 typedef struct DisasIAQE {
     /* IASQ; may be null for no change from TB. */
     TCGv_i64 space;
-    /* IAOQ base; may be null for immediate absolute address. */
+    /* IAOQ base; may be null for relative address. */
     TCGv_i64 base;
-    /* IAOQ addend; absolute immedate address if base is null. */
+    /* IAOQ addend; if base is null, relative to ctx->iaoq_first. */
     int64_t disp;
 } DisasIAQE;

 typedef struct DisasContext {
     DisasContextBase base;
     CPUState *cs;

     /* IAQ_Front, IAQ_Back. */
     DisasIAQE iaq_f, iaq_b;
     /* IAQ_Next, for jumps, otherwise null for simple advance. */
     DisasIAQE iaq_j, *iaq_n;

+    /* IAOQ_Front at entry to TB. */
+    uint64_t iaoq_first;
+
     DisasCond null_cond;
     TCGLabel *null_lab;

     TCGv_i64 zero;

     uint32_t insn;
     uint32_t tb_flags;
     int mmu_idx;
[... 564 unchanged lines hidden ...]
 }

 static void copy_iaoq_entry(DisasContext *ctx, TCGv_i64 dest,
                             const DisasIAQE *src)
 {
     uint64_t mask = gva_offset_mask(ctx->tb_flags);

     if (src->base == NULL) {
-        tcg_gen_movi_i64(dest, src->disp & mask);
+        tcg_gen_movi_i64(dest, (ctx->iaoq_first + src->disp) & mask);
     } else if (src->disp == 0) {
         tcg_gen_andi_i64(dest, src->base, mask);
     } else {
         tcg_gen_addi_i64(dest, src->base, src->disp);
         tcg_gen_andi_i64(dest, dest, mask);
     }
 }

[... 17 unchanged lines hidden ...]
 }

 static void install_link(DisasContext *ctx, unsigned link, bool with_sr0)
 {
     tcg_debug_assert(ctx->null_cond.c == TCG_COND_NEVER);
     if (!link) {
         return;
     }
-    if (ctx->iaq_b.base) {
-        tcg_gen_addi_i64(cpu_gr[link], ctx->iaq_b.base,
-                         ctx->iaq_b.disp + 4);
-    } else {
-        tcg_gen_movi_i64(cpu_gr[link], ctx->iaq_b.disp + 4);
-    }
+    DisasIAQE next = iaqe_incr(&ctx->iaq_b, 4);
+    copy_iaoq_entry(ctx, cpu_gr[link], &next);
 #ifndef CONFIG_USER_ONLY
     if (with_sr0) {
         tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_b);
     }
 #endif
 }

 static void gen_excp_1(int exception)
[... 35 unchanged lines hidden ...]
     } while (0)
 #endif

 static bool use_goto_tb(DisasContext *ctx, const DisasIAQE *f,
                         const DisasIAQE *b)
 {
     return (!iaqe_variable(f) &&
             (b == NULL || !iaqe_variable(b)) &&
-            translator_use_goto_tb(&ctx->base, f->disp));
+            translator_use_goto_tb(&ctx->base, ctx->iaoq_first + f->disp));
 }

 /* If the next insn is to be nullified, and it's on the same page,
    and we're not attempting to set a breakpoint on it, then we can
    totally skip the nullified insn.  This avoids creating and
    executing a TB that merely branches to the next TB.  */
 static bool use_nullify_skip(DisasContext *ctx)
 {
     return (!(tb_cflags(ctx->base.tb) & CF_BP_PAGE)
             && !iaqe_variable(&ctx->iaq_b)
-            && is_same_page(&ctx->base, ctx->iaq_b.disp));
+            && (((ctx->iaoq_first + ctx->iaq_b.disp) ^ ctx->iaoq_first)
+                & TARGET_PAGE_MASK) == 0);
 }

 static void gen_goto_tb(DisasContext *ctx, int which,
                         const DisasIAQE *f, const DisasIAQE *b)
 {
     if (use_goto_tb(ctx, f, b)) {
         tcg_gen_goto_tb(which);
         install_iaq_entries(ctx, f, b);
[... 1246 unchanged lines hidden ...]
    Therefore normal read or write is supposed to fail, but specific
    offsets have kernel code mapped to raise permissions to implement
    system calls.  Handling this via an explicit check here, rather
    in than the "be disp(sr2,r0)" instruction that probably sent us
    here, is the easiest way to handle the branch delay slot on the
    aforementioned BE.  */
 static void do_page_zero(DisasContext *ctx)
 {
+    assert(ctx->iaq_f.disp == 0);
+
     /* If by some means we get here with PSW[N]=1, that implies that
        the B,GATE instruction would be skipped, and we'd fault on the
        next insn within the privileged page.  */
     switch (ctx->null_cond.c) {
     case TCG_COND_NEVER:
         break;
     case TCG_COND_ALWAYS:
         tcg_gen_movi_i64(cpu_psw_n, 0);
         goto do_sigill;
     default:
         /* Since this is always the first (and only) insn within the
            TB, we should know the state of PSW[N] from TB->FLAGS.  */
         g_assert_not_reached();
     }

     /* Check that we didn't arrive here via some means that allowed
        non-sequential instruction execution.  Normally the PSW[B] bit
        detects this by disallowing the B,GATE instruction to execute
        under such conditions.  */
-    if (iaqe_variable(&ctx->iaq_b) || ctx->iaq_b.disp != ctx->iaq_f.disp + 4) {
+    if (iaqe_variable(&ctx->iaq_b) || ctx->iaq_b.disp != 4) {
         goto do_sigill;
     }

-    switch (ctx->iaq_f.disp & -4) {
+    switch (ctx->base.pc_first) {
     case 0x00: /* Null pointer call */
         gen_excp_1(EXCP_IMP);
         ctx->base.is_jmp = DISAS_NORETURN;
         break;

     case 0xb0: /* LWS */
         gen_excp_1(EXCP_SYSCALL_LWS);
         ctx->base.is_jmp = DISAS_NORETURN;
[... 2574 unchanged lines hidden ...]

     ctx->cs = cs;
     ctx->tb_flags = ctx->base.tb->flags;
     ctx->is_pa20 = hppa_is_pa20(cpu_env(cs));

 #ifdef CONFIG_USER_ONLY
     ctx->privilege = MMU_IDX_TO_PRIV(MMU_USER_IDX);
     ctx->mmu_idx = MMU_USER_IDX;
-    ctx->iaq_f.disp = ctx->base.pc_first | ctx->privilege;
-    ctx->iaq_b.disp = ctx->base.tb->cs_base | ctx->privilege;
+    ctx->iaoq_first = ctx->base.pc_first | ctx->privilege;
+    ctx->iaq_b.disp = ctx->base.tb->cs_base - ctx->base.pc_first;
     ctx->unalign = (ctx->tb_flags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN);
 #else
     ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
     ctx->mmu_idx = (ctx->tb_flags & PSW_D
                     ? PRIV_P_TO_MMU_IDX(ctx->privilege, ctx->tb_flags & PSW_P)
                     : ctx->tb_flags & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX);

     /* Recover the IAOQ values from the GVA + PRIV. */
     uint64_t cs_base = ctx->base.tb->cs_base;
     uint64_t iasq_f = cs_base & ~0xffffffffull;
     int32_t diff = cs_base;

-    ctx->iaq_f.disp = (ctx->base.pc_first & ~iasq_f) + ctx->privilege;
+    ctx->iaoq_first = (ctx->base.pc_first & ~iasq_f) + ctx->privilege;
+
     if (diff) {
-        ctx->iaq_b.disp = ctx->iaq_f.disp + diff;
+        ctx->iaq_b.disp = diff;
     } else {
         ctx->iaq_b.base = cpu_iaoq_b;
         ctx->iaq_b.space = cpu_iasq_b;
     }
 #endif

     ctx->zero = tcg_constant_i64(0);

[... 16 unchanged lines hidden ...]
     ctx->null_lab = NULL;
 }

 static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
 {
     DisasContext *ctx = container_of(dcbase, DisasContext, base);

     tcg_debug_assert(!iaqe_variable(&ctx->iaq_f));
-    tcg_gen_insn_start(ctx->iaq_f.disp,
-                       iaqe_variable(&ctx->iaq_b) ? -1 : ctx->iaq_b.disp,
-                       0);
+    tcg_gen_insn_start(ctx->iaoq_first + ctx->iaq_f.disp,
+                       (iaqe_variable(&ctx->iaq_b) ? -1 :
+                        ctx->iaoq_first + ctx->iaq_b.disp), 0);
     ctx->insn_start_updated = false;
 }

 static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
 {
     DisasContext *ctx = container_of(dcbase, DisasContext, base);
     CPUHPPAState *env = cpu_env(cs);
     DisasJumpType ret;
[... 157 unchanged lines hidden ...]
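
Note (not part of the diff): the core of this change is that a DisasIAQE with a null base no longer holds an absolute IAOQ value; its disp is now a displacement from ctx->iaoq_first, the IAOQ_Front at entry to the TB, and copy_iaoq_entry and use_nullify_skip add iaoq_first back in when they need an absolute offset. The stand-alone sketch below mirrors that resolution and page-check logic in plain C, with no TCG code generation; the names resolve_iaoq, same_page_as_tb_start, and the 4 KiB page and full-width offset mask are illustrative assumptions, not QEMU code.

```c
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative assumptions: a 4 KiB page and an unrestricted offset mask. */
#define SKETCH_PAGE_MASK  (~(uint64_t)0xfff)
#define SKETCH_GVA_MASK   (~(uint64_t)0)

/* Mirrors copy_iaoq_entry for the base == NULL case: absolute = first + disp. */
static uint64_t resolve_iaoq(uint64_t iaoq_first, int64_t disp)
{
    return (iaoq_first + (uint64_t)disp) & SKETCH_GVA_MASK;
}

/* Mirrors the open-coded same-page test in the new use_nullify_skip. */
static bool same_page_as_tb_start(uint64_t iaoq_first, int64_t disp)
{
    return (((iaoq_first + (uint64_t)disp) ^ iaoq_first)
            & SKETCH_PAGE_MASK) == 0;
}

int main(void)
{
    /* Example TB entry IAOQ; the low bits carry the privilege level,
       as in the CONFIG_USER_ONLY setup above. */
    uint64_t iaoq_first = 0x10003;

    printf("back = 0x%" PRIx64 "\n", resolve_iaoq(iaoq_first, 4));
    printf("same page: %d\n", same_page_as_tb_start(iaoq_first, 4));      /* 1 */
    printf("same page: %d\n", same_page_as_tb_start(iaoq_first, 0x2000)); /* 0 */
    return 0;
}
```

One consequence visible in the hunks: the displacement form lets hppa_tr_init_disas_context store only the IAOQ_Back minus IAOQ_Front difference (ctx->iaq_b.disp), while use_goto_tb and tcg_gen_insn_start reconstruct absolute values by adding ctx->iaoq_first.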
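The rewritten install_link leans on the same machinery: instead of open-coding an add/movi on IAOQ_Back, it builds a queue entry four bytes past iaq_b with iaqe_incr and hands it to copy_iaoq_entry, so the relative and variable-base cases share one path. iaqe_incr itself is outside the visible hunks; the sketch below only shows the shape its call site implies (copy the entry, advance disp) and should be read as an assumption rather than the actual implementation, with QEMU's TCGv_i64 fields replaced by plain pointers so it compiles on its own.

```c
#include <stdint.h>
#include <stdio.h>

/* Minimal stand-in for DisasIAQE, for this sketch only. */
typedef struct {
    void    *space;   /* IASQ; null means no change from the TB */
    void    *base;    /* IAOQ base; null means relative address */
    int64_t  disp;    /* displacement from iaoq_first when base is null */
} IAQESketch;

/* Hypothetical shape of iaqe_incr, inferred from iaqe_incr(&ctx->iaq_b, 4). */
static IAQESketch iaqe_incr_sketch(const IAQESketch *e, int64_t disp)
{
    IAQESketch next = *e;   /* space and base carried over unchanged */
    next.disp += disp;      /* only the displacement advances */
    return next;
}

int main(void)
{
    IAQESketch iaq_b = { NULL, NULL, 8 };           /* e.g. IAOQ_Back at +8 */
    IAQESketch link = iaqe_incr_sketch(&iaq_b, 4);  /* link value = back + 4 */

    printf("link disp = %lld\n", (long long)link.disp);   /* prints 12 */
    return 0;
}
```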