--- switch.c (4efc0ede4f31d7ec25c3dee0c8f07f93735cee6d)
+++ switch.c (1460b4b25fde52cbee746c11a4b1d3185f2e2847)

 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) 2015 - ARM Ltd
  * Author: Marc Zyngier <marc.zyngier@arm.com>
  */
 
 #include <hyp/switch.h>
 #include <hyp/sysreg-sr.h>
 
--- 55 unchanged lines hidden ---
 
                 isb();
                 write_sysreg_el1(ctxt_sys_reg(ctxt, TCR_EL1), SYS_TCR);
         }
 }
 
 static void __deactivate_traps(struct kvm_vcpu *vcpu)
 {
         extern char __kvm_hyp_host_vector[];
-        u64 mdcr_el2, cptr;
+        u64 cptr;
 
         ___deactivate_traps(vcpu);
 
-        mdcr_el2 = read_sysreg(mdcr_el2);
-
         if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
                 u64 val;
 
                 /*
                  * Set the TCR and SCTLR registers in the exact opposite
                  * sequence as __activate_traps (first prevent walks,
                  * then force the MMU on). A generous sprinkling of isb()
                  * ensure that things happen in this exact order.
                  */
                 val = read_sysreg_el1(SYS_TCR);
                 write_sysreg_el1(val | TCR_EPD1_MASK | TCR_EPD0_MASK, SYS_TCR);
                 isb();
                 val = read_sysreg_el1(SYS_SCTLR);
                 write_sysreg_el1(val | SCTLR_ELx_M, SYS_SCTLR);
                 isb();
         }
 
-        __deactivate_traps_common();
+        vcpu->arch.mdcr_el2_host &= MDCR_EL2_HPMN_MASK |
+                                    MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT |
+                                    MDCR_EL2_E2TB_MASK << MDCR_EL2_E2TB_SHIFT;
 
-        mdcr_el2 &= MDCR_EL2_HPMN_MASK;
-        mdcr_el2 |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT;
-        mdcr_el2 |= MDCR_EL2_E2TB_MASK << MDCR_EL2_E2TB_SHIFT;
+        __deactivate_traps_common(vcpu);
 
-        write_sysreg(mdcr_el2, mdcr_el2);
         write_sysreg(this_cpu_ptr(&kvm_init_params)->hcr_el2, hcr_el2);
 
         cptr = CPTR_EL2_DEFAULT;
         if (vcpu_has_sve(vcpu) && (vcpu->arch.flags & KVM_ARM64_FP_ENABLED))
                 cptr |= CPTR_EL2_TZ;
 
         write_sysreg(cptr, cptr_el2);
         write_sysreg(__kvm_hyp_host_vector, vbar_el2);
 
--- 55 unchanged lines hidden ---
 
         write_sysreg(pmu->events_host, pmcntenset_el0);
 }
 
 /* Switch to the guest for legacy non-VHE systems */
 int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 {
         struct kvm_cpu_context *host_ctxt;
         struct kvm_cpu_context *guest_ctxt;
-        struct kvm_s2_mmu *mmu;
         bool pmu_switch_needed;
         u64 exit_code;
 
         /*
          * Having IRQs masked via PMR when entering the guest means the GIC
          * will not signal the CPU of interrupts of lower priority, and the
          * only way to get out will be via guest exceptions.
          * Naturally, we want to avoid this.
 
--- 27 unchanged lines hidden ---
 
          *
          * Also, and in order to be able to deal with erratum #1319537 (A57)
          * and #1319367 (A72), we must ensure that all VM-related sysreg are
          * restored before we enable S2 translation.
          */
         __sysreg32_restore_state(vcpu);
         __sysreg_restore_state_nvhe(guest_ctxt);
 
-        mmu = kern_hyp_va(vcpu->arch.hw_mmu);
-        __load_stage2(mmu, kern_hyp_va(mmu->arch));
+        __load_guest_stage2(kern_hyp_va(vcpu->arch.hw_mmu));
         __activate_traps(vcpu);
 
         __hyp_vgic_restore_state(vcpu);
         __timer_enable_traps(vcpu);
 
         __debug_switch_to_guest(vcpu);
 
         do {
 
--- 64 unchanged lines hidden ---