xref: /openbmc/linux/arch/arm64/kvm/hyp/nvhe/hyp-main.c (revision 840d9a813c8eaa5c55d86525e374a97ca5023b53)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 - Google Inc
 * Author: Andrew Scull <ascull@google.com>
 */

#include <hyp/adjust_pc.h>
#include <hyp/switch.h>

#include <asm/pgtable-types.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_host.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>

#include <nvhe/ffa.h>
#include <nvhe/mem_protect.h>
#include <nvhe/mm.h>
#include <nvhe/pkvm.h>
#include <nvhe/trap_handler.h>

DEFINE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);

void __kvm_hyp_host_forward_smc(struct kvm_cpu_context *host_ctxt);

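/*
 * Copy the host's view of a vCPU into the hypervisor's private copy
 * ahead of running it. Host kernel pointers (sve_state, debug_ptr) are
 * converted to hyp VAs with kern_hyp_va() so they can be dereferenced
 * at EL2.
 */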
static void flush_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
{
	struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;

	hyp_vcpu->vcpu.arch.ctxt	= host_vcpu->arch.ctxt;

	hyp_vcpu->vcpu.arch.sve_state	= kern_hyp_va(host_vcpu->arch.sve_state);
	hyp_vcpu->vcpu.arch.sve_max_vl	= host_vcpu->arch.sve_max_vl;

	hyp_vcpu->vcpu.arch.hw_mmu	= host_vcpu->arch.hw_mmu;

	hyp_vcpu->vcpu.arch.hcr_el2	= host_vcpu->arch.hcr_el2;
	hyp_vcpu->vcpu.arch.mdcr_el2	= host_vcpu->arch.mdcr_el2;

	hyp_vcpu->vcpu.arch.iflags	= host_vcpu->arch.iflags;
	hyp_vcpu->vcpu.arch.fp_state	= host_vcpu->arch.fp_state;

	hyp_vcpu->vcpu.arch.debug_ptr	= kern_hyp_va(host_vcpu->arch.debug_ptr);

	hyp_vcpu->vcpu.arch.vsesr_el2	= host_vcpu->arch.vsesr_el2;

	hyp_vcpu->vcpu.arch.vgic_cpu.vgic_v3 = host_vcpu->arch.vgic_cpu.vgic_v3;
}

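/*
 * Propagate the results of a run back into the host's copy of the
 * vCPU: register context, fault information and the vGIC list
 * registers that were in use.
 */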
static void sync_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
{
	struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;
	struct vgic_v3_cpu_if *hyp_cpu_if = &hyp_vcpu->vcpu.arch.vgic_cpu.vgic_v3;
	struct vgic_v3_cpu_if *host_cpu_if = &host_vcpu->arch.vgic_cpu.vgic_v3;
	unsigned int i;

	host_vcpu->arch.ctxt		= hyp_vcpu->vcpu.arch.ctxt;

	host_vcpu->arch.hcr_el2		= hyp_vcpu->vcpu.arch.hcr_el2;

	host_vcpu->arch.fault		= hyp_vcpu->vcpu.arch.fault;

	host_vcpu->arch.iflags		= hyp_vcpu->vcpu.arch.iflags;
	host_vcpu->arch.fp_state	= hyp_vcpu->vcpu.arch.fp_state;

	host_cpu_if->vgic_hcr		= hyp_cpu_if->vgic_hcr;
	for (i = 0; i < hyp_cpu_if->used_lrs; ++i)
		host_cpu_if->vgic_lr[i] = hyp_cpu_if->vgic_lr[i];
}

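/*
 * Run a vCPU on behalf of the host. When pKVM is enabled, the
 * host-supplied vCPU is only used to stage state in and out of the
 * hypervisor's private copy, which is the one actually run; otherwise
 * the host is trusted and its vCPU is run directly.
 */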
static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(struct kvm_vcpu *, host_vcpu, host_ctxt, 1);
	int ret;

	host_vcpu = kern_hyp_va(host_vcpu);

	if (unlikely(is_protected_kvm_enabled())) {
		struct pkvm_hyp_vcpu *hyp_vcpu;
		struct kvm *host_kvm;

		host_kvm = kern_hyp_va(host_vcpu->kvm);
		hyp_vcpu = pkvm_load_hyp_vcpu(host_kvm->arch.pkvm.handle,
					      host_vcpu->vcpu_idx);
		if (!hyp_vcpu) {
			ret = -EINVAL;
			goto out;
		}

		flush_hyp_vcpu(hyp_vcpu);

		ret = __kvm_vcpu_run(&hyp_vcpu->vcpu);

		sync_hyp_vcpu(hyp_vcpu);
		pkvm_put_hyp_vcpu(hyp_vcpu);
	} else {
		/* The host is fully trusted, run its vCPU directly. */
		fpsimd_lazy_switch_to_guest(host_vcpu);
		ret = __kvm_vcpu_run(host_vcpu);
		fpsimd_lazy_switch_to_host(host_vcpu);
	}

out:
	cpu_reg(host_ctxt, 1) = ret;
}

static void handle___kvm_adjust_pc(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(struct kvm_vcpu *, vcpu, host_ctxt, 1);

	__kvm_adjust_pc(kern_hyp_va(vcpu));
}

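/*
 * TLB and CPU context maintenance for the host: these sanitise any
 * host pointers and call the corresponding EL2 primitive.
 */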
static void handle___kvm_flush_vm_context(struct kvm_cpu_context *host_ctxt)
{
	__kvm_flush_vm_context();
}

static void handle___kvm_tlb_flush_vmid_ipa(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1);
	DECLARE_REG(phys_addr_t, ipa, host_ctxt, 2);
	DECLARE_REG(int, level, host_ctxt, 3);

	__kvm_tlb_flush_vmid_ipa(kern_hyp_va(mmu), ipa, level);
}

static void handle___kvm_tlb_flush_vmid_ipa_nsh(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1);
	DECLARE_REG(phys_addr_t, ipa, host_ctxt, 2);
	DECLARE_REG(int, level, host_ctxt, 3);

	__kvm_tlb_flush_vmid_ipa_nsh(kern_hyp_va(mmu), ipa, level);
}

static void
handle___kvm_tlb_flush_vmid_range(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1);
	DECLARE_REG(phys_addr_t, start, host_ctxt, 2);
	DECLARE_REG(unsigned long, pages, host_ctxt, 3);

	__kvm_tlb_flush_vmid_range(kern_hyp_va(mmu), start, pages);
}

static void handle___kvm_tlb_flush_vmid(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1);

	__kvm_tlb_flush_vmid(kern_hyp_va(mmu));
}

static void handle___kvm_flush_cpu_context(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1);

	__kvm_flush_cpu_context(kern_hyp_va(mmu));
}

static void handle___kvm_timer_set_cntvoff(struct kvm_cpu_context *host_ctxt)
{
	__kvm_timer_set_cntvoff(cpu_reg(host_ctxt, 1));
}

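/*
 * Set SCTLR_EL2.DSSBS so that PSTATE.SSBS defaults to 1 on exception
 * entry to EL2, i.e. speculative store bypass is left permitted at EL2.
 */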
static void handle___kvm_enable_ssbs(struct kvm_cpu_context *host_ctxt)
{
	u64 tmp;

	tmp = read_sysreg_el2(SYS_SCTLR);
	tmp |= SCTLR_ELx_DSSBS;
	write_sysreg_el2(tmp, SYS_SCTLR);
}

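/*
 * Simple proxies for EL2-only vGIC and debug configuration registers
 * that the host, running at EL1, cannot access directly.
 */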
static void handle___vgic_v3_get_gic_config(struct kvm_cpu_context *host_ctxt)
{
	cpu_reg(host_ctxt, 1) = __vgic_v3_get_gic_config();
}

static void handle___vgic_v3_read_vmcr(struct kvm_cpu_context *host_ctxt)
{
	cpu_reg(host_ctxt, 1) = __vgic_v3_read_vmcr();
}

static void handle___vgic_v3_write_vmcr(struct kvm_cpu_context *host_ctxt)
{
	__vgic_v3_write_vmcr(cpu_reg(host_ctxt, 1));
}

static void handle___vgic_v3_init_lrs(struct kvm_cpu_context *host_ctxt)
{
	__vgic_v3_init_lrs();
}

static void handle___kvm_get_mdcr_el2(struct kvm_cpu_context *host_ctxt)
{
	cpu_reg(host_ctxt, 1) = __kvm_get_mdcr_el2();
}

static void handle___vgic_v3_save_aprs(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(struct vgic_v3_cpu_if *, cpu_if, host_ctxt, 1);

	__vgic_v3_save_aprs(kern_hyp_va(cpu_if));
}

static void handle___vgic_v3_restore_aprs(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(struct vgic_v3_cpu_if *, cpu_if, host_ctxt, 1);

	__vgic_v3_restore_aprs(kern_hyp_va(cpu_if));
}

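/*
 * Early "privileged" hypercalls used to bootstrap the hypervisor; once
 * protected mode is initialised, handle_host_hcall() rejects them (see
 * hcall_min).
 */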
static void handle___pkvm_init(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(phys_addr_t, phys, host_ctxt, 1);
	DECLARE_REG(unsigned long, size, host_ctxt, 2);
	DECLARE_REG(unsigned long, nr_cpus, host_ctxt, 3);
	DECLARE_REG(unsigned long *, per_cpu_base, host_ctxt, 4);
	DECLARE_REG(u32, hyp_va_bits, host_ctxt, 5);

	/*
	 * __pkvm_init() will return only if an error occurred, otherwise it
	 * will tail-call into __pkvm_init_finalise() which will have to deal
	 * with the host context directly.
	 */
	cpu_reg(host_ctxt, 1) = __pkvm_init(phys, size, nr_cpus, per_cpu_base,
					    hyp_va_bits);
}

static void handle___pkvm_cpu_set_vector(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(enum arm64_hyp_spectre_vector, slot, host_ctxt, 1);

	cpu_reg(host_ctxt, 1) = pkvm_cpu_set_vector(slot);
}

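/*
 * Share or unshare a page of host memory, identified by PFN, with the
 * hypervisor.
 */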
static void handle___pkvm_host_share_hyp(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(u64, pfn, host_ctxt, 1);

	cpu_reg(host_ctxt, 1) = __pkvm_host_share_hyp(pfn);
}

static void handle___pkvm_host_unshare_hyp(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(u64, pfn, host_ctxt, 1);

	cpu_reg(host_ctxt, 1) = __pkvm_host_unshare_hyp(pfn);
}

static void handle___pkvm_create_private_mapping(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(phys_addr_t, phys, host_ctxt, 1);
	DECLARE_REG(size_t, size, host_ctxt, 2);
	DECLARE_REG(enum kvm_pgtable_prot, prot, host_ctxt, 3);

	/*
	 * __pkvm_create_private_mapping() populates a pointer with the
	 * hypervisor start address of the allocation.
	 *
	 * However, the handle___pkvm_create_private_mapping() hypercall
	 * crosses the EL1/EL2 boundary so the pointer would not be valid in
	 * this context.
	 *
	 * Instead pass the allocation address as the return value (or return
	 * ERR_PTR() on failure).
	 */
	unsigned long haddr;
	int err = __pkvm_create_private_mapping(phys, size, prot, &haddr);

	if (err)
		haddr = (unsigned long)ERR_PTR(err);

	cpu_reg(host_ctxt, 1) = haddr;
}

static void handle___pkvm_prot_finalize(struct kvm_cpu_context *host_ctxt)
{
	cpu_reg(host_ctxt, 1) = __pkvm_prot_finalize();
}

static void handle___pkvm_vcpu_init_traps(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(struct kvm_vcpu *, vcpu, host_ctxt, 1);

	__pkvm_vcpu_init_traps(kern_hyp_va(vcpu));
}

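/*
 * Protected VM lifecycle. The host passes the kernel VAs of the buffers
 * donated for the hypervisor's VM, page-table and vCPU state (vm_hva,
 * pgd_hva, vcpu_hva), and refers to the VM by its pkvm_handle_t
 * thereafter, until the VM is torn down.
 */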
static void handle___pkvm_init_vm(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(struct kvm *, host_kvm, host_ctxt, 1);
	DECLARE_REG(unsigned long, vm_hva, host_ctxt, 2);
	DECLARE_REG(unsigned long, pgd_hva, host_ctxt, 3);

	host_kvm = kern_hyp_va(host_kvm);
	cpu_reg(host_ctxt, 1) = __pkvm_init_vm(host_kvm, vm_hva, pgd_hva);
}

static void handle___pkvm_init_vcpu(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(pkvm_handle_t, handle, host_ctxt, 1);
	DECLARE_REG(struct kvm_vcpu *, host_vcpu, host_ctxt, 2);
	DECLARE_REG(unsigned long, vcpu_hva, host_ctxt, 3);

	host_vcpu = kern_hyp_va(host_vcpu);
	cpu_reg(host_ctxt, 1) = __pkvm_init_vcpu(handle, host_vcpu, vcpu_hva);
}

static void handle___pkvm_teardown_vm(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(pkvm_handle_t, handle, host_ctxt, 1);

	cpu_reg(host_ctxt, 1) = __pkvm_teardown_vm(handle);
}

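/*
 * Host hypercall dispatch table. HANDLE_FUNC() uses a designated
 * initialiser, so each handler lands at the index given by its
 * __KVM_HOST_SMCCC_FUNC_* ID; IDs below __pkvm_prot_finalize make up
 * the privileged set rejected once protected mode is initialised.
 */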
typedef void (*hcall_t)(struct kvm_cpu_context *);

#define HANDLE_FUNC(x)	[__KVM_HOST_SMCCC_FUNC_##x] = (hcall_t)handle_##x

static const hcall_t host_hcall[] = {
	/* ___kvm_hyp_init */
	HANDLE_FUNC(__kvm_get_mdcr_el2),
	HANDLE_FUNC(__pkvm_init),
	HANDLE_FUNC(__pkvm_create_private_mapping),
	HANDLE_FUNC(__pkvm_cpu_set_vector),
	HANDLE_FUNC(__kvm_enable_ssbs),
	HANDLE_FUNC(__vgic_v3_init_lrs),
	HANDLE_FUNC(__vgic_v3_get_gic_config),
	HANDLE_FUNC(__pkvm_prot_finalize),

	HANDLE_FUNC(__pkvm_host_share_hyp),
	HANDLE_FUNC(__pkvm_host_unshare_hyp),
	HANDLE_FUNC(__kvm_adjust_pc),
	HANDLE_FUNC(__kvm_vcpu_run),
	HANDLE_FUNC(__kvm_flush_vm_context),
	HANDLE_FUNC(__kvm_tlb_flush_vmid_ipa),
	HANDLE_FUNC(__kvm_tlb_flush_vmid_ipa_nsh),
	HANDLE_FUNC(__kvm_tlb_flush_vmid),
	HANDLE_FUNC(__kvm_tlb_flush_vmid_range),
	HANDLE_FUNC(__kvm_flush_cpu_context),
	HANDLE_FUNC(__kvm_timer_set_cntvoff),
	HANDLE_FUNC(__vgic_v3_read_vmcr),
	HANDLE_FUNC(__vgic_v3_write_vmcr),
	HANDLE_FUNC(__vgic_v3_save_aprs),
	HANDLE_FUNC(__vgic_v3_restore_aprs),
	HANDLE_FUNC(__pkvm_vcpu_init_traps),
	HANDLE_FUNC(__pkvm_init_vm),
	HANDLE_FUNC(__pkvm_init_vcpu),
	HANDLE_FUNC(__pkvm_teardown_vm),
};

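/*
 * Hypercall ABI: the function ID arrives in x0 and arguments in x1
 * upwards (fetched with DECLARE_REG); on return, x0 carries
 * SMCCC_RET_SUCCESS or SMCCC_RET_NOT_SUPPORTED and x1 the handler's
 * return value, both written via cpu_reg().
 */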
static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(unsigned long, id, host_ctxt, 0);
	unsigned long hcall_min = 0;
	hcall_t hfn;

	/*
	 * If pKVM has been initialised then reject any calls to the
	 * early "privileged" hypercalls. Note that we cannot reject
	 * calls to __pkvm_prot_finalize for two reasons: (1) The static
	 * key used to determine initialisation must be toggled prior to
	 * finalisation and (2) finalisation is performed on a per-CPU
	 * basis. This is all fine, however, since __pkvm_prot_finalize
	 * returns -EPERM after the first call for a given CPU.
	 */
	if (static_branch_unlikely(&kvm_protected_mode_initialized))
		hcall_min = __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize;

	id &= ~ARM_SMCCC_CALL_HINTS;
	id -= KVM_HOST_SMCCC_ID(0);

	if (unlikely(id < hcall_min || id >= ARRAY_SIZE(host_hcall)))
		goto inval;

	hfn = host_hcall[id];
	if (unlikely(!hfn))
		goto inval;

	cpu_reg(host_ctxt, 0) = SMCCC_RET_SUCCESS;
	hfn(host_ctxt);

	return;
inval:
	cpu_reg(host_ctxt, 0) = SMCCC_RET_NOT_SUPPORTED;
}

static void default_host_smc_handler(struct kvm_cpu_context *host_ctxt)
{
	__kvm_hyp_host_forward_smc(host_ctxt);
}

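/*
 * SMCs issued by the host are either handled at EL2 (PSCI and FF-A,
 * which the hypervisor must mediate) or forwarded to EL3 by default.
 */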
static void handle_host_smc(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(u64, func_id, host_ctxt, 0);
	bool handled;

	func_id &= ~ARM_SMCCC_CALL_HINTS;

	handled = kvm_host_psci_handler(host_ctxt, func_id);
	if (!handled)
		handled = kvm_host_ffa_handler(host_ctxt, func_id);
	if (!handled)
		default_host_smc_handler(host_ctxt);

	/* SMC was trapped, move ELR past the current PC. */
	kvm_skip_host_instr();
}

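/*
 * Top-level handler for exceptions taken from the host to EL2: HVCs
 * carry hypercalls, trapped SMCs are filtered or forwarded, and host
 * stage-2 aborts go to the memory protection code. Anything else is a
 * bug.
 */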
void handle_trap(struct kvm_cpu_context *host_ctxt)
{
	u64 esr = read_sysreg_el2(SYS_ESR);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_HVC64:
		handle_host_hcall(host_ctxt);
		break;
	case ESR_ELx_EC_SMC64:
		handle_host_smc(host_ctxt);
		break;
	case ESR_ELx_EC_IABT_LOW:
	case ESR_ELx_EC_DABT_LOW:
		handle_host_mem_abort(host_ctxt);
		break;
	default:
		BUG();
	}
}