// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 - Google Inc
 * Author: Andrew Scull <ascull@google.com>
 */

#include <hyp/switch.h>

#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_host.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>

#include <nvhe/trap_handler.h>

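/* Per-CPU parameters the host hands to EL2 at hyp initialisation. */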
DEFINE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);

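/* Assembly helper that re-issues a trapped host SMC on to EL3. */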
void __kvm_hyp_host_forward_smc(struct kvm_cpu_context *host_ctxt);

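/*
 * Handlers for the host's hypercalls. Each one unpacks its arguments from
 * the host context registers with DECLARE_REG() and, where there is a
 * return value, writes it back to x1.
 */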
static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(struct kvm_vcpu *, vcpu, host_ctxt, 1);

	cpu_reg(host_ctxt, 1) = __kvm_vcpu_run(kern_hyp_va(vcpu));
}

static void handle___kvm_flush_vm_context(struct kvm_cpu_context *host_ctxt)
{
	__kvm_flush_vm_context();
}

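/*
 * Stage-2 TLB and context maintenance on behalf of the host. The mmu
 * pointer arrives as a host kernel VA and must be converted with
 * kern_hyp_va() before it can be dereferenced at EL2.
 */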
static void handle___kvm_tlb_flush_vmid_ipa(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1);
	DECLARE_REG(phys_addr_t, ipa, host_ctxt, 2);
	DECLARE_REG(int, level, host_ctxt, 3);

	__kvm_tlb_flush_vmid_ipa(kern_hyp_va(mmu), ipa, level);
}

static void handle___kvm_tlb_flush_vmid(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1);

	__kvm_tlb_flush_vmid(kern_hyp_va(mmu));
}

static void handle___kvm_flush_cpu_context(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1);

	__kvm_flush_cpu_context(kern_hyp_va(mmu));
}

static void handle___kvm_timer_set_cntvoff(struct kvm_cpu_context *host_ctxt)
{
	__kvm_timer_set_cntvoff(cpu_reg(host_ctxt, 1));
}

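/*
 * Setting SCTLR_EL2.DSSBS makes PSTATE.SSBS default to 1 on exception
 * entry, i.e. speculative store bypass is left permitted at EL2.
 */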
static void handle___kvm_enable_ssbs(struct kvm_cpu_context *host_ctxt)
{
	u64 tmp;

	tmp = read_sysreg_el2(SYS_SCTLR);
	tmp |= SCTLR_ELx_DSSBS;
	write_sysreg_el2(tmp, SYS_SCTLR);
}

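/* GICv3 system-register accessors that must run at EL2, proxied for the host. */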
static void handle___vgic_v3_get_gic_config(struct kvm_cpu_context *host_ctxt)
{
	cpu_reg(host_ctxt, 1) = __vgic_v3_get_gic_config();
}

static void handle___vgic_v3_read_vmcr(struct kvm_cpu_context *host_ctxt)
{
	cpu_reg(host_ctxt, 1) = __vgic_v3_read_vmcr();
}

static void handle___vgic_v3_write_vmcr(struct kvm_cpu_context *host_ctxt)
{
	__vgic_v3_write_vmcr(cpu_reg(host_ctxt, 1));
}

static void handle___vgic_v3_init_lrs(struct kvm_cpu_context *host_ctxt)
{
	__vgic_v3_init_lrs();
}

static void handle___kvm_get_mdcr_el2(struct kvm_cpu_context *host_ctxt)
{
	cpu_reg(host_ctxt, 1) = __kvm_get_mdcr_el2();
}

static void handle___vgic_v3_save_aprs(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(struct vgic_v3_cpu_if *, cpu_if, host_ctxt, 1);

	__vgic_v3_save_aprs(kern_hyp_va(cpu_if));
}

static void handle___vgic_v3_restore_aprs(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(struct vgic_v3_cpu_if *, cpu_if, host_ctxt, 1);

	__vgic_v3_restore_aprs(kern_hyp_va(cpu_if));
}

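/*
 * Dispatch table for host hypercalls, indexed by the SMCCC function ID
 * relative to KVM_HOST_SMCCC_ID(0). Unpopulated slots are NULL.
 */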
typedef void (*hcall_t)(struct kvm_cpu_context *);

#define HANDLE_FUNC(x)	[__KVM_HOST_SMCCC_FUNC_##x] = (hcall_t)handle_##x

static const hcall_t host_hcall[] = {
	HANDLE_FUNC(__kvm_vcpu_run),
	HANDLE_FUNC(__kvm_flush_vm_context),
	HANDLE_FUNC(__kvm_tlb_flush_vmid_ipa),
	HANDLE_FUNC(__kvm_tlb_flush_vmid),
	HANDLE_FUNC(__kvm_flush_cpu_context),
	HANDLE_FUNC(__kvm_timer_set_cntvoff),
	HANDLE_FUNC(__kvm_enable_ssbs),
	HANDLE_FUNC(__vgic_v3_get_gic_config),
	HANDLE_FUNC(__vgic_v3_read_vmcr),
	HANDLE_FUNC(__vgic_v3_write_vmcr),
	HANDLE_FUNC(__vgic_v3_init_lrs),
	HANDLE_FUNC(__kvm_get_mdcr_el2),
	HANDLE_FUNC(__vgic_v3_save_aprs),
	HANDLE_FUNC(__vgic_v3_restore_aprs),
};

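/*
 * Decode and dispatch a host hypercall. x0 is preloaded with
 * SMCCC_RET_SUCCESS so that handlers only need to fill in x1 onwards;
 * out-of-range or unpopulated IDs fail with SMCCC_RET_NOT_SUPPORTED.
 */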
static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(unsigned long, id, host_ctxt, 0);
	hcall_t hfn;

	id -= KVM_HOST_SMCCC_ID(0);

	if (unlikely(id >= ARRAY_SIZE(host_hcall)))
		goto inval;

	hfn = host_hcall[id];
	if (unlikely(!hfn))
		goto inval;

	cpu_reg(host_ctxt, 0) = SMCCC_RET_SUCCESS;
	hfn(host_ctxt);

	return;
inval:
	cpu_reg(host_ctxt, 0) = SMCCC_RET_NOT_SUPPORTED;
}

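/* Any SMC that hyp doesn't handle itself is passed through to the firmware. */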
static void default_host_smc_handler(struct kvm_cpu_context *host_ctxt)
{
	__kvm_hyp_host_forward_smc(host_ctxt);
}

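/*
 * PSCI calls from the host are filtered at EL2; everything else is
 * forwarded to EL3. Unlike HVC, a trapped SMC leaves ELR pointing at
 * the SMC instruction itself, so it has to be stepped over explicitly.
 */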
static void handle_host_smc(struct kvm_cpu_context *host_ctxt)
{
	bool handled;

	handled = kvm_host_psci_handler(host_ctxt);
	if (!handled)
		default_host_smc_handler(host_ctxt);

	/* SMC was trapped, move ELR past the current PC. */
	kvm_skip_host_instr();
}

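/*
 * Entry point for all traps taken from the host. Only HVC64 and SMC64
 * exception classes are expected; anything else is a fatal hyp error.
 */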
void handle_trap(struct kvm_cpu_context *host_ctxt)
{
	u64 esr = read_sysreg_el2(SYS_ESR);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_HVC64:
		handle_host_hcall(host_ctxt);
		break;
	case ESR_ELx_EC_SMC64:
		handle_host_smc(host_ctxt);
		break;
	default:
		hyp_panic();
	}
}