// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 - Google Inc
 * Author: Andrew Scull <ascull@google.com>
 */

#include <hyp/adjust_pc.h>

#include <asm/pgtable-types.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_host.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>

#include <nvhe/mem_protect.h>
#include <nvhe/mm.h>
#include <nvhe/trap_handler.h>

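/* Per-CPU parameters the host hands to the hypervisor at init time. */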
DEFINE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);

void __kvm_hyp_host_forward_smc(struct kvm_cpu_context *host_ctxt);

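/*
 * HVC handlers, one per __KVM_HOST_SMCCC_FUNC_* ID. Arguments arrive
 * in the host's GPRs (DECLARE_REG reads x1, x2, ...) and any result is
 * written back to x1 via cpu_reg(). Host pointers are converted with
 * kern_hyp_va() before being dereferenced at EL2.
 */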
static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(struct kvm_vcpu *, vcpu, host_ctxt, 1);

	cpu_reg(host_ctxt, 1) = __kvm_vcpu_run(kern_hyp_va(vcpu));
}

static void handle___kvm_adjust_pc(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(struct kvm_vcpu *, vcpu, host_ctxt, 1);

	__kvm_adjust_pc(kern_hyp_va(vcpu));
}

static void handle___kvm_flush_vm_context(struct kvm_cpu_context *host_ctxt)
{
	__kvm_flush_vm_context();
}

static void handle___kvm_tlb_flush_vmid_ipa(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1);
	DECLARE_REG(phys_addr_t, ipa, host_ctxt, 2);
	DECLARE_REG(int, level, host_ctxt, 3);

	__kvm_tlb_flush_vmid_ipa(kern_hyp_va(mmu), ipa, level);
}

static void handle___kvm_tlb_flush_vmid(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1);

	__kvm_tlb_flush_vmid(kern_hyp_va(mmu));
}

static void handle___kvm_flush_cpu_context(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1);

	__kvm_flush_cpu_context(kern_hyp_va(mmu));
}

static void handle___kvm_timer_set_cntvoff(struct kvm_cpu_context *host_ctxt)
{
	__kvm_timer_set_cntvoff(cpu_reg(host_ctxt, 1));
}

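/*
 * Set SCTLR_EL2.DSSBS so that PSTATE.SSBS defaults to 1 on exceptions
 * to EL2, i.e. speculative store bypass is left enabled.
 */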
static void handle___kvm_enable_ssbs(struct kvm_cpu_context *host_ctxt)
{
	u64 tmp;

	tmp = read_sysreg_el2(SYS_SCTLR);
	tmp |= SCTLR_ELx_DSSBS;
	write_sysreg_el2(tmp, SYS_SCTLR);
}

static void handle___vgic_v3_get_gic_config(struct kvm_cpu_context *host_ctxt)
{
	cpu_reg(host_ctxt, 1) = __vgic_v3_get_gic_config();
}

static void handle___vgic_v3_read_vmcr(struct kvm_cpu_context *host_ctxt)
{
	cpu_reg(host_ctxt, 1) = __vgic_v3_read_vmcr();
}

static void handle___vgic_v3_write_vmcr(struct kvm_cpu_context *host_ctxt)
{
	__vgic_v3_write_vmcr(cpu_reg(host_ctxt, 1));
}

static void handle___vgic_v3_init_lrs(struct kvm_cpu_context *host_ctxt)
{
	__vgic_v3_init_lrs();
}

static void handle___kvm_get_mdcr_el2(struct kvm_cpu_context *host_ctxt)
{
	cpu_reg(host_ctxt, 1) = __kvm_get_mdcr_el2();
}

static void handle___vgic_v3_save_aprs(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(struct vgic_v3_cpu_if *, cpu_if, host_ctxt, 1);

	__vgic_v3_save_aprs(kern_hyp_va(cpu_if));
}

static void handle___vgic_v3_restore_aprs(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(struct vgic_v3_cpu_if *, cpu_if, host_ctxt, 1);

	__vgic_v3_restore_aprs(kern_hyp_va(cpu_if));
}

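/*
 * Bring up the pKVM hypervisor using memory donated by the host:
 * 'phys'/'size' describe the donated range, 'per_cpu_base' the per-CPU
 * regions and 'hyp_va_bits' the width of the hypervisor VA space.
 */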
static void handle___pkvm_init(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(phys_addr_t, phys, host_ctxt, 1);
	DECLARE_REG(unsigned long, size, host_ctxt, 2);
	DECLARE_REG(unsigned long, nr_cpus, host_ctxt, 3);
	DECLARE_REG(unsigned long *, per_cpu_base, host_ctxt, 4);
	DECLARE_REG(u32, hyp_va_bits, host_ctxt, 5);

	/*
	 * __pkvm_init() will return only if an error occurred, otherwise it
	 * will tail-call into __pkvm_init_finalise(), which has to deal
	 * with the host context directly.
	 */
	cpu_reg(host_ctxt, 1) = __pkvm_init(phys, size, nr_cpus, per_cpu_base,
					    hyp_va_bits);
}

static void handle___pkvm_cpu_set_vector(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(enum arm64_hyp_spectre_vector, slot, host_ctxt, 1);

	cpu_reg(host_ctxt, 1) = pkvm_cpu_set_vector(slot);
}

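/* Share (and unshare) pages of host memory with EL2, identified by PFN. */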
static void handle___pkvm_host_share_hyp(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(u64, pfn, host_ctxt, 1);

	cpu_reg(host_ctxt, 1) = __pkvm_host_share_hyp(pfn);
}

static void handle___pkvm_host_unshare_hyp(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(u64, pfn, host_ctxt, 1);

	cpu_reg(host_ctxt, 1) = __pkvm_host_unshare_hyp(pfn);
}

static void handle___pkvm_create_private_mapping(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(phys_addr_t, phys, host_ctxt, 1);
	DECLARE_REG(size_t, size, host_ctxt, 2);
	DECLARE_REG(enum kvm_pgtable_prot, prot, host_ctxt, 3);

	/*
	 * __pkvm_create_private_mapping() populates a pointer with the
	 * hypervisor start address of the allocation.
	 *
	 * However, the handle___pkvm_create_private_mapping() hypercall
	 * crosses the EL1/EL2 boundary, so the pointer would not be valid
	 * in this context.
	 *
	 * Instead, pass the allocation address as the return value (or
	 * ERR_PTR() on failure).
	 */
	unsigned long haddr;
	int err = __pkvm_create_private_mapping(phys, size, prot, &haddr);

	if (err)
		haddr = (unsigned long)ERR_PTR(err);

	cpu_reg(host_ctxt, 1) = haddr;
}

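/*
 * Enable protection for the calling CPU; repeat calls for a given CPU
 * return -EPERM, as described in handle_host_hcall() below.
 */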
static void handle___pkvm_prot_finalize(struct kvm_cpu_context *host_ctxt)
{
	cpu_reg(host_ctxt, 1) = __pkvm_prot_finalize();
}

static void handle___pkvm_vcpu_init_traps(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(struct kvm_vcpu *, vcpu, host_ctxt, 1);

	__pkvm_vcpu_init_traps(kern_hyp_va(vcpu));
}

typedef void (*hcall_t)(struct kvm_cpu_context *);

#define HANDLE_FUNC(x)	[__KVM_HOST_SMCCC_FUNC_##x] = (hcall_t)handle_##x

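/*
 * Dispatch table, indexed by SMCCC function ID. Entries listed before
 * __pkvm_prot_finalize are the early "privileged" hypercalls that are
 * rejected once protected mode has been initialised; see
 * handle_host_hcall().
 */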
static const hcall_t host_hcall[] = {
	/* ___kvm_hyp_init */
	HANDLE_FUNC(__kvm_get_mdcr_el2),
	HANDLE_FUNC(__pkvm_init),
	HANDLE_FUNC(__pkvm_create_private_mapping),
	HANDLE_FUNC(__pkvm_cpu_set_vector),
	HANDLE_FUNC(__kvm_enable_ssbs),
	HANDLE_FUNC(__vgic_v3_init_lrs),
	HANDLE_FUNC(__vgic_v3_get_gic_config),
	HANDLE_FUNC(__pkvm_prot_finalize),

	HANDLE_FUNC(__pkvm_host_share_hyp),
	HANDLE_FUNC(__pkvm_host_unshare_hyp),
	HANDLE_FUNC(__kvm_adjust_pc),
	HANDLE_FUNC(__kvm_vcpu_run),
	HANDLE_FUNC(__kvm_flush_vm_context),
	HANDLE_FUNC(__kvm_tlb_flush_vmid_ipa),
	HANDLE_FUNC(__kvm_tlb_flush_vmid),
	HANDLE_FUNC(__kvm_flush_cpu_context),
	HANDLE_FUNC(__kvm_timer_set_cntvoff),
	HANDLE_FUNC(__vgic_v3_read_vmcr),
	HANDLE_FUNC(__vgic_v3_write_vmcr),
	HANDLE_FUNC(__vgic_v3_save_aprs),
	HANDLE_FUNC(__vgic_v3_restore_aprs),
	HANDLE_FUNC(__pkvm_vcpu_init_traps),
};

static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(unsigned long, id, host_ctxt, 0);
	unsigned long hcall_min = 0;
	hcall_t hfn;

	/*
	 * If pKVM has been initialised then reject any calls to the
	 * early "privileged" hypercalls. Note that we cannot reject
	 * calls to __pkvm_prot_finalize for two reasons: (1) The static
	 * key used to determine initialisation must be toggled prior to
	 * finalisation and (2) finalisation is performed on a per-CPU
	 * basis. This is all fine, however, since __pkvm_prot_finalize
	 * returns -EPERM after the first call for a given CPU.
	 */
	if (static_branch_unlikely(&kvm_protected_mode_initialized))
		hcall_min = __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize;

	id -= KVM_HOST_SMCCC_ID(0);

	if (unlikely(id < hcall_min || id >= ARRAY_SIZE(host_hcall)))
		goto inval;

	hfn = host_hcall[id];
	if (unlikely(!hfn))
		goto inval;

	cpu_reg(host_ctxt, 0) = SMCCC_RET_SUCCESS;
	hfn(host_ctxt);

	return;
inval:
	cpu_reg(host_ctxt, 0) = SMCCC_RET_NOT_SUPPORTED;
}

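/* SMCs not handled at EL2 are forwarded onwards (e.g. to the EL3 firmware). */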
static void default_host_smc_handler(struct kvm_cpu_context *host_ctxt)
{
	__kvm_hyp_host_forward_smc(host_ctxt);
}

static void handle_host_smc(struct kvm_cpu_context *host_ctxt)
{
	bool handled;

	handled = kvm_host_psci_handler(host_ctxt);
	if (!handled)
		default_host_smc_handler(host_ctxt);

	/* SMC was trapped, move ELR past the current PC. */
	kvm_skip_host_instr();
}

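/*
 * Entry point for all traps taken from the host to the nVHE
 * hypervisor: dispatch on the ESR_EL2 exception class.
 */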
void handle_trap(struct kvm_cpu_context *host_ctxt)
{
	u64 esr = read_sysreg_el2(SYS_ESR);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_HVC64:
		handle_host_hcall(host_ctxt);
		break;
	case ESR_ELx_EC_SMC64:
		handle_host_smc(host_ctxt);
		break;
	case ESR_ELx_EC_SVE:
		/*
		 * The host used an SVE instruction: stop trapping SVE
		 * (clear CPTR_EL2.TZ) and allow the maximum vector length.
		 */
		sysreg_clear_set(cptr_el2, CPTR_EL2_TZ, 0);
		isb();
		sve_cond_update_zcr_vq(ZCR_ELx_LEN_MASK, SYS_ZCR_EL2);
		break;
	case ESR_ELx_EC_IABT_LOW:
	case ESR_ELx_EC_DABT_LOW:
		handle_host_mem_abort(host_ctxt);
		break;
	default:
		/* No other exception class is expected from the host. */
		BUG();
	}
}