/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/asm/kvm_host.h:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#ifndef __ARM64_KVM_HOST_H__
#define __ARM64_KVM_HOST_H__

#include <linux/arm-smccc.h>
#include <linux/bitmap.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <linux/kvm_types.h>
#include <linux/percpu.h>
#include <asm/arch_gicv3.h>
#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/daifflags.h>
#include <asm/fpsimd.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/thread_info.h>

#define __KVM_HAVE_ARCH_INTC_INITIALIZED

#define KVM_USER_MEM_SLOTS 512
#define KVM_HALT_POLL_NS_DEFAULT 500000

#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>
#include <kvm/arm_pmu.h>

#define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS

#define KVM_VCPU_MAX_FEATURES 7

#define KVM_REQ_SLEEP \
	KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_IRQ_PENDING	KVM_ARCH_REQ(1)
#define KVM_REQ_VCPU_RESET	KVM_ARCH_REQ(2)
#define KVM_REQ_RECORD_STEAL	KVM_ARCH_REQ(3)
#define KVM_REQ_RELOAD_GICv4	KVM_ARCH_REQ(4)

#define KVM_DIRTY_LOG_MANUAL_CAPS   (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \
				     KVM_DIRTY_LOG_INITIALLY_SET)

/*
 * Mode of operation configurable with kvm-arm.mode early param.
 * See Documentation/admin-guide/kernel-parameters.txt for more information.
 */
enum kvm_mode {
	KVM_MODE_DEFAULT,
	KVM_MODE_PROTECTED,
};
enum kvm_mode kvm_get_mode(void);

DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);

extern unsigned int kvm_sve_max_vl;
int kvm_arm_init_sve(void);

int __attribute_const__ kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu);

struct kvm_vmid {
	/* The VMID generation used for the virt. memory system */
	u64    vmid_gen;
	u32    vmid;
};
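
/*
 * Illustrative sketch (not part of this header): a kvm_vmid is stale once
 * the global VMID generation moves on, at which point a fresh VMID must be
 * allocated before the VM runs again. Assuming kvm_vmid_gen is the global
 * atomic generation counter maintained in arch/arm64/kvm/arm.c:
 *
 *	bool need_new_vmid_gen(struct kvm_vmid *vmid)
 *	{
 *		u64 current_vmid_gen = atomic64_read(&kvm_vmid_gen);
 *
 *		smp_rmb(); // orders reads of kvm_vmid_gen and vmid->vmid_gen
 *		return unlikely(READ_ONCE(vmid->vmid_gen) != current_vmid_gen);
 *	}
 */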

struct kvm_s2_mmu {
	struct kvm_vmid vmid;

	/*
	 * stage2 entry level table
	 *
	 * Two kvm_s2_mmu structures in the same VM can point to the same
	 * pgd here.  This happens when running a guest using a
	 * translation regime that isn't affected by its own stage-2
	 * translation, such as a non-VHE hypervisor running at vEL2, or
	 * for vEL1/EL0 with vHCR_EL2.VM == 0.  In that case, we use the
	 * canonical stage-2 page tables.
	 */
	phys_addr_t	pgd_phys;
	struct kvm_pgtable *pgt;

	/* The last vcpu id that ran on each physical CPU */
	int __percpu *last_vcpu_ran;

	struct kvm *kvm;
};

struct kvm_arch_memory_slot {
};

struct kvm_arch {
	struct kvm_s2_mmu mmu;

	/* VTCR_EL2 value for this VM */
	u64    vtcr;

	/* The maximum number of vCPUs depends on the used GIC model */
	int max_vcpus;

	/* Interrupt controller */
	struct vgic_dist	vgic;

	/* Mandated version of PSCI */
	u32 psci_version;

	/*
	 * If we encounter a data abort without valid instruction syndrome
	 * information, report this to user space.  User space can (and
	 * should) opt in to this feature if KVM_CAP_ARM_NISV_TO_USER is
	 * supported.
	 */
	bool return_nisv_io_abort_to_user;

	/*
	 * VM-wide PMU filter, implemented as a bitmap and big enough for
	 * up to 2^10 events (ARMv8.0) or 2^16 events (ARMv8.1+).
	 */
	unsigned long *pmu_filter;
	unsigned int pmuver;

	u8 pfr0_csv2;
	u8 pfr0_csv3;
};
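
/*
 * Hedged sketch (not the canonical allocation site): the pmu_filter bitmap
 * above is sized from the event space implied by pmuver, so a setup path
 * could look roughly like the following, where kvm_pmu_event_mask() is
 * assumed to return the largest valid event number for this PMU version:
 *
 *	nr_events = kvm_pmu_event_mask(kvm) + 1;	// 2^10 or 2^16
 *	kvm->arch.pmu_filter = bitmap_zalloc(nr_events, GFP_KERNEL);
 *	if (!kvm->arch.pmu_filter)
 *		return -ENOMEM;
 *
 * The real filter is installed by userspace through the
 * KVM_ARM_VCPU_PMU_V3_FILTER device attribute.
 */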

struct kvm_vcpu_fault_info {
	u32 esr_el2;		/* Hyp Syndrome Register */
	u64 far_el2;		/* Hyp Fault Address Register */
	u64 hpfar_el2;		/* Hyp IPA Fault Address Register */
	u64 disr_el1;		/* Deferred [SError] Status Register */
};

enum vcpu_sysreg {
	__INVALID_SYSREG__,   /* 0 is reserved as an invalid value */
	MPIDR_EL1,	/* MultiProcessor Affinity Register */
	CSSELR_EL1,	/* Cache Size Selection Register */
	SCTLR_EL1,	/* System Control Register */
	ACTLR_EL1,	/* Auxiliary Control Register */
	CPACR_EL1,	/* Coprocessor Access Control */
	ZCR_EL1,	/* SVE Control */
	TTBR0_EL1,	/* Translation Table Base Register 0 */
	TTBR1_EL1,	/* Translation Table Base Register 1 */
	TCR_EL1,	/* Translation Control Register */
	ESR_EL1,	/* Exception Syndrome Register */
	AFSR0_EL1,	/* Auxiliary Fault Status Register 0 */
	AFSR1_EL1,	/* Auxiliary Fault Status Register 1 */
	FAR_EL1,	/* Fault Address Register */
	MAIR_EL1,	/* Memory Attribute Indirection Register */
	VBAR_EL1,	/* Vector Base Address Register */
	CONTEXTIDR_EL1,	/* Context ID Register */
	TPIDR_EL0,	/* Thread ID, User R/W */
	TPIDRRO_EL0,	/* Thread ID, User R/O */
	TPIDR_EL1,	/* Thread ID, Privileged */
	AMAIR_EL1,	/* Aux Memory Attribute Indirection Register */
	CNTKCTL_EL1,	/* Timer Control Register (EL1) */
	PAR_EL1,	/* Physical Address Register */
	MDSCR_EL1,	/* Monitor Debug System Control Register */
	MDCCINT_EL1,	/* Monitor Debug Comms Channel Interrupt Enable Reg */
	DISR_EL1,	/* Deferred Interrupt Status Register */

	/* Performance Monitors Registers */
	PMCR_EL0,	/* Control Register */
	PMSELR_EL0,	/* Event Counter Selection Register */
	PMEVCNTR0_EL0,	/* Event Counter Register (0-30) */
	PMEVCNTR30_EL0 = PMEVCNTR0_EL0 + 30,
	PMCCNTR_EL0,	/* Cycle Counter Register */
	PMEVTYPER0_EL0,	/* Event Type Register (0-30) */
	PMEVTYPER30_EL0 = PMEVTYPER0_EL0 + 30,
	PMCCFILTR_EL0,	/* Cycle Count Filter Register */
	PMCNTENSET_EL0,	/* Count Enable Set Register */
	PMINTENSET_EL1,	/* Interrupt Enable Set Register */
	PMOVSSET_EL0,	/* Overflow Flag Status Set Register */
	PMSWINC_EL0,	/* Software Increment Register */
	PMUSERENR_EL0,	/* User Enable Register */

	/* Pointer Authentication Registers in a strict increasing order. */
	APIAKEYLO_EL1,
	APIAKEYHI_EL1,
	APIBKEYLO_EL1,
	APIBKEYHI_EL1,
	APDAKEYLO_EL1,
	APDAKEYHI_EL1,
	APDBKEYLO_EL1,
	APDBKEYHI_EL1,
	APGAKEYLO_EL1,
	APGAKEYHI_EL1,

	ELR_EL1,
	SP_EL1,
	SPSR_EL1,

	CNTVOFF_EL2,
	CNTV_CVAL_EL0,
	CNTV_CTL_EL0,
	CNTP_CVAL_EL0,
	CNTP_CTL_EL0,

	/* 32bit specific registers. Keep them at the end of the range */
	DACR32_EL2,	/* Domain Access Control Register */
	IFSR32_EL2,	/* Instruction Fault Status Register */
	FPEXC32_EL2,	/* Floating-Point Exception Control Register */
	DBGVCR32_EL2,	/* Debug Vector Catch Register */

	NR_SYS_REGS	/* Nothing after this line! */
};

struct kvm_cpu_context {
	struct user_pt_regs regs;	/* sp = sp_el0 */

	u64	spsr_abt;
	u64	spsr_und;
	u64	spsr_irq;
	u64	spsr_fiq;

	struct user_fpsimd_state fp_regs;

	u64 sys_regs[NR_SYS_REGS];

	struct kvm_vcpu *__hyp_running_vcpu;
};

struct kvm_pmu_events {
	u32 events_host;
	u32 events_guest;
};

struct kvm_host_data {
	struct kvm_cpu_context host_ctxt;
	struct kvm_pmu_events pmu_events;
};

struct vcpu_reset_state {
	unsigned long	pc;
	unsigned long	r0;
	bool		be;
	bool		reset;
};

struct kvm_vcpu_arch {
	struct kvm_cpu_context ctxt;
	void *sve_state;
	unsigned int sve_max_vl;

	/* Stage 2 paging state used by the hardware on next switch */
	struct kvm_s2_mmu *hw_mmu;

	/* HYP configuration */
	u64 hcr_el2;
	u32 mdcr_el2;

	/* Exception Information */
	struct kvm_vcpu_fault_info fault;

	/* State of various workarounds, see kvm_asm.h for bit assignment */
	u64 workaround_flags;

	/* Miscellaneous vcpu state flags */
	u64 flags;

	/*
	 * We maintain more than a single set of debug registers to support
	 * debugging the guest from the host and to maintain separate host and
	 * guest state during world switches. vcpu_debug_state are the debug
	 * registers of the vcpu as the guest sees them.  host_debug_state are
	 * the host registers which are saved and restored during
	 * world switches. external_debug_state contains the debug register
	 * values userspace wants to use when debugging the guest, as set
	 * via the KVM_SET_GUEST_DEBUG ioctl.
	 *
	 * debug_ptr points to the set of debug registers that should be loaded
	 * onto the hardware when running the guest.
	 */
	struct kvm_guest_debug_arch *debug_ptr;
	struct kvm_guest_debug_arch vcpu_debug_state;
	struct kvm_guest_debug_arch external_debug_state;

	struct thread_info *host_thread_info;	/* hyp VA */
	struct user_fpsimd_state *host_fpsimd_state;	/* hyp VA */

	struct {
		/* {Break,watch}point registers */
		struct kvm_guest_debug_arch regs;
		/* Statistical profiling extension */
		u64 pmscr_el1;
	} host_debug_state;

	/* VGIC state */
	struct vgic_cpu vgic_cpu;
	struct arch_timer_cpu timer_cpu;
	struct kvm_pmu pmu;

	/*
	 * Anything that is not used directly from assembly code goes
	 * here.
	 */

	/*
	 * Guest registers we preserve during guest debugging.
	 *
	 * These shadow registers are updated by the kvm_handle_sys_reg
	 * trap handler if the guest accesses or updates them while we
	 * are using guest debug.
	 */
	struct {
		u32	mdscr_el1;
	} guest_debug_preserved;

	/* vcpu power-off state */
	bool power_off;

	/* Don't run the guest (internal implementation need) */
	bool pause;

	/* Cache some mmu pages needed inside spinlock regions */
	struct kvm_mmu_memory_cache mmu_page_cache;

	/* Target CPU and feature flags */
	int target;
	DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);

	/* Detect first run of a vcpu */
	bool has_run_once;

	/* Virtual SError ESR to restore when HCR_EL2.VSE is set */
	u64 vsesr_el2;

	/* Additional reset state */
	struct vcpu_reset_state	reset_state;
	/*
	 * True when deferrable sysregs are loaded on the physical CPU,
	 * see kvm_vcpu_load_sysregs_vhe and kvm_vcpu_put_sysregs_vhe.
	 */
	bool sysregs_loaded_on_cpu;

	/* Guest PV state */
	struct {
		u64 last_steal;
		gpa_t base;
	} steal;
};

/* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
#define vcpu_sve_pffr(vcpu) ((void *)((char *)((vcpu)->arch.sve_state) + \
				      sve_ffr_offset((vcpu)->arch.sve_max_vl)))

#define vcpu_sve_state_size(vcpu) ({					\
	size_t __size_ret;						\
	unsigned int __vcpu_vq;						\
									\
	if (WARN_ON(!sve_vl_valid((vcpu)->arch.sve_max_vl))) {		\
		__size_ret = 0;						\
	} else {							\
		__vcpu_vq = sve_vq_from_vl((vcpu)->arch.sve_max_vl);	\
		__size_ret = SVE_SIG_REGS_SIZE(__vcpu_vq);		\
	}								\
									\
	__size_ret;							\
})
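
/*
 * Example (hedged sketch): vcpu_sve_state_size() is what an SVE
 * finalization path would use to size the sve_state backing buffer:
 *
 *	size_t sz = vcpu_sve_state_size(vcpu);
 *
 *	if (!sz)			// sve_max_vl was never validated
 *		return -EINVAL;
 *	vcpu->arch.sve_state = kzalloc(sz, GFP_KERNEL);
 *	if (!vcpu->arch.sve_state)
 *		return -ENOMEM;
 */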

/* vcpu_arch flags field values: */
#define KVM_ARM64_DEBUG_DIRTY		(1 << 0)
#define KVM_ARM64_FP_ENABLED		(1 << 1) /* guest FP regs loaded */
#define KVM_ARM64_FP_HOST		(1 << 2) /* host FP regs loaded */
#define KVM_ARM64_HOST_SVE_IN_USE	(1 << 3) /* backup for host TIF_SVE */
#define KVM_ARM64_HOST_SVE_ENABLED	(1 << 4) /* SVE enabled for EL0 */
#define KVM_ARM64_GUEST_HAS_SVE		(1 << 5) /* SVE exposed to guest */
#define KVM_ARM64_VCPU_SVE_FINALIZED	(1 << 6) /* SVE config completed */
#define KVM_ARM64_GUEST_HAS_PTRAUTH	(1 << 7) /* PTRAUTH exposed to guest */
#define KVM_ARM64_PENDING_EXCEPTION	(1 << 8) /* Exception pending */
#define KVM_ARM64_EXCEPT_MASK		(7 << 9) /* Target EL/MODE */

/*
 * When KVM_ARM64_PENDING_EXCEPTION is set, KVM_ARM64_EXCEPT_MASK can
 * take the following values:
 *
 * For AArch32 EL1:
 */
#define KVM_ARM64_EXCEPT_AA32_UND	(0 << 9)
#define KVM_ARM64_EXCEPT_AA32_IABT	(1 << 9)
#define KVM_ARM64_EXCEPT_AA32_DABT	(2 << 9)
/* For AArch64: */
#define KVM_ARM64_EXCEPT_AA64_ELx_SYNC	(0 << 9)
#define KVM_ARM64_EXCEPT_AA64_ELx_IRQ	(1 << 9)
#define KVM_ARM64_EXCEPT_AA64_ELx_FIQ	(2 << 9)
#define KVM_ARM64_EXCEPT_AA64_ELx_SERR	(3 << 9)
#define KVM_ARM64_EXCEPT_AA64_EL1	(0 << 11)
#define KVM_ARM64_EXCEPT_AA64_EL2	(1 << 11)

/*
 * Overlaps with KVM_ARM64_EXCEPT_MASK on purpose so that it can't be
 * set together with an exception...
 */
#define KVM_ARM64_INCREMENT_PC		(1 << 9) /* Increment PC */
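
/*
 * Example: to make a synchronous exception pending at (virtual) EL1 for an
 * AArch64 guest, an injection path ORs the pieces together:
 *
 *	vcpu->arch.flags |= (KVM_ARM64_EXCEPT_AA64_EL1		|
 *			     KVM_ARM64_EXCEPT_AA64_ELx_SYNC	|
 *			     KVM_ARM64_PENDING_EXCEPTION);
 *
 * Because KVM_ARM64_INCREMENT_PC overlaps KVM_ARM64_EXCEPT_MASK, a pending
 * exception and a PC increment can never be requested at the same time.
 */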

#define vcpu_has_sve(vcpu) (system_supports_sve() &&			\
			    ((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_SVE))

#ifdef CONFIG_ARM64_PTR_AUTH
#define vcpu_has_ptrauth(vcpu)						\
	((cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH) ||		\
	  cpus_have_final_cap(ARM64_HAS_GENERIC_AUTH)) &&		\
	 (vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_PTRAUTH)
#else
#define vcpu_has_ptrauth(vcpu)		false
#endif

#define vcpu_gp_regs(v)		(&(v)->arch.ctxt.regs)

/*
 * Only use __vcpu_sys_reg/ctxt_sys_reg if you know you want the
 * memory-backed version of a register, and not the one most recently
 * accessed by a running VCPU.  For example, for userspace access or
 * for system registers that are never context switched, but only
 * emulated.
 */
#define __ctxt_sys_reg(c,r)	(&(c)->sys_regs[(r)])

#define ctxt_sys_reg(c,r)	(*__ctxt_sys_reg(c,r))

#define __vcpu_sys_reg(v,r)	(ctxt_sys_reg(&(v)->arch.ctxt, (r)))

u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg);
void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg);

static inline bool __vcpu_read_sys_reg_from_cpu(int reg, u64 *val)
{
	/*
	 * *** VHE ONLY ***
	 *
	 * System registers listed in the switch are not saved on every
	 * exit from the guest but are only saved on vcpu_put.
	 *
	 * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
	 * should never be listed below, because the guest cannot modify its
	 * own MPIDR_EL1 and MPIDR_EL1 is accessed for VCPU A from VCPU B's
	 * thread when emulating cross-VCPU communication.
	 */
	if (!has_vhe())
		return false;

	switch (reg) {
	case CSSELR_EL1:	*val = read_sysreg_s(SYS_CSSELR_EL1);	break;
	case SCTLR_EL1:		*val = read_sysreg_s(SYS_SCTLR_EL12);	break;
	case CPACR_EL1:		*val = read_sysreg_s(SYS_CPACR_EL12);	break;
	case TTBR0_EL1:		*val = read_sysreg_s(SYS_TTBR0_EL12);	break;
	case TTBR1_EL1:		*val = read_sysreg_s(SYS_TTBR1_EL12);	break;
	case TCR_EL1:		*val = read_sysreg_s(SYS_TCR_EL12);	break;
	case ESR_EL1:		*val = read_sysreg_s(SYS_ESR_EL12);	break;
	case AFSR0_EL1:		*val = read_sysreg_s(SYS_AFSR0_EL12);	break;
	case AFSR1_EL1:		*val = read_sysreg_s(SYS_AFSR1_EL12);	break;
	case FAR_EL1:		*val = read_sysreg_s(SYS_FAR_EL12);	break;
	case MAIR_EL1:		*val = read_sysreg_s(SYS_MAIR_EL12);	break;
	case VBAR_EL1:		*val = read_sysreg_s(SYS_VBAR_EL12);	break;
	case CONTEXTIDR_EL1:	*val = read_sysreg_s(SYS_CONTEXTIDR_EL12); break;
	case TPIDR_EL0:		*val = read_sysreg_s(SYS_TPIDR_EL0);	break;
	case TPIDRRO_EL0:	*val = read_sysreg_s(SYS_TPIDRRO_EL0);	break;
	case TPIDR_EL1:		*val = read_sysreg_s(SYS_TPIDR_EL1);	break;
	case AMAIR_EL1:		*val = read_sysreg_s(SYS_AMAIR_EL12);	break;
	case CNTKCTL_EL1:	*val = read_sysreg_s(SYS_CNTKCTL_EL12);	break;
	case ELR_EL1:		*val = read_sysreg_s(SYS_ELR_EL12);	break;
	case PAR_EL1:		*val = read_sysreg_par();		break;
	case DACR32_EL2:	*val = read_sysreg_s(SYS_DACR32_EL2);	break;
	case IFSR32_EL2:	*val = read_sysreg_s(SYS_IFSR32_EL2);	break;
	case DBGVCR32_EL2:	*val = read_sysreg_s(SYS_DBGVCR32_EL2);	break;
	default:		return false;
	}

	return true;
}

static inline bool __vcpu_write_sys_reg_to_cpu(u64 val, int reg)
{
	/*
	 * *** VHE ONLY ***
	 *
	 * System registers listed in the switch are not restored on every
	 * entry to the guest but are only restored on vcpu_load.
	 *
	 * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
	 * should never be listed below, because the MPIDR should only be set
	 * once, before running the VCPU, and never changed later.
	 */
	if (!has_vhe())
		return false;

	switch (reg) {
	case CSSELR_EL1:	write_sysreg_s(val, SYS_CSSELR_EL1);	break;
	case SCTLR_EL1:		write_sysreg_s(val, SYS_SCTLR_EL12);	break;
	case CPACR_EL1:		write_sysreg_s(val, SYS_CPACR_EL12);	break;
	case TTBR0_EL1:		write_sysreg_s(val, SYS_TTBR0_EL12);	break;
	case TTBR1_EL1:		write_sysreg_s(val, SYS_TTBR1_EL12);	break;
	case TCR_EL1:		write_sysreg_s(val, SYS_TCR_EL12);	break;
	case ESR_EL1:		write_sysreg_s(val, SYS_ESR_EL12);	break;
	case AFSR0_EL1:		write_sysreg_s(val, SYS_AFSR0_EL12);	break;
	case AFSR1_EL1:		write_sysreg_s(val, SYS_AFSR1_EL12);	break;
	case FAR_EL1:		write_sysreg_s(val, SYS_FAR_EL12);	break;
	case MAIR_EL1:		write_sysreg_s(val, SYS_MAIR_EL12);	break;
	case VBAR_EL1:		write_sysreg_s(val, SYS_VBAR_EL12);	break;
	case CONTEXTIDR_EL1:	write_sysreg_s(val, SYS_CONTEXTIDR_EL12); break;
	case TPIDR_EL0:		write_sysreg_s(val, SYS_TPIDR_EL0);	break;
	case TPIDRRO_EL0:	write_sysreg_s(val, SYS_TPIDRRO_EL0);	break;
	case TPIDR_EL1:		write_sysreg_s(val, SYS_TPIDR_EL1);	break;
	case AMAIR_EL1:		write_sysreg_s(val, SYS_AMAIR_EL12);	break;
	case CNTKCTL_EL1:	write_sysreg_s(val, SYS_CNTKCTL_EL12);	break;
	case ELR_EL1:		write_sysreg_s(val, SYS_ELR_EL12);	break;
	case PAR_EL1:		write_sysreg_s(val, SYS_PAR_EL1);	break;
	case DACR32_EL2:	write_sysreg_s(val, SYS_DACR32_EL2);	break;
	case IFSR32_EL2:	write_sysreg_s(val, SYS_IFSR32_EL2);	break;
	case DBGVCR32_EL2:	write_sysreg_s(val, SYS_DBGVCR32_EL2);	break;
	default:		return false;
	}

	return true;
}
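
/*
 * Hedged sketch of how the helpers above compose with the memory-backed
 * copy (the real logic lives in arch/arm64/kvm/sys_regs.c): prefer the
 * value live on the CPU when the sysregs are loaded, else fall back to
 * the in-memory context.
 *
 *	u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg)
 *	{
 *		u64 val = 0x8badf00d8badf00d;
 *
 *		if (vcpu->arch.sysregs_loaded_on_cpu &&
 *		    __vcpu_read_sys_reg_from_cpu(reg, &val))
 *			return val;
 *
 *		return __vcpu_sys_reg(vcpu, reg);
 *	}
 */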

struct kvm_vm_stat {
	ulong remote_tlb_flush;
};

struct kvm_vcpu_stat {
	u64 halt_successful_poll;
	u64 halt_attempted_poll;
	u64 halt_poll_success_ns;
	u64 halt_poll_fail_ns;
	u64 halt_poll_invalid;
	u64 halt_wakeup;
	u64 hvc_exit_stat;
	u64 wfe_exit_stat;
	u64 wfi_exit_stat;
	u64 mmio_exit_user;
	u64 mmio_exit_kernel;
	u64 exits;
};

int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);

unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);

int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events);

int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events);

#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva_range(struct kvm *kvm,
			unsigned long start, unsigned long end, unsigned flags);
int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);

void kvm_arm_halt_guest(struct kvm *kvm);
void kvm_arm_resume_guest(struct kvm *kvm);

#define kvm_call_hyp_nvhe(f, ...)					\
	({								\
		struct arm_smccc_res res;				\
									\
		arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(f),		\
				  ##__VA_ARGS__, &res);			\
		WARN_ON(res.a0 != SMCCC_RET_SUCCESS);			\
									\
		res.a1;							\
	})

/*
 * The couple of isb() below are there to guarantee the same behaviour
 * on VHE as on !VHE, where the eret to EL1 acts as a context
 * synchronization event.
 */
#define kvm_call_hyp(f, ...)						\
	do {								\
		if (has_vhe()) {					\
			f(__VA_ARGS__);					\
			isb();						\
		} else {						\
			kvm_call_hyp_nvhe(f, ##__VA_ARGS__);		\
		}							\
	} while (0)

#define kvm_call_hyp_ret(f, ...)					\
	({								\
		typeof(f(__VA_ARGS__)) ret;				\
									\
		if (has_vhe()) {					\
			ret = f(__VA_ARGS__);				\
			isb();						\
		} else {						\
			ret = kvm_call_hyp_nvhe(f, ##__VA_ARGS__);	\
		}							\
									\
		ret;							\
	})
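
/*
 * Usage example (illustrative): callers pick the variant based on whether
 * they need the hypercall's return value, e.g.:
 *
 *	kvm_call_hyp(__kvm_tlb_flush_vmid, mmu);	// fire and forget
 *	mdcr = kvm_call_hyp_ret(__kvm_get_mdcr_el2);	// need the result
 *
 * On VHE these compile down to direct function calls followed by an isb();
 * on nVHE they become SMCCC hypercalls via kvm_call_hyp_nvhe().
 */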

void force_vm_exit(const cpumask_t *mask);
void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);

int handle_exit(struct kvm_vcpu *vcpu, int exception_index);
void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index);

int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu);
int kvm_handle_cp14_32(struct kvm_vcpu *vcpu);
int kvm_handle_cp14_64(struct kvm_vcpu *vcpu);
int kvm_handle_cp15_32(struct kvm_vcpu *vcpu);
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu);
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu);

void kvm_reset_sys_regs(struct kvm_vcpu *vcpu);

void kvm_sys_reg_table_init(void);

/* MMIO helpers */
void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);

int kvm_handle_mmio_return(struct kvm_vcpu *vcpu);
int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa);

int kvm_perf_init(void);
int kvm_perf_teardown(void);

long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu);
gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu);
void kvm_update_stolen_time(struct kvm_vcpu *vcpu);

bool kvm_arm_pvtime_supported(void);
int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pvtime_get_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);

static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
{
	vcpu_arch->steal.base = GPA_INVALID;
}

static inline bool kvm_arm_is_pvtime_enabled(struct kvm_vcpu_arch *vcpu_arch)
{
	return (vcpu_arch->steal.base != GPA_INVALID);
}
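
/*
 * Example (sketch): a vcpu load path would use this predicate to schedule
 * steal-time accounting via the request machinery defined at the top of
 * this file:
 *
 *	if (kvm_arm_is_pvtime_enabled(&vcpu->arch))
 *		kvm_make_request(KVM_REQ_RECORD_STEAL, vcpu);
 */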

void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome);

struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);

DECLARE_KVM_HYP_PER_CPU(struct kvm_host_data, kvm_host_data);

static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
{
	/* The host's MPIDR is immutable, so let's set it up at boot time */
	ctxt_sys_reg(cpu_ctxt, MPIDR_EL1) = read_cpuid_mpidr();
}

static inline bool kvm_arch_requires_vhe(void)
{
	/*
	 * The Arm architecture specifies that implementation of SVE
	 * requires VHE also to be implemented.  The KVM code for arm64
	 * relies on this when SVE is present:
	 */
	if (system_supports_sve())
		return true;

	return false;
}

void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu);

static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}

void kvm_arm_init_debug(void);
void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);
int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);

/* Guest/host FPSIMD coordination helpers */
int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu);

static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr)
{
	return (!has_vhe() && attr->exclude_host);
}

#ifdef CONFIG_KVM /* Avoid conflicts with core headers if CONFIG_KVM=n */
static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
{
	return kvm_arch_vcpu_run_map_fp(vcpu);
}

void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr);
void kvm_clr_pmu_events(u32 clr);

void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu);
void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
#else
static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {}
static inline void kvm_clr_pmu_events(u32 clr) {}
#endif

void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu);
void kvm_vcpu_put_sysregs_vhe(struct kvm_vcpu *vcpu);

int kvm_set_ipa_limit(void);

#define __KVM_HAVE_ARCH_VM_ALLOC
struct kvm *kvm_arch_alloc_vm(void);
void kvm_arch_free_vm(struct kvm *kvm);

int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type);

int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature);
bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);

#define kvm_arm_vcpu_sve_finalized(vcpu) \
	((vcpu)->arch.flags & KVM_ARM64_VCPU_SVE_FINALIZED)

#define kvm_vcpu_has_pmu(vcpu)					\
	(test_bit(KVM_ARM_VCPU_PMU_V3, (vcpu)->arch.features))

#endif /* __ARM64_KVM_HOST_H__ */