/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/asm/kvm_host.h:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#ifndef __ARM64_KVM_HOST_H__
#define __ARM64_KVM_HOST_H__

#include <linux/arm-smccc.h>
#include <linux/bitmap.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <linux/kvm_types.h>
#include <linux/percpu.h>
#include <linux/psci.h>
#include <asm/arch_gicv3.h>
#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/daifflags.h>
#include <asm/fpsimd.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>

#define __KVM_HAVE_ARCH_INTC_INITIALIZED

#define KVM_HALT_POLL_NS_DEFAULT 500000

#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>
#include <kvm/arm_pmu.h>

#define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS

#define KVM_VCPU_MAX_FEATURES 7

#define KVM_REQ_SLEEP \
	KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_IRQ_PENDING	KVM_ARCH_REQ(1)
#define KVM_REQ_VCPU_RESET	KVM_ARCH_REQ(2)
#define KVM_REQ_RECORD_STEAL	KVM_ARCH_REQ(3)
#define KVM_REQ_RELOAD_GICv4	KVM_ARCH_REQ(4)
#define KVM_REQ_RELOAD_PMU	KVM_ARCH_REQ(5)
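
/*
 * Hedged usage sketch: these requests pair with the generic
 * vcpu-request helpers from <linux/kvm_host.h>; handle_irq_pending()
 * below is a hypothetical consumer, named here only for illustration:
 *
 *	kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
 *
 *	if (kvm_check_request(KVM_REQ_IRQ_PENDING, vcpu))
 *		handle_irq_pending(vcpu);
 */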

#define KVM_DIRTY_LOG_MANUAL_CAPS   (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \
				     KVM_DIRTY_LOG_INITIALLY_SET)

#define KVM_HAVE_MMU_RWLOCK

/*
 * Mode of operation configurable with kvm-arm.mode early param.
 * See Documentation/admin-guide/kernel-parameters.txt for more information.
 */
enum kvm_mode {
	KVM_MODE_DEFAULT,
	KVM_MODE_PROTECTED,
	KVM_MODE_NONE,
};
enum kvm_mode kvm_get_mode(void);
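
/*
 * Illustrative init-time check against the configured mode (a sketch;
 * the real consumers live in arch/arm64/kvm/arm.c):
 *
 *	if (kvm_get_mode() == KVM_MODE_NONE)
 *		return -ENODEV;
 */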

DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);

extern unsigned int kvm_sve_max_vl;
int kvm_arm_init_sve(void);

u32 __attribute_const__ kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu);

struct kvm_vmid {
	atomic64_t id;
};

struct kvm_s2_mmu {
	struct kvm_vmid vmid;

	/*
	 * stage2 entry level table
	 *
	 * Two kvm_s2_mmu structures in the same VM can point to the same
	 * pgd here.  This happens when running a guest using a
	 * translation regime that isn't affected by its own stage-2
	 * translation, such as a non-VHE hypervisor running at vEL2, or
	 * for vEL1/EL0 with vHCR_EL2.VM == 0.  In that case, we use the
	 * canonical stage-2 page tables.
	 */
	phys_addr_t	pgd_phys;
	struct kvm_pgtable *pgt;

	/* The last vcpu id that ran on each physical CPU */
	int __percpu *last_vcpu_ran;

	struct kvm_arch *arch;
};

struct kvm_arch_memory_slot {
};

struct kvm_arch {
	struct kvm_s2_mmu mmu;

	/* VTCR_EL2 value for this VM */
	u64    vtcr;

	/* The maximum number of vCPUs depends on the used GIC model */
	int max_vcpus;

	/* Interrupt controller */
	struct vgic_dist	vgic;

	/* Mandated version of PSCI */
	u32 psci_version;

	/*
	 * If we encounter a data abort without valid instruction syndrome
	 * information, report this to user space.  User space can (and
	 * should) opt in to this feature if KVM_CAP_ARM_NISV_TO_USER is
	 * supported.
	 */
#define KVM_ARCH_FLAG_RETURN_NISV_IO_ABORT_TO_USER	0
	/* Memory Tagging Extension enabled for the guest */
#define KVM_ARCH_FLAG_MTE_ENABLED			1
	/* At least one vCPU has run in the VM */
#define KVM_ARCH_FLAG_HAS_RAN_ONCE			2
	/*
	 * The following two bits are used to indicate the guest's EL1
	 * register width configuration. The KVM_ARCH_FLAG_EL1_32BIT bit is
	 * valid only when KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED is set.
	 * Otherwise, the guest's EL1 register width has not yet been
	 * determined.
	 */
#define KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED		3
#define KVM_ARCH_FLAG_EL1_32BIT				4

	unsigned long flags;

	/*
	 * VM-wide PMU filter, implemented as a bitmap and big enough for
	 * up to 2^10 events (ARMv8.0) or 2^16 events (ARMv8.1+).
	 */
	unsigned long *pmu_filter;
	struct arm_pmu *arm_pmu;

	cpumask_var_t supported_cpus;

	u8 pfr0_csv2;
	u8 pfr0_csv3;
};
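
/*
 * The KVM_ARCH_FLAG_* values above index into kvm->arch.flags and are
 * manipulated with the regular bitops; a hedged sketch:
 *
 *	set_bit(KVM_ARCH_FLAG_MTE_ENABLED, &kvm->arch.flags);
 *
 *	if (test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags))
 *		return -EBUSY;
 */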

struct kvm_vcpu_fault_info {
	u32 esr_el2;		/* Hyp Syndrome Register */
	u64 far_el2;		/* Hyp Fault Address Register */
	u64 hpfar_el2;		/* Hyp IPA Fault Address Register */
	u64 disr_el1;		/* Deferred [SError] Status Register */
};

enum vcpu_sysreg {
	__INVALID_SYSREG__,   /* 0 is reserved as an invalid value */
	MPIDR_EL1,	/* MultiProcessor Affinity Register */
	CSSELR_EL1,	/* Cache Size Selection Register */
	SCTLR_EL1,	/* System Control Register */
	ACTLR_EL1,	/* Auxiliary Control Register */
	CPACR_EL1,	/* Coprocessor Access Control */
	ZCR_EL1,	/* SVE Control */
	TTBR0_EL1,	/* Translation Table Base Register 0 */
	TTBR1_EL1,	/* Translation Table Base Register 1 */
	TCR_EL1,	/* Translation Control Register */
	ESR_EL1,	/* Exception Syndrome Register */
	AFSR0_EL1,	/* Auxiliary Fault Status Register 0 */
	AFSR1_EL1,	/* Auxiliary Fault Status Register 1 */
	FAR_EL1,	/* Fault Address Register */
	MAIR_EL1,	/* Memory Attribute Indirection Register */
	VBAR_EL1,	/* Vector Base Address Register */
	CONTEXTIDR_EL1,	/* Context ID Register */
	TPIDR_EL0,	/* Thread ID, User R/W */
	TPIDRRO_EL0,	/* Thread ID, User R/O */
	TPIDR_EL1,	/* Thread ID, Privileged */
	AMAIR_EL1,	/* Aux Memory Attribute Indirection Register */
	CNTKCTL_EL1,	/* Timer Control Register (EL1) */
	PAR_EL1,	/* Physical Address Register */
	MDSCR_EL1,	/* Monitor Debug System Control Register */
	MDCCINT_EL1,	/* Monitor Debug Comms Channel Interrupt Enable Reg */
	OSLSR_EL1,	/* OS Lock Status Register */
	DISR_EL1,	/* Deferred Interrupt Status Register */

	/* Performance Monitors Registers */
	PMCR_EL0,	/* Control Register */
	PMSELR_EL0,	/* Event Counter Selection Register */
	PMEVCNTR0_EL0,	/* Event Counter Register (0-30) */
	PMEVCNTR30_EL0 = PMEVCNTR0_EL0 + 30,
	PMCCNTR_EL0,	/* Cycle Counter Register */
	PMEVTYPER0_EL0,	/* Event Type Register (0-30) */
	PMEVTYPER30_EL0 = PMEVTYPER0_EL0 + 30,
	PMCCFILTR_EL0,	/* Cycle Count Filter Register */
	PMCNTENSET_EL0,	/* Count Enable Set Register */
	PMINTENSET_EL1,	/* Interrupt Enable Set Register */
	PMOVSSET_EL0,	/* Overflow Flag Status Set Register */
	PMUSERENR_EL0,	/* User Enable Register */

	/* Pointer Authentication Registers in strictly increasing order. */
	APIAKEYLO_EL1,
	APIAKEYHI_EL1,
	APIBKEYLO_EL1,
	APIBKEYHI_EL1,
	APDAKEYLO_EL1,
	APDAKEYHI_EL1,
	APDBKEYLO_EL1,
	APDBKEYHI_EL1,
	APGAKEYLO_EL1,
	APGAKEYHI_EL1,

	ELR_EL1,
	SP_EL1,
	SPSR_EL1,

	CNTVOFF_EL2,
	CNTV_CVAL_EL0,
	CNTV_CTL_EL0,
	CNTP_CVAL_EL0,
	CNTP_CTL_EL0,

	/* Memory Tagging Extension registers */
	RGSR_EL1,	/* Random Allocation Tag Seed Register */
	GCR_EL1,	/* Tag Control Register */
	TFSR_EL1,	/* Tag Fault Status Register (EL1) */
	TFSRE0_EL1,	/* Tag Fault Status Register (EL0) */

	/* 32bit specific registers. Keep them at the end of the range */
	DACR32_EL2,	/* Domain Access Control Register */
	IFSR32_EL2,	/* Instruction Fault Status Register */
	FPEXC32_EL2,	/* Floating-Point Exception Control Register */
	DBGVCR32_EL2,	/* Debug Vector Catch Register */

	NR_SYS_REGS	/* Nothing after this line! */
};
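
/*
 * Because PMEVCNTRn_EL0/PMEVTYPERn_EL0 occupy contiguous enumerators,
 * event counter n can be addressed by arithmetic on the base value; a
 * hedged sketch using the accessor defined further down:
 *
 *	u64 val = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + n);
 */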

struct kvm_cpu_context {
	struct user_pt_regs regs;	/* sp = sp_el0 */

	u64	spsr_abt;
	u64	spsr_und;
	u64	spsr_irq;
	u64	spsr_fiq;

	struct user_fpsimd_state fp_regs;

	u64 sys_regs[NR_SYS_REGS];

	struct kvm_vcpu *__hyp_running_vcpu;
};

struct kvm_pmu_events {
	u32 events_host;
	u32 events_guest;
};

struct kvm_host_data {
	struct kvm_cpu_context host_ctxt;
	struct kvm_pmu_events pmu_events;
};

struct kvm_host_psci_config {
	/* PSCI version used by host. */
	u32 version;

	/* Function IDs used by host if version is v0.1. */
	struct psci_0_1_function_ids function_ids_0_1;

	bool psci_0_1_cpu_suspend_implemented;
	bool psci_0_1_cpu_on_implemented;
	bool psci_0_1_cpu_off_implemented;
	bool psci_0_1_migrate_implemented;
};

extern struct kvm_host_psci_config kvm_nvhe_sym(kvm_host_psci_config);
#define kvm_host_psci_config CHOOSE_NVHE_SYM(kvm_host_psci_config)

extern s64 kvm_nvhe_sym(hyp_physvirt_offset);
#define hyp_physvirt_offset CHOOSE_NVHE_SYM(hyp_physvirt_offset)

extern u64 kvm_nvhe_sym(hyp_cpu_logical_map)[NR_CPUS];
#define hyp_cpu_logical_map CHOOSE_NVHE_SYM(hyp_cpu_logical_map)

struct vcpu_reset_state {
	unsigned long	pc;
	unsigned long	r0;
	bool		be;
	bool		reset;
};

struct kvm_vcpu_arch {
	struct kvm_cpu_context ctxt;

	/* Guest floating point state */
	void *sve_state;
	unsigned int sve_max_vl;
	u64 svcr;

	/* Stage 2 paging state used by the hardware on next switch */
	struct kvm_s2_mmu *hw_mmu;

	/* Values of trap registers for the guest. */
	u64 hcr_el2;
	u64 mdcr_el2;
	u64 cptr_el2;

	/* Values of trap registers for the host before guest entry. */
	u64 mdcr_el2_host;

	/* Exception Information */
	struct kvm_vcpu_fault_info fault;

	/* Miscellaneous vcpu state flags */
	u64 flags;

	/*
	 * We maintain more than a single set of debug registers to support
	 * debugging the guest from the host and to maintain separate host and
	 * guest state during world switches. vcpu_debug_state contains the
	 * debug registers of the vcpu as the guest sees them.
	 * host_debug_state contains the host registers that are saved and
	 * restored across world switches. external_debug_state contains the
	 * debug values userspace wants to use to debug the guest; it is set
	 * via the KVM_SET_GUEST_DEBUG ioctl.
	 *
	 * debug_ptr points to the set of debug registers that should be loaded
	 * onto the hardware when running the guest.
	 */
	struct kvm_guest_debug_arch *debug_ptr;
	struct kvm_guest_debug_arch vcpu_debug_state;
	struct kvm_guest_debug_arch external_debug_state;

	struct user_fpsimd_state *host_fpsimd_state;	/* hyp VA */
	struct task_struct *parent_task;

	struct {
		/* {Break,watch}point registers */
		struct kvm_guest_debug_arch regs;
		/* Statistical profiling extension */
		u64 pmscr_el1;
		/* Self-hosted trace */
		u64 trfcr_el1;
	} host_debug_state;

	/* VGIC state */
	struct vgic_cpu vgic_cpu;
	struct arch_timer_cpu timer_cpu;
	struct kvm_pmu pmu;

	/*
	 * Anything that is not used directly from assembly code goes
	 * here.
	 */

	/*
	 * Guest registers we preserve during guest debugging.
	 *
	 * These shadow registers are updated by the kvm_handle_sys_reg
	 * trap handler if the guest accesses or updates them while we
	 * are using guest debug.
	 */
	struct {
		u32	mdscr_el1;
	} guest_debug_preserved;

	/* vcpu power-off state */
	bool power_off;

	/* Don't run the guest (internal implementation need) */
	bool pause;

	/* Cache some mmu pages needed inside spinlock regions */
	struct kvm_mmu_memory_cache mmu_page_cache;

	/* Target CPU and feature flags */
	int target;
	DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);

	/* Virtual SError ESR to restore when HCR_EL2.VSE is set */
	u64 vsesr_el2;

	/* Additional reset state */
	struct vcpu_reset_state	reset_state;

	/* True when deferrable sysregs are loaded on the physical CPU,
	 * see kvm_vcpu_load_sysregs_vhe and kvm_vcpu_put_sysregs_vhe. */
	bool sysregs_loaded_on_cpu;

	/* Guest PV state */
	struct {
		u64 last_steal;
		gpa_t base;
	} steal;
};

/* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
#define vcpu_sve_pffr(vcpu) (kern_hyp_va((vcpu)->arch.sve_state) +	\
			     sve_ffr_offset((vcpu)->arch.sve_max_vl))

#define vcpu_sve_max_vq(vcpu)	sve_vq_from_vl((vcpu)->arch.sve_max_vl)

#define vcpu_sve_state_size(vcpu) ({					\
	size_t __size_ret;						\
	unsigned int __vcpu_vq;						\
									\
	if (WARN_ON(!sve_vl_valid((vcpu)->arch.sve_max_vl))) {		\
		__size_ret = 0;						\
	} else {							\
		__vcpu_vq = vcpu_sve_max_vq(vcpu);			\
		__size_ret = SVE_SIG_REGS_SIZE(__vcpu_vq);		\
	}								\
									\
	__size_ret;							\
})
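
/*
 * Allocation sketch built on vcpu_sve_state_size(); the kzalloc-based
 * buffer below is illustrative, not the upstream allocation path:
 *
 *	size_t sz = vcpu_sve_state_size(vcpu);
 *
 *	if (!sz)
 *		return -EINVAL;
 *	vcpu->arch.sve_state = kzalloc(sz, GFP_KERNEL_ACCOUNT);
 *	if (!vcpu->arch.sve_state)
 *		return -ENOMEM;
 */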

/* vcpu_arch flags field values: */
#define KVM_ARM64_DEBUG_DIRTY		(1 << 0)
#define KVM_ARM64_FP_ENABLED		(1 << 1) /* guest FP regs loaded */
#define KVM_ARM64_FP_HOST		(1 << 2) /* host FP regs loaded */
#define KVM_ARM64_HOST_SVE_ENABLED	(1 << 4) /* SVE enabled for EL0 */
#define KVM_ARM64_GUEST_HAS_SVE		(1 << 5) /* SVE exposed to guest */
#define KVM_ARM64_VCPU_SVE_FINALIZED	(1 << 6) /* SVE config completed */
#define KVM_ARM64_GUEST_HAS_PTRAUTH	(1 << 7) /* PTRAUTH exposed to guest */
#define KVM_ARM64_PENDING_EXCEPTION	(1 << 8) /* Exception pending */
/*
 * Overlaps with KVM_ARM64_EXCEPT_MASK on purpose so that it can't be
 * set together with an exception...
 */
#define KVM_ARM64_INCREMENT_PC		(1 << 9) /* Increment PC */
#define KVM_ARM64_EXCEPT_MASK		(7 << 9) /* Target EL/MODE */
/*
 * When KVM_ARM64_PENDING_EXCEPTION is set, KVM_ARM64_EXCEPT_MASK can
 * take the following values:
 *
 * For AArch32 EL1:
 */
#define KVM_ARM64_EXCEPT_AA32_UND	(0 << 9)
#define KVM_ARM64_EXCEPT_AA32_IABT	(1 << 9)
#define KVM_ARM64_EXCEPT_AA32_DABT	(2 << 9)
/* For AArch64: */
#define KVM_ARM64_EXCEPT_AA64_ELx_SYNC	(0 << 9)
#define KVM_ARM64_EXCEPT_AA64_ELx_IRQ	(1 << 9)
#define KVM_ARM64_EXCEPT_AA64_ELx_FIQ	(2 << 9)
#define KVM_ARM64_EXCEPT_AA64_ELx_SERR	(3 << 9)
#define KVM_ARM64_EXCEPT_AA64_EL1	(0 << 11)
#define KVM_ARM64_EXCEPT_AA64_EL2	(1 << 11)

#define KVM_ARM64_DEBUG_STATE_SAVE_SPE	(1 << 12) /* Save SPE context if active  */
#define KVM_ARM64_DEBUG_STATE_SAVE_TRBE	(1 << 13) /* Save TRBE context if active  */
#define KVM_ARM64_FP_FOREIGN_FPSTATE	(1 << 14)
#define KVM_ARM64_ON_UNSUPPORTED_CPU	(1 << 15) /* Physical CPU not in supported_cpus */
#define KVM_ARM64_HOST_SME_ENABLED	(1 << 16) /* SME enabled for EL0 */

#define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE | \
				 KVM_GUESTDBG_USE_SW_BP | \
				 KVM_GUESTDBG_USE_HW | \
				 KVM_GUESTDBG_SINGLESTEP)

#define vcpu_has_sve(vcpu) (system_supports_sve() &&			\
			    ((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_SVE))

#ifdef CONFIG_ARM64_PTR_AUTH
#define vcpu_has_ptrauth(vcpu)						\
	((cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH) ||		\
	  cpus_have_final_cap(ARM64_HAS_GENERIC_AUTH)) &&		\
	 (vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_PTRAUTH)
#else
#define vcpu_has_ptrauth(vcpu)		false
#endif

#define vcpu_on_unsupported_cpu(vcpu)					\
	((vcpu)->arch.flags & KVM_ARM64_ON_UNSUPPORTED_CPU)

#define vcpu_set_on_unsupported_cpu(vcpu)				\
	((vcpu)->arch.flags |= KVM_ARM64_ON_UNSUPPORTED_CPU)

#define vcpu_clear_on_unsupported_cpu(vcpu)				\
	((vcpu)->arch.flags &= ~KVM_ARM64_ON_UNSUPPORTED_CPU)

#define vcpu_gp_regs(v)		(&(v)->arch.ctxt.regs)

/*
 * Only use __vcpu_sys_reg/ctxt_sys_reg if you know you want the
 * memory backed version of a register, and not the one most recently
 * accessed by a running VCPU.  For example, for userspace access or
 * for system registers that are never context switched, but only
 * emulated.
 */
#define __ctxt_sys_reg(c,r)	(&(c)->sys_regs[(r)])

#define ctxt_sys_reg(c,r)	(*__ctxt_sys_reg(c,r))

#define __vcpu_sys_reg(v,r)	(ctxt_sys_reg(&(v)->arch.ctxt, (r)))

u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg);
void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg);
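
/*
 * As a hedged rule of thumb: userspace register dumps read the
 * memory-backed copy directly, while emulation paths that may race with
 * a loaded vCPU go through the accessors above, e.g.:
 *
 *	val = __vcpu_sys_reg(vcpu, ACTLR_EL1);	  (emulated only)
 *	val = vcpu_read_sys_reg(vcpu, SCTLR_EL1); (may live in hardware)
 */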

static inline bool __vcpu_read_sys_reg_from_cpu(int reg, u64 *val)
{
	/*
	 * *** VHE ONLY ***
	 *
	 * System registers listed in the switch are not saved on every
	 * exit from the guest but are only saved on vcpu_put.
	 *
	 * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
	 * should never be listed below, because the guest cannot modify its
	 * own MPIDR_EL1 and MPIDR_EL1 is accessed for VCPU A from VCPU B's
	 * thread when emulating cross-VCPU communication.
	 */
	if (!has_vhe())
		return false;

	switch (reg) {
	case CSSELR_EL1:	*val = read_sysreg_s(SYS_CSSELR_EL1);	break;
	case SCTLR_EL1:		*val = read_sysreg_s(SYS_SCTLR_EL12);	break;
	case CPACR_EL1:		*val = read_sysreg_s(SYS_CPACR_EL12);	break;
	case TTBR0_EL1:		*val = read_sysreg_s(SYS_TTBR0_EL12);	break;
	case TTBR1_EL1:		*val = read_sysreg_s(SYS_TTBR1_EL12);	break;
	case TCR_EL1:		*val = read_sysreg_s(SYS_TCR_EL12);	break;
	case ESR_EL1:		*val = read_sysreg_s(SYS_ESR_EL12);	break;
	case AFSR0_EL1:		*val = read_sysreg_s(SYS_AFSR0_EL12);	break;
	case AFSR1_EL1:		*val = read_sysreg_s(SYS_AFSR1_EL12);	break;
	case FAR_EL1:		*val = read_sysreg_s(SYS_FAR_EL12);	break;
	case MAIR_EL1:		*val = read_sysreg_s(SYS_MAIR_EL12);	break;
	case VBAR_EL1:		*val = read_sysreg_s(SYS_VBAR_EL12);	break;
	case CONTEXTIDR_EL1:	*val = read_sysreg_s(SYS_CONTEXTIDR_EL12);break;
	case TPIDR_EL0:		*val = read_sysreg_s(SYS_TPIDR_EL0);	break;
	case TPIDRRO_EL0:	*val = read_sysreg_s(SYS_TPIDRRO_EL0);	break;
	case TPIDR_EL1:		*val = read_sysreg_s(SYS_TPIDR_EL1);	break;
	case AMAIR_EL1:		*val = read_sysreg_s(SYS_AMAIR_EL12);	break;
	case CNTKCTL_EL1:	*val = read_sysreg_s(SYS_CNTKCTL_EL12);	break;
	case ELR_EL1:		*val = read_sysreg_s(SYS_ELR_EL12);	break;
	case PAR_EL1:		*val = read_sysreg_par();		break;
	case DACR32_EL2:	*val = read_sysreg_s(SYS_DACR32_EL2);	break;
	case IFSR32_EL2:	*val = read_sysreg_s(SYS_IFSR32_EL2);	break;
	case DBGVCR32_EL2:	*val = read_sysreg_s(SYS_DBGVCR32_EL2);	break;
	default:		return false;
	}

	return true;
}
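
/*
 * For reference, vcpu_read_sys_reg() in sys_regs.c is expected to layer
 * on this helper roughly as follows (a sketch, not the verbatim source):
 *
 *	if (vcpu->arch.sysregs_loaded_on_cpu &&
 *	    __vcpu_read_sys_reg_from_cpu(reg, &val))
 *		return val;
 *
 *	return __vcpu_sys_reg(vcpu, reg);
 */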

static inline bool __vcpu_write_sys_reg_to_cpu(u64 val, int reg)
{
	/*
	 * *** VHE ONLY ***
	 *
	 * System registers listed in the switch are not restored on every
	 * entry to the guest but are only restored on vcpu_load.
	 *
	 * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
	 * should never be listed below, because the MPIDR should only be set
	 * once, before running the VCPU, and never changed later.
	 */
	if (!has_vhe())
		return false;

	switch (reg) {
	case CSSELR_EL1:	write_sysreg_s(val, SYS_CSSELR_EL1);	break;
	case SCTLR_EL1:		write_sysreg_s(val, SYS_SCTLR_EL12);	break;
	case CPACR_EL1:		write_sysreg_s(val, SYS_CPACR_EL12);	break;
	case TTBR0_EL1:		write_sysreg_s(val, SYS_TTBR0_EL12);	break;
	case TTBR1_EL1:		write_sysreg_s(val, SYS_TTBR1_EL12);	break;
	case TCR_EL1:		write_sysreg_s(val, SYS_TCR_EL12);	break;
	case ESR_EL1:		write_sysreg_s(val, SYS_ESR_EL12);	break;
	case AFSR0_EL1:		write_sysreg_s(val, SYS_AFSR0_EL12);	break;
	case AFSR1_EL1:		write_sysreg_s(val, SYS_AFSR1_EL12);	break;
	case FAR_EL1:		write_sysreg_s(val, SYS_FAR_EL12);	break;
	case MAIR_EL1:		write_sysreg_s(val, SYS_MAIR_EL12);	break;
	case VBAR_EL1:		write_sysreg_s(val, SYS_VBAR_EL12);	break;
	case CONTEXTIDR_EL1:	write_sysreg_s(val, SYS_CONTEXTIDR_EL12);break;
	case TPIDR_EL0:		write_sysreg_s(val, SYS_TPIDR_EL0);	break;
	case TPIDRRO_EL0:	write_sysreg_s(val, SYS_TPIDRRO_EL0);	break;
	case TPIDR_EL1:		write_sysreg_s(val, SYS_TPIDR_EL1);	break;
	case AMAIR_EL1:		write_sysreg_s(val, SYS_AMAIR_EL12);	break;
	case CNTKCTL_EL1:	write_sysreg_s(val, SYS_CNTKCTL_EL12);	break;
	case ELR_EL1:		write_sysreg_s(val, SYS_ELR_EL12);	break;
	case PAR_EL1:		write_sysreg_s(val, SYS_PAR_EL1);	break;
	case DACR32_EL2:	write_sysreg_s(val, SYS_DACR32_EL2);	break;
	case IFSR32_EL2:	write_sysreg_s(val, SYS_IFSR32_EL2);	break;
	case DBGVCR32_EL2:	write_sysreg_s(val, SYS_DBGVCR32_EL2);	break;
	default:		return false;
	}

	return true;
}

struct kvm_vm_stat {
	struct kvm_vm_stat_generic generic;
};

struct kvm_vcpu_stat {
	struct kvm_vcpu_stat_generic generic;
	u64 hvc_exit_stat;
	u64 wfe_exit_stat;
	u64 wfi_exit_stat;
	u64 mmio_exit_user;
	u64 mmio_exit_kernel;
	u64 signal_exits;
	u64 exits;
};

void kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);

unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);

int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events);

int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events);

#define KVM_ARCH_WANT_MMU_NOTIFIER

void kvm_arm_halt_guest(struct kvm *kvm);
void kvm_arm_resume_guest(struct kvm *kvm);

#define vcpu_has_run_once(vcpu)	!!rcu_access_pointer((vcpu)->pid)

#ifndef __KVM_NVHE_HYPERVISOR__
#define kvm_call_hyp_nvhe(f, ...)					\
	({								\
		struct arm_smccc_res res;				\
									\
		arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(f),		\
				  ##__VA_ARGS__, &res);			\
		WARN_ON(res.a0 != SMCCC_RET_SUCCESS);			\
									\
		res.a1;							\
	})

/*
 * The isb() calls below guarantee the same behaviour on VHE as on
 * !VHE, where the eret to EL1 acts as a context synchronization
 * event.
 */
#define kvm_call_hyp(f, ...)						\
	do {								\
		if (has_vhe()) {					\
			f(__VA_ARGS__);					\
			isb();						\
		} else {						\
			kvm_call_hyp_nvhe(f, ##__VA_ARGS__);		\
		}							\
	} while (0)

#define kvm_call_hyp_ret(f, ...)					\
	({								\
		typeof(f(__VA_ARGS__)) ret;				\
									\
		if (has_vhe()) {					\
			ret = f(__VA_ARGS__);				\
			isb();						\
		} else {						\
			ret = kvm_call_hyp_nvhe(f, ##__VA_ARGS__);	\
		}							\
									\
		ret;							\
	})
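
/*
 * Typical call sites, shown as a hedged sketch (the __kvm_* hyp entry
 * points are declared in asm/kvm_asm.h):
 *
 *	kvm_call_hyp(__kvm_flush_vm_context);
 *	ret = kvm_call_hyp_ret(__kvm_vcpu_run, vcpu);
 */
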
#else /* __KVM_NVHE_HYPERVISOR__ */
#define kvm_call_hyp(f, ...) f(__VA_ARGS__)
#define kvm_call_hyp_ret(f, ...) f(__VA_ARGS__)
#define kvm_call_hyp_nvhe(f, ...) f(__VA_ARGS__)
#endif /* __KVM_NVHE_HYPERVISOR__ */

void force_vm_exit(const cpumask_t *mask);

int handle_exit(struct kvm_vcpu *vcpu, int exception_index);
void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index);

int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu);
int kvm_handle_cp14_32(struct kvm_vcpu *vcpu);
int kvm_handle_cp14_64(struct kvm_vcpu *vcpu);
int kvm_handle_cp15_32(struct kvm_vcpu *vcpu);
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu);
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu);

void kvm_reset_sys_regs(struct kvm_vcpu *vcpu);

void kvm_sys_reg_table_init(void);

/* MMIO helpers */
void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);

int kvm_handle_mmio_return(struct kvm_vcpu *vcpu);
int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa);

/*
 * Returns true if a Performance Monitoring Interrupt (PMI), a.k.a. perf event,
 * arrived in guest context.  For arm64, any event that arrives while a vCPU is
 * loaded is considered to be "in guest".
 */
static inline bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu)
{
	return IS_ENABLED(CONFIG_GUEST_PERF_EVENTS) && !!vcpu;
}

long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu);
gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu);
void kvm_update_stolen_time(struct kvm_vcpu *vcpu);

bool kvm_arm_pvtime_supported(void);
int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pvtime_get_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);

extern unsigned int kvm_arm_vmid_bits;
int kvm_arm_vmid_alloc_init(void);
void kvm_arm_vmid_alloc_free(void);
void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid);
void kvm_arm_vmid_clear_active(void);

static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
{
	vcpu_arch->steal.base = GPA_INVALID;
}

static inline bool kvm_arm_is_pvtime_enabled(struct kvm_vcpu_arch *vcpu_arch)
{
	return (vcpu_arch->steal.base != GPA_INVALID);
}

void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome);

struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);

DECLARE_KVM_HYP_PER_CPU(struct kvm_host_data, kvm_host_data);

static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
{
	/* The host's MPIDR is immutable, so let's set it up at boot time */
	ctxt_sys_reg(cpu_ctxt, MPIDR_EL1) = read_cpuid_mpidr();
}

static inline bool kvm_system_needs_idmapped_vectors(void)
{
	return cpus_have_const_cap(ARM64_SPECTRE_V3A);
}

void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu);

static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}

void kvm_arm_init_debug(void);
void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu);
void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);

#define kvm_vcpu_os_lock_enabled(vcpu)		\
	(!!(__vcpu_sys_reg(vcpu, OSLSR_EL1) & SYS_OSLSR_OSLK))

int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);

long kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
				struct kvm_arm_copy_mte_tags *copy_tags);

/* Guest/host FPSIMD coordination helpers */
int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_ctxflush_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu);
void kvm_vcpu_unshare_task_fp(struct kvm_vcpu *vcpu);

static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr)
{
	return (!has_vhe() && attr->exclude_host);
}

/* Flags for host debug state */
void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_put_debug_state_flags(struct kvm_vcpu *vcpu);

#ifdef CONFIG_KVM
void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr);
void kvm_clr_pmu_events(u32 clr);

void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu);
void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
#else
static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {}
static inline void kvm_clr_pmu_events(u32 clr) {}
#endif

void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu);
void kvm_vcpu_put_sysregs_vhe(struct kvm_vcpu *vcpu);

int kvm_set_ipa_limit(void);

#define __KVM_HAVE_ARCH_VM_ALLOC
struct kvm *kvm_arch_alloc_vm(void);

int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type);

static inline bool kvm_vm_is_protected(struct kvm *kvm)
{
	return false;
}

void kvm_init_protected_traps(struct kvm_vcpu *vcpu);

int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature);
bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);

#define kvm_arm_vcpu_sve_finalized(vcpu) \
	((vcpu)->arch.flags & KVM_ARM64_VCPU_SVE_FINALIZED)

#define kvm_has_mte(kvm)					\
	(system_supports_mte() &&				\
	 test_bit(KVM_ARCH_FLAG_MTE_ENABLED, &(kvm)->arch.flags))
#define kvm_vcpu_has_pmu(vcpu)					\
	(test_bit(KVM_ARM_VCPU_PMU_V3, (vcpu)->arch.features))

int kvm_trng_call(struct kvm_vcpu *vcpu);
#ifdef CONFIG_KVM
extern phys_addr_t hyp_mem_base;
extern phys_addr_t hyp_mem_size;
void __init kvm_hyp_reserve(void);
#else
static inline void kvm_hyp_reserve(void) { }
#endif

#endif /* __ARM64_KVM_HOST_H__ */