/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/asm/kvm_host.h:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#ifndef __ARM64_KVM_HOST_H__
#define __ARM64_KVM_HOST_H__

#include <linux/arm-smccc.h>
#include <linux/bitmap.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <linux/kvm_types.h>
#include <linux/maple_tree.h>
#include <linux/percpu.h>
#include <linux/psci.h>
#include <asm/arch_gicv3.h>
#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/daifflags.h>
#include <asm/fpsimd.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>

#define __KVM_HAVE_ARCH_INTC_INITIALIZED

#define KVM_HALT_POLL_NS_DEFAULT 500000

#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>
#include <kvm/arm_pmu.h>

#define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS

#define KVM_VCPU_MAX_FEATURES 7
#define KVM_VCPU_VALID_FEATURES	(BIT(KVM_VCPU_MAX_FEATURES) - 1)
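/*
 * A worked example of the mask above (illustrative only): with
 * KVM_VCPU_MAX_FEATURES == 7, KVM_VCPU_VALID_FEATURES is
 * BIT(7) - 1 == 0x7f, i.e. only feature bits 0..6 are valid.
 */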

#define KVM_REQ_SLEEP \
	KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_IRQ_PENDING	KVM_ARCH_REQ(1)
#define KVM_REQ_VCPU_RESET	KVM_ARCH_REQ(2)
#define KVM_REQ_RECORD_STEAL	KVM_ARCH_REQ(3)
#define KVM_REQ_RELOAD_GICv4	KVM_ARCH_REQ(4)
#define KVM_REQ_RELOAD_PMU	KVM_ARCH_REQ(5)
#define KVM_REQ_SUSPEND		KVM_ARCH_REQ(6)
#define KVM_REQ_RESYNC_PMU_EL0	KVM_ARCH_REQ(7)

#define KVM_DIRTY_LOG_MANUAL_CAPS   (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \
				     KVM_DIRTY_LOG_INITIALLY_SET)

#define KVM_HAVE_MMU_RWLOCK

/*
 * Mode of operation configurable with kvm-arm.mode early param.
 * See Documentation/admin-guide/kernel-parameters.txt for more information.
 */
enum kvm_mode {
	KVM_MODE_DEFAULT,
	KVM_MODE_PROTECTED,
	KVM_MODE_NV,
	KVM_MODE_NONE,
};
#ifdef CONFIG_KVM
enum kvm_mode kvm_get_mode(void);
#else
static inline enum kvm_mode kvm_get_mode(void) { return KVM_MODE_NONE; };
#endif

extern unsigned int __ro_after_init kvm_sve_max_vl;
extern unsigned int __ro_after_init kvm_host_sve_max_vl;
int __init kvm_arm_init_sve(void);

u32 __attribute_const__ kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu);

struct kvm_hyp_memcache {
	phys_addr_t head;
	unsigned long nr_pages;
};

static inline void push_hyp_memcache(struct kvm_hyp_memcache *mc,
				     phys_addr_t *p,
				     phys_addr_t (*to_pa)(void *virt))
{
	*p = mc->head;
	mc->head = to_pa(p);
	mc->nr_pages++;
}

static inline void *pop_hyp_memcache(struct kvm_hyp_memcache *mc,
				     void *(*to_va)(phys_addr_t phys))
{
	phys_addr_t *p = to_va(mc->head);

	if (!mc->nr_pages)
		return NULL;

	mc->head = *p;
	mc->nr_pages--;

	return p;
}

static inline int __topup_hyp_memcache(struct kvm_hyp_memcache *mc,
				       unsigned long min_pages,
				       void *(*alloc_fn)(void *arg),
				       phys_addr_t (*to_pa)(void *virt),
				       void *arg)
{
	while (mc->nr_pages < min_pages) {
		phys_addr_t *p = alloc_fn(arg);

		if (!p)
			return -ENOMEM;
		push_hyp_memcache(mc, p, to_pa);
	}

	return 0;
}

static inline void __free_hyp_memcache(struct kvm_hyp_memcache *mc,
				       void (*free_fn)(void *virt, void *arg),
				       void *(*to_va)(phys_addr_t phys),
				       void *arg)
{
	while (mc->nr_pages)
		free_fn(pop_hyp_memcache(mc, to_va), arg);
}

void free_hyp_memcache(struct kvm_hyp_memcache *mc);
int topup_hyp_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages);
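/*
 * Illustrative sketch, not part of this header: the helpers above keep
 * a LIFO stack of pages, linked through the physical address stored in
 * the first word of each page. 'example_alloc_page' and
 * 'example_virt_to_phys' below are hypothetical stand-ins for a real
 * allocator and VA->PA conversion:
 *
 *	static void *example_alloc_page(void *arg)
 *	{
 *		return (void *)__get_free_page(GFP_KERNEL_ACCOUNT);
 *	}
 *
 *	static phys_addr_t example_virt_to_phys(void *virt)
 *	{
 *		return __pa(virt);
 *	}
 *
 *	struct kvm_hyp_memcache mc = {};
 *
 *	if (__topup_hyp_memcache(&mc, 4, example_alloc_page,
 *				 example_virt_to_phys, NULL))
 *		return -ENOMEM;
 */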

struct kvm_vmid {
	atomic64_t id;
};

struct kvm_s2_mmu {
	struct kvm_vmid vmid;

	/*
	 * stage2 entry level table
	 *
	 * Two kvm_s2_mmu structures in the same VM can point to the same
	 * pgd here.  This happens when running a guest using a
	 * translation regime that isn't affected by its own stage-2
	 * translation, such as a non-VHE hypervisor running at vEL2, or
	 * for vEL1/EL0 with vHCR_EL2.VM == 0.  In that case, we use the
	 * canonical stage-2 page tables.
	 */
	phys_addr_t	pgd_phys;
	struct kvm_pgtable *pgt;

	/* The last vcpu id that ran on each physical CPU */
	int __percpu *last_vcpu_ran;

#define KVM_ARM_EAGER_SPLIT_CHUNK_SIZE_DEFAULT 0
	/*
	 * Memory cache used to split
	 * KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE worth of huge pages. It
	 * is used to allocate stage2 page tables while splitting huge
	 * pages. The choice of KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE
	 * influences both the capacity of the split page cache, and
	 * how often KVM reschedules. Be wary of raising CHUNK_SIZE
	 * too high.
	 *
	 * Protected by kvm->slots_lock.
	 */
	struct kvm_mmu_memory_cache split_page_cache;
	uint64_t split_page_chunk_size;

	struct kvm_arch *arch;
};
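/*
 * Hedged userspace sketch (illustrative; not defined in this header):
 * the chunk size above is typically configured by enabling
 * KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE on the VM file descriptor, with
 * args[0] holding the chunk size in bytes, 0 meaning no eager split:
 *
 *	struct kvm_enable_cap cap = {
 *		.cap = KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE,
 *		.args[0] = 2UL * 1024 * 1024,	(e.g. 2MiB)
 *	};
 *	ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
 */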

struct kvm_arch_memory_slot {
};

/**
 * struct kvm_smccc_features: Descriptor of the hypercall services exposed to the guests
 *
 * @std_bmap: Bitmap of standard secure service calls
 * @std_hyp_bmap: Bitmap of standard hypervisor service calls
 * @vendor_hyp_bmap: Bitmap of vendor specific hypervisor service calls
 */
struct kvm_smccc_features {
	unsigned long std_bmap;
	unsigned long std_hyp_bmap;
	unsigned long vendor_hyp_bmap;
};

typedef unsigned int pkvm_handle_t;

struct kvm_protected_vm {
	pkvm_handle_t handle;
	struct kvm_hyp_memcache teardown_mc;
};

struct kvm_arch {
	struct kvm_s2_mmu mmu;

	/* VTCR_EL2 value for this VM */
	u64    vtcr;

	/* Interrupt controller */
	struct vgic_dist	vgic;

	/* Timers */
	struct arch_timer_vm_data timer_data;

	/* Mandated version of PSCI */
	u32 psci_version;

	/* Protects VM-scoped configuration data */
	struct mutex config_lock;

	/*
	 * If we encounter a data abort without valid instruction syndrome
	 * information, report this to user space.  User space can (and
	 * should) opt in to this feature if KVM_CAP_ARM_NISV_TO_USER is
	 * supported.
	 */
#define KVM_ARCH_FLAG_RETURN_NISV_IO_ABORT_TO_USER	0
	/* Memory Tagging Extension enabled for the guest */
#define KVM_ARCH_FLAG_MTE_ENABLED			1
	/* At least one vCPU has run in the VM */
#define KVM_ARCH_FLAG_HAS_RAN_ONCE			2
	/* The vCPU feature set for the VM is configured */
#define KVM_ARCH_FLAG_VCPU_FEATURES_CONFIGURED		3
	/* PSCI SYSTEM_SUSPEND enabled for the guest */
#define KVM_ARCH_FLAG_SYSTEM_SUSPEND_ENABLED		4
	/* VM counter offset */
#define KVM_ARCH_FLAG_VM_COUNTER_OFFSET			5
	/* Timer PPIs made immutable */
#define KVM_ARCH_FLAG_TIMER_PPIS_IMMUTABLE		6
	/* SMCCC filter initialized for the VM */
#define KVM_ARCH_FLAG_SMCCC_FILTER_CONFIGURED		7
	/* Initial ID reg values loaded */
#define KVM_ARCH_FLAG_ID_REGS_INITIALIZED		8
	unsigned long flags;

	/* VM-wide vCPU feature set */
	DECLARE_BITMAP(vcpu_features, KVM_VCPU_MAX_FEATURES);

	/*
	 * VM-wide PMU filter, implemented as a bitmap and big enough for
	 * up to 2^10 events (ARMv8.0) or 2^16 events (ARMv8.1+).
	 */
	unsigned long *pmu_filter;
	struct arm_pmu *arm_pmu;

	cpumask_var_t supported_cpus;

	/* Hypercall features firmware registers' descriptor */
	struct kvm_smccc_features smccc_feat;
	struct maple_tree smccc_filter;
	/*
	 * Emulated CPU ID registers per VM
	 *
	 * Only ID registers with an encoding of (3, 0, 0, CRm, Op2),
	 * where 1 <= CRm < 8 and 0 <= Op2 < 8, are saved here.
	 *
	 * These emulated idregs are VM-wide, but accessed from the context
	 * of a vCPU. Atomic access to multiple idregs is guarded by
	 * kvm_arch.config_lock.
	 */
#define IDREG_IDX(id)		(((sys_reg_CRm(id) - 1) << 3) | sys_reg_Op2(id))
#define IDREG(kvm, id)		((kvm)->arch.id_regs[IDREG_IDX(id)])
#define KVM_ARM_ID_REG_NUM	(IDREG_IDX(sys_reg(3, 0, 0, 7, 7)) + 1)
	u64 id_regs[KVM_ARM_ID_REG_NUM];

	/*
	 * For an untrusted host VM, 'pkvm.handle' is used to lookup
	 * the associated pKVM instance in the hypervisor.
	 */
	struct kvm_protected_vm pkvm;
};
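/*
 * A worked example of the index math above: ID_AA64PFR0_EL1 is encoded
 * as (3, 0, 0, 4, 0), so IDREG_IDX() yields ((4 - 1) << 3) | 0 == 24
 * and IDREG(kvm, SYS_ID_AA64PFR0_EL1) reads kvm->arch.id_regs[24].
 * The last encoding in range, (3, 0, 0, 7, 7), gives index
 * ((7 - 1) << 3) | 7 == 55, hence KVM_ARM_ID_REG_NUM == 56.
 */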

struct kvm_vcpu_fault_info {
	u64 esr_el2;		/* Hyp Syndrome Register */
	u64 far_el2;		/* Hyp Fault Address Register */
	u64 hpfar_el2;		/* Hyp IPA Fault Address Register */
	u64 disr_el1;		/* Deferred [SError] Status Register */
};

enum vcpu_sysreg {
	__INVALID_SYSREG__,   /* 0 is reserved as an invalid value */
	MPIDR_EL1,	/* MultiProcessor Affinity Register */
	CLIDR_EL1,	/* Cache Level ID Register */
	CSSELR_EL1,	/* Cache Size Selection Register */
	SCTLR_EL1,	/* System Control Register */
	ACTLR_EL1,	/* Auxiliary Control Register */
	CPACR_EL1,	/* Coprocessor Access Control */
	ZCR_EL1,	/* SVE Control */
	TTBR0_EL1,	/* Translation Table Base Register 0 */
	TTBR1_EL1,	/* Translation Table Base Register 1 */
	TCR_EL1,	/* Translation Control Register */
	TCR2_EL1,	/* Extended Translation Control Register */
	ESR_EL1,	/* Exception Syndrome Register */
	AFSR0_EL1,	/* Auxiliary Fault Status Register 0 */
	AFSR1_EL1,	/* Auxiliary Fault Status Register 1 */
	FAR_EL1,	/* Fault Address Register */
	MAIR_EL1,	/* Memory Attribute Indirection Register */
	VBAR_EL1,	/* Vector Base Address Register */
	CONTEXTIDR_EL1,	/* Context ID Register */
	TPIDR_EL0,	/* Thread ID, User R/W */
	TPIDRRO_EL0,	/* Thread ID, User R/O */
	TPIDR_EL1,	/* Thread ID, Privileged */
	AMAIR_EL1,	/* Aux Memory Attribute Indirection Register */
	CNTKCTL_EL1,	/* Timer Control Register (EL1) */
	PAR_EL1,	/* Physical Address Register */
	MDSCR_EL1,	/* Monitor Debug System Control Register */
	MDCCINT_EL1,	/* Monitor Debug Comms Channel Interrupt Enable Reg */
	OSLSR_EL1,	/* OS Lock Status Register */
	DISR_EL1,	/* Deferred Interrupt Status Register */

	/* Performance Monitors Registers */
	PMCR_EL0,	/* Control Register */
	PMSELR_EL0,	/* Event Counter Selection Register */
	PMEVCNTR0_EL0,	/* Event Counter Register (0-30) */
	PMEVCNTR30_EL0 = PMEVCNTR0_EL0 + 30,
	PMCCNTR_EL0,	/* Cycle Counter Register */
	PMEVTYPER0_EL0,	/* Event Type Register (0-30) */
	PMEVTYPER30_EL0 = PMEVTYPER0_EL0 + 30,
	PMCCFILTR_EL0,	/* Cycle Count Filter Register */
	PMCNTENSET_EL0,	/* Count Enable Set Register */
	PMINTENSET_EL1,	/* Interrupt Enable Set Register */
	PMOVSSET_EL0,	/* Overflow Flag Status Set Register */
	PMUSERENR_EL0,	/* User Enable Register */

	/* Pointer Authentication Registers in a strict increasing order. */
	APIAKEYLO_EL1,
	APIAKEYHI_EL1,
	APIBKEYLO_EL1,
	APIBKEYHI_EL1,
	APDAKEYLO_EL1,
	APDAKEYHI_EL1,
	APDBKEYLO_EL1,
	APDBKEYHI_EL1,
	APGAKEYLO_EL1,
	APGAKEYHI_EL1,

	ELR_EL1,
	SP_EL1,
	SPSR_EL1,

	CNTVOFF_EL2,
	CNTV_CVAL_EL0,
	CNTV_CTL_EL0,
	CNTP_CVAL_EL0,
	CNTP_CTL_EL0,

	/* Memory Tagging Extension registers */
	RGSR_EL1,	/* Random Allocation Tag Seed Register */
	GCR_EL1,	/* Tag Control Register */
	TFSR_EL1,	/* Tag Fault Status Register (EL1) */
	TFSRE0_EL1,	/* Tag Fault Status Register (EL0) */

	/* Permission Indirection Extension registers */
	PIR_EL1,	/* Permission Indirection Register 1 (EL1) */
	PIRE0_EL1,	/* Permission Indirection Register 0 (EL1) */

	/* 32bit specific registers. */
	DACR32_EL2,	/* Domain Access Control Register */
	IFSR32_EL2,	/* Instruction Fault Status Register */
	FPEXC32_EL2,	/* Floating-Point Exception Control Register */
	DBGVCR32_EL2,	/* Debug Vector Catch Register */

	/* EL2 registers */
	VPIDR_EL2,	/* Virtualization Processor ID Register */
	VMPIDR_EL2,	/* Virtualization Multiprocessor ID Register */
	SCTLR_EL2,	/* System Control Register (EL2) */
	ACTLR_EL2,	/* Auxiliary Control Register (EL2) */
	HCR_EL2,	/* Hypervisor Configuration Register */
	MDCR_EL2,	/* Monitor Debug Configuration Register (EL2) */
	CPTR_EL2,	/* Architectural Feature Trap Register (EL2) */
	HSTR_EL2,	/* Hypervisor System Trap Register */
	HACR_EL2,	/* Hypervisor Auxiliary Control Register */
	HCRX_EL2,	/* Extended Hypervisor Configuration Register */
	TTBR0_EL2,	/* Translation Table Base Register 0 (EL2) */
	TTBR1_EL2,	/* Translation Table Base Register 1 (EL2) */
	TCR_EL2,	/* Translation Control Register (EL2) */
	VTTBR_EL2,	/* Virtualization Translation Table Base Register */
	VTCR_EL2,	/* Virtualization Translation Control Register */
	SPSR_EL2,	/* EL2 saved program status register */
	ELR_EL2,	/* EL2 exception link register */
	AFSR0_EL2,	/* Auxiliary Fault Status Register 0 (EL2) */
	AFSR1_EL2,	/* Auxiliary Fault Status Register 1 (EL2) */
	ESR_EL2,	/* Exception Syndrome Register (EL2) */
	FAR_EL2,	/* Fault Address Register (EL2) */
	HPFAR_EL2,	/* Hypervisor IPA Fault Address Register */
	MAIR_EL2,	/* Memory Attribute Indirection Register (EL2) */
	AMAIR_EL2,	/* Auxiliary Memory Attribute Indirection Register (EL2) */
	VBAR_EL2,	/* Vector Base Address Register (EL2) */
	RVBAR_EL2,	/* Reset Vector Base Address Register */
	CONTEXTIDR_EL2,	/* Context ID Register (EL2) */
	TPIDR_EL2,	/* EL2 Software Thread ID Register */
	CNTHCTL_EL2,	/* Counter-timer Hypervisor Control register */
	SP_EL2,		/* EL2 Stack Pointer */
	HFGRTR_EL2,
	HFGWTR_EL2,
	HFGITR_EL2,
	HDFGRTR_EL2,
	HDFGWTR_EL2,
	CNTHP_CTL_EL2,
	CNTHP_CVAL_EL2,
	CNTHV_CTL_EL2,
	CNTHV_CVAL_EL2,

	NR_SYS_REGS	/* Nothing after this line! */
};

struct kvm_cpu_context {
	struct user_pt_regs regs;	/* sp = sp_el0 */

	u64	spsr_abt;
	u64	spsr_und;
	u64	spsr_irq;
	u64	spsr_fiq;

	struct user_fpsimd_state fp_regs;

	u64 sys_regs[NR_SYS_REGS];

	struct kvm_vcpu *__hyp_running_vcpu;
};

struct kvm_host_data {
	struct kvm_cpu_context host_ctxt;
};

struct kvm_host_psci_config {
	/* PSCI version used by host. */
	u32 version;
	u32 smccc_version;

	/* Function IDs used by host if version is v0.1. */
	struct psci_0_1_function_ids function_ids_0_1;

	bool psci_0_1_cpu_suspend_implemented;
	bool psci_0_1_cpu_on_implemented;
	bool psci_0_1_cpu_off_implemented;
	bool psci_0_1_migrate_implemented;
};

extern struct kvm_host_psci_config kvm_nvhe_sym(kvm_host_psci_config);
#define kvm_host_psci_config CHOOSE_NVHE_SYM(kvm_host_psci_config)

extern s64 kvm_nvhe_sym(hyp_physvirt_offset);
#define hyp_physvirt_offset CHOOSE_NVHE_SYM(hyp_physvirt_offset)

extern u64 kvm_nvhe_sym(hyp_cpu_logical_map)[NR_CPUS];
#define hyp_cpu_logical_map CHOOSE_NVHE_SYM(hyp_cpu_logical_map)

struct vcpu_reset_state {
	unsigned long	pc;
	unsigned long	r0;
	bool		be;
	bool		reset;
};

struct kvm_vcpu_arch {
	struct kvm_cpu_context ctxt;

	/*
	 * Guest floating point state
	 *
	 * The architecture has two main floating point extensions,
	 * the original FPSIMD and SVE.  These have overlapping
	 * register views, with the FPSIMD V registers occupying the
	 * low 128 bits of the SVE Z registers.  When the core
	 * floating point code saves the register state of a task it
	 * records which view it saved in fp_type.
	 */
	void *sve_state;
	enum fp_type fp_type;
	unsigned int sve_max_vl;
	u64 svcr;

	/* Stage 2 paging state used by the hardware on next switch */
	struct kvm_s2_mmu *hw_mmu;

	/* Values of trap registers for the guest. */
	u64 hcr_el2;
	u64 mdcr_el2;

	/* Values of trap registers for the host before guest entry. */
	u64 mdcr_el2_host;

	/* Exception Information */
	struct kvm_vcpu_fault_info fault;

	/* Ownership of the FP regs */
	enum {
		FP_STATE_FREE,
		FP_STATE_HOST_OWNED,
		FP_STATE_GUEST_OWNED,
	} fp_state;

	/* Configuration flags, set once and for all before the vcpu can run */
	u8 cflags;

	/* Input flags to the hypervisor code, potentially cleared after use */
	u8 iflags;

	/* State flags for kernel bookkeeping, unused by the hypervisor code */
	u8 sflags;

	/*
	 * Don't run the guest (internal implementation need).
	 *
	 * Contrary to the flags above, this is set/cleared outside of
	 * a vcpu context, and thus cannot be mixed with the flags
	 * themselves (or the flag accesses need to be made atomic).
	 */
	bool pause;

	/*
	 * We maintain more than a single set of debug registers to support
	 * debugging the guest from the host and to maintain separate host and
	 * guest state during world switches. vcpu_debug_state are the debug
	 * registers of the vcpu as the guest sees them.  host_debug_state are
	 * the host registers which are saved and restored during
	 * world switches. external_debug_state contains the debug values
	 * with which we want to debug the guest; it is set via the
	 * KVM_SET_GUEST_DEBUG ioctl.
	 *
	 * debug_ptr points to the set of debug registers that should be loaded
	 * onto the hardware when running the guest.
	 */
	struct kvm_guest_debug_arch *debug_ptr;
	struct kvm_guest_debug_arch vcpu_debug_state;
	struct kvm_guest_debug_arch external_debug_state;

	struct task_struct *parent_task;

	struct {
		/* {Break,watch}point registers */
		struct kvm_guest_debug_arch regs;
		/* Statistical profiling extension */
		u64 pmscr_el1;
		/* Self-hosted trace */
		u64 trfcr_el1;
	} host_debug_state;

	/* VGIC state */
	struct vgic_cpu vgic_cpu;
	struct arch_timer_cpu timer_cpu;
	struct kvm_pmu pmu;

	/*
	 * Guest registers we preserve during guest debugging.
	 *
	 * These shadow registers are updated by the kvm_handle_sys_reg
	 * trap handler if the guest accesses or updates them while we
	 * are using guest debug.
	 */
	struct {
		u32	mdscr_el1;
		bool	pstate_ss;
	} guest_debug_preserved;

	/* vcpu power state */
	struct kvm_mp_state mp_state;
	spinlock_t mp_state_lock;

	/* Cache some mmu pages needed inside spinlock regions */
	struct kvm_mmu_memory_cache mmu_page_cache;

	/* feature flags */
	DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);

	/* Virtual SError ESR to restore when HCR_EL2.VSE is set */
	u64 vsesr_el2;

	/* Additional reset state */
	struct vcpu_reset_state	reset_state;

	/* Guest PV state */
	struct {
		u64 last_steal;
		gpa_t base;
	} steal;

	/* Per-vcpu CCSIDR override or NULL */
	u32 *ccsidr;
};

/*
 * Each 'flag' is composed of a comma-separated triplet:
 *
 * - the flag-set it belongs to in the vcpu->arch structure
 * - the value for that flag
 * - the mask for that flag
 *
 * __vcpu_single_flag() builds such a triplet for a single-bit flag.
 * unpack_vcpu_flag() extracts the flag value from the triplet for
 * direct use outside of the flag accessors.
 */
#define __vcpu_single_flag(_set, _f)	_set, (_f), (_f)

#define __unpack_flag(_set, _f, _m)	_f
#define unpack_vcpu_flag(...)		__unpack_flag(__VA_ARGS__)

#define __build_check_flag(v, flagset, f, m)			\
	do {							\
		typeof(v->arch.flagset) *_fset;			\
								\
		/* Check that the flags fit in the mask */	\
		BUILD_BUG_ON(HWEIGHT(m) != HWEIGHT((f) | (m)));	\
		/* Check that the flags fit in the type */	\
		BUILD_BUG_ON((sizeof(*_fset) * 8) <= __fls(m));	\
	} while (0)

#define __vcpu_get_flag(v, flagset, f, m)			\
	({							\
		__build_check_flag(v, flagset, f, m);		\
								\
		READ_ONCE(v->arch.flagset) & (m);		\
	})

/*
 * Note that the set/clear accessors must be preempt-safe in order to
 * avoid nesting them with load/put which also manipulate flags...
 */
#ifdef __KVM_NVHE_HYPERVISOR__
/* the nVHE hypervisor is always non-preemptible */
#define __vcpu_flags_preempt_disable()
#define __vcpu_flags_preempt_enable()
#else
#define __vcpu_flags_preempt_disable()	preempt_disable()
#define __vcpu_flags_preempt_enable()	preempt_enable()
#endif

#define __vcpu_set_flag(v, flagset, f, m)			\
	do {							\
		typeof(v->arch.flagset) *fset;			\
								\
		__build_check_flag(v, flagset, f, m);		\
								\
		fset = &v->arch.flagset;			\
		__vcpu_flags_preempt_disable();			\
		if (HWEIGHT(m) > 1)				\
			*fset &= ~(m);				\
		*fset |= (f);					\
		__vcpu_flags_preempt_enable();			\
	} while (0)

#define __vcpu_clear_flag(v, flagset, f, m)			\
	do {							\
		typeof(v->arch.flagset) *fset;			\
								\
		__build_check_flag(v, flagset, f, m);		\
								\
		fset = &v->arch.flagset;			\
		__vcpu_flags_preempt_disable();			\
		*fset &= ~(m);					\
		__vcpu_flags_preempt_enable();			\
	} while (0)

#define vcpu_get_flag(v, ...)	__vcpu_get_flag((v), __VA_ARGS__)
#define vcpu_set_flag(v, ...)	__vcpu_set_flag((v), __VA_ARGS__)
#define vcpu_clear_flag(v, ...)	__vcpu_clear_flag((v), __VA_ARGS__)
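/*
 * Illustrative expansion (not new API): with IN_WFIT defined below as
 * __vcpu_single_flag(sflags, BIT(3)), the call
 *
 *	vcpu_set_flag(vcpu, IN_WFIT);
 *
 * becomes __vcpu_set_flag((vcpu), sflags, BIT(3), BIT(3)): after the
 * build-time checks, it ORs BIT(3) into vcpu->arch.sflags with
 * preemption disabled (the mask-clear step is skipped for single-bit
 * flags, since HWEIGHT(m) == 1).
 */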

/* SVE exposed to guest */
#define GUEST_HAS_SVE		__vcpu_single_flag(cflags, BIT(0))
/* SVE config completed */
#define VCPU_SVE_FINALIZED	__vcpu_single_flag(cflags, BIT(1))
/* PTRAUTH exposed to guest */
#define GUEST_HAS_PTRAUTH	__vcpu_single_flag(cflags, BIT(2))
/* KVM_ARM_VCPU_INIT completed */
#define VCPU_INITIALIZED	__vcpu_single_flag(cflags, BIT(3))

/* Exception pending */
#define PENDING_EXCEPTION	__vcpu_single_flag(iflags, BIT(0))
/*
 * PC increment. Overlaps with EXCEPT_MASK on purpose so that it can't
 * be set together with an exception...
 */
#define INCREMENT_PC		__vcpu_single_flag(iflags, BIT(1))
/* Target EL/MODE (not a single flag, but let's abuse the macro) */
#define EXCEPT_MASK		__vcpu_single_flag(iflags, GENMASK(3, 1))

/* Helpers to encode exceptions with minimum fuss */
#define __EXCEPT_MASK_VAL	unpack_vcpu_flag(EXCEPT_MASK)
#define __EXCEPT_SHIFT		__builtin_ctzl(__EXCEPT_MASK_VAL)
#define __vcpu_except_flags(_f)	iflags, (_f << __EXCEPT_SHIFT), __EXCEPT_MASK_VAL

/*
 * When PENDING_EXCEPTION is set, EXCEPT_MASK can take the following
 * values:
 *
 * For AArch32 EL1:
 */
#define EXCEPT_AA32_UND		__vcpu_except_flags(0)
#define EXCEPT_AA32_IABT	__vcpu_except_flags(1)
#define EXCEPT_AA32_DABT	__vcpu_except_flags(2)
/* For AArch64: */
#define EXCEPT_AA64_EL1_SYNC	__vcpu_except_flags(0)
#define EXCEPT_AA64_EL1_IRQ	__vcpu_except_flags(1)
#define EXCEPT_AA64_EL1_FIQ	__vcpu_except_flags(2)
#define EXCEPT_AA64_EL1_SERR	__vcpu_except_flags(3)
/* For AArch64 with NV: */
#define EXCEPT_AA64_EL2_SYNC	__vcpu_except_flags(4)
#define EXCEPT_AA64_EL2_IRQ	__vcpu_except_flags(5)
#define EXCEPT_AA64_EL2_FIQ	__vcpu_except_flags(6)
#define EXCEPT_AA64_EL2_SERR	__vcpu_except_flags(7)
/* Guest debug is live */
#define DEBUG_DIRTY		__vcpu_single_flag(iflags, BIT(4))
/* Save SPE context if active  */
#define DEBUG_STATE_SAVE_SPE	__vcpu_single_flag(iflags, BIT(5))
/* Save TRBE context if active  */
#define DEBUG_STATE_SAVE_TRBE	__vcpu_single_flag(iflags, BIT(6))
/* vcpu running in HYP context */
#define VCPU_HYP_CONTEXT	__vcpu_single_flag(iflags, BIT(7))

/* Physical CPU not in supported_cpus */
#define ON_UNSUPPORTED_CPU	__vcpu_single_flag(sflags, BIT(2))
/* WFIT instruction trapped */
#define IN_WFIT			__vcpu_single_flag(sflags, BIT(3))
/* vcpu system registers loaded on physical CPU */
#define SYSREGS_ON_CPU		__vcpu_single_flag(sflags, BIT(4))
/* Software step state is Active-pending */
#define DBG_SS_ACTIVE_PENDING	__vcpu_single_flag(sflags, BIT(5))
/* PMUSERENR for the guest EL0 is on physical CPU */
#define PMUSERENR_ON_CPU	__vcpu_single_flag(sflags, BIT(6))
/* WFI instruction trapped */
#define IN_WFI			__vcpu_single_flag(sflags, BIT(7))


/* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
#define vcpu_sve_pffr(vcpu) (kern_hyp_va((vcpu)->arch.sve_state) +	\
			     sve_ffr_offset((vcpu)->arch.sve_max_vl))

#define vcpu_sve_max_vq(vcpu)	sve_vq_from_vl((vcpu)->arch.sve_max_vl)

#define vcpu_sve_state_size(vcpu) ({					\
	size_t __size_ret;						\
	unsigned int __vcpu_vq;						\
									\
	if (WARN_ON(!sve_vl_valid((vcpu)->arch.sve_max_vl))) {		\
		__size_ret = 0;						\
	} else {							\
		__vcpu_vq = vcpu_sve_max_vq(vcpu);			\
		__size_ret = SVE_SIG_REGS_SIZE(__vcpu_vq);		\
	}								\
									\
	__size_ret;							\
})
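/*
 * A worked sizing example (illustrative; the SVE_SIG_* constants come
 * from the SVE sigcontext layout and are assumed rather than restated
 * here): sve_max_vl is a vector length in bytes and one "VQ" is one
 * 128-bit (16-byte) quadword, so a 512-bit vector length gives
 *
 *	vcpu_sve_max_vq(vcpu) == sve_vq_from_vl(64) == 64 / 16 == 4
 *
 * and vcpu_sve_state_size() is then SVE_SIG_REGS_SIZE(4), the storage
 * needed for the Z, P and FFR registers at 4 quadwords per vector.
 */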

#define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE | \
				 KVM_GUESTDBG_USE_SW_BP | \
				 KVM_GUESTDBG_USE_HW | \
				 KVM_GUESTDBG_SINGLESTEP)

#define vcpu_has_sve(vcpu) (system_supports_sve() &&			\
			    vcpu_get_flag(vcpu, GUEST_HAS_SVE))

#ifdef CONFIG_ARM64_PTR_AUTH
#define vcpu_has_ptrauth(vcpu)						\
	((cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH) ||		\
	  cpus_have_final_cap(ARM64_HAS_GENERIC_AUTH)) &&		\
	  vcpu_get_flag(vcpu, GUEST_HAS_PTRAUTH))
#else
#define vcpu_has_ptrauth(vcpu)		false
#endif

#define vcpu_on_unsupported_cpu(vcpu)					\
	vcpu_get_flag(vcpu, ON_UNSUPPORTED_CPU)

#define vcpu_set_on_unsupported_cpu(vcpu)				\
	vcpu_set_flag(vcpu, ON_UNSUPPORTED_CPU)

#define vcpu_clear_on_unsupported_cpu(vcpu)				\
	vcpu_clear_flag(vcpu, ON_UNSUPPORTED_CPU)

#define vcpu_gp_regs(v)		(&(v)->arch.ctxt.regs)

/*
 * Only use __vcpu_sys_reg/ctxt_sys_reg if you know you want the
 * memory backed version of a register, and not the one most recently
 * accessed by a running VCPU.  For example, for userspace access or
 * for system registers that are never context switched, but only
 * emulated.
 */
#define __ctxt_sys_reg(c,r)	(&(c)->sys_regs[(r)])

#define ctxt_sys_reg(c,r)	(*__ctxt_sys_reg(c,r))

#define __vcpu_sys_reg(v,r)	(ctxt_sys_reg(&(v)->arch.ctxt, (r)))

u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg);
void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg);
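/*
 * Illustrative contrast (not a new accessor): on a VHE host while the
 * vcpu's sysregs are loaded on the CPU,
 *
 *	u64 mem  = __vcpu_sys_reg(vcpu, SCTLR_EL1);
 *	u64 live = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
 *
 * can differ: the first always returns the memory-backed copy (which
 * may be stale), while the second may read the live SCTLR_EL12 value
 * via __vcpu_read_sys_reg_from_cpu() below.
 */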

static inline bool __vcpu_read_sys_reg_from_cpu(int reg, u64 *val)
{
	/*
	 * *** VHE ONLY ***
	 *
	 * System registers listed in the switch are not saved on every
	 * exit from the guest but are only saved on vcpu_put.
	 *
	 * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
	 * should never be listed below, because the guest cannot modify its
	 * own MPIDR_EL1 and MPIDR_EL1 is accessed for VCPU A from VCPU B's
	 * thread when emulating cross-VCPU communication.
	 */
	if (!has_vhe())
		return false;

	switch (reg) {
	case SCTLR_EL1:		*val = read_sysreg_s(SYS_SCTLR_EL12);	break;
	case CPACR_EL1:		*val = read_sysreg_s(SYS_CPACR_EL12);	break;
	case TTBR0_EL1:		*val = read_sysreg_s(SYS_TTBR0_EL12);	break;
	case TTBR1_EL1:		*val = read_sysreg_s(SYS_TTBR1_EL12);	break;
	case TCR_EL1:		*val = read_sysreg_s(SYS_TCR_EL12);	break;
	case ESR_EL1:		*val = read_sysreg_s(SYS_ESR_EL12);	break;
	case AFSR0_EL1:		*val = read_sysreg_s(SYS_AFSR0_EL12);	break;
	case AFSR1_EL1:		*val = read_sysreg_s(SYS_AFSR1_EL12);	break;
	case FAR_EL1:		*val = read_sysreg_s(SYS_FAR_EL12);	break;
	case MAIR_EL1:		*val = read_sysreg_s(SYS_MAIR_EL12);	break;
	case VBAR_EL1:		*val = read_sysreg_s(SYS_VBAR_EL12);	break;
	case CONTEXTIDR_EL1:	*val = read_sysreg_s(SYS_CONTEXTIDR_EL12);break;
	case TPIDR_EL0:		*val = read_sysreg_s(SYS_TPIDR_EL0);	break;
	case TPIDRRO_EL0:	*val = read_sysreg_s(SYS_TPIDRRO_EL0);	break;
	case TPIDR_EL1:		*val = read_sysreg_s(SYS_TPIDR_EL1);	break;
	case AMAIR_EL1:		*val = read_sysreg_s(SYS_AMAIR_EL12);	break;
	case CNTKCTL_EL1:	*val = read_sysreg_s(SYS_CNTKCTL_EL12);	break;
	case ELR_EL1:		*val = read_sysreg_s(SYS_ELR_EL12);	break;
	case PAR_EL1:		*val = read_sysreg_par();		break;
	case DACR32_EL2:	*val = read_sysreg_s(SYS_DACR32_EL2);	break;
	case IFSR32_EL2:	*val = read_sysreg_s(SYS_IFSR32_EL2);	break;
	case DBGVCR32_EL2:	*val = read_sysreg_s(SYS_DBGVCR32_EL2);	break;
	default:		return false;
	}

	return true;
}

static inline bool __vcpu_write_sys_reg_to_cpu(u64 val, int reg)
{
	/*
	 * *** VHE ONLY ***
	 *
	 * System registers listed in the switch are not restored on every
	 * entry to the guest but are only restored on vcpu_load.
	 *
	 * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
	 * should never be listed below, because the MPIDR should only be set
	 * once, before running the VCPU, and never changed later.
	 */
	if (!has_vhe())
		return false;

	switch (reg) {
	case SCTLR_EL1:		write_sysreg_s(val, SYS_SCTLR_EL12);	break;
	case CPACR_EL1:		write_sysreg_s(val, SYS_CPACR_EL12);	break;
	case TTBR0_EL1:		write_sysreg_s(val, SYS_TTBR0_EL12);	break;
	case TTBR1_EL1:		write_sysreg_s(val, SYS_TTBR1_EL12);	break;
	case TCR_EL1:		write_sysreg_s(val, SYS_TCR_EL12);	break;
	case ESR_EL1:		write_sysreg_s(val, SYS_ESR_EL12);	break;
	case AFSR0_EL1:		write_sysreg_s(val, SYS_AFSR0_EL12);	break;
	case AFSR1_EL1:		write_sysreg_s(val, SYS_AFSR1_EL12);	break;
	case FAR_EL1:		write_sysreg_s(val, SYS_FAR_EL12);	break;
	case MAIR_EL1:		write_sysreg_s(val, SYS_MAIR_EL12);	break;
	case VBAR_EL1:		write_sysreg_s(val, SYS_VBAR_EL12);	break;
	case CONTEXTIDR_EL1:	write_sysreg_s(val, SYS_CONTEXTIDR_EL12);break;
	case TPIDR_EL0:		write_sysreg_s(val, SYS_TPIDR_EL0);	break;
	case TPIDRRO_EL0:	write_sysreg_s(val, SYS_TPIDRRO_EL0);	break;
	case TPIDR_EL1:		write_sysreg_s(val, SYS_TPIDR_EL1);	break;
	case AMAIR_EL1:		write_sysreg_s(val, SYS_AMAIR_EL12);	break;
	case CNTKCTL_EL1:	write_sysreg_s(val, SYS_CNTKCTL_EL12);	break;
	case ELR_EL1:		write_sysreg_s(val, SYS_ELR_EL12);	break;
	case PAR_EL1:		write_sysreg_s(val, SYS_PAR_EL1);	break;
	case DACR32_EL2:	write_sysreg_s(val, SYS_DACR32_EL2);	break;
	case IFSR32_EL2:	write_sysreg_s(val, SYS_IFSR32_EL2);	break;
	case DBGVCR32_EL2:	write_sysreg_s(val, SYS_DBGVCR32_EL2);	break;
	default:		return false;
	}

	return true;
}

struct kvm_vm_stat {
	struct kvm_vm_stat_generic generic;
};

struct kvm_vcpu_stat {
	struct kvm_vcpu_stat_generic generic;
	u64 hvc_exit_stat;
	u64 wfe_exit_stat;
	u64 wfi_exit_stat;
	u64 mmio_exit_user;
	u64 mmio_exit_kernel;
	u64 signal_exits;
	u64 exits;
};

unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);

unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);

int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events);

int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events);

#define KVM_ARCH_WANT_MMU_NOTIFIER

void kvm_arm_halt_guest(struct kvm *kvm);
void kvm_arm_resume_guest(struct kvm *kvm);

#define vcpu_has_run_once(vcpu)	!!rcu_access_pointer((vcpu)->pid)

#ifndef __KVM_NVHE_HYPERVISOR__
#define kvm_call_hyp_nvhe(f, ...)						\
	({								\
		struct arm_smccc_res res;				\
									\
		arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(f),		\
				  ##__VA_ARGS__, &res);			\
		WARN_ON(res.a0 != SMCCC_RET_SUCCESS);			\
									\
		res.a1;							\
	})

/*
 * The isb() instructions below guarantee the same behaviour on VHE as
 * on !VHE, where the eret to EL1 acts as a context synchronization
 * event.
 */
#define kvm_call_hyp(f, ...)						\
	do {								\
		if (has_vhe()) {					\
			f(__VA_ARGS__);					\
			isb();						\
		} else {						\
			kvm_call_hyp_nvhe(f, ##__VA_ARGS__);		\
		}							\
	} while (0)

#define kvm_call_hyp_ret(f, ...)					\
	({								\
		typeof(f(__VA_ARGS__)) ret;				\
									\
		if (has_vhe()) {					\
			ret = f(__VA_ARGS__);				\
			isb();						\
		} else {						\
			ret = kvm_call_hyp_nvhe(f, ##__VA_ARGS__);	\
		}							\
									\
		ret;							\
	})
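/*
 * Typical uses elsewhere in KVM/arm64, shown for illustration only: a
 * void hypercall goes through kvm_call_hyp(), a value-returning one
 * through kvm_call_hyp_ret():
 *
 *	kvm_call_hyp(__kvm_flush_vm_context);
 *	ret = kvm_call_hyp_ret(__kvm_vcpu_run, vcpu);
 *
 * On VHE these are plain function calls followed by an isb(); on nVHE
 * they become HVCs with the function ID derived via KVM_HOST_SMCCC_FUNC().
 */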
#else /* __KVM_NVHE_HYPERVISOR__ */
#define kvm_call_hyp(f, ...) f(__VA_ARGS__)
#define kvm_call_hyp_ret(f, ...) f(__VA_ARGS__)
#define kvm_call_hyp_nvhe(f, ...) f(__VA_ARGS__)
#endif /* __KVM_NVHE_HYPERVISOR__ */

int handle_exit(struct kvm_vcpu *vcpu, int exception_index);
void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index);

int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu);
int kvm_handle_cp14_32(struct kvm_vcpu *vcpu);
int kvm_handle_cp14_64(struct kvm_vcpu *vcpu);
int kvm_handle_cp15_32(struct kvm_vcpu *vcpu);
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu);
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu);
int kvm_handle_cp10_id(struct kvm_vcpu *vcpu);

void kvm_reset_sys_regs(struct kvm_vcpu *vcpu);

int __init kvm_sys_reg_table_init(void);
int __init populate_nv_trap_config(void);

bool lock_all_vcpus(struct kvm *kvm);
void unlock_all_vcpus(struct kvm *kvm);

/* MMIO helpers */
void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);

int kvm_handle_mmio_return(struct kvm_vcpu *vcpu);
int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa);

/*
 * Returns true if a Performance Monitoring Interrupt (PMI), a.k.a. perf event,
 * arrived in guest context.  For arm64, any event that arrives while a vCPU is
 * loaded is considered to be "in guest".
 */
static inline bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu)
{
	return IS_ENABLED(CONFIG_GUEST_PERF_EVENTS) && !!vcpu;
}

long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu);
gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu);
void kvm_update_stolen_time(struct kvm_vcpu *vcpu);

bool kvm_arm_pvtime_supported(void);
int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pvtime_get_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);

extern unsigned int __ro_after_init kvm_arm_vmid_bits;
int __init kvm_arm_vmid_alloc_init(void);
void __init kvm_arm_vmid_alloc_free(void);
void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid);
void kvm_arm_vmid_clear_active(void);

static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
{
	vcpu_arch->steal.base = INVALID_GPA;
}

static inline bool kvm_arm_is_pvtime_enabled(struct kvm_vcpu_arch *vcpu_arch)
{
	return (vcpu_arch->steal.base != INVALID_GPA);
}

void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome);

struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);

DECLARE_KVM_HYP_PER_CPU(struct kvm_host_data, kvm_host_data);

static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
{
	/* The host's MPIDR is immutable, so let's set it up at boot time */
	ctxt_sys_reg(cpu_ctxt, MPIDR_EL1) = read_cpuid_mpidr();
}

static inline bool kvm_system_needs_idmapped_vectors(void)
{
	return cpus_have_const_cap(ARM64_SPECTRE_V3A);
}

static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}

void kvm_arm_init_debug(void);
void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu);
void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);

#define kvm_vcpu_os_lock_enabled(vcpu)		\
	(!!(__vcpu_sys_reg(vcpu, OSLSR_EL1) & OSLSR_EL1_OSLK))

int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);

int kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
			       struct kvm_arm_copy_mte_tags *copy_tags);
int kvm_vm_ioctl_set_counter_offset(struct kvm *kvm,
				    struct kvm_arm_counter_offset *offset);

/* Guest/host FPSIMD coordination helpers */
int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_ctxflush_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu);
void kvm_vcpu_unshare_task_fp(struct kvm_vcpu *vcpu);

static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr)
{
	return (!has_vhe() && attr->exclude_host);
}

/* Flags for host debug state */
void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_put_debug_state_flags(struct kvm_vcpu *vcpu);

#ifdef CONFIG_KVM
void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr);
void kvm_clr_pmu_events(u32 clr);
bool kvm_set_pmuserenr(u64 val);
#else
static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {}
static inline void kvm_clr_pmu_events(u32 clr) {}
static inline bool kvm_set_pmuserenr(u64 val)
{
	return false;
}
#endif

void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu);
void kvm_vcpu_put_sysregs_vhe(struct kvm_vcpu *vcpu);

int __init kvm_set_ipa_limit(void);

#define __KVM_HAVE_ARCH_VM_ALLOC
struct kvm *kvm_arch_alloc_vm(void);

#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS

#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS_RANGE

static inline bool kvm_vm_is_protected(struct kvm *kvm)
{
	return false;
}

int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature);
bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);

#define kvm_arm_vcpu_sve_finalized(vcpu) vcpu_get_flag(vcpu, VCPU_SVE_FINALIZED)

#define kvm_has_mte(kvm)					\
	(system_supports_mte() &&				\
	 test_bit(KVM_ARCH_FLAG_MTE_ENABLED, &(kvm)->arch.flags))

#define kvm_supports_32bit_el0()				\
	(system_supports_32bit_el0() &&				\
	 !static_branch_unlikely(&arm64_mismatched_32bit_el0))

#define kvm_vm_has_ran_once(kvm)					\
	(test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &(kvm)->arch.flags))

int kvm_trng_call(struct kvm_vcpu *vcpu);
#ifdef CONFIG_KVM
extern phys_addr_t hyp_mem_base;
extern phys_addr_t hyp_mem_size;
void __init kvm_hyp_reserve(void);
#else
static inline void kvm_hyp_reserve(void) { }
#endif

void kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu);
bool kvm_arm_vcpu_stopped(struct kvm_vcpu *vcpu);

#endif /* __ARM64_KVM_HOST_H__ */