/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/asm/kvm_host.h:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#ifndef __ARM64_KVM_HOST_H__
#define __ARM64_KVM_HOST_H__

#include <linux/arm-smccc.h>
#include <linux/bitmap.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <linux/kvm_types.h>
#include <linux/maple_tree.h>
#include <linux/percpu.h>
#include <linux/psci.h>
#include <asm/arch_gicv3.h>
#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/daifflags.h>
#include <asm/fpsimd.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>

#define __KVM_HAVE_ARCH_INTC_INITIALIZED

#define KVM_HALT_POLL_NS_DEFAULT 500000

#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>
#include <kvm/arm_pmu.h>

#define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS

#define KVM_VCPU_MAX_FEATURES 7

#define KVM_REQ_SLEEP \
	KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_IRQ_PENDING	KVM_ARCH_REQ(1)
#define KVM_REQ_VCPU_RESET	KVM_ARCH_REQ(2)
#define KVM_REQ_RECORD_STEAL	KVM_ARCH_REQ(3)
#define KVM_REQ_RELOAD_GICv4	KVM_ARCH_REQ(4)
#define KVM_REQ_RELOAD_PMU	KVM_ARCH_REQ(5)
#define KVM_REQ_SUSPEND		KVM_ARCH_REQ(6)
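
/*
 * These requests are raised and consumed with the generic request
 * helpers from <linux/kvm_host.h>. An illustrative sketch (the handler
 * name is hypothetical):
 *
 *	kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
 *	...
 *	if (kvm_check_request(KVM_REQ_IRQ_PENDING, vcpu))
 *		handle_pending_irq(vcpu);	// hypothetical consumer
 */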

#define KVM_DIRTY_LOG_MANUAL_CAPS   (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \
				     KVM_DIRTY_LOG_INITIALLY_SET)

#define KVM_HAVE_MMU_RWLOCK

/*
 * Mode of operation configurable with kvm-arm.mode early param.
 * See Documentation/admin-guide/kernel-parameters.txt for more information.
 */
enum kvm_mode {
	KVM_MODE_DEFAULT,
	KVM_MODE_PROTECTED,
	KVM_MODE_NV,
	KVM_MODE_NONE,
};
#ifdef CONFIG_KVM
enum kvm_mode kvm_get_mode(void);
#else
static inline enum kvm_mode kvm_get_mode(void) { return KVM_MODE_NONE; }
#endif

DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);

extern unsigned int __ro_after_init kvm_sve_max_vl;
int __init kvm_arm_init_sve(void);

u32 __attribute_const__ kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu);

struct kvm_hyp_memcache {
	phys_addr_t head;
	unsigned long nr_pages;
};

static inline void push_hyp_memcache(struct kvm_hyp_memcache *mc,
				     phys_addr_t *p,
				     phys_addr_t (*to_pa)(void *virt))
{
	*p = mc->head;
	mc->head = to_pa(p);
	mc->nr_pages++;
}

static inline void *pop_hyp_memcache(struct kvm_hyp_memcache *mc,
				     void *(*to_va)(phys_addr_t phys))
{
	phys_addr_t *p = to_va(mc->head);

	if (!mc->nr_pages)
		return NULL;

	mc->head = *p;
	mc->nr_pages--;

	return p;
}

static inline int __topup_hyp_memcache(struct kvm_hyp_memcache *mc,
				       unsigned long min_pages,
				       void *(*alloc_fn)(void *arg),
				       phys_addr_t (*to_pa)(void *virt),
				       void *arg)
{
	while (mc->nr_pages < min_pages) {
		phys_addr_t *p = alloc_fn(arg);

		if (!p)
			return -ENOMEM;
		push_hyp_memcache(mc, p, to_pa);
	}

	return 0;
}

static inline void __free_hyp_memcache(struct kvm_hyp_memcache *mc,
				       void (*free_fn)(void *virt, void *arg),
				       void *(*to_va)(phys_addr_t phys),
				       void *arg)
{
	while (mc->nr_pages)
		free_fn(pop_hyp_memcache(mc, to_va), arg);
}

void free_hyp_memcache(struct kvm_hyp_memcache *mc);
int topup_hyp_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages);
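
/*
 * Illustrative sketch of the memcache contract (not a verbatim kernel
 * user): the cache is a LIFO stack of pages threaded through the pages
 * themselves, with each page's first word holding the physical address
 * of the next one. my_alloc_page(), my_free_page(), my_virt_to_phys()
 * and my_phys_to_virt() below are hypothetical stand-ins for the real
 * allocator and VA<->PA conversion callbacks:
 *
 *	struct kvm_hyp_memcache mc = {};
 *
 *	if (__topup_hyp_memcache(&mc, 4, my_alloc_page,
 *				 my_virt_to_phys, NULL))
 *		return -ENOMEM;
 *
 *	void *page = pop_hyp_memcache(&mc, my_phys_to_virt);
 *	...
 *	__free_hyp_memcache(&mc, my_free_page, my_phys_to_virt, NULL);
 */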

struct kvm_vmid {
	atomic64_t id;
};

struct kvm_s2_mmu {
	struct kvm_vmid vmid;

	/*
	 * stage2 entry level table
	 *
	 * Two kvm_s2_mmu structures in the same VM can point to the same
	 * pgd here.  This happens when running a guest using a
	 * translation regime that isn't affected by its own stage-2
	 * translation, such as a non-VHE hypervisor running at vEL2, or
	 * for vEL1/EL0 with vHCR_EL2.VM == 0.  In that case, we use the
	 * canonical stage-2 page tables.
	 */
	phys_addr_t	pgd_phys;
	struct kvm_pgtable *pgt;

	/* The last vcpu id that ran on each physical CPU */
	int __percpu *last_vcpu_ran;

	struct kvm_arch *arch;
};

struct kvm_arch_memory_slot {
};

/**
 * struct kvm_smccc_features: Descriptor of the hypercall services exposed to the guests
 *
 * @std_bmap: Bitmap of standard secure service calls
 * @std_hyp_bmap: Bitmap of standard hypervisor service calls
 * @vendor_hyp_bmap: Bitmap of vendor specific hypervisor service calls
 */
struct kvm_smccc_features {
	unsigned long std_bmap;
	unsigned long std_hyp_bmap;
	unsigned long vendor_hyp_bmap;
};

typedef unsigned int pkvm_handle_t;

struct kvm_protected_vm {
	pkvm_handle_t handle;
	struct kvm_hyp_memcache teardown_mc;
};

struct kvm_arch {
	struct kvm_s2_mmu mmu;

	/* VTCR_EL2 value for this VM */
	u64    vtcr;

	/* Interrupt controller */
	struct vgic_dist	vgic;

	/* Timers */
	struct arch_timer_vm_data timer_data;

	/* Mandated version of PSCI */
	u32 psci_version;

	/* Protects VM-scoped configuration data */
	struct mutex config_lock;

	/*
	 * If we encounter a data abort without valid instruction syndrome
	 * information, report this to user space.  User space can (and
	 * should) opt in to this feature if KVM_CAP_ARM_NISV_TO_USER is
	 * supported.
	 */
#define KVM_ARCH_FLAG_RETURN_NISV_IO_ABORT_TO_USER	0
	/* Memory Tagging Extension enabled for the guest */
#define KVM_ARCH_FLAG_MTE_ENABLED			1
	/* At least one vCPU has run in the VM */
#define KVM_ARCH_FLAG_HAS_RAN_ONCE			2
	/*
	 * The following two bits are used to indicate the guest's EL1
	 * register width configuration. The value of the
	 * KVM_ARCH_FLAG_EL1_32BIT bit is valid only when
	 * KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED is set. Otherwise, the
	 * guest's EL1 register width has not yet been determined.
	 */
#define KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED		3
#define KVM_ARCH_FLAG_EL1_32BIT				4
	/* PSCI SYSTEM_SUSPEND enabled for the guest */
#define KVM_ARCH_FLAG_SYSTEM_SUSPEND_ENABLED		5
	/* VM counter offset */
#define KVM_ARCH_FLAG_VM_COUNTER_OFFSET			6
	/* Timer PPIs made immutable */
#define KVM_ARCH_FLAG_TIMER_PPIS_IMMUTABLE		7
	/* SMCCC filter initialized for the VM */
#define KVM_ARCH_FLAG_SMCCC_FILTER_CONFIGURED		8
	unsigned long flags;

	/*
	 * VM-wide PMU filter, implemented as a bitmap and big enough for
	 * up to 2^10 events (ARMv8.0) or 2^16 events (ARMv8.1+).
	 */
	unsigned long *pmu_filter;
	struct arm_pmu *arm_pmu;

	cpumask_var_t supported_cpus;

	u8 pfr0_csv2;
	u8 pfr0_csv3;
	struct {
		u8 imp:4;
		u8 unimp:4;
	} dfr0_pmuver;

	/* Hypercall features firmware registers' descriptor */
	struct kvm_smccc_features smccc_feat;
	struct maple_tree smccc_filter;

	/*
	 * For an untrusted host VM, 'pkvm.handle' is used to look up
	 * the associated pKVM instance in the hypervisor.
	 */
	struct kvm_protected_vm pkvm;
};

struct kvm_vcpu_fault_info {
	u64 esr_el2;		/* Hyp Syndrome Register */
	u64 far_el2;		/* Hyp Fault Address Register */
	u64 hpfar_el2;		/* Hyp IPA Fault Address Register */
	u64 disr_el1;		/* Deferred [SError] Status Register */
};

enum vcpu_sysreg {
	__INVALID_SYSREG__,   /* 0 is reserved as an invalid value */
	MPIDR_EL1,	/* MultiProcessor Affinity Register */
	CLIDR_EL1,	/* Cache Level ID Register */
	CSSELR_EL1,	/* Cache Size Selection Register */
	SCTLR_EL1,	/* System Control Register */
	ACTLR_EL1,	/* Auxiliary Control Register */
	CPACR_EL1,	/* Coprocessor Access Control */
	ZCR_EL1,	/* SVE Control */
	TTBR0_EL1,	/* Translation Table Base Register 0 */
	TTBR1_EL1,	/* Translation Table Base Register 1 */
	TCR_EL1,	/* Translation Control Register */
	TCR2_EL1,	/* Extended Translation Control Register */
	ESR_EL1,	/* Exception Syndrome Register */
	AFSR0_EL1,	/* Auxiliary Fault Status Register 0 */
	AFSR1_EL1,	/* Auxiliary Fault Status Register 1 */
	FAR_EL1,	/* Fault Address Register */
	MAIR_EL1,	/* Memory Attribute Indirection Register */
	VBAR_EL1,	/* Vector Base Address Register */
	CONTEXTIDR_EL1,	/* Context ID Register */
	TPIDR_EL0,	/* Thread ID, User R/W */
	TPIDRRO_EL0,	/* Thread ID, User R/O */
	TPIDR_EL1,	/* Thread ID, Privileged */
	AMAIR_EL1,	/* Aux Memory Attribute Indirection Register */
	CNTKCTL_EL1,	/* Timer Control Register (EL1) */
	PAR_EL1,	/* Physical Address Register */
	MDSCR_EL1,	/* Monitor Debug System Control Register */
	MDCCINT_EL1,	/* Monitor Debug Comms Channel Interrupt Enable Reg */
	OSLSR_EL1,	/* OS Lock Status Register */
	DISR_EL1,	/* Deferred Interrupt Status Register */

	/* Performance Monitors Registers */
	PMCR_EL0,	/* Control Register */
	PMSELR_EL0,	/* Event Counter Selection Register */
	PMEVCNTR0_EL0,	/* Event Counter Register (0-30) */
	PMEVCNTR30_EL0 = PMEVCNTR0_EL0 + 30,
	PMCCNTR_EL0,	/* Cycle Counter Register */
	PMEVTYPER0_EL0,	/* Event Type Register (0-30) */
	PMEVTYPER30_EL0 = PMEVTYPER0_EL0 + 30,
	PMCCFILTR_EL0,	/* Cycle Count Filter Register */
	PMCNTENSET_EL0,	/* Count Enable Set Register */
	PMINTENSET_EL1,	/* Interrupt Enable Set Register */
	PMOVSSET_EL0,	/* Overflow Flag Status Set Register */
	PMUSERENR_EL0,	/* User Enable Register */

	/* Pointer Authentication Registers in a strict increasing order. */
	APIAKEYLO_EL1,
	APIAKEYHI_EL1,
	APIBKEYLO_EL1,
	APIBKEYHI_EL1,
	APDAKEYLO_EL1,
	APDAKEYHI_EL1,
	APDBKEYLO_EL1,
	APDBKEYHI_EL1,
	APGAKEYLO_EL1,
	APGAKEYHI_EL1,

	ELR_EL1,
	SP_EL1,
	SPSR_EL1,

	CNTVOFF_EL2,
	CNTV_CVAL_EL0,
	CNTV_CTL_EL0,
	CNTP_CVAL_EL0,
	CNTP_CTL_EL0,

	/* Memory Tagging Extension registers */
	RGSR_EL1,	/* Random Allocation Tag Seed Register */
	GCR_EL1,	/* Tag Control Register */
	TFSR_EL1,	/* Tag Fault Status Register (EL1) */
	TFSRE0_EL1,	/* Tag Fault Status Register (EL0) */

	/* 32bit specific registers. */
	DACR32_EL2,	/* Domain Access Control Register */
	IFSR32_EL2,	/* Instruction Fault Status Register */
	FPEXC32_EL2,	/* Floating-Point Exception Control Register */
	DBGVCR32_EL2,	/* Debug Vector Catch Register */

	/* EL2 registers */
	VPIDR_EL2,	/* Virtualization Processor ID Register */
	VMPIDR_EL2,	/* Virtualization Multiprocessor ID Register */
	SCTLR_EL2,	/* System Control Register (EL2) */
	ACTLR_EL2,	/* Auxiliary Control Register (EL2) */
	HCR_EL2,	/* Hypervisor Configuration Register */
	MDCR_EL2,	/* Monitor Debug Configuration Register (EL2) */
	CPTR_EL2,	/* Architectural Feature Trap Register (EL2) */
	HSTR_EL2,	/* Hypervisor System Trap Register */
	HACR_EL2,	/* Hypervisor Auxiliary Control Register */
	TTBR0_EL2,	/* Translation Table Base Register 0 (EL2) */
	TTBR1_EL2,	/* Translation Table Base Register 1 (EL2) */
	TCR_EL2,	/* Translation Control Register (EL2) */
	VTTBR_EL2,	/* Virtualization Translation Table Base Register */
	VTCR_EL2,	/* Virtualization Translation Control Register */
	SPSR_EL2,	/* EL2 saved program status register */
	ELR_EL2,	/* EL2 exception link register */
	AFSR0_EL2,	/* Auxiliary Fault Status Register 0 (EL2) */
	AFSR1_EL2,	/* Auxiliary Fault Status Register 1 (EL2) */
	ESR_EL2,	/* Exception Syndrome Register (EL2) */
	FAR_EL2,	/* Fault Address Register (EL2) */
	HPFAR_EL2,	/* Hypervisor IPA Fault Address Register */
	MAIR_EL2,	/* Memory Attribute Indirection Register (EL2) */
	AMAIR_EL2,	/* Auxiliary Memory Attribute Indirection Register (EL2) */
	VBAR_EL2,	/* Vector Base Address Register (EL2) */
	RVBAR_EL2,	/* Reset Vector Base Address Register */
	CONTEXTIDR_EL2,	/* Context ID Register (EL2) */
	TPIDR_EL2,	/* EL2 Software Thread ID Register */
	CNTHCTL_EL2,	/* Counter-timer Hypervisor Control register */
	SP_EL2,		/* EL2 Stack Pointer */
	CNTHP_CTL_EL2,
	CNTHP_CVAL_EL2,
	CNTHV_CTL_EL2,
	CNTHV_CVAL_EL2,

	NR_SYS_REGS	/* Nothing after this line! */
};

struct kvm_cpu_context {
	struct user_pt_regs regs;	/* sp = sp_el0 */

	u64	spsr_abt;
	u64	spsr_und;
	u64	spsr_irq;
	u64	spsr_fiq;

	struct user_fpsimd_state fp_regs;

	u64 sys_regs[NR_SYS_REGS];

	struct kvm_vcpu *__hyp_running_vcpu;
};

struct kvm_host_data {
	struct kvm_cpu_context host_ctxt;
};

struct kvm_host_psci_config {
	/* PSCI version used by host. */
	u32 version;

	/* Function IDs used by host if version is v0.1. */
	struct psci_0_1_function_ids function_ids_0_1;

	bool psci_0_1_cpu_suspend_implemented;
	bool psci_0_1_cpu_on_implemented;
	bool psci_0_1_cpu_off_implemented;
	bool psci_0_1_migrate_implemented;
};

extern struct kvm_host_psci_config kvm_nvhe_sym(kvm_host_psci_config);
#define kvm_host_psci_config CHOOSE_NVHE_SYM(kvm_host_psci_config)

extern s64 kvm_nvhe_sym(hyp_physvirt_offset);
#define hyp_physvirt_offset CHOOSE_NVHE_SYM(hyp_physvirt_offset)

extern u64 kvm_nvhe_sym(hyp_cpu_logical_map)[NR_CPUS];
#define hyp_cpu_logical_map CHOOSE_NVHE_SYM(hyp_cpu_logical_map)

struct vcpu_reset_state {
	unsigned long	pc;
	unsigned long	r0;
	bool		be;
	bool		reset;
};

struct kvm_vcpu_arch {
	struct kvm_cpu_context ctxt;

	/*
	 * Guest floating point state
	 *
	 * The architecture has two main floating point extensions,
	 * the original FPSIMD and SVE.  These have overlapping
	 * register views, with the FPSIMD V registers occupying the
	 * low 128 bits of the SVE Z registers.  When the core
	 * floating point code saves the register state of a task it
	 * records which view it saved in fp_type.
	 */
	void *sve_state;
	enum fp_type fp_type;
	unsigned int sve_max_vl;
	u64 svcr;

	/* Stage 2 paging state used by the hardware on next switch */
	struct kvm_s2_mmu *hw_mmu;

	/* Values of trap registers for the guest. */
	u64 hcr_el2;
	u64 mdcr_el2;
	u64 cptr_el2;

	/* Values of trap registers for the host before guest entry. */
	u64 mdcr_el2_host;

	/* Exception Information */
	struct kvm_vcpu_fault_info fault;

	/* Ownership of the FP regs */
	enum {
		FP_STATE_FREE,
		FP_STATE_HOST_OWNED,
		FP_STATE_GUEST_OWNED,
	} fp_state;

	/* Configuration flags, set once and for all before the vcpu can run */
	u8 cflags;

	/* Input flags to the hypervisor code, potentially cleared after use */
	u8 iflags;

	/* State flags for kernel bookkeeping, unused by the hypervisor code */
	u8 sflags;

	/*
	 * Don't run the guest (internal implementation need).
	 *
	 * Contrary to the flags above, this is set/cleared outside of
	 * a vcpu context, and thus cannot be mixed with the flags
	 * themselves (or the flag accesses need to be made atomic).
	 */
	bool pause;

	/*
	 * We maintain more than a single set of debug registers to support
	 * debugging the guest from the host and to maintain separate host and
	 * guest state during world switches. vcpu_debug_state are the debug
	 * registers of the vcpu as the guest sees them.  host_debug_state are
	 * the host registers which are saved and restored during
	 * world switches. external_debug_state contains the debug
	 * values userspace wants to use to debug the guest. This is set
	 * via the KVM_SET_GUEST_DEBUG ioctl.
	 *
	 * debug_ptr points to the set of debug registers that should be loaded
	 * onto the hardware when running the guest.
	 */
	struct kvm_guest_debug_arch *debug_ptr;
	struct kvm_guest_debug_arch vcpu_debug_state;
	struct kvm_guest_debug_arch external_debug_state;

	struct user_fpsimd_state *host_fpsimd_state;	/* hyp VA */
	struct task_struct *parent_task;

	struct {
		/* {Break,watch}point registers */
		struct kvm_guest_debug_arch regs;
		/* Statistical profiling extension */
		u64 pmscr_el1;
		/* Self-hosted trace */
		u64 trfcr_el1;
	} host_debug_state;

	/* VGIC state */
	struct vgic_cpu vgic_cpu;
	struct arch_timer_cpu timer_cpu;
	struct kvm_pmu pmu;

	/*
	 * Guest registers we preserve during guest debugging.
	 *
	 * These shadow registers are updated by the kvm_handle_sys_reg
	 * trap handler if the guest accesses or updates them while we
	 * are using guest debug.
	 */
	struct {
		u32	mdscr_el1;
		bool	pstate_ss;
	} guest_debug_preserved;

	/* vcpu power state */
	struct kvm_mp_state mp_state;
	spinlock_t mp_state_lock;

	/* Cache some mmu pages needed inside spinlock regions */
	struct kvm_mmu_memory_cache mmu_page_cache;

	/* Target CPU and feature flags */
	int target;
	DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);

	/* Virtual SError ESR to restore when HCR_EL2.VSE is set */
	u64 vsesr_el2;

	/* Additional reset state */
	struct vcpu_reset_state	reset_state;

	/* Guest PV state */
	struct {
		u64 last_steal;
		gpa_t base;
	} steal;

	/* Per-vcpu CCSIDR override or NULL */
	u32 *ccsidr;
};

/*
 * Each 'flag' is composed of a comma-separated triplet:
 *
 * - the flag-set it belongs to in the vcpu->arch structure
 * - the value for that flag
 * - the mask for that flag
 *
 * __vcpu_single_flag() builds such a triplet for a single-bit flag.
 * unpack_vcpu_flag() extracts the flag value from the triplet for
 * direct use outside of the flag accessors.
 */
#define __vcpu_single_flag(_set, _f)	_set, (_f), (_f)

#define __unpack_flag(_set, _f, _m)	_f
#define unpack_vcpu_flag(...)		__unpack_flag(__VA_ARGS__)

#define __build_check_flag(v, flagset, f, m)			\
	do {							\
		typeof(v->arch.flagset) *_fset;			\
								\
		/* Check that the flags fit in the mask */	\
		BUILD_BUG_ON(HWEIGHT(m) != HWEIGHT((f) | (m)));	\
		/* Check that the flags fit in the type */	\
		BUILD_BUG_ON((sizeof(*_fset) * 8) <= __fls(m));	\
	} while (0)

#define __vcpu_get_flag(v, flagset, f, m)			\
	({							\
		__build_check_flag(v, flagset, f, m);		\
								\
		READ_ONCE(v->arch.flagset) & (m);		\
	})

/*
 * Note that the set/clear accessors must be preempt-safe in order to
 * avoid nesting them with load/put which also manipulate flags...
 */
#ifdef __KVM_NVHE_HYPERVISOR__
/* the nVHE hypervisor is always non-preemptible */
#define __vcpu_flags_preempt_disable()
#define __vcpu_flags_preempt_enable()
#else
#define __vcpu_flags_preempt_disable()	preempt_disable()
#define __vcpu_flags_preempt_enable()	preempt_enable()
#endif

#define __vcpu_set_flag(v, flagset, f, m)			\
	do {							\
		typeof(v->arch.flagset) *fset;			\
								\
		__build_check_flag(v, flagset, f, m);		\
								\
		fset = &v->arch.flagset;			\
		__vcpu_flags_preempt_disable();			\
		if (HWEIGHT(m) > 1)				\
			*fset &= ~(m);				\
		*fset |= (f);					\
		__vcpu_flags_preempt_enable();			\
	} while (0)

#define __vcpu_clear_flag(v, flagset, f, m)			\
	do {							\
		typeof(v->arch.flagset) *fset;			\
								\
		__build_check_flag(v, flagset, f, m);		\
								\
		fset = &v->arch.flagset;			\
		__vcpu_flags_preempt_disable();			\
		*fset &= ~(m);					\
		__vcpu_flags_preempt_enable();			\
	} while (0)

#define vcpu_get_flag(v, ...)	__vcpu_get_flag((v), __VA_ARGS__)
#define vcpu_set_flag(v, ...)	__vcpu_set_flag((v), __VA_ARGS__)
#define vcpu_clear_flag(v, ...)	__vcpu_clear_flag((v), __VA_ARGS__)
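
/*
 * To make the triplet plumbing concrete: with GUEST_HAS_SVE defined
 * below as __vcpu_single_flag(cflags, BIT(0)),
 *
 *	vcpu_set_flag(vcpu, GUEST_HAS_SVE)
 *
 * expands to __vcpu_set_flag((vcpu), cflags, BIT(0), BIT(0)): the
 * 'cflags' field is selected, and BIT(0) serves as both value and mask.
 * Multi-bit "flags" such as EXCEPT_MASK carry a wider mask, which makes
 * the setter clear the whole field before ORing in the new value.
 */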

/* SVE exposed to guest */
#define GUEST_HAS_SVE		__vcpu_single_flag(cflags, BIT(0))
/* SVE config completed */
#define VCPU_SVE_FINALIZED	__vcpu_single_flag(cflags, BIT(1))
/* PTRAUTH exposed to guest */
#define GUEST_HAS_PTRAUTH	__vcpu_single_flag(cflags, BIT(2))

/* Exception pending */
#define PENDING_EXCEPTION	__vcpu_single_flag(iflags, BIT(0))
/*
 * PC increment. Overlaps with EXCEPT_MASK on purpose so that it can't
 * be set together with an exception...
 */
#define INCREMENT_PC		__vcpu_single_flag(iflags, BIT(1))
/* Target EL/MODE (not a single flag, but let's abuse the macro) */
#define EXCEPT_MASK		__vcpu_single_flag(iflags, GENMASK(3, 1))

/* Helpers to encode exceptions with minimum fuss */
#define __EXCEPT_MASK_VAL	unpack_vcpu_flag(EXCEPT_MASK)
#define __EXCEPT_SHIFT		__builtin_ctzl(__EXCEPT_MASK_VAL)
#define __vcpu_except_flags(_f)	iflags, (_f << __EXCEPT_SHIFT), __EXCEPT_MASK_VAL

/*
 * When PENDING_EXCEPTION is set, EXCEPT_MASK can take the following
 * values:
 *
 * For AArch32 EL1:
 */
#define EXCEPT_AA32_UND		__vcpu_except_flags(0)
#define EXCEPT_AA32_IABT	__vcpu_except_flags(1)
#define EXCEPT_AA32_DABT	__vcpu_except_flags(2)
/* For AArch64: */
#define EXCEPT_AA64_EL1_SYNC	__vcpu_except_flags(0)
#define EXCEPT_AA64_EL1_IRQ	__vcpu_except_flags(1)
#define EXCEPT_AA64_EL1_FIQ	__vcpu_except_flags(2)
#define EXCEPT_AA64_EL1_SERR	__vcpu_except_flags(3)
/* For AArch64 with NV: */
#define EXCEPT_AA64_EL2_SYNC	__vcpu_except_flags(4)
#define EXCEPT_AA64_EL2_IRQ	__vcpu_except_flags(5)
#define EXCEPT_AA64_EL2_FIQ	__vcpu_except_flags(6)
#define EXCEPT_AA64_EL2_SERR	__vcpu_except_flags(7)
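
/*
 * Worked example of the encoding above: __EXCEPT_MASK_VAL is
 * GENMASK(3, 1), so __EXCEPT_SHIFT is 1 and EXCEPT_AA64_EL1_SERR
 * expands to the triplet
 *
 *	iflags, (3 << 1), GENMASK(3, 1)
 *
 * i.e. value 3 in bits [3:1] of vcpu->arch.iflags, leaving bit 0
 * (PENDING_EXCEPTION) to be set separately.
 */
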
/* Guest debug is live */
#define DEBUG_DIRTY		__vcpu_single_flag(iflags, BIT(4))
/* Save SPE context if active */
#define DEBUG_STATE_SAVE_SPE	__vcpu_single_flag(iflags, BIT(5))
/* Save TRBE context if active */
#define DEBUG_STATE_SAVE_TRBE	__vcpu_single_flag(iflags, BIT(6))
/* vcpu running in HYP context */
#define VCPU_HYP_CONTEXT	__vcpu_single_flag(iflags, BIT(7))

/* SVE enabled for host EL0 */
#define HOST_SVE_ENABLED	__vcpu_single_flag(sflags, BIT(0))
/* SME enabled for EL0 */
#define HOST_SME_ENABLED	__vcpu_single_flag(sflags, BIT(1))
/* Physical CPU not in supported_cpus */
#define ON_UNSUPPORTED_CPU	__vcpu_single_flag(sflags, BIT(2))
/* WFIT instruction trapped */
#define IN_WFIT			__vcpu_single_flag(sflags, BIT(3))
/* vcpu system registers loaded on physical CPU */
#define SYSREGS_ON_CPU		__vcpu_single_flag(sflags, BIT(4))
/* Software step state is Active-pending */
#define DBG_SS_ACTIVE_PENDING	__vcpu_single_flag(sflags, BIT(5))


/* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
#define vcpu_sve_pffr(vcpu) (kern_hyp_va((vcpu)->arch.sve_state) +	\
			     sve_ffr_offset((vcpu)->arch.sve_max_vl))

#define vcpu_sve_max_vq(vcpu)	sve_vq_from_vl((vcpu)->arch.sve_max_vl)

#define vcpu_sve_state_size(vcpu) ({					\
	size_t __size_ret;						\
	unsigned int __vcpu_vq;						\
									\
	if (WARN_ON(!sve_vl_valid((vcpu)->arch.sve_max_vl))) {		\
		__size_ret = 0;						\
	} else {							\
		__vcpu_vq = vcpu_sve_max_vq(vcpu);			\
		__size_ret = SVE_SIG_REGS_SIZE(__vcpu_vq);		\
	}								\
									\
	__size_ret;							\
})
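
/*
 * Worked example, assuming the usual sigcontext SVE layout with no
 * extra padding: for a 256-bit vector length, sve_max_vl is 32 bytes,
 * vcpu_sve_max_vq() yields 2, and vcpu_sve_state_size() returns
 * SVE_SIG_REGS_SIZE(2): 32 Z registers of 32 bytes each, 16 predicate
 * registers of 4 bytes each, plus the 4-byte FFR.
 */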

#define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE | \
				 KVM_GUESTDBG_USE_SW_BP | \
				 KVM_GUESTDBG_USE_HW | \
				 KVM_GUESTDBG_SINGLESTEP)

#define vcpu_has_sve(vcpu) (system_supports_sve() &&			\
			    vcpu_get_flag(vcpu, GUEST_HAS_SVE))

#ifdef CONFIG_ARM64_PTR_AUTH
#define vcpu_has_ptrauth(vcpu)						\
	((cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH) ||		\
	  cpus_have_final_cap(ARM64_HAS_GENERIC_AUTH)) &&		\
	  vcpu_get_flag(vcpu, GUEST_HAS_PTRAUTH))
#else
#define vcpu_has_ptrauth(vcpu)		false
#endif

#define vcpu_on_unsupported_cpu(vcpu)					\
	vcpu_get_flag(vcpu, ON_UNSUPPORTED_CPU)

#define vcpu_set_on_unsupported_cpu(vcpu)				\
	vcpu_set_flag(vcpu, ON_UNSUPPORTED_CPU)

#define vcpu_clear_on_unsupported_cpu(vcpu)				\
	vcpu_clear_flag(vcpu, ON_UNSUPPORTED_CPU)

#define vcpu_gp_regs(v)		(&(v)->arch.ctxt.regs)

/*
 * Only use __vcpu_sys_reg/ctxt_sys_reg if you know you want the
 * memory backed version of a register, and not the one most recently
 * accessed by a running VCPU.  For example, for userspace access or
 * for system registers that are never context switched, but only
 * emulated.
 */
#define __ctxt_sys_reg(c,r)	(&(c)->sys_regs[(r)])

#define ctxt_sys_reg(c,r)	(*__ctxt_sys_reg(c,r))

#define __vcpu_sys_reg(v,r)	(ctxt_sys_reg(&(v)->arch.ctxt, (r)))
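
/*
 * For example, the memory-backed copy of SCTLR_EL1 can be accessed as
 * an lvalue:
 *
 *	__vcpu_sys_reg(vcpu, SCTLR_EL1) = val;
 *
 * whereas vcpu_read_sys_reg()/vcpu_write_sys_reg() below prefer the
 * copy currently loaded on the hardware, when there is one.
 */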

u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg);
void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg);

static inline bool __vcpu_read_sys_reg_from_cpu(int reg, u64 *val)
{
	/*
	 * *** VHE ONLY ***
	 *
	 * System registers listed in the switch are not saved on every
	 * exit from the guest but are only saved on vcpu_put.
	 *
	 * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
	 * should never be listed below, because the guest cannot modify its
	 * own MPIDR_EL1 and MPIDR_EL1 is accessed for VCPU A from VCPU B's
	 * thread when emulating cross-VCPU communication.
	 */
	if (!has_vhe())
		return false;

	switch (reg) {
	case SCTLR_EL1:		*val = read_sysreg_s(SYS_SCTLR_EL12);	break;
	case CPACR_EL1:		*val = read_sysreg_s(SYS_CPACR_EL12);	break;
	case TTBR0_EL1:		*val = read_sysreg_s(SYS_TTBR0_EL12);	break;
	case TTBR1_EL1:		*val = read_sysreg_s(SYS_TTBR1_EL12);	break;
	case TCR_EL1:		*val = read_sysreg_s(SYS_TCR_EL12);	break;
	case ESR_EL1:		*val = read_sysreg_s(SYS_ESR_EL12);	break;
	case AFSR0_EL1:		*val = read_sysreg_s(SYS_AFSR0_EL12);	break;
	case AFSR1_EL1:		*val = read_sysreg_s(SYS_AFSR1_EL12);	break;
	case FAR_EL1:		*val = read_sysreg_s(SYS_FAR_EL12);	break;
	case MAIR_EL1:		*val = read_sysreg_s(SYS_MAIR_EL12);	break;
	case VBAR_EL1:		*val = read_sysreg_s(SYS_VBAR_EL12);	break;
	case CONTEXTIDR_EL1:	*val = read_sysreg_s(SYS_CONTEXTIDR_EL12);break;
	case TPIDR_EL0:		*val = read_sysreg_s(SYS_TPIDR_EL0);	break;
	case TPIDRRO_EL0:	*val = read_sysreg_s(SYS_TPIDRRO_EL0);	break;
	case TPIDR_EL1:		*val = read_sysreg_s(SYS_TPIDR_EL1);	break;
	case AMAIR_EL1:		*val = read_sysreg_s(SYS_AMAIR_EL12);	break;
	case CNTKCTL_EL1:	*val = read_sysreg_s(SYS_CNTKCTL_EL12);	break;
	case ELR_EL1:		*val = read_sysreg_s(SYS_ELR_EL12);	break;
	case PAR_EL1:		*val = read_sysreg_par();		break;
	case DACR32_EL2:	*val = read_sysreg_s(SYS_DACR32_EL2);	break;
	case IFSR32_EL2:	*val = read_sysreg_s(SYS_IFSR32_EL2);	break;
	case DBGVCR32_EL2:	*val = read_sysreg_s(SYS_DBGVCR32_EL2);	break;
	default:		return false;
	}

	return true;
}
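
/*
 * A minimal sketch of how a caller combines this helper with the
 * memory-backed copy (the real lookup lives in sys_regs.c and may
 * differ in detail):
 *
 *	u64 read_reg(const struct kvm_vcpu *vcpu, int reg)
 *	{
 *		u64 val;
 *
 *		if (vcpu_get_flag(vcpu, SYSREGS_ON_CPU) &&
 *		    __vcpu_read_sys_reg_from_cpu(reg, &val))
 *			return val;
 *
 *		return __vcpu_sys_reg(vcpu, reg);
 *	}
 */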

static inline bool __vcpu_write_sys_reg_to_cpu(u64 val, int reg)
{
	/*
	 * *** VHE ONLY ***
	 *
	 * System registers listed in the switch are not restored on every
	 * entry to the guest but are only restored on vcpu_load.
	 *
	 * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
	 * should never be listed below, because the MPIDR should only be set
	 * once, before running the VCPU, and never changed later.
	 */
	if (!has_vhe())
		return false;

	switch (reg) {
	case SCTLR_EL1:		write_sysreg_s(val, SYS_SCTLR_EL12);	break;
	case CPACR_EL1:		write_sysreg_s(val, SYS_CPACR_EL12);	break;
	case TTBR0_EL1:		write_sysreg_s(val, SYS_TTBR0_EL12);	break;
	case TTBR1_EL1:		write_sysreg_s(val, SYS_TTBR1_EL12);	break;
	case TCR_EL1:		write_sysreg_s(val, SYS_TCR_EL12);	break;
	case ESR_EL1:		write_sysreg_s(val, SYS_ESR_EL12);	break;
	case AFSR0_EL1:		write_sysreg_s(val, SYS_AFSR0_EL12);	break;
	case AFSR1_EL1:		write_sysreg_s(val, SYS_AFSR1_EL12);	break;
	case FAR_EL1:		write_sysreg_s(val, SYS_FAR_EL12);	break;
	case MAIR_EL1:		write_sysreg_s(val, SYS_MAIR_EL12);	break;
	case VBAR_EL1:		write_sysreg_s(val, SYS_VBAR_EL12);	break;
	case CONTEXTIDR_EL1:	write_sysreg_s(val, SYS_CONTEXTIDR_EL12);break;
	case TPIDR_EL0:		write_sysreg_s(val, SYS_TPIDR_EL0);	break;
	case TPIDRRO_EL0:	write_sysreg_s(val, SYS_TPIDRRO_EL0);	break;
	case TPIDR_EL1:		write_sysreg_s(val, SYS_TPIDR_EL1);	break;
	case AMAIR_EL1:		write_sysreg_s(val, SYS_AMAIR_EL12);	break;
	case CNTKCTL_EL1:	write_sysreg_s(val, SYS_CNTKCTL_EL12);	break;
	case ELR_EL1:		write_sysreg_s(val, SYS_ELR_EL12);	break;
	case PAR_EL1:		write_sysreg_s(val, SYS_PAR_EL1);	break;
	case DACR32_EL2:	write_sysreg_s(val, SYS_DACR32_EL2);	break;
	case IFSR32_EL2:	write_sysreg_s(val, SYS_IFSR32_EL2);	break;
	case DBGVCR32_EL2:	write_sysreg_s(val, SYS_DBGVCR32_EL2);	break;
	default:		return false;
	}

	return true;
}

struct kvm_vm_stat {
	struct kvm_vm_stat_generic generic;
};

struct kvm_vcpu_stat {
	struct kvm_vcpu_stat_generic generic;
	u64 hvc_exit_stat;
	u64 wfe_exit_stat;
	u64 wfi_exit_stat;
	u64 mmio_exit_user;
	u64 mmio_exit_kernel;
	u64 signal_exits;
	u64 exits;
};

void kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);

unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);

int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events);

int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events);

#define KVM_ARCH_WANT_MMU_NOTIFIER

void kvm_arm_halt_guest(struct kvm *kvm);
void kvm_arm_resume_guest(struct kvm *kvm);

#define vcpu_has_run_once(vcpu)	!!rcu_access_pointer((vcpu)->pid)

#ifndef __KVM_NVHE_HYPERVISOR__
#define kvm_call_hyp_nvhe(f, ...)						\
	({								\
		struct arm_smccc_res res;				\
									\
		arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(f),		\
				  ##__VA_ARGS__, &res);			\
		WARN_ON(res.a0 != SMCCC_RET_SUCCESS);			\
									\
		res.a1;							\
	})

/*
 * The isb() instructions below are there to guarantee the same behaviour
 * on VHE as on !VHE, where the eret to EL1 acts as a context
 * synchronization event.
 */
#define kvm_call_hyp(f, ...)						\
	do {								\
		if (has_vhe()) {					\
			f(__VA_ARGS__);					\
			isb();						\
		} else {						\
			kvm_call_hyp_nvhe(f, ##__VA_ARGS__);		\
		}							\
	} while(0)

#define kvm_call_hyp_ret(f, ...)					\
	({								\
		typeof(f(__VA_ARGS__)) ret;				\
									\
		if (has_vhe()) {					\
			ret = f(__VA_ARGS__);				\
			isb();						\
		} else {						\
			ret = kvm_call_hyp_nvhe(f, ##__VA_ARGS__);	\
		}							\
									\
		ret;							\
	})
#else /* __KVM_NVHE_HYPERVISOR__ */
#define kvm_call_hyp(f, ...) f(__VA_ARGS__)
#define kvm_call_hyp_ret(f, ...) f(__VA_ARGS__)
#define kvm_call_hyp_nvhe(f, ...) f(__VA_ARGS__)
#endif /* __KVM_NVHE_HYPERVISOR__ */
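
/*
 * Typical call sites (illustrative): a void hypercall, and one that
 * returns a value; both run the function directly on VHE and go through
 * the SMCCC trampoline on nVHE:
 *
 *	kvm_call_hyp(__kvm_flush_vm_context);
 *	mdcr = kvm_call_hyp_ret(__kvm_get_mdcr_el2);
 */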

void force_vm_exit(const cpumask_t *mask);

int handle_exit(struct kvm_vcpu *vcpu, int exception_index);
void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index);

int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu);
int kvm_handle_cp14_32(struct kvm_vcpu *vcpu);
int kvm_handle_cp14_64(struct kvm_vcpu *vcpu);
int kvm_handle_cp15_32(struct kvm_vcpu *vcpu);
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu);
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu);
int kvm_handle_cp10_id(struct kvm_vcpu *vcpu);

void kvm_reset_sys_regs(struct kvm_vcpu *vcpu);

int __init kvm_sys_reg_table_init(void);

bool lock_all_vcpus(struct kvm *kvm);
void unlock_all_vcpus(struct kvm *kvm);

/* MMIO helpers */
void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);

int kvm_handle_mmio_return(struct kvm_vcpu *vcpu);
int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa);

/*
 * Returns true if a Performance Monitoring Interrupt (PMI), a.k.a. perf event,
 * arrived in guest context.  For arm64, any event that arrives while a vCPU is
 * loaded is considered to be "in guest".
 */
static inline bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu)
{
	return IS_ENABLED(CONFIG_GUEST_PERF_EVENTS) && !!vcpu;
}

long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu);
gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu);
void kvm_update_stolen_time(struct kvm_vcpu *vcpu);

bool kvm_arm_pvtime_supported(void);
int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pvtime_get_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);

extern unsigned int __ro_after_init kvm_arm_vmid_bits;
int __init kvm_arm_vmid_alloc_init(void);
void __init kvm_arm_vmid_alloc_free(void);
void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid);
void kvm_arm_vmid_clear_active(void);

static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
{
	vcpu_arch->steal.base = INVALID_GPA;
}

static inline bool kvm_arm_is_pvtime_enabled(struct kvm_vcpu_arch *vcpu_arch)
{
	return (vcpu_arch->steal.base != INVALID_GPA);
}

void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome);

struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);

DECLARE_KVM_HYP_PER_CPU(struct kvm_host_data, kvm_host_data);

static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
{
	/* The host's MPIDR is immutable, so let's set it up at boot time */
	ctxt_sys_reg(cpu_ctxt, MPIDR_EL1) = read_cpuid_mpidr();
}

static inline bool kvm_system_needs_idmapped_vectors(void)
{
	return cpus_have_const_cap(ARM64_SPECTRE_V3A);
}

void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu);

static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}

void kvm_arm_init_debug(void);
void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu);
void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);

#define kvm_vcpu_os_lock_enabled(vcpu)		\
	(!!(__vcpu_sys_reg(vcpu, OSLSR_EL1) & SYS_OSLSR_OSLK))

int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);

int kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
			       struct kvm_arm_copy_mte_tags *copy_tags);
int kvm_vm_ioctl_set_counter_offset(struct kvm *kvm,
				    struct kvm_arm_counter_offset *offset);

/* Guest/host FPSIMD coordination helpers */
int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_ctxflush_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu);
void kvm_vcpu_unshare_task_fp(struct kvm_vcpu *vcpu);

static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr)
{
	return (!has_vhe() && attr->exclude_host);
}

/* Flags for host debug state */
void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_put_debug_state_flags(struct kvm_vcpu *vcpu);

#ifdef CONFIG_KVM
void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr);
void kvm_clr_pmu_events(u32 clr);
#else
static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {}
static inline void kvm_clr_pmu_events(u32 clr) {}
#endif

void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu);
void kvm_vcpu_put_sysregs_vhe(struct kvm_vcpu *vcpu);

int __init kvm_set_ipa_limit(void);

#define __KVM_HAVE_ARCH_VM_ALLOC
struct kvm *kvm_arch_alloc_vm(void);

static inline bool kvm_vm_is_protected(struct kvm *kvm)
{
	return false;
}

void kvm_init_protected_traps(struct kvm_vcpu *vcpu);

int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature);
bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);

#define kvm_arm_vcpu_sve_finalized(vcpu) vcpu_get_flag(vcpu, VCPU_SVE_FINALIZED)

#define kvm_has_mte(kvm)					\
	(system_supports_mte() &&				\
	 test_bit(KVM_ARCH_FLAG_MTE_ENABLED, &(kvm)->arch.flags))

#define kvm_supports_32bit_el0()				\
	(system_supports_32bit_el0() &&				\
	 !static_branch_unlikely(&arm64_mismatched_32bit_el0))

#define kvm_vm_has_ran_once(kvm)					\
	(test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &(kvm)->arch.flags))

int kvm_trng_call(struct kvm_vcpu *vcpu);
#ifdef CONFIG_KVM
extern phys_addr_t hyp_mem_base;
extern phys_addr_t hyp_mem_size;
void __init kvm_hyp_reserve(void);
#else
static inline void kvm_hyp_reserve(void) { }
#endif

void kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu);
bool kvm_arm_vcpu_stopped(struct kvm_vcpu *vcpu);

#endif /* __ARM64_KVM_HOST_H__ */