// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright © 2019 Oracle and/or its affiliates. All rights reserved.
 * Copyright © 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * KVM Xen emulation
 */

#ifndef __ARCH_X86_KVM_XEN_H__
#define __ARCH_X86_KVM_XEN_H__

#ifdef CONFIG_KVM_XEN
#include <linux/jump_label_ratelimit.h>

extern struct static_key_false_deferred kvm_xen_enabled;

int __kvm_xen_has_interrupt(struct kvm_vcpu *vcpu);
void kvm_xen_inject_pending_events(struct kvm_vcpu *vcpu);
int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data);
int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data);
int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data);
int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc);
void kvm_xen_init_vm(struct kvm *kvm);
void kvm_xen_destroy_vm(struct kvm *kvm);
void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu);
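
/*
 * Event channel delivery: kvm_xen_set_evtchn_fast() attempts to deliver
 * an event channel interrupt without sleeping, and kvm_xen_setup_evtchn()
 * translates a KVM_IRQ_ROUTING_XEN_EVTCHN userspace routing entry into
 * the kernel's routing-table form.
 */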
int kvm_xen_set_evtchn_fast(struct kvm_xen_evtchn *xe,
			    struct kvm *kvm);
int kvm_xen_setup_evtchn(struct kvm *kvm,
			 struct kvm_kernel_irq_routing_entry *e,
			 const struct kvm_irq_routing_entry *ue);

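/*
 * True if userspace has configured the Xen hypercall page MSR via
 * KVM_XEN_HVM_CONFIG; guarded by the static key so the check stays
 * cheap on the common MSR path when Xen emulation is not in use.
 */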
static inline bool kvm_xen_msr_enabled(struct kvm *kvm)
{
	return static_branch_unlikely(&kvm_xen_enabled.key) &&
		kvm->arch.xen_hvm_config.msr;
}

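/*
 * True if userspace asked KVM to intercept Xen hypercalls itself
 * (KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL) instead of supplying its own
 * hypercall page contents.
 */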
static inline bool kvm_xen_hypercall_enabled(struct kvm *kvm)
{
	return static_branch_unlikely(&kvm_xen_enabled.key) &&
		(kvm->arch.xen_hvm_config.flags &
		 KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL);
}

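/*
 * Fast check on the interrupt injection path: only take the slow path in
 * __kvm_xen_has_interrupt() if a vcpu_info area is mapped and an upcall
 * vector has been configured for the VM.
 */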
static inline int kvm_xen_has_interrupt(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_xen_enabled.key) &&
	    vcpu->arch.xen.vcpu_info_cache.active &&
	    vcpu->kvm->arch.xen.upcall_vector)
		return __kvm_xen_has_interrupt(vcpu);

	return 0;
}

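/*
 * True if event channel selector bits are still queued in the local
 * shadow (evtchn_pending_sel) and need to be propagated to the guest's
 * vcpu_info by kvm_xen_inject_pending_events().
 */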
static inline bool kvm_xen_has_pending_events(struct kvm_vcpu *vcpu)
{
	return static_branch_unlikely(&kvm_xen_enabled.key) &&
		vcpu->arch.xen.evtchn_pending_sel;
}

#else /* !CONFIG_KVM_XEN */
static inline int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data)
{
	return 1;
}

static inline void kvm_xen_init_vm(struct kvm *kvm)
{
}

static inline void kvm_xen_destroy_vm(struct kvm *kvm)
{
}

static inline void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu)
{
}

static inline bool kvm_xen_msr_enabled(struct kvm *kvm)
{
	return false;
}

static inline bool kvm_xen_hypercall_enabled(struct kvm *kvm)
{
	return false;
}

static inline int kvm_xen_has_interrupt(struct kvm_vcpu *vcpu)
{
	return 0;
}

static inline void kvm_xen_inject_pending_events(struct kvm_vcpu *vcpu)
{
}

static inline bool kvm_xen_has_pending_events(struct kvm_vcpu *vcpu)
{
	return false;
}
#endif /* CONFIG_KVM_XEN */

int kvm_xen_hypercall(struct kvm_vcpu *vcpu);

#include <asm/pvclock-abi.h>
#include <asm/xen/interface.h>
#include <xen/interface/vcpu.h>

void kvm_xen_update_runstate_guest(struct kvm_vcpu *vcpu, int state);

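/*
 * Runstate accounting helpers used by the scheduling hooks: both wrap
 * kvm_xen_update_runstate_guest() to record RUNSTATE_running or, on
 * preemption, RUNSTATE_runnable in the guest's runstate area.
 */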
static inline void kvm_xen_runstate_set_running(struct kvm_vcpu *vcpu)
{
	kvm_xen_update_runstate_guest(vcpu, RUNSTATE_running);
}

static inline void kvm_xen_runstate_set_preempted(struct kvm_vcpu *vcpu)
{
	/*
	 * If the vCPU wasn't preempted but took a normal exit for
	 * some reason (hypercalls, I/O, etc.), that is accounted as
	 * still RUNSTATE_running, as the VMM is still operating on
	 * behalf of the vCPU. Only if the VMM does actually block
	 * does it need to enter RUNSTATE_blocked.
	 */
	if (vcpu->preempted)
		kvm_xen_update_runstate_guest(vcpu, RUNSTATE_runnable);
}

/* 32-bit compatibility definitions, also used natively in 32-bit build */
struct compat_arch_vcpu_info {
	unsigned int cr2;
	unsigned int pad[5];
};

struct compat_vcpu_info {
	uint8_t evtchn_upcall_pending;
	uint8_t evtchn_upcall_mask;
	uint16_t pad;
	uint32_t evtchn_pending_sel;
	struct compat_arch_vcpu_info arch;
	struct pvclock_vcpu_time_info time;
}; /* 64 bytes (x86) */

struct compat_arch_shared_info {
	unsigned int max_pfn;
	unsigned int pfn_to_mfn_frame_list_list;
	unsigned int nmi_reason;
	unsigned int p2m_cr3;
	unsigned int p2m_vaddr;
	unsigned int p2m_generation;
	uint32_t wc_sec_hi;
};

struct compat_shared_info {
	struct compat_vcpu_info vcpu_info[MAX_VIRT_CPUS];
	uint32_t evtchn_pending[32];
	uint32_t evtchn_mask[32];
	struct pvclock_wall_clock wc;
	struct compat_arch_shared_info arch;
};

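/*
 * One bit per event channel in evtchn_pending[]: 8 * 128 bytes gives
 * 1024 channels in the 32-bit 2-level event channel ABI.
 */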
#define COMPAT_EVTCHN_2L_NR_CHANNELS (8 *				\
				      sizeof_field(struct compat_shared_info, \
						   evtchn_pending))
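
/*
 * A 32-bit guest aligns uint64_t to only 4 bytes, so pack this struct
 * to match that layout (state_entry_time at offset 4 rather than the
 * offset 8 a 64-bit build would otherwise use).
 */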
struct compat_vcpu_runstate_info {
    int state;
    uint64_t state_entry_time;
    uint64_t time[4];
} __attribute__((packed));

#endif /* __ARCH_X86_KVM_XEN_H__ */