// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright © 2019 Oracle and/or its affiliates. All rights reserved.
 * Copyright © 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * KVM Xen emulation
 */

#ifndef __ARCH_X86_KVM_XEN_H__
#define __ARCH_X86_KVM_XEN_H__

#include <asm/xen/hypervisor.h>

#ifdef CONFIG_KVM_XEN
#include <linux/jump_label_ratelimit.h>

extern struct static_key_false_deferred kvm_xen_enabled;

int __kvm_xen_has_interrupt(struct kvm_vcpu *vcpu);
void kvm_xen_inject_pending_events(struct kvm_vcpu *vcpu);
void kvm_xen_inject_vcpu_vector(struct kvm_vcpu *vcpu);
int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data);
int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data);
int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
int kvm_xen_hvm_evtchn_send(struct kvm *kvm, struct kvm_irq_routing_xen_evtchn *evt);
int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data);
int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc);
void kvm_xen_init_vm(struct kvm *kvm);
void kvm_xen_destroy_vm(struct kvm *kvm);
void kvm_xen_init_vcpu(struct kvm_vcpu *vcpu);
void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu);
int kvm_xen_set_evtchn_fast(struct kvm_xen_evtchn *xe,
			    struct kvm *kvm);
int kvm_xen_setup_evtchn(struct kvm *kvm,
			 struct kvm_kernel_irq_routing_entry *e,
			 const struct kvm_irq_routing_entry *ue);
void kvm_xen_update_tsc_info(struct kvm_vcpu *vcpu);

static inline void kvm_xen_sw_enable_lapic(struct kvm_vcpu *vcpu)
{
	/*
	 * The local APIC is being enabled. If the per-vCPU upcall vector is
	 * set and the vCPU's evtchn_upcall_pending flag is set, inject the
	 * interrupt.
	 */
	if (static_branch_unlikely(&kvm_xen_enabled.key) &&
	    vcpu->arch.xen.vcpu_info_cache.active &&
	    vcpu->arch.xen.upcall_vector && __kvm_xen_has_interrupt(vcpu))
		kvm_xen_inject_vcpu_vector(vcpu);
}
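
/*
 * Illustrative sketch only (an assumption, not defined in this header):
 * the local APIC emulation is expected to call the helper above at the
 * point where the guest software-enables its APIC, roughly:
 *
 *	if (apic_sw_enabled)
 *		kvm_xen_sw_enable_lapic(apic->vcpu);
 *
 * so that an event-channel upcall which became pending while the APIC was
 * disabled is injected as soon as delivery is possible again. The names
 * 'apic_sw_enabled' and 'apic->vcpu' are placeholders for whatever the
 * caller actually has in scope.
 */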

static inline bool kvm_xen_msr_enabled(struct kvm *kvm)
{
	return static_branch_unlikely(&kvm_xen_enabled.key) &&
	       kvm->arch.xen_hvm_config.msr;
}

static inline bool kvm_xen_hypercall_enabled(struct kvm *kvm)
{
	return static_branch_unlikely(&kvm_xen_enabled.key) &&
	       (kvm->arch.xen_hvm_config.flags &
		KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL);
}

static inline int kvm_xen_has_interrupt(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_xen_enabled.key) &&
	    vcpu->arch.xen.vcpu_info_cache.active &&
	    vcpu->kvm->arch.xen.upcall_vector)
		return __kvm_xen_has_interrupt(vcpu);

	return 0;
}
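
/*
 * A hedged usage sketch for the helper above: the cheap inline checks
 * (static key, active vcpu_info cache, VM-wide upcall vector) gate the
 * more expensive out-of-line __kvm_xen_has_interrupt(), which inspects
 * the guest's shared vcpu_info for evtchn_upcall_pending. A caller on
 * the entry path might do something like:
 *
 *	if (kvm_xen_has_interrupt(vcpu))
 *		... treat it as a pending external interrupt ...
 *
 * The exact call site is not shown in this header.
 */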

static inline bool kvm_xen_has_pending_events(struct kvm_vcpu *vcpu)
{
	return static_branch_unlikely(&kvm_xen_enabled.key) &&
	       vcpu->arch.xen.evtchn_pending_sel;
}

static inline bool kvm_xen_timer_enabled(struct kvm_vcpu *vcpu)
{
	return !!vcpu->arch.xen.timer_virq;
}

static inline int kvm_xen_has_pending_timer(struct kvm_vcpu *vcpu)
{
	if (kvm_xen_hypercall_enabled(vcpu->kvm) && kvm_xen_timer_enabled(vcpu))
		return atomic_read(&vcpu->arch.xen.timer_pending);

	return 0;
}

void kvm_xen_inject_timer_irqs(struct kvm_vcpu *vcpu);
#else
static inline int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data)
{
	return 1;
}

static inline void kvm_xen_init_vm(struct kvm *kvm)
{
}

static inline void kvm_xen_destroy_vm(struct kvm *kvm)
{
}

static inline void kvm_xen_init_vcpu(struct kvm_vcpu *vcpu)
{
}

static inline void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu)
{
}

static inline void kvm_xen_sw_enable_lapic(struct kvm_vcpu *vcpu)
{
}

static inline bool kvm_xen_msr_enabled(struct kvm *kvm)
{
	return false;
}

static inline bool kvm_xen_hypercall_enabled(struct kvm *kvm)
{
	return false;
}

static inline int kvm_xen_has_interrupt(struct kvm_vcpu *vcpu)
{
	return 0;
}

static inline void kvm_xen_inject_pending_events(struct kvm_vcpu *vcpu)
{
}

static inline bool kvm_xen_has_pending_events(struct kvm_vcpu *vcpu)
{
	return false;
}

static inline int kvm_xen_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return 0;
}

static inline void kvm_xen_inject_timer_irqs(struct kvm_vcpu *vcpu)
{
}

static inline bool kvm_xen_timer_enabled(struct kvm_vcpu *vcpu)
{
	return false;
}

static inline void kvm_xen_update_tsc_info(struct kvm_vcpu *vcpu)
{
}
#endif

int kvm_xen_hypercall(struct kvm_vcpu *vcpu);

#include <asm/pvclock-abi.h>
#include <asm/xen/interface.h>
#include <xen/interface/vcpu.h>

void kvm_xen_update_runstate(struct kvm_vcpu *vcpu, int state);

static inline void kvm_xen_runstate_set_running(struct kvm_vcpu *vcpu)
{
	kvm_xen_update_runstate(vcpu, RUNSTATE_running);
}

static inline void kvm_xen_runstate_set_preempted(struct kvm_vcpu *vcpu)
{
	/*
	 * If the vCPU wasn't preempted but took a normal exit for
	 * some reason (hypercalls, I/O, etc.), that is accounted as
	 * still RUNSTATE_running, as the VMM is still operating on
	 * behalf of the vCPU. Only if the VMM does actually block
	 * does it need to enter RUNSTATE_blocked.
	 */
	if (WARN_ON_ONCE(!vcpu->preempted))
		return;

	kvm_xen_update_runstate(vcpu, RUNSTATE_runnable);
}
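
/*
 * For reference, the runstate values used above come from the Xen ABI in
 * <xen/interface/vcpu.h> (included above) and are not redefined here:
 * RUNSTATE_running = 0, RUNSTATE_runnable = 1, RUNSTATE_blocked = 2,
 * RUNSTATE_offline = 3. A rough sketch of the expected scheduling flow,
 * assuming the usual vcpu sched_out/sched_in pairing:
 *
 *	kvm_xen_runstate_set_preempted(vcpu);	// sched_out: running -> runnable
 *	...
 *	kvm_xen_runstate_set_running(vcpu);	// sched_in: back to running
 */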

/* 32-bit compatibility definitions, also used natively in 32-bit build */
struct compat_arch_vcpu_info {
	unsigned int cr2;
	unsigned int pad[5];
};

struct compat_vcpu_info {
	uint8_t evtchn_upcall_pending;
	uint8_t evtchn_upcall_mask;
	uint16_t pad;
	uint32_t evtchn_pending_sel;
	struct compat_arch_vcpu_info arch;
	struct pvclock_vcpu_time_info time;
}; /* 64 bytes (x86) */

struct compat_arch_shared_info {
	unsigned int max_pfn;
	unsigned int pfn_to_mfn_frame_list_list;
	unsigned int nmi_reason;
	unsigned int p2m_cr3;
	unsigned int p2m_vaddr;
	unsigned int p2m_generation;
	uint32_t wc_sec_hi;
};

struct compat_shared_info {
	struct compat_vcpu_info vcpu_info[MAX_VIRT_CPUS];
	uint32_t evtchn_pending[32];
	uint32_t evtchn_mask[32];
	struct pvclock_wall_clock wc;
	struct compat_arch_shared_info arch;
};

#define COMPAT_EVTCHN_2L_NR_CHANNELS (8 *				\
				      sizeof_field(struct compat_shared_info, \
						   evtchn_pending))
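
/*
 * Worked out from the definition above: evtchn_pending is uint32_t[32],
 * i.e. 128 bytes, so the 2-level compat ABI covers 8 * 128 = 1024 event
 * channel ports (the native 64-bit layout, unsigned long[64], covers 4096).
 */
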
struct compat_vcpu_runstate_info {
	int state;
	uint64_t state_entry_time;
	uint64_t time[4];
} __attribute__((packed));
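
/*
 * Layout note on the struct above: the 32-bit Xen ABI does not pad the
 * 4-byte 'state' field to 8-byte alignment, so 'state_entry_time' sits at
 * offset 4 and the whole struct is 44 bytes; hence the packed attribute
 * (the natural 64-bit layout would be 48 bytes with state_entry_time at
 * offset 8). An illustrative compile-time check, not present here, could be:
 *
 *	BUILD_BUG_ON(offsetof(struct compat_vcpu_runstate_info,
 *			      state_entry_time) != 4);
 */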

struct compat_sched_poll {
	/* This is actually a guest virtual address which points to ports. */
	uint32_t ports;
	unsigned int nr_ports;
	uint64_t timeout;
};

#endif /* __ARCH_X86_KVM_XEN_H__ */