// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright © 2019 Oracle and/or its affiliates. All rights reserved.
 * Copyright © 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * KVM Xen emulation
 */

#ifndef __ARCH_X86_KVM_XEN_H__
#define __ARCH_X86_KVM_XEN_H__

#ifdef CONFIG_KVM_XEN
#include <linux/jump_label_ratelimit.h>

/* Deferred static key: patched in only while at least one VM uses Xen mode. */
extern struct static_key_false_deferred kvm_xen_enabled;

/* Slow path for kvm_xen_has_interrupt(); only called once the fast checks pass. */
int __kvm_xen_has_interrupt(struct kvm_vcpu *vcpu);
int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data);
int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data);
int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data);
int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc);
void kvm_xen_destroy_vm(struct kvm *kvm);

/*
 * True when Xen emulation is active for this VM (static key raised) and a
 * hypercall-page MSR has been configured via kvm_xen_hvm_config().
 */
static inline bool kvm_xen_msr_enabled(struct kvm *kvm)
{
	return static_branch_unlikely(&kvm_xen_enabled.key) &&
		kvm->arch.xen_hvm_config.msr;
}

/*
 * True when Xen emulation is active and userspace asked KVM to intercept
 * Xen hypercalls itself (KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL flag).
 */
static inline bool kvm_xen_hypercall_enabled(struct kvm *kvm)
{
	return static_branch_unlikely(&kvm_xen_enabled.key) &&
		(kvm->arch.xen_hvm_config.flags &
		 KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL);
}

/*
 * Fast-path check for a pending Xen event-channel upcall.  Only takes the
 * out-of-line __kvm_xen_has_interrupt() path when Xen mode is active, the
 * vcpu_info area has been set, and an upcall vector is configured;
 * otherwise reports no interrupt pending.
 */
static inline int kvm_xen_has_interrupt(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_xen_enabled.key) &&
	    vcpu->arch.xen.vcpu_info_set && vcpu->kvm->arch.xen.upcall_vector)
		return __kvm_xen_has_interrupt(vcpu);

	return 0;
}
#else
/* CONFIG_KVM_XEN=n stubs: Xen support compiled out, keep callers buildable. */

/*
 * Non-zero return: presumably tells the MSR write path the access was not
 * consumed as a Xen hypercall page write — TODO confirm against callers.
 */
static inline int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data)
{
	return 1;
}

static inline void kvm_xen_destroy_vm(struct kvm *kvm)
{
}

static inline bool kvm_xen_msr_enabled(struct kvm *kvm)
{
	return false;
}

static inline bool kvm_xen_hypercall_enabled(struct kvm *kvm)
{
	return false;
}

static inline int kvm_xen_has_interrupt(struct kvm_vcpu *vcpu)
{
	return 0;
}
#endif

int kvm_xen_hypercall(struct kvm_vcpu *vcpu);

#include <asm/pvclock-abi.h>
#include <asm/xen/interface.h>
#include <xen/interface/vcpu.h>

/* Publish a runstate transition into the guest's vcpu_runstate_info area. */
void kvm_xen_update_runstate_guest(struct kvm_vcpu *vcpu, int state);

/* Mark the vCPU as RUNSTATE_running, e.g. when it is scheduled back in. */
static inline void kvm_xen_runstate_set_running(struct kvm_vcpu *vcpu)
{
	kvm_xen_update_runstate_guest(vcpu, RUNSTATE_running);
}

static inline void kvm_xen_runstate_set_preempted(struct kvm_vcpu *vcpu)
{
	/*
	 * If the vCPU wasn't preempted but took a normal exit for
	 * some reason (hypercalls, I/O, etc.), that is accounted as
	 * still RUNSTATE_running, as the VMM is still operating on
	 * behalf of the vCPU. Only if the VMM does actually block
	 * does it need to enter RUNSTATE_blocked.
	 */
	if (vcpu->preempted)
		kvm_xen_update_runstate_guest(vcpu, RUNSTATE_runnable);
}

/* 32-bit compatibility definitions, also used natively in 32-bit build */

/* 32-bit layout of Xen's arch_vcpu_info (cr2 + padding). */
struct compat_arch_vcpu_info {
	unsigned int cr2;
	unsigned int pad[5];
};

/* 32-bit layout of Xen's vcpu_info; field order mirrors the Xen ABI. */
struct compat_vcpu_info {
	uint8_t evtchn_upcall_pending;
	uint8_t evtchn_upcall_mask;
	uint16_t pad;
	uint32_t evtchn_pending_sel;
	struct compat_arch_vcpu_info arch;
	struct pvclock_vcpu_time_info time;
}; /* 64 bytes (x86) */

/* 32-bit layout of Xen's arch_shared_info. */
struct compat_arch_shared_info {
	unsigned int max_pfn;
	unsigned int pfn_to_mfn_frame_list_list;
	unsigned int nmi_reason;
	unsigned int p2m_cr3;
	unsigned int p2m_vaddr;
	unsigned int p2m_generation;
	uint32_t wc_sec_hi;
};

/* 32-bit layout of Xen's shared_info page. */
struct compat_shared_info {
	struct compat_vcpu_info vcpu_info[MAX_VIRT_CPUS];
	uint32_t evtchn_pending[32];
	uint32_t evtchn_mask[32];
	struct pvclock_wall_clock wc;
	struct compat_arch_shared_info arch;
};

/*
 * 32-bit layout of Xen's vcpu_runstate_info.  Packed: the 64-bit members
 * are only 4-byte aligned in the 32-bit guest ABI, so natural alignment
 * must not insert padding after 'state'.
 */
struct compat_vcpu_runstate_info {
	int state;
	uint64_t state_entry_time;
	uint64_t time[4];
} __attribute__((packed));

#endif /* __ARCH_X86_KVM_XEN_H__ */