// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright © 2019 Oracle and/or its affiliates. All rights reserved.
 * Copyright © 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * KVM Xen emulation
 */

#ifndef __ARCH_X86_KVM_XEN_H__
#define __ARCH_X86_KVM_XEN_H__

#ifdef CONFIG_KVM_XEN
#include <linux/jump_label_ratelimit.h>

extern struct static_key_false_deferred kvm_xen_enabled;

int __kvm_xen_has_interrupt(struct kvm_vcpu *vcpu);
void kvm_xen_inject_pending_events(struct kvm_vcpu *vcpu);
int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data);
int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data);
int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
int kvm_xen_hvm_evtchn_send(struct kvm *kvm, struct kvm_irq_routing_xen_evtchn *evt);
int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data);
int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc);
void kvm_xen_init_vm(struct kvm *kvm);
void kvm_xen_destroy_vm(struct kvm *kvm);
void kvm_xen_init_vcpu(struct kvm_vcpu *vcpu);
void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu);
int kvm_xen_set_evtchn_fast(struct kvm_xen_evtchn *xe,
			    struct kvm *kvm);
int kvm_xen_setup_evtchn(struct kvm *kvm,
			 struct kvm_kernel_irq_routing_entry *e,
			 const struct kvm_irq_routing_entry *ue);

static inline bool kvm_xen_msr_enabled(struct kvm *kvm)
{
	return static_branch_unlikely(&kvm_xen_enabled.key) &&
		kvm->arch.xen_hvm_config.msr;
}

static inline bool kvm_xen_hypercall_enabled(struct kvm *kvm)
{
	return static_branch_unlikely(&kvm_xen_enabled.key) &&
		(kvm->arch.xen_hvm_config.flags &
		 KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL);
}

static inline int kvm_xen_has_interrupt(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_xen_enabled.key) &&
	    vcpu->arch.xen.vcpu_info_cache.active &&
	    vcpu->kvm->arch.xen.upcall_vector)
		return __kvm_xen_has_interrupt(vcpu);

	return 0;
}
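
/*
 * Illustrative sketch only (not part of the upstream header): how the
 * enablement checks above are assumed to gate the Xen paths from generic
 * x86 handling, keyed off the kvm_xen_enabled static branch so the code
 * stays cold when no VM has enabled Xen emulation. The caller names below
 * are hypothetical; the real call sites live in KVM's x86 code.
 *
 *	static int example_handle_hypercall(struct kvm_vcpu *vcpu)
 *	{
 *		if (kvm_xen_hypercall_enabled(vcpu->kvm))
 *			return kvm_xen_hypercall(vcpu);
 *		return 0;	(fall back to native KVM hypercall handling)
 *	}
 *
 *	static int example_handle_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 *	{
 *		if (kvm_xen_msr_enabled(vcpu->kvm) &&
 *		    msr == vcpu->kvm->arch.xen_hvm_config.msr)
 *			return kvm_xen_write_hypercall_page(vcpu, data);
 *		return 1;	(not the Xen hypercall-page MSR)
 *	}
 */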

static inline bool kvm_xen_has_pending_events(struct kvm_vcpu *vcpu)
{
	return static_branch_unlikely(&kvm_xen_enabled.key) &&
		vcpu->arch.xen.evtchn_pending_sel;
}

#else
static inline int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data)
{
	return 1;
}

static inline void kvm_xen_init_vm(struct kvm *kvm)
{
}

static inline void kvm_xen_destroy_vm(struct kvm *kvm)
{
}

static inline void kvm_xen_init_vcpu(struct kvm_vcpu *vcpu)
{
}

static inline void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu)
{
}

static inline bool kvm_xen_msr_enabled(struct kvm *kvm)
{
	return false;
}

static inline bool kvm_xen_hypercall_enabled(struct kvm *kvm)
{
	return false;
}

static inline int kvm_xen_has_interrupt(struct kvm_vcpu *vcpu)
{
	return 0;
}

static inline void kvm_xen_inject_pending_events(struct kvm_vcpu *vcpu)
{
}

static inline bool kvm_xen_has_pending_events(struct kvm_vcpu *vcpu)
{
	return false;
}
#endif

int kvm_xen_hypercall(struct kvm_vcpu *vcpu);

#include <asm/pvclock-abi.h>
#include <asm/xen/interface.h>
#include <xen/interface/vcpu.h>

void kvm_xen_update_runstate_guest(struct kvm_vcpu *vcpu, int state);

static inline void kvm_xen_runstate_set_running(struct kvm_vcpu *vcpu)
{
	kvm_xen_update_runstate_guest(vcpu, RUNSTATE_running);
}

static inline void kvm_xen_runstate_set_preempted(struct kvm_vcpu *vcpu)
{
	/*
	 * If the vCPU wasn't preempted but took a normal exit for
	 * some reason (hypercalls, I/O, etc.), that is accounted as
	 * still RUNSTATE_running, as the VMM is still operating on
	 * behalf of the vCPU. Only if the VMM does actually block
	 * does it need to enter RUNSTATE_blocked.
	 */
	if (vcpu->preempted)
		kvm_xen_update_runstate_guest(vcpu, RUNSTATE_runnable);
}
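
/*
 * Illustrative sketch only: how the runstate helpers above are assumed to
 * be driven from the vcpu load/put scheduling hooks, so that a vCPU which
 * the host deschedules is reported to the guest as RUNSTATE_runnable rather
 * than RUNSTATE_running. Caller names are hypothetical.
 *
 *	void example_vcpu_put(struct kvm_vcpu *vcpu)
 *	{
 *		if (kvm_xen_msr_enabled(vcpu->kvm))
 *			kvm_xen_runstate_set_preempted(vcpu);
 *	}
 *
 *	void example_vcpu_load(struct kvm_vcpu *vcpu)
 *	{
 *		if (kvm_xen_msr_enabled(vcpu->kvm))
 *			kvm_xen_runstate_set_running(vcpu);
 *	}
 */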

/* 32-bit compatibility definitions, also used natively in 32-bit build */
struct compat_arch_vcpu_info {
	unsigned int cr2;
	unsigned int pad[5];
};

struct compat_vcpu_info {
	uint8_t evtchn_upcall_pending;
	uint8_t evtchn_upcall_mask;
	uint16_t pad;
	uint32_t evtchn_pending_sel;
	struct compat_arch_vcpu_info arch;
	struct pvclock_vcpu_time_info time;
}; /* 64 bytes (x86) */

struct compat_arch_shared_info {
	unsigned int max_pfn;
	unsigned int pfn_to_mfn_frame_list_list;
	unsigned int nmi_reason;
	unsigned int p2m_cr3;
	unsigned int p2m_vaddr;
	unsigned int p2m_generation;
	uint32_t wc_sec_hi;
};

struct compat_shared_info {
	struct compat_vcpu_info vcpu_info[MAX_VIRT_CPUS];
	uint32_t evtchn_pending[32];
	uint32_t evtchn_mask[32];
	struct pvclock_wall_clock wc;
	struct compat_arch_shared_info arch;
};

#define COMPAT_EVTCHN_2L_NR_CHANNELS (8 * \
				      sizeof_field(struct compat_shared_info, \
						   evtchn_pending))
struct compat_vcpu_runstate_info {
	int state;
	uint64_t state_entry_time;
	uint64_t time[4];
} __attribute__((packed));

#endif /* __ARCH_X86_KVM_XEN_H__ */
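
/*
 * For reference on the compat layout above: sizeof_field(struct
 * compat_shared_info, evtchn_pending) is 32 * sizeof(uint32_t) = 128 bytes,
 * so COMPAT_EVTCHN_2L_NR_CHANNELS evaluates to 8 * 128 = 1024, one bit per
 * event channel in a 32-bit guest's 2-level event channel bitmap.
 */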