1b2441318SGreg Kroah-Hartman /* SPDX-License-Identifier: GPL-2.0 */
21965aae3SH. Peter Anvin #ifndef _ASM_X86_KVM_PARA_H
31965aae3SH. Peter Anvin #define _ASM_X86_KVM_PARA_H
4bb898558SAl Viro
5bb898558SAl Viro #include <asm/processor.h>
6c1118b36SPaolo Bonzini #include <asm/alternative.h>
7b1d40575SVitaly Kuznetsov #include <linux/interrupt.h>
8af170c50SDavid Howells #include <uapi/asm/kvm_para.h>
9bb898558SAl Viro
10*cfb8ec7aSKuppuswamy Sathyanarayanan #include <asm/tdx.h>
11*cfb8ec7aSKuppuswamy Sathyanarayanan
1290993cddSMarcelo Tosatti #ifdef CONFIG_KVM_GUEST
133b5d56b9SEric B Munson bool kvm_check_and_clear_guest_paused(void);
143b5d56b9SEric B Munson #else
/* !CONFIG_KVM_GUEST stub — a non-KVM guest never observes a pause event. */
static inline bool kvm_check_and_clear_guest_paused(void)
{
	return false;
}
1990993cddSMarcelo Tosatti #endif /* CONFIG_KVM_GUEST */
20bb898558SAl Viro
21c1118b36SPaolo Bonzini #define KVM_HYPERCALL \
224cb5b77eSUros Bizjak ALTERNATIVE("vmcall", "vmmcall", X86_FEATURE_VMMCALL)
23bb898558SAl Viro
24e423ca15SRaghavendra K T /* For KVM hypercalls, a three-byte sequence of either the vmcall or the vmmcall
25bb898558SAl Viro * instruction. The hypervisor may replace it with something else but only the
26bb898558SAl Viro * instructions are guaranteed to be supported.
27bb898558SAl Viro *
28bb898558SAl Viro * Up to four arguments may be passed in rbx, rcx, rdx, and rsi respectively.
29bb898558SAl Viro * The hypercall number should be placed in rax and the return value will be
3011393a07SJesse Larrew * placed in rax. No other registers will be clobbered unless explicitly
31bb898558SAl Viro * noted by the particular hypercall.
32bb898558SAl Viro */
33bb898558SAl Viro
/*
 * Issue KVM hypercall @nr with no arguments.
 *
 * TDX guests do not use the patched VMCALL/VMMCALL sequence; their
 * hypercalls are routed through tdx_kvm_hypercall() instead, with the
 * unused argument slots zeroed.
 *
 * @nr goes in rax ("a" constraint); the result comes back in rax.
 * "memory" clobber: the hypervisor may read/modify guest memory.
 */
static inline long kvm_hypercall0(unsigned int nr)
{
	long ret;

	if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST))
		return tdx_kvm_hypercall(nr, 0, 0, 0, 0);

	asm volatile(KVM_HYPERCALL
		     : "=a"(ret)
		     : "a"(nr)
		     : "memory");
	return ret;
}
47bb898558SAl Viro
/*
 * Issue KVM hypercall @nr with one argument.
 *
 * TDX guests bypass VMCALL/VMMCALL and go through tdx_kvm_hypercall(),
 * zeroing the unused argument slots.
 *
 * Registers: rax = @nr (and return value), rbx = @p1.
 */
static inline long kvm_hypercall1(unsigned int nr, unsigned long p1)
{
	long ret;

	if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST))
		return tdx_kvm_hypercall(nr, p1, 0, 0, 0);

	asm volatile(KVM_HYPERCALL
		     : "=a"(ret)
		     : "a"(nr), "b"(p1)
		     : "memory");
	return ret;
}
61bb898558SAl Viro
/*
 * Issue KVM hypercall @nr with two arguments.
 *
 * TDX guests bypass VMCALL/VMMCALL and go through tdx_kvm_hypercall(),
 * zeroing the unused argument slots.
 *
 * Registers: rax = @nr (and return value), rbx = @p1, rcx = @p2.
 */
static inline long kvm_hypercall2(unsigned int nr, unsigned long p1,
				  unsigned long p2)
{
	long ret;

	if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST))
		return tdx_kvm_hypercall(nr, p1, p2, 0, 0);

	asm volatile(KVM_HYPERCALL
		     : "=a"(ret)
		     : "a"(nr), "b"(p1), "c"(p2)
		     : "memory");
	return ret;
}
76bb898558SAl Viro
/*
 * Issue KVM hypercall @nr with three arguments.
 *
 * TDX guests bypass VMCALL/VMMCALL and go through tdx_kvm_hypercall(),
 * zeroing the unused argument slot.
 *
 * Registers: rax = @nr (and return value), rbx = @p1, rcx = @p2,
 * rdx = @p3.
 */
static inline long kvm_hypercall3(unsigned int nr, unsigned long p1,
				  unsigned long p2, unsigned long p3)
{
	long ret;

	if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST))
		return tdx_kvm_hypercall(nr, p1, p2, p3, 0);

	asm volatile(KVM_HYPERCALL
		     : "=a"(ret)
		     : "a"(nr), "b"(p1), "c"(p2), "d"(p3)
		     : "memory");
	return ret;
}
91bb898558SAl Viro
/*
 * Issue KVM hypercall @nr with four arguments (the maximum; see the
 * comment above kvm_hypercall0()).
 *
 * TDX guests bypass VMCALL/VMMCALL and go through tdx_kvm_hypercall().
 *
 * Registers: rax = @nr (and return value), rbx = @p1, rcx = @p2,
 * rdx = @p3, rsi = @p4.
 */
static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
				  unsigned long p2, unsigned long p3,
				  unsigned long p4)
{
	long ret;

	if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST))
		return tdx_kvm_hypercall(nr, p1, p2, p3, p4);

	asm volatile(KVM_HYPERCALL
		     : "=a"(ret)
		     : "a"(nr), "b"(p1), "c"(p2), "d"(p3), "S"(p4)
		     : "memory");
	return ret;
}
107bb898558SAl Viro
/*
 * Issue hypercall @nr with three arguments using a raw VMMCALL, with no
 * alternatives patching and no TDX dispatch.  SEV is an AMD feature, so
 * the AMD VMMCALL encoding is used unconditionally here.
 *
 * Registers: rax = @nr (and return value), rbx = @p1, rcx = @p2,
 * rdx = @p3.
 */
static inline long kvm_sev_hypercall3(unsigned int nr, unsigned long p1,
				      unsigned long p2, unsigned long p3)
{
	long ret;

	asm volatile("vmmcall"
		     : "=a"(ret)
		     : "a"(nr), "b"(p1), "c"(p2), "d"(p3)
		     : "memory");
	return ret;
}
11908c2336dSBrijesh Singh
120ba492962SAlexander Graf #ifdef CONFIG_KVM_GUEST
121c02027b5SVitaly Kuznetsov void kvmclock_init(void);
122c02027b5SVitaly Kuznetsov void kvmclock_disable(void);
1231c300a40SPaolo Bonzini bool kvm_para_available(void);
12477f01bdfSPaolo Bonzini unsigned int kvm_arch_para_features(void);
125a4429e53SWanpeng Li unsigned int kvm_arch_para_hints(void);
1266bca69adSThomas Gleixner void kvm_async_pf_task_wait_schedule(u32 token);
127631bc487SGleb Natapov void kvm_async_pf_task_wake(u32 token);
12868fd66f1SVitaly Kuznetsov u32 kvm_read_and_reset_apf_flags(void);
129ef68017eSAndy Lutomirski bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token);
130ef68017eSAndy Lutomirski
131ef68017eSAndy Lutomirski DECLARE_STATIC_KEY_FALSE(kvm_async_pf_enabled);
132ef68017eSAndy Lutomirski
kvm_handle_async_pf(struct pt_regs * regs,u32 token)133ef68017eSAndy Lutomirski static __always_inline bool kvm_handle_async_pf(struct pt_regs *regs, u32 token)
134ef68017eSAndy Lutomirski {
135ef68017eSAndy Lutomirski if (static_branch_unlikely(&kvm_async_pf_enabled))
136ef68017eSAndy Lutomirski return __kvm_handle_async_pf(regs, token);
137ef68017eSAndy Lutomirski else
138ef68017eSAndy Lutomirski return false;
139ef68017eSAndy Lutomirski }
14092b75202SSrivatsa Vaddagiri
14192b75202SSrivatsa Vaddagiri #ifdef CONFIG_PARAVIRT_SPINLOCKS
14292b75202SSrivatsa Vaddagiri void __init kvm_spinlock_init(void);
14392b75202SSrivatsa Vaddagiri #else /* !CONFIG_PARAVIRT_SPINLOCKS */
/* !CONFIG_PARAVIRT_SPINLOCKS stub — no paravirt spinlock setup needed. */
static inline void kvm_spinlock_init(void)
{
}
14792b75202SSrivatsa Vaddagiri #endif /* CONFIG_PARAVIRT_SPINLOCKS */
14892b75202SSrivatsa Vaddagiri
14992b75202SSrivatsa Vaddagiri #else /* CONFIG_KVM_GUEST */
1506bca69adSThomas Gleixner #define kvm_async_pf_task_wait_schedule(T) do {} while(0)
151631bc487SGleb Natapov #define kvm_async_pf_task_wake(T) do {} while(0)
15292b75202SSrivatsa Vaddagiri
/* !CONFIG_KVM_GUEST stub — KVM paravirt services are never available. */
static inline bool kvm_para_available(void)
{
	return false;
}
1571c300a40SPaolo Bonzini
/* !CONFIG_KVM_GUEST stub — no paravirt feature bits advertised. */
static inline unsigned int kvm_arch_para_features(void)
{
	return 0;
}
16277f01bdfSPaolo Bonzini
/* !CONFIG_KVM_GUEST stub — no paravirt hint bits advertised. */
static inline unsigned int kvm_arch_para_hints(void)
{
	return 0;
}
167a4429e53SWanpeng Li
/* !CONFIG_KVM_GUEST stub — no async page fault flags to read or reset. */
static inline u32 kvm_read_and_reset_apf_flags(void)
{
	return 0;
}
172d910f5c1SGlauber Costa
/* !CONFIG_KVM_GUEST stub — async page faults are never handled here. */
static __always_inline bool kvm_handle_async_pf(struct pt_regs *regs, u32 token)
{
	return false;
}
177bb898558SAl Viro #endif
178bb898558SAl Viro
1791965aae3SH. Peter Anvin #endif /* _ASM_X86_KVM_PARA_H */
180