/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_SVM_OPS_H
#define __KVM_X86_SVM_OPS_H

#include <linux/compiler_types.h>

#include "x86.h"
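
/*
 * Wrappers for raw SVM instructions.  The svm_asm*() macros emit the
 * instruction via asm goto, with an exception table entry (_ASM_EXTABLE)
 * that reroutes a faulting instruction to the local 'fault' label, which
 * reports the unexpected fault via kvm_spurious_fault().
 */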
#define svm_asm(insn, clobber...)					\
do {									\
	asm goto("1: " __stringify(insn) "\n\t"				\
		 _ASM_EXTABLE(1b, %l[fault])				\
		 ::: clobber : fault);					\
	return;								\
fault:									\
	kvm_spurious_fault();						\
} while (0)

#define svm_asm1(insn, op1, clobber...)					\
do {									\
	asm goto("1: " __stringify(insn) " %0\n\t"			\
		 _ASM_EXTABLE(1b, %l[fault])				\
		 :: op1 : clobber : fault);				\
	return;								\
fault:									\
	kvm_spurious_fault();						\
} while (0)

#define svm_asm2(insn, op1, op2, clobber...)				\
do {									\
	asm goto("1: " __stringify(insn) " %1, %0\n\t"			\
		 _ASM_EXTABLE(1b, %l[fault])				\
		 :: op1, op2 : clobber : fault);			\
	return;								\
fault:									\
	kvm_spurious_fault();						\
} while (0)
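
/*
 * Clear the Global Interrupt Flag (GIF); while GIF=0, physical interrupts
 * and NMIs are held pending.
 */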
static inline void clgi(void)
{
	svm_asm(clgi);
}
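
/* Set the Global Interrupt Flag (GIF) to re-enable interrupt delivery. */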
static inline void stgi(void)
{
	svm_asm(stgi);
}
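
/*
 * INVLPGA invalidates the guest TLB mapping for the virtual address in rAX
 * and the ASID in ECX, hence the "a" and "c" register constraints.
 */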
static inline void invlpga(unsigned long addr, u32 asid)
{
	svm_asm2(invlpga, "c"(asid), "a"(addr));
}
/*
 * Despite being a physical address, the portion of rAX that is consumed by
 * VMSAVE, VMLOAD, etc... is still controlled by the effective address size,
 * hence 'unsigned long' instead of 'hpa_t'.
 */
static __always_inline void vmsave(unsigned long pa)
{
	svm_asm1(vmsave, "a" (pa), "memory");
}
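
/*
 * A new single-operand wrapper would follow the same pattern; e.g. a
 * hypothetical VMLOAD helper (an illustrative sketch only, not part of
 * this header) would look like:
 *
 *	static __always_inline void vmload(unsigned long pa)
 *	{
 *		svm_asm1(vmload, "a" (pa), "memory");
 *	}
 */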

#endif /* __KVM_X86_SVM_OPS_H */