xref: /openbmc/linux/arch/x86/kvm/vmx/vmx_onhyperv.h (revision ebfed7be)
/* SPDX-License-Identifier: GPL-2.0-only */

#ifndef __ARCH_X86_KVM_VMX_ONHYPERV_H__
#define __ARCH_X86_KVM_VMX_ONHYPERV_H__

#include <asm/hyperv-tlfs.h>

#include <linux/jump_label.h>

#include "capabilities.h"
#include "hyperv.h"
#include "vmcs12.h"

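/*
 * When the loaded VMCS is an Enlightened VMCS, the per-CPU current_vmcs
 * pointer actually refers to a struct hv_enlightened_vmcs; current_evmcs
 * simply reinterprets that pointer.  It is only meaningful while an eVMCS
 * is loaded on this CPU.
 */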
#define current_evmcs ((struct hv_enlightened_vmcs *)this_cpu_read(current_vmcs))

#if IS_ENABLED(CONFIG_HYPERV)

DECLARE_STATIC_KEY_FALSE(__kvm_is_using_evmcs);

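/*
 * True when KVM, itself running as a Hyper-V guest, uses Enlightened VMCS
 * instead of the hardware VMCS.  A static branch is used because the choice
 * is made once during setup and never changes afterwards.
 */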
static __always_inline bool kvm_is_using_evmcs(void)
{
	return static_branch_unlikely(&__kvm_is_using_evmcs);
}

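/*
 * Translate a VMCS field encoding into the field's byte offset within
 * struct hv_enlightened_vmcs.  When @clean_field is non-NULL it receives
 * the "clean field" mask covering the field, so that writers can mark the
 * field group dirty.  A negative return (accompanied by a one-time warning)
 * means the field has no eVMCS equivalent.
 */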
static __always_inline int get_evmcs_offset(unsigned long field,
					    u16 *clean_field)
{
	int offset = evmcs_field_offset(field, clean_field);

	WARN_ONCE(offset < 0, "accessing unsupported EVMCS field %lx\n", field);
	return offset;
}

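/*
 * eVMCS writers: store the value at the field's offset and clear the
 * corresponding bit in hv_clean_fields so the hypervisor knows the field
 * group was modified and must be reloaded on the next enlightened VM entry.
 * Writes to unsupported fields are dropped (after get_evmcs_offset()'s
 * warning).
 */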
static __always_inline void evmcs_write64(unsigned long field, u64 value)
{
	u16 clean_field;
	int offset = get_evmcs_offset(field, &clean_field);

	if (offset < 0)
		return;

	*(u64 *)((char *)current_evmcs + offset) = value;

	current_evmcs->hv_clean_fields &= ~clean_field;
}

static __always_inline void evmcs_write32(unsigned long field, u32 value)
{
	u16 clean_field;
	int offset = get_evmcs_offset(field, &clean_field);

	if (offset < 0)
		return;

	*(u32 *)((char *)current_evmcs + offset) = value;
	current_evmcs->hv_clean_fields &= ~clean_field;
}

static __always_inline void evmcs_write16(unsigned long field, u16 value)
{
	u16 clean_field;
	int offset = get_evmcs_offset(field, &clean_field);

	if (offset < 0)
		return;

	*(u16 *)((char *)current_evmcs + offset) = value;
	current_evmcs->hv_clean_fields &= ~clean_field;
}

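/*
 * eVMCS readers: return the value stored at the field's offset, or 0 when
 * the field is not present in the eVMCS.  Reads do not touch hv_clean_fields.
 */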
static __always_inline u64 evmcs_read64(unsigned long field)
{
	int offset = get_evmcs_offset(field, NULL);

	if (offset < 0)
		return 0;

	return *(u64 *)((char *)current_evmcs + offset);
}

static __always_inline u32 evmcs_read32(unsigned long field)
{
	int offset = get_evmcs_offset(field, NULL);

	if (offset < 0)
		return 0;

	return *(u32 *)((char *)current_evmcs + offset);
}

static __always_inline u16 evmcs_read16(unsigned long field)
{
	int offset = get_evmcs_offset(field, NULL);

	if (offset < 0)
		return 0;

	return *(u16 *)((char *)current_evmcs + offset);
}

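/*
 * Make the eVMCS at @phys_addr current by programming this CPU's Hyper-V VP
 * assist page: point current_nested_vmcs at it and request an enlightened
 * VM entry.  If the eVMCS has the nested flush hypercall enlightenment
 * enabled, the direct hypercall feature bit is set as well.
 */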
static inline void evmcs_load(u64 phys_addr)
{
	struct hv_vp_assist_page *vp_ap =
		hv_get_vp_assist_page(smp_processor_id());

	if (current_evmcs->hv_enlightenments_control.nested_flush_hypercall)
		vp_ap->nested_control.features.directhypercall = 1;
	vp_ap->current_nested_vmcs = phys_addr;
	vp_ap->enlighten_vmentry = 1;
}

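/* Strip VMCS controls that have no eVMCS equivalent from vmcs_config. */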
void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf);
#else /* !IS_ENABLED(CONFIG_HYPERV) */
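/* No-op stubs so callers build without #ifdefs when CONFIG_HYPERV is off. */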
static __always_inline bool kvm_is_using_evmcs(void) { return false; }
static __always_inline void evmcs_write64(unsigned long field, u64 value) {}
static __always_inline void evmcs_write32(unsigned long field, u32 value) {}
static __always_inline void evmcs_write16(unsigned long field, u16 value) {}
static __always_inline u64 evmcs_read64(unsigned long field) { return 0; }
static __always_inline u32 evmcs_read32(unsigned long field) { return 0; }
static __always_inline u16 evmcs_read16(unsigned long field) { return 0; }
static inline void evmcs_load(u64 phys_addr) {}
#endif /* IS_ENABLED(CONFIG_HYPERV) */

#endif /* __ARCH_X86_KVM_VMX_ONHYPERV_H__ */