xref: /openbmc/linux/arch/x86/kvm/kvm_cache_regs.h (revision 1ac731c529cd4d6adbce134754b51ff7d822b145)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ASM_KVM_CACHE_REGS_H
#define ASM_KVM_CACHE_REGS_H

#include <linux/kvm_host.h>

#define KVM_POSSIBLE_CR0_GUEST_BITS	(X86_CR0_TS | X86_CR0_WP)
#define KVM_POSSIBLE_CR4_GUEST_BITS				  \
	(X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR  \
	 | X86_CR4_OSXMMEXCPT | X86_CR4_PGE | X86_CR4_TSD | X86_CR4_FSGSBASE)

#define X86_CR0_PDPTR_BITS    (X86_CR0_CD | X86_CR0_NW | X86_CR0_PG)
#define X86_CR4_TLBFLUSH_BITS (X86_CR4_PGE | X86_CR4_PCIDE | X86_CR4_PAE | X86_CR4_SMEP)
#define X86_CR4_PDPTR_BITS    (X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_SMEP)

static_assert(!(KVM_POSSIBLE_CR0_GUEST_BITS & X86_CR0_PDPTR_BITS));
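/*
 * Rationale (a sketch, not taken from the commit history): CR0 bits that
 * trigger a PDPTR reload must always be intercepted.  If e.g. X86_CR0_PG
 * were ever added to KVM_POSSIBLE_CR0_GUEST_BITS, the guest could toggle
 * paging without a VM-Exit and KVM would miss the mandatory PDPTR reload;
 * the static_assert above catches such a mistake at build time.
 */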

#define BUILD_KVM_GPR_ACCESSORS(lname, uname)				      \
static __always_inline unsigned long kvm_##lname##_read(struct kvm_vcpu *vcpu)\
{									      \
	return vcpu->arch.regs[VCPU_REGS_##uname];			      \
}									      \
static __always_inline void kvm_##lname##_write(struct kvm_vcpu *vcpu,	      \
						unsigned long val)	      \
{									      \
	vcpu->arch.regs[VCPU_REGS_##uname] = val;			      \
}
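/*
 * For reference, BUILD_KVM_GPR_ACCESSORS(rax, RAX) expands to (roughly):
 *
 *	static __always_inline unsigned long kvm_rax_read(struct kvm_vcpu *vcpu)
 *	{
 *		return vcpu->arch.regs[VCPU_REGS_RAX];
 *	}
 *	static __always_inline void kvm_rax_write(struct kvm_vcpu *vcpu,
 *						  unsigned long val)
 *	{
 *		vcpu->arch.regs[VCPU_REGS_RAX] = val;
 *	}
 *
 * These accessors read/write the soft copy in vcpu->arch.regs directly;
 * the GPRs are effectively always available since they are saved into
 * vcpu->arch.regs in the VM-Exit path, so unlike RSP/RIP no regs_avail
 * check is needed here.
 */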
BUILD_KVM_GPR_ACCESSORS(rax, RAX)
BUILD_KVM_GPR_ACCESSORS(rbx, RBX)
BUILD_KVM_GPR_ACCESSORS(rcx, RCX)
BUILD_KVM_GPR_ACCESSORS(rdx, RDX)
BUILD_KVM_GPR_ACCESSORS(rbp, RBP)
BUILD_KVM_GPR_ACCESSORS(rsi, RSI)
BUILD_KVM_GPR_ACCESSORS(rdi, RDI)
#ifdef CONFIG_X86_64
BUILD_KVM_GPR_ACCESSORS(r8,  R8)
BUILD_KVM_GPR_ACCESSORS(r9,  R9)
BUILD_KVM_GPR_ACCESSORS(r10, R10)
BUILD_KVM_GPR_ACCESSORS(r11, R11)
BUILD_KVM_GPR_ACCESSORS(r12, R12)
BUILD_KVM_GPR_ACCESSORS(r13, R13)
BUILD_KVM_GPR_ACCESSORS(r14, R14)
BUILD_KVM_GPR_ACCESSORS(r15, R15)
#endif
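/*
 * Usage sketch: CPUID emulation returns its output in the GPRs, e.g.:
 *
 *	kvm_rax_write(vcpu, eax);
 *	kvm_rbx_write(vcpu, ebx);
 *	kvm_rcx_write(vcpu, ecx);
 *	kvm_rdx_write(vcpu, edx);
 */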

/*
 * avail  dirty
 * 0	  0	  register in VMCS/VMCB
 * 0	  1	  *INVALID*
 * 1	  0	  register in vcpu->arch
 * 1	  1	  register in vcpu->arch, needs to be stored back
 */
static inline bool kvm_register_is_available(struct kvm_vcpu *vcpu,
					     enum kvm_reg reg)
{
	return test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}

static inline bool kvm_register_is_dirty(struct kvm_vcpu *vcpu,
					 enum kvm_reg reg)
{
	return test_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
}

static inline void kvm_register_mark_available(struct kvm_vcpu *vcpu,
					       enum kvm_reg reg)
{
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}

static inline void kvm_register_mark_dirty(struct kvm_vcpu *vcpu,
					   enum kvm_reg reg)
{
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
}
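/*
 * Sketch of the intended protocol for vendor code, per the table above:
 *
 *	// Read: lazily pull the value out of hardware on first use.
 *	if (!kvm_register_is_available(vcpu, reg)) {
 *		vcpu->arch.regs[reg] = <read from VMCS/VMCB>;
 *		kvm_register_mark_available(vcpu, reg);
 *	}
 *
 *	// Write: update the soft copy and flag it for write-back, so it
 *	// gets flushed to the VMCS/VMCB before the next VM-Entry.
 *	vcpu->arch.regs[reg] = val;
 *	kvm_register_mark_dirty(vcpu, reg);
 */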

/*
 * kvm_register_test_and_mark_available() is a special snowflake that uses an
 * arch bitop directly to avoid the explicit instrumentation that comes with
 * the generic bitops.  This allows code that cannot be instrumented (noinstr
 * functions), e.g. the low level VM-Enter/VM-Exit paths, to cache registers.
 */
static __always_inline bool kvm_register_test_and_mark_available(struct kvm_vcpu *vcpu,
								 enum kvm_reg reg)
{
	return arch___test_and_set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}
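/*
 * Sketch (loosely modeled on the VMX VM-Exit path): a noinstr function can
 * cache a register straight off the hardware without tripping KASAN/KCSAN
 * instrumentation:
 *
 *	if (!kvm_register_test_and_mark_available(vcpu, VCPU_EXREG_CR3))
 *		vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
 */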

/*
 * The "raw" register helpers are only for cases where the full 64 bits of a
 * register are read/written irrespective of current vCPU mode.  In other words,
 * odds are good you shouldn't be using the raw variants.
 */
static inline unsigned long kvm_register_read_raw(struct kvm_vcpu *vcpu, int reg)
{
	if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS))
		return 0;

	if (!kvm_register_is_available(vcpu, reg))
		static_call(kvm_x86_cache_reg)(vcpu, reg);

	return vcpu->arch.regs[reg];
}

static inline void kvm_register_write_raw(struct kvm_vcpu *vcpu, int reg,
					  unsigned long val)
{
	if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS))
		return;

	vcpu->arch.regs[reg] = val;
	kvm_register_mark_dirty(vcpu, reg);
}
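/*
 * For mode-aware accesses, x86.h provides kvm_register_read() and
 * kvm_register_write() wrappers that truncate to 32 bits unless the vCPU
 * is in 64-bit mode; roughly, the read side does:
 *
 *	val = kvm_register_read_raw(vcpu, reg);
 *	if (!is_64_bit_mode(vcpu))
 *		val = (u32)val;
 */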

static inline unsigned long kvm_rip_read(struct kvm_vcpu *vcpu)
{
	return kvm_register_read_raw(vcpu, VCPU_REGS_RIP);
}

static inline void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val)
{
	kvm_register_write_raw(vcpu, VCPU_REGS_RIP, val);
}

static inline unsigned long kvm_rsp_read(struct kvm_vcpu *vcpu)
{
	return kvm_register_read_raw(vcpu, VCPU_REGS_RSP);
}

static inline void kvm_rsp_write(struct kvm_vcpu *vcpu, unsigned long val)
{
	kvm_register_write_raw(vcpu, VCPU_REGS_RSP, val);
}
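/*
 * RIP and RSP route through the raw helpers because, on VMX, they live in
 * the VMCS (GUEST_RIP/GUEST_RSP) and are only pulled into vcpu->arch.regs
 * on first use; kvm_register_read_raw() triggers that lazy load via the
 * kvm_x86_cache_reg() hook.
 */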

static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
{
	might_sleep();  /* on svm */

	if (!kvm_register_is_available(vcpu, VCPU_EXREG_PDPTR))
		static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_PDPTR);

	return vcpu->arch.walk_mmu->pdptrs[index];
}

static inline void kvm_pdptr_write(struct kvm_vcpu *vcpu, int index, u64 value)
{
	vcpu->arch.walk_mmu->pdptrs[index] = value;
}
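/*
 * Sketch: under PAE paging, bits 31:30 of the guest address select one of
 * the four PDPTEs, so a page walker might do (walk_fault() is a
 * hypothetical helper for illustration):
 *
 *	u64 pdpte = kvm_pdptr_read(vcpu, (addr >> 30) & 3);
 *	if (!(pdpte & 1))	// present bit clear
 *		return walk_fault();
 */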

static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS;
	if ((tmask & vcpu->arch.cr0_guest_owned_bits) &&
	    !kvm_register_is_available(vcpu, VCPU_EXREG_CR0))
		static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_CR0);
	return vcpu->arch.cr0 & mask;
}
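/*
 * Why the dance above: a bit in cr0_guest_owned_bits can be modified by
 * the guest without a VM-Exit, so the soft copy in vcpu->arch.cr0 may be
 * stale and must be refreshed from the VMCS/VMCB via kvm_x86_cache_reg().
 * Bits KVM always intercepts are authoritative in the soft copy, so e.g.
 *
 *	kvm_is_cr0_bit_set(vcpu, X86_CR0_TS)
 *
 * only touches hardware if CR0.TS is guest-owned and not yet cached.
 */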

static __always_inline bool kvm_is_cr0_bit_set(struct kvm_vcpu *vcpu,
					       unsigned long cr0_bit)
{
	BUILD_BUG_ON(!is_power_of_2(cr0_bit));

	return !!kvm_read_cr0_bits(vcpu, cr0_bit);
}

static inline ulong kvm_read_cr0(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, ~0UL);
}

static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	ulong tmask = mask & KVM_POSSIBLE_CR4_GUEST_BITS;
	if ((tmask & vcpu->arch.cr4_guest_owned_bits) &&
	    !kvm_register_is_available(vcpu, VCPU_EXREG_CR4))
		static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_CR4);
	return vcpu->arch.cr4 & mask;
}

static __always_inline bool kvm_is_cr4_bit_set(struct kvm_vcpu *vcpu,
					       unsigned long cr4_bit)
{
	BUILD_BUG_ON(!is_power_of_2(cr4_bit));

	return !!kvm_read_cr4_bits(vcpu, cr4_bit);
}

static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu)
{
	if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3))
		static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_CR3);
	return vcpu->arch.cr3;
}

static inline ulong kvm_read_cr4(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, ~0UL);
}

static inline u64 kvm_read_edx_eax(struct kvm_vcpu *vcpu)
{
	return (kvm_rax_read(vcpu) & -1u)
		| ((u64)(kvm_rdx_read(vcpu) & -1u) << 32);
}
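/*
 * The inverse split shows up in e.g. RDMSR emulation, which returns a
 * 64-bit value in EDX:EAX:
 *
 *	kvm_rax_write(vcpu, data & -1u);
 *	kvm_rdx_write(vcpu, (data >> 32) & -1u);
 */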

static inline void enter_guest_mode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hflags |= HF_GUEST_MASK;
	vcpu->stat.guest_mode = 1;
}

static inline void leave_guest_mode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hflags &= ~HF_GUEST_MASK;

	if (vcpu->arch.load_eoi_exitmap_pending) {
		vcpu->arch.load_eoi_exitmap_pending = false;
		kvm_make_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu);
	}

	vcpu->stat.guest_mode = 0;
}
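/*
 * "Guest mode" here means *nested* virtualization: the vCPU is running an
 * L2 guest on behalf of an L1 hypervisor.  Nested VMX/SVM code calls
 * enter_guest_mode() when emulating VMLAUNCH/VMRESUME/VMRUN and
 * leave_guest_mode() when emulating the VM-Exit back to L1.
 */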

static inline bool is_guest_mode(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hflags & HF_GUEST_MASK;
}

#endif