#ifndef ARCH_X86_KVM_X86_H
#define ARCH_X86_KVM_X86_H

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"

/* Drop any exception that is still queued for injection into the guest. */
static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.exception.pending = false;
}

/*
 * Queue an interrupt for injection.  'soft' marks interrupts raised by
 * INTn in the guest rather than delivered from an external source.
 */
static inline void kvm_queue_interrupt(struct kvm_vcpu *vcpu, u8 vector,
	bool soft)
{
	vcpu->arch.interrupt.pending = true;
	vcpu->arch.interrupt.soft = soft;
	vcpu->arch.interrupt.nr = vector;
}

static inline void kvm_clear_interrupt_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.interrupt.pending = false;
}

/* True if an exception, interrupt or NMI is still awaiting (re)injection. */
static inline bool kvm_event_needs_reinjection(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.exception.pending || vcpu->arch.interrupt.pending ||
		vcpu->arch.nmi_injected;
}

/* #BP and #OF are raised by INT3/INTO and are treated as software exceptions. */
static inline bool kvm_exception_is_soft(unsigned int nr)
{
	return (nr == BP_VECTOR) || (nr == OF_VECTOR);
}

struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
					      u32 function, u32 index);

static inline bool is_protmode(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, X86_CR0_PE);
}

/* Long mode is active when EFER.LMA is set (64-bit kernels only). */
static inline int is_long_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return vcpu->arch.efer & EFER_LMA;
#else
	return 0;
#endif
}

static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
}

static inline int is_pae(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_PAE);
}

static inline int is_pse(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_PSE);
}

static inline int is_paging(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, X86_CR0_PG);
}

/* Bit number is masked to 31 to keep the shift within the u32 result. */
static inline u32 bit(int bitno)
{
	return 1 << (bitno & 31);
}

void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);
int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq);

void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data);

#endif