#ifndef _ASM_X86_KVM_PARA_H
#define _ASM_X86_KVM_PARA_H

#include <linux/types.h>
#include <asm/hyperv.h>

/* This CPUID returns the signature 'KVMKVMKVM' in ebx, ecx, and edx. It
 * should be used to determine whether a VM is running under KVM.
 */
#define KVM_CPUID_SIGNATURE	0x40000000

/* This CPUID returns a feature bitmap in eax. Before enabling a particular
 * paravirtualization, the appropriate feature bit should be checked.
 */
#define KVM_CPUID_FEATURES	0x40000001
#define KVM_FEATURE_CLOCKSOURCE		0
#define KVM_FEATURE_NOP_IO_DELAY	1
#define KVM_FEATURE_MMU_OP		2
/* This indicates that the new set of kvmclock MSRs
 * is available. The use of MSRs 0x11 and 0x12 is deprecated.
 */
#define KVM_FEATURE_CLOCKSOURCE2	3
#define KVM_FEATURE_ASYNC_PF		4
#define KVM_FEATURE_STEAL_TIME		5

/* The last 8 bits are used to indicate how to interpret the flags field
 * in the pvclock structure. If no bits are set, all flags are ignored.
 */
#define KVM_FEATURE_CLOCKSOURCE_STABLE_BIT	24

#define MSR_KVM_WALL_CLOCK	0x11
#define MSR_KVM_SYSTEM_TIME	0x12

#define KVM_MSR_ENABLED	1
/* Custom MSRs fall in the range 0x4b564d00-0x4b564dff */
#define MSR_KVM_WALL_CLOCK_NEW	0x4b564d00
#define MSR_KVM_SYSTEM_TIME_NEW	0x4b564d01
#define MSR_KVM_ASYNC_PF_EN	0x4b564d02
#define MSR_KVM_STEAL_TIME	0x4b564d03

struct kvm_steal_time {
	__u64 steal;
	__u32 version;
	__u32 flags;
	__u32 pad[12];
};

#define KVM_STEAL_ALIGNMENT_BITS	5
#define KVM_STEAL_VALID_BITS	((-1ULL << (KVM_STEAL_ALIGNMENT_BITS + 1)))
#define KVM_STEAL_RESERVED_MASK	(((1 << KVM_STEAL_ALIGNMENT_BITS) - 1) << 1)

#define KVM_MAX_MMU_OP_BATCH	32

#define KVM_ASYNC_PF_ENABLED		(1 << 0)
#define KVM_ASYNC_PF_SEND_ALWAYS	(1 << 1)

/* Operations for KVM_HC_MMU_OP */
#define KVM_MMU_OP_WRITE_PTE	1
#define KVM_MMU_OP_FLUSH_TLB	2
#define KVM_MMU_OP_RELEASE_PT	3

/* Payload for KVM_HC_MMU_OP */
struct kvm_mmu_op_header {
	__u32 op;
	__u32 pad;
};

struct kvm_mmu_op_write_pte {
	struct kvm_mmu_op_header header;
	__u64 pte_phys;
	__u64 pte_val;
};

struct kvm_mmu_op_flush_tlb {
	struct kvm_mmu_op_header header;
};

struct kvm_mmu_op_release_pt {
	struct kvm_mmu_op_header header;
	__u64 pt_phys;
};

#define KVM_PV_REASON_PAGE_NOT_PRESENT	1
#define KVM_PV_REASON_PAGE_READY	2

struct kvm_vcpu_pv_apf_data {
	__u32 reason;
	__u8 pad[60];
	__u32 enabled;
};

#ifdef __KERNEL__
#include <asm/processor.h>

extern void kvmclock_init(void);
extern int kvm_register_clock(char *txt);

/* This instruction is vmcall. On non-VT architectures, it will generate a
 * trap that we will then rewrite to the appropriate instruction.
 */
#define KVM_HYPERCALL ".byte 0x0f,0x01,0xc1"

/* For KVM hypercalls, a three-byte sequence of either the vmcall or the
 * vmmcall instruction. The hypervisor may replace it with something else
 * but only the instructions are guaranteed to be supported.
 *
 * Up to four arguments may be passed in rbx, rcx, rdx, and rsi respectively.
 * The hypercall number should be placed in rax and the return value will be
 * placed in rax. No other registers will be clobbered unless explicitly
 * noted by the particular hypercall.
 */

static inline long kvm_hypercall0(unsigned int nr)
{
	long ret;
	asm volatile(KVM_HYPERCALL
		     : "=a"(ret)
		     : "a"(nr)
		     : "memory");
	return ret;
}

static inline long kvm_hypercall1(unsigned int nr, unsigned long p1)
{
	long ret;
	asm volatile(KVM_HYPERCALL
		     : "=a"(ret)
		     : "a"(nr), "b"(p1)
		     : "memory");
	return ret;
}

static inline long kvm_hypercall2(unsigned int nr, unsigned long p1,
				  unsigned long p2)
{
	long ret;
	asm volatile(KVM_HYPERCALL
		     : "=a"(ret)
		     : "a"(nr), "b"(p1), "c"(p2)
		     : "memory");
	return ret;
}

static inline long kvm_hypercall3(unsigned int nr, unsigned long p1,
				  unsigned long p2, unsigned long p3)
{
	long ret;
	asm volatile(KVM_HYPERCALL
		     : "=a"(ret)
		     : "a"(nr), "b"(p1), "c"(p2), "d"(p3)
		     : "memory");
	return ret;
}

static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
				  unsigned long p2, unsigned long p3,
				  unsigned long p4)
{
	long ret;
	asm volatile(KVM_HYPERCALL
		     : "=a"(ret)
		     : "a"(nr), "b"(p1), "c"(p2), "d"(p3), "S"(p4)
		     : "memory");
	return ret;
}

static inline int kvm_para_available(void)
{
	unsigned int eax, ebx, ecx, edx;
	char signature[13];

	if (boot_cpu_data.cpuid_level < 0)
		return 0;	/* So we don't blow up on old processors */

	cpuid(KVM_CPUID_SIGNATURE, &eax, &ebx, &ecx, &edx);
	memcpy(signature + 0, &ebx, 4);
	memcpy(signature + 4, &ecx, 4);
	memcpy(signature + 8, &edx, 4);
	signature[12] = 0;

	if (strcmp(signature, "KVMKVMKVM") == 0)
		return 1;

	return 0;
}

static inline unsigned int kvm_arch_para_features(void)
{
	return cpuid_eax(KVM_CPUID_FEATURES);
}
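
/* Illustrative sketch only: one way a guest could test an individual bit of
 * the KVM_CPUID_FEATURES bitmap before enabling a paravirtual feature, e.g.
 * checking KVM_FEATURE_STEAL_TIME before writing MSR_KVM_STEAL_TIME. The
 * helper name kvm_para_has_feature() is an assumption of this sketch, not
 * something this header defines.
 */
static inline int kvm_para_has_feature(unsigned int feature)
{
	if (kvm_arch_para_features() & (1UL << feature))
		return 1;
	return 0;
}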

#ifdef CONFIG_KVM_GUEST
void __init kvm_guest_init(void);
void kvm_async_pf_task_wait(u32 token);
void kvm_async_pf_task_wake(u32 token);
u32 kvm_read_and_reset_pf_reason(void);
extern void kvm_disable_steal_time(void);
#else
#define kvm_guest_init() do { } while (0)
#define kvm_async_pf_task_wait(T) do { } while (0)
#define kvm_async_pf_task_wake(T) do { } while (0)
static inline u32 kvm_read_and_reset_pf_reason(void)
{
	return 0;
}

static inline void kvm_disable_steal_time(void)
{
	return;
}
#endif

#endif /* __KERNEL__ */

#endif /* _ASM_X86_KVM_PARA_H */