/*
 * QEMU KVM support
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#ifndef QEMU_KVM_H
#define QEMU_KVM_H

#include <errno.h>
#include "config-host.h"
#include "qemu/queue.h"
#include "qom/cpu.h"

#ifdef CONFIG_KVM
#include <linux/kvm.h>
#include <linux/kvm_para.h>
#else
/* These constants must never be used at runtime if kvm_enabled() is false.
 * They exist so we don't need #ifdefs around KVM-specific code that already
 * checks kvm_enabled() properly.
 */
#define KVM_CPUID_SIGNATURE                0
#define KVM_CPUID_FEATURES                 0
#define KVM_FEATURE_CLOCKSOURCE            0
#define KVM_FEATURE_NOP_IO_DELAY           0
#define KVM_FEATURE_MMU_OP                 0
#define KVM_FEATURE_CLOCKSOURCE2           0
#define KVM_FEATURE_ASYNC_PF               0
#define KVM_FEATURE_STEAL_TIME             0
#define KVM_FEATURE_PV_EOI                 0
#define KVM_FEATURE_CLOCKSOURCE_STABLE_BIT 0
#endif

extern bool kvm_allowed;
extern bool kvm_kernel_irqchip;
extern bool kvm_async_interrupts_allowed;
extern bool kvm_irqfds_allowed;
extern bool kvm_msi_via_irqfd_allowed;
extern bool kvm_gsi_routing_allowed;
extern bool kvm_readonly_mem_allowed;

#if defined CONFIG_KVM || !defined NEED_CPU_H
#define kvm_enabled()           (kvm_allowed)
/**
 * kvm_irqchip_in_kernel:
 *
 * Returns: true if the user asked us to create an in-kernel
 * irqchip via the "kernel_irqchip=on" machine option.
 * What this actually means is architecture and machine model
 * specific: on PC, for instance, it means that the LAPIC,
 * IOAPIC and PIT are all in the kernel. This function should never
 * be used from generic target-independent code: use one of the
 * following functions or some other specific check instead.
 */
#define kvm_irqchip_in_kernel() (kvm_kernel_irqchip)

/**
 * kvm_async_interrupts_enabled:
 *
 * Returns: true if we can deliver interrupts to KVM
 * asynchronously (i.e. by ioctl from any thread at any time)
 * rather than having to do interrupt delivery synchronously
 * (where the vcpu must be stopped at a suitable point first).
 */
#define kvm_async_interrupts_enabled() (kvm_async_interrupts_allowed)

/**
 * kvm_irqfds_enabled:
 *
 * Returns: true if we can use irqfds to inject interrupts into
 * a KVM CPU (i.e. the kernel supports irqfds and we are running
 * with a configuration where it is meaningful to use them).
 */
#define kvm_irqfds_enabled() (kvm_irqfds_allowed)

/**
 * kvm_msi_via_irqfd_enabled:
 *
 * Returns: true if we can route a PCI MSI (Message Signaled Interrupt)
 * to a KVM CPU via an irqfd. This requires that the kernel supports
 * this and that we're running in a configuration that permits it.
 */
#define kvm_msi_via_irqfd_enabled() (kvm_msi_via_irqfd_allowed)

/**
 * kvm_gsi_routing_enabled:
 *
 * Returns: true if GSI routing is enabled (i.e. the kernel supports
 * it and we're running in a configuration that permits it).
 */
#define kvm_gsi_routing_enabled() (kvm_gsi_routing_allowed)

/**
 * kvm_readonly_mem_enabled:
 *
 * Returns: true if KVM readonly memory is enabled (i.e. the kernel
 * supports it and we're running in a configuration that permits it).
 */
#define kvm_readonly_mem_enabled() (kvm_readonly_mem_allowed)

#else
#define kvm_enabled()           (0)
#define kvm_irqchip_in_kernel() (false)
#define kvm_async_interrupts_enabled() (false)
#define kvm_irqfds_enabled() (false)
#define kvm_msi_via_irqfd_enabled() (false)
#define kvm_gsi_routing_enabled() (false)
#define kvm_readonly_mem_enabled() (false)
#endif

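/*
 * Usage sketch (illustrative only, not part of this header's API): callers
 * are expected to gate KVM-specific paths on these runtime predicates
 * rather than on #ifdef CONFIG_KVM, along the lines of:
 *
 *     if (kvm_enabled() && kvm_irqfds_enabled()) {
 *         ... let the kernel deliver the interrupt through an irqfd ...
 *     } else {
 *         ... deliver the interrupt from userspace instead ...
 *     }
 *
 * The stubs above keep such code compiling (and trivially dead-code
 * eliminated) when CONFIG_KVM is not set.
 */
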
struct kvm_run;
struct kvm_lapic_state;

typedef struct KVMCapabilityInfo {
    const char *name;
    int value;
} KVMCapabilityInfo;

#define KVM_CAP_INFO(CAP) { "KVM_CAP_" stringify(CAP), KVM_CAP_##CAP }
#define KVM_CAP_LAST_INFO { NULL, 0 }

struct KVMState;
typedef struct KVMState KVMState;
extern KVMState *kvm_state;

/* external API */

int kvm_init(void);

int kvm_has_sync_mmu(void);
int kvm_has_vcpu_events(void);
int kvm_has_robust_singlestep(void);
int kvm_has_debugregs(void);
int kvm_has_xsave(void);
int kvm_has_xcrs(void);
int kvm_has_pit_state2(void);
int kvm_has_many_ioeventfds(void);
int kvm_has_gsi_routing(void);
int kvm_has_intx_set_mask(void);

int kvm_init_vcpu(CPUState *cpu);

#ifdef NEED_CPU_H
int kvm_cpu_exec(CPUArchState *env);

#if !defined(CONFIG_USER_ONLY)
void *kvm_ram_alloc(ram_addr_t size);
void *kvm_arch_ram_alloc(ram_addr_t size);
#endif

void kvm_setup_guest_memory(void *start, size_t size);
void kvm_flush_coalesced_mmio_buffer(void);

int kvm_insert_breakpoint(CPUArchState *current_env, target_ulong addr,
                          target_ulong len, int type);
int kvm_remove_breakpoint(CPUArchState *current_env, target_ulong addr,
                          target_ulong len, int type);
void kvm_remove_all_breakpoints(CPUArchState *current_env);
int kvm_update_guest_debug(CPUArchState *env, unsigned long reinject_trap);
#ifndef _WIN32
int kvm_set_signal_mask(CPUArchState *env, const sigset_t *sigset);
#endif

int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr);
int kvm_on_sigbus(int code, void *addr);

/* internal API */

int kvm_ioctl(KVMState *s, int type, ...);

int kvm_vm_ioctl(KVMState *s, int type, ...);

int kvm_vcpu_ioctl(CPUState *cpu, int type, ...);

/* Arch specific hooks */

extern const KVMCapabilityInfo kvm_arch_required_capabilities[];

void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run);
void kvm_arch_post_run(CPUState *cpu, struct kvm_run *run);

int kvm_arch_handle_exit(CPUState *cpu, struct kvm_run *run);

int kvm_arch_process_async_events(CPUState *cpu);

int kvm_arch_get_registers(CPUState *cpu);

/* state subset only touched by the VCPU itself during runtime */
#define KVM_PUT_RUNTIME_STATE   1
/* state subset modified during VCPU reset */
#define KVM_PUT_RESET_STATE     2
/* full state set, modified during initialization or on vmload */
#define KVM_PUT_FULL_STATE      3

int kvm_arch_put_registers(CPUState *cpu, int level);

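/*
 * Illustrative note (a sketch of how the levels are commonly used, not a
 * contract of this header): kvm_cpu_synchronize_post_reset() is expected to
 * request KVM_PUT_RESET_STATE, kvm_cpu_synchronize_post_init() to request
 * KVM_PUT_FULL_STATE, and the runtime writeback before re-entering the vcpu
 * to use KVM_PUT_RUNTIME_STATE.  Arch backends typically treat the levels
 * as cumulative, e.g.:
 *
 *     write runtime-modified registers unconditionally;
 *     if (level >= KVM_PUT_RESET_STATE)  also write reset-time state;
 *     if (level >= KVM_PUT_FULL_STATE)   also write everything else
 *                                        (e.g. after vmload).
 */
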
int kvm_arch_init(KVMState *s);

int kvm_arch_init_vcpu(CPUState *cpu);

/* Returns VCPU ID to be used on KVM_CREATE_VCPU ioctl() */
unsigned long kvm_arch_vcpu_id(CPUState *cpu);

void kvm_arch_reset_vcpu(CPUState *cpu);

int kvm_arch_on_sigbus_vcpu(CPUState *cpu, int code, void *addr);
int kvm_arch_on_sigbus(int code, void *addr);

void kvm_arch_init_irq_routing(KVMState *s);

int kvm_set_irq(KVMState *s, int irq, int level);
int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg);

void kvm_irqchip_add_irq_route(KVMState *s, int gsi, int irqchip, int pin);

void kvm_put_apic_state(DeviceState *d, struct kvm_lapic_state *kapic);
void kvm_get_apic_state(DeviceState *d, struct kvm_lapic_state *kapic);

struct kvm_guest_debug;
struct kvm_debug_exit_arch;

struct kvm_sw_breakpoint {
    target_ulong pc;
    target_ulong saved_insn;
    int use_count;
    QTAILQ_ENTRY(kvm_sw_breakpoint) entry;
};

QTAILQ_HEAD(kvm_sw_breakpoint_head, kvm_sw_breakpoint);

struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *cpu,
                                                 target_ulong pc);

int kvm_sw_breakpoints_active(CPUState *cpu);

int kvm_arch_insert_sw_breakpoint(CPUState *current_cpu,
                                  struct kvm_sw_breakpoint *bp);
int kvm_arch_remove_sw_breakpoint(CPUState *current_cpu,
                                  struct kvm_sw_breakpoint *bp);
int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type);
int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type);
void kvm_arch_remove_all_hw_breakpoints(void);

void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg);

bool kvm_arch_stop_on_emulation_error(CPUState *cpu);

int kvm_check_extension(KVMState *s, unsigned int extension);

uint32_t kvm_arch_get_supported_cpuid(KVMState *env, uint32_t function,
                                      uint32_t index, int reg);
void kvm_cpu_synchronize_state(CPUArchState *env);

/* generic hooks - to be moved/refactored once there are more users */

static inline void cpu_synchronize_state(CPUArchState *env)
{
    if (kvm_enabled()) {
        kvm_cpu_synchronize_state(env);
    }
}

#if !defined(CONFIG_USER_ONLY)
int kvm_physical_memory_addr_from_host(KVMState *s, void *ram_addr,
                                       hwaddr *phys_addr);
#endif

#endif /* NEED_CPU_H */

void kvm_cpu_synchronize_post_reset(CPUState *cpu);
void kvm_cpu_synchronize_post_init(CPUState *cpu);

static inline void cpu_synchronize_post_reset(CPUState *cpu)
{
    if (kvm_enabled()) {
        kvm_cpu_synchronize_post_reset(cpu);
    }
}

static inline void cpu_synchronize_post_init(CPUState *cpu)
{
    if (kvm_enabled()) {
        kvm_cpu_synchronize_post_init(cpu);
    }
}

int kvm_irqchip_add_msi_route(KVMState *s, MSIMessage msg);
int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg);
void kvm_irqchip_release_virq(KVMState *s, int virq);

int kvm_irqchip_add_irqfd_notifier(KVMState *s, EventNotifier *n, int virq);
int kvm_irqchip_remove_irqfd_notifier(KVMState *s, EventNotifier *n, int virq);
void kvm_pc_gsi_handler(void *opaque, int n, int level);
void kvm_pc_setup_irq_routing(bool pci_enabled);

#endif /* QEMU_KVM_H */
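
/*
 * Purely illustrative trailing note (the caller-side identifiers here are
 * hypothetical): a device that wants its MSIs injected by the kernel
 * typically pairs the routing and notifier calls declared above, e.g.
 *
 *     virq = kvm_irqchip_add_msi_route(kvm_state, msg);
 *     if (virq >= 0) {
 *         kvm_irqchip_add_irqfd_notifier(kvm_state, &notifier, virq);
 *     }
 *
 * and tears the pair down again with kvm_irqchip_remove_irqfd_notifier()
 * followed by kvm_irqchip_release_virq().
 */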