// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay <yaniv@qumranet.com>
 *   Avi Kivity <avi@qumranet.com>
 */

#ifndef __SVM_SVM_H
#define __SVM_SVM_H

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>

#include <asm/svm.h>

static const u32 host_save_user_msrs[] = {
#ifdef CONFIG_X86_64
	MSR_STAR, MSR_LSTAR, MSR_CSTAR, MSR_SYSCALL_MASK, MSR_KERNEL_GS_BASE,
	MSR_FS_BASE,
#endif
	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
	MSR_TSC_AUX,
};

#define NR_HOST_SAVE_USER_MSRS	ARRAY_SIZE(host_save_user_msrs)

#define MAX_DIRECT_ACCESS_MSRS	15
#define MSRPM_OFFSETS	16
extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
extern bool npt_enabled;

enum {
	VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
			    pause filter count */
	VMCB_PERM_MAP,	 /* IOPM Base and MSRPM Base */
	VMCB_ASID,	 /* ASID */
	VMCB_INTR,	 /* int_ctl, int_vector */
	VMCB_NPT,	 /* npt_en, nCR3, gPAT */
	VMCB_CR,	 /* CR0, CR3, CR4, EFER */
	VMCB_DR,	 /* DR6, DR7 */
	VMCB_DT,	 /* GDT, IDT */
	VMCB_SEG,	 /* CS, DS, SS, ES, CPL */
	VMCB_CR2,	 /* CR2 only */
	VMCB_LBR,	 /* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */
	VMCB_AVIC,	 /* AVIC APIC_BAR, AVIC APIC_BACKING_PAGE,
			  * AVIC PHYSICAL_TABLE pointer,
			  * AVIC LOGICAL_TABLE pointer
			  */
	VMCB_DIRTY_MAX,
};

/* TPR and CR2 are always written before VMRUN */
#define VMCB_ALWAYS_DIRTY_MASK	((1U << VMCB_INTR) | (1U << VMCB_CR2))

struct kvm_sev_info {
	bool active;		/* SEV enabled guest */
	unsigned int asid;	/* ASID used for this guest */
	unsigned int handle;	/* SEV firmware handle */
	int fd;			/* SEV device fd */
	unsigned long pages_locked; /* Number of pages locked */
	struct list_head regions_list;  /* List of registered regions */
};

struct kvm_svm {
	struct kvm kvm;

	/* Struct members for AVIC */
	u32 avic_vm_id;
	struct page *avic_logical_id_table_page;
	struct page *avic_physical_id_table_page;
	struct hlist_node hnode;

	struct kvm_sev_info sev_info;
};

struct kvm_vcpu;

struct svm_nested_state {
	struct vmcb *hsave;
	u64 hsave_msr;
	u64 vm_cr_msr;
	u64 vmcb12_gpa;

	/* These are the merged vectors */
	u32 *msrpm;

	/*
	 * A VMRUN has started but has not yet been performed, so
	 * we cannot inject a nested vmexit yet.
	 */
	bool nested_run_pending;

	/* cache for control fields of the guest */
	struct vmcb_control_area ctl;

	bool initialized;
};

struct vcpu_svm {
	struct kvm_vcpu vcpu;
	struct vmcb *vmcb;
	unsigned long vmcb_pa;
	struct svm_cpu_data *svm_data;
	uint64_t asid_generation;
	uint64_t sysenter_esp;
	uint64_t sysenter_eip;
	uint64_t tsc_aux;

	u64 msr_decfg;

	u64 next_rip;

	u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS];
	struct {
		u16 fs;
		u16 gs;
		u16 ldt;
		u64 gs_base;
	} host;

	u64 spec_ctrl;
	/*
	 * Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be
	 * translated into the appropriate L2_CFG bits on the host to
	 * perform speculative control.
	 */
	u64 virt_spec_ctrl;

	u32 *msrpm;

	ulong nmi_iret_rip;

	struct svm_nested_state nested;

	bool nmi_singlestep;
	u64 nmi_singlestep_guest_rflags;

	unsigned int3_injected;
	unsigned long int3_rip;

	/* cached guest cpuid flags for faster access */
	bool nrips_enabled	: 1;

	u32 ldr_reg;
	u32 dfr_reg;
	struct page *avic_backing_page;
	u64 *avic_physical_id_cache;
	bool avic_is_running;

	/*
	 * Per-vcpu list of struct amd_svm_iommu_ir:
	 * This is used mainly to store interrupt remapping information used
	 * when updating the vCPU affinity. This avoids the need to scan for
	 * IRTE and try to match ga_tag in the IOMMU driver.
	 */
	struct list_head ir_list;
	spinlock_t ir_list_lock;

	/* Save desired MSR intercept (read: pass-through) state */
	struct {
		DECLARE_BITMAP(read, MAX_DIRECT_ACCESS_MSRS);
		DECLARE_BITMAP(write, MAX_DIRECT_ACCESS_MSRS);
	} shadow_msr_intercept;
};

struct svm_cpu_data {
	int cpu;

	u64 asid_generation;
	u32 max_asid;
	u32 next_asid;
	u32 min_asid;
	struct kvm_ldttss_desc *tss_desc;

	struct page *save_area;
	struct vmcb *current_vmcb;

	/* index = sev_asid, value = vmcb pointer */
	struct vmcb **sev_vmcbs;
};

DECLARE_PER_CPU(struct svm_cpu_data *, svm_data);

void recalc_intercepts(struct vcpu_svm *svm);

static inline struct kvm_svm *to_kvm_svm(struct kvm *kvm)
{
	return container_of(kvm, struct kvm_svm, kvm);
}

static inline void vmcb_mark_all_dirty(struct vmcb *vmcb)
{
	vmcb->control.clean = 0;
}

static inline void vmcb_mark_all_clean(struct vmcb *vmcb)
{
	vmcb->control.clean = ((1 << VMCB_DIRTY_MAX) - 1)
			       & ~VMCB_ALWAYS_DIRTY_MASK;
}

static inline void vmcb_mark_dirty(struct vmcb *vmcb, int bit)
{
	vmcb->control.clean &= ~(1 << bit);
}
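/*
 * Illustrative sketch, not part of the upstream header: any time software
 * touches a VMCB field that the CPU is allowed to cache, the matching
 * clean bit must be cleared so the next VMRUN reloads that field. A
 * typical caller (modeled on the ASID recycling path in svm.c) pairs the
 * field update with vmcb_mark_dirty():
 *
 *	svm->vmcb->control.asid = sd->next_asid++;
 *	vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
 */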
static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_svm, vcpu);
}

static inline struct vmcb *get_host_vmcb(struct vcpu_svm *svm)
{
	if (is_guest_mode(&svm->vcpu))
		return svm->nested.hsave;
	else
		return svm->vmcb;
}

static inline void vmcb_set_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	__set_bit(bit, (unsigned long *)&control->intercepts);
}

static inline void vmcb_clr_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	__clear_bit(bit, (unsigned long *)&control->intercepts);
}

static inline bool vmcb_is_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	return test_bit(bit, (unsigned long *)&control->intercepts);
}
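/*
 * Illustrative sketch, not taken from the upstream sources: the helpers
 * below all follow the same pattern -- flip a bit in the intercept bitmap
 * of whichever VMCB currently holds L1's controls (see get_host_vmcb())
 * and then call recalc_intercepts() so that, while a nested guest runs,
 * its own requested intercepts are merged back in:
 *
 *	vmcb_set_intercept(&get_host_vmcb(svm)->control, INTERCEPT_HLT);
 *	recalc_intercepts(svm);
 */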
static inline void set_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_READ);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_READ);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_READ);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_READ);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_READ);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_READ);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_READ);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_WRITE);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_WRITE);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_WRITE);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_WRITE);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_WRITE);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_WRITE);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_WRITE);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);

	recalc_intercepts(svm);
}

static inline void clr_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercepts[INTERCEPT_DR] = 0;

	recalc_intercepts(svm);
}

static inline void set_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	WARN_ON_ONCE(bit >= 32);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

	recalc_intercepts(svm);
}

static inline void clr_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	WARN_ON_ONCE(bit >= 32);
	vmcb_clr_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

	recalc_intercepts(svm);
}

static inline void svm_set_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb_set_intercept(&vmcb->control, bit);

	recalc_intercepts(svm);
}

static inline void svm_clr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb_clr_intercept(&vmcb->control, bit);

	recalc_intercepts(svm);
}

static inline bool svm_is_intercept(struct vcpu_svm *svm, int bit)
{
	return vmcb_is_intercept(&svm->vmcb->control, bit);
}

static inline bool vgif_enabled(struct vcpu_svm *svm)
{
	return !!(svm->vmcb->control.int_ctl & V_GIF_ENABLE_MASK);
}

static inline void enable_gif(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		svm->vmcb->control.int_ctl |= V_GIF_MASK;
	else
		svm->vcpu.arch.hflags |= HF_GIF_MASK;
}

static inline void disable_gif(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		svm->vmcb->control.int_ctl &= ~V_GIF_MASK;
	else
		svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
}

static inline bool gif_set(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		return !!(svm->vmcb->control.int_ctl & V_GIF_MASK);
	else
		return !!(svm->vcpu.arch.hflags & HF_GIF_MASK);
}
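/*
 * Illustrative note, an assumption rather than upstream text: GIF gates
 * every interrupt source, so blocking checks such as svm_nmi_blocked()
 * consult it before any finer-grained state, along the lines of:
 *
 *	if (!gif_set(svm))
 *		return true;
 *
 * With vGIF the flag lives in int_ctl and guest STGI/CLGI update it
 * without a VMEXIT; otherwise KVM tracks it in vcpu->arch.hflags as
 * HF_GIF_MASK.
 */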
/* svm.c */
#define MSR_CR3_LEGACY_RESERVED_MASK		0xfe7U
#define MSR_CR3_LEGACY_PAE_RESERVED_MASK	0x7U
#define MSR_CR3_LONG_MBZ_MASK			0xfff0000000000000U
#define MSR_INVALID				0xffffffffU

u32 svm_msrpm_offset(u32 msr);
u32 *svm_vcpu_alloc_msrpm(void);
void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, u32 *msrpm);
void svm_vcpu_free_msrpm(u32 *msrpm);

int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
int svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void svm_flush_tlb(struct kvm_vcpu *vcpu);
void disable_nmi_singlestep(struct vcpu_svm *svm);
bool svm_smi_blocked(struct kvm_vcpu *vcpu);
bool svm_nmi_blocked(struct kvm_vcpu *vcpu);
bool svm_interrupt_blocked(struct kvm_vcpu *vcpu);
void svm_set_gif(struct vcpu_svm *svm, bool value);

/* nested.c */

#define NESTED_EXIT_HOST	0	/* Exit handled on host level */
#define NESTED_EXIT_DONE	1	/* Exit caused nested vmexit  */
#define NESTED_EXIT_CONTINUE	2	/* Further checks needed      */

static inline bool nested_svm_virtualize_tpr(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return is_guest_mode(vcpu) && (svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK);
}

static inline bool nested_exit_on_smi(struct vcpu_svm *svm)
{
	return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_SMI);
}

static inline bool nested_exit_on_intr(struct vcpu_svm *svm)
{
	return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_INTR);
}

static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
{
	return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_NMI);
}

int enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
			 struct vmcb *nested_vmcb);
void svm_leave_nested(struct vcpu_svm *svm);
void svm_free_nested(struct vcpu_svm *svm);
int svm_allocate_nested(struct vcpu_svm *svm);
int nested_svm_vmrun(struct vcpu_svm *svm);
void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb);
int nested_svm_vmexit(struct vcpu_svm *svm);
int nested_svm_exit_handled(struct vcpu_svm *svm);
int nested_svm_check_permissions(struct vcpu_svm *svm);
int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
			       bool has_error_code, u32 error_code);
int nested_svm_exit_special(struct vcpu_svm *svm);
void sync_nested_vmcb_control(struct vcpu_svm *svm);

extern struct kvm_x86_nested_ops svm_nested_ops;

/* avic.c */

#define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK	(0xFF)
#define AVIC_LOGICAL_ID_ENTRY_VALID_BIT			31
#define AVIC_LOGICAL_ID_ENTRY_VALID_MASK		(1 << 31)

#define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK	(0xFFULL)
#define AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK	(0xFFFFFFFFFFULL << 12)
#define AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK		(1ULL << 62)
#define AVIC_PHYSICAL_ID_ENTRY_VALID_MASK		(1ULL << 63)

#define VMCB_AVIC_APIC_BAR_MASK		0xFFFFFFFFFF000ULL

extern int avic;

static inline void avic_update_vapic_bar(struct vcpu_svm *svm, u64 data)
{
	svm->vmcb->control.avic_vapic_bar = data & VMCB_AVIC_APIC_BAR_MASK;
	vmcb_mark_dirty(svm->vmcb, VMCB_AVIC);
}

static inline bool avic_vcpu_is_running(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 *entry = svm->avic_physical_id_cache;

	if (!entry)
		return false;

	return (READ_ONCE(*entry) & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK);
}
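/*
 * Illustrative sketch, an assumption rather than upstream code: each
 * physical ID table entry packs the host APIC ID, the backing-page
 * address and the is-running/valid flags into a single u64, so decoding
 * one with the masks above could look like:
 *
 *	u64 entry = READ_ONCE(*svm->avic_physical_id_cache);
 *	bool valid = entry & AVIC_PHYSICAL_ID_ENTRY_VALID_MASK;
 *	u64 host_apic_id = entry & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK;
 */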
int avic_ga_log_notifier(u32 ga_tag);
void avic_vm_destroy(struct kvm *kvm);
int avic_vm_init(struct kvm *kvm);
void avic_init_vmcb(struct vcpu_svm *svm);
void svm_toggle_avic_for_irq_window(struct kvm_vcpu *vcpu, bool activate);
int avic_incomplete_ipi_interception(struct vcpu_svm *svm);
int avic_unaccelerated_access_interception(struct vcpu_svm *svm);
int avic_init_vcpu(struct vcpu_svm *svm);
void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void avic_vcpu_put(struct kvm_vcpu *vcpu);
void avic_post_state_restore(struct kvm_vcpu *vcpu);
void svm_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
void svm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu);
bool svm_check_apicv_inhibit_reasons(ulong bit);
void svm_pre_update_apicv_exec_ctrl(struct kvm *kvm, bool activate);
void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
void svm_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr);
void svm_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr);
int svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec);
bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu);
int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
		       uint32_t guest_irq, bool set);
void svm_vcpu_blocking(struct kvm_vcpu *vcpu);
void svm_vcpu_unblocking(struct kvm_vcpu *vcpu);

/* sev.c */

extern unsigned int max_sev_asid;

static inline bool sev_guest(struct kvm *kvm)
{
#ifdef CONFIG_KVM_AMD_SEV
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return sev->active;
#else
	return false;
#endif
}

static inline bool svm_sev_enabled(void)
{
	return IS_ENABLED(CONFIG_KVM_AMD_SEV) ? max_sev_asid : 0;
}

void sev_vm_destroy(struct kvm *kvm);
int svm_mem_enc_op(struct kvm *kvm, void __user *argp);
int svm_register_enc_region(struct kvm *kvm,
			    struct kvm_enc_region *range);
int svm_unregister_enc_region(struct kvm *kvm,
			      struct kvm_enc_region *range);
void pre_sev_run(struct vcpu_svm *svm, int cpu);
int __init sev_hardware_setup(void);
void sev_hardware_teardown(void);

#endif