Lines matching refs: svm (cross-reference listing; the leading number on each line is the source line in the file, and "argument"/"local" marks whether svm is a function parameter or a local variable there)

579 static int sev_es_sync_vmsa(struct vcpu_svm *svm)  in sev_es_sync_vmsa()  argument
581 struct sev_es_save_area *save = svm->sev_es.vmsa; in sev_es_sync_vmsa()
584 if (svm->vcpu.guest_debug || (svm->vmcb->save.dr7 & ~DR7_FIXED_1)) in sev_es_sync_vmsa()
593 memcpy(save, &svm->vmcb->save, sizeof(svm->vmcb->save)); in sev_es_sync_vmsa()
596 save->rax = svm->vcpu.arch.regs[VCPU_REGS_RAX]; in sev_es_sync_vmsa()
597 save->rbx = svm->vcpu.arch.regs[VCPU_REGS_RBX]; in sev_es_sync_vmsa()
598 save->rcx = svm->vcpu.arch.regs[VCPU_REGS_RCX]; in sev_es_sync_vmsa()
599 save->rdx = svm->vcpu.arch.regs[VCPU_REGS_RDX]; in sev_es_sync_vmsa()
600 save->rsp = svm->vcpu.arch.regs[VCPU_REGS_RSP]; in sev_es_sync_vmsa()
601 save->rbp = svm->vcpu.arch.regs[VCPU_REGS_RBP]; in sev_es_sync_vmsa()
602 save->rsi = svm->vcpu.arch.regs[VCPU_REGS_RSI]; in sev_es_sync_vmsa()
603 save->rdi = svm->vcpu.arch.regs[VCPU_REGS_RDI]; in sev_es_sync_vmsa()
605 save->r8 = svm->vcpu.arch.regs[VCPU_REGS_R8]; in sev_es_sync_vmsa()
606 save->r9 = svm->vcpu.arch.regs[VCPU_REGS_R9]; in sev_es_sync_vmsa()
607 save->r10 = svm->vcpu.arch.regs[VCPU_REGS_R10]; in sev_es_sync_vmsa()
608 save->r11 = svm->vcpu.arch.regs[VCPU_REGS_R11]; in sev_es_sync_vmsa()
609 save->r12 = svm->vcpu.arch.regs[VCPU_REGS_R12]; in sev_es_sync_vmsa()
610 save->r13 = svm->vcpu.arch.regs[VCPU_REGS_R13]; in sev_es_sync_vmsa()
611 save->r14 = svm->vcpu.arch.regs[VCPU_REGS_R14]; in sev_es_sync_vmsa()
612 save->r15 = svm->vcpu.arch.regs[VCPU_REGS_R15]; in sev_es_sync_vmsa()
614 save->rip = svm->vcpu.arch.regs[VCPU_REGS_RIP]; in sev_es_sync_vmsa()
617 save->xcr0 = svm->vcpu.arch.xcr0; in sev_es_sync_vmsa()
618 save->pkru = svm->vcpu.arch.pkru; in sev_es_sync_vmsa()
619 save->xss = svm->vcpu.arch.ia32_xss; in sev_es_sync_vmsa()
620 save->dr6 = svm->vcpu.arch.dr6; in sev_es_sync_vmsa()
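
The sev_es_sync_vmsa() lines above copy KVM's general-purpose register file (vcpu.arch.regs[]) into the named fields of the SEV-ES save area before the VMSA is encrypted and measured; the guest_debug/DR7 check bails out first because debug state cannot be injected into an encrypted VMSA afterwards. A minimal compilable sketch of the same array-to-named-fields sync, with hypothetical stand-in types (not the kernel's structures):

#include <stdint.h>
#include <stdio.h>

/* Sketch only: names and layouts are stand-ins, not the kernel's. */
enum { REG_RAX, REG_RBX, REG_RCX, REG_RDX, NR_REGS };

struct save_area {                  /* stand-in for sev_es_save_area */
    uint64_t rax, rbx, rcx, rdx;
};

/* Copy the register array into the named save-area fields. */
static void sync_vmsa(struct save_area *save, const uint64_t *regs)
{
    save->rax = regs[REG_RAX];
    save->rbx = regs[REG_RBX];
    save->rcx = regs[REG_RCX];
    save->rdx = regs[REG_RDX];
}

int main(void)
{
    uint64_t regs[NR_REGS] = { 1, 2, 3, 4 };
    struct save_area save = { 0 };

    sync_vmsa(&save, regs);
    printf("rax=%llu rdx=%llu\n",
           (unsigned long long)save.rax, (unsigned long long)save.rdx);
    return 0;
}
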
638 struct vcpu_svm *svm = to_svm(vcpu); in __sev_launch_update_vmsa() local
647 ret = sev_es_sync_vmsa(svm); in __sev_launch_update_vmsa()
656 clflush_cache_range(svm->sev_es.vmsa, PAGE_SIZE); in __sev_launch_update_vmsa()
660 vmsa.address = __sme_pa(svm->sev_es.vmsa); in __sev_launch_update_vmsa()
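
__sev_launch_update_vmsa() flushes the VMSA from the CPU caches (clflush_cache_range()) before handing its system-physical address (__sme_pa()) to the SEV firmware, so the firmware encrypts what is actually in memory rather than racing dirty cache lines. A user-space approximation of flushing a buffer line by line, assuming x86 and a 64-byte cache line (the kernel helper queries the real line size):

#include <stddef.h>
#include <stdint.h>
#include <immintrin.h>

#define CACHE_LINE 64   /* assumption; the kernel reads this from CPUID */

/* Flush every cache line overlapping [va, va + len). */
static void flush_cache_range(void *va, size_t len)
{
    uintptr_t p   = (uintptr_t)va & ~(uintptr_t)(CACHE_LINE - 1);
    uintptr_t end = (uintptr_t)va + len;

    for (; p < end; p += CACHE_LINE)
        _mm_clflush((const void *)p);
    _mm_sfence();               /* order the flushes against later stores */
}

int main(void)
{
    static unsigned char buf[4096];

    buf[0] = 1;
    flush_cache_range(buf, sizeof(buf));
    return buf[0] == 1 ? 0 : 1;
}
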
2386 struct vcpu_svm *svm; in sev_free_vcpu() local
2391 svm = to_svm(vcpu); in sev_free_vcpu()
2394 sev_flush_encrypted_page(vcpu, svm->sev_es.vmsa); in sev_free_vcpu()
2396 __free_page(virt_to_page(svm->sev_es.vmsa)); in sev_free_vcpu()
2398 if (svm->sev_es.ghcb_sa_free) in sev_free_vcpu()
2399 kvfree(svm->sev_es.ghcb_sa); in sev_free_vcpu()
2402 static void dump_ghcb(struct vcpu_svm *svm) in dump_ghcb() argument
2404 struct ghcb *ghcb = svm->sev_es.ghcb; in dump_ghcb()
2415 pr_err("GHCB (GPA=%016llx):\n", svm->vmcb->control.ghcb_gpa); in dump_ghcb()
2427 static void sev_es_sync_to_ghcb(struct vcpu_svm *svm) in sev_es_sync_to_ghcb() argument
2429 struct kvm_vcpu *vcpu = &svm->vcpu; in sev_es_sync_to_ghcb()
2430 struct ghcb *ghcb = svm->sev_es.ghcb; in sev_es_sync_to_ghcb()
2446 static void sev_es_sync_from_ghcb(struct vcpu_svm *svm) in sev_es_sync_from_ghcb() argument
2448 struct vmcb_control_area *control = &svm->vmcb->control; in sev_es_sync_from_ghcb()
2449 struct kvm_vcpu *vcpu = &svm->vcpu; in sev_es_sync_from_ghcb()
2450 struct ghcb *ghcb = svm->sev_es.ghcb; in sev_es_sync_from_ghcb()
2467 BUILD_BUG_ON(sizeof(svm->sev_es.valid_bitmap) != sizeof(ghcb->save.valid_bitmap)); in sev_es_sync_from_ghcb()
2468 memcpy(&svm->sev_es.valid_bitmap, &ghcb->save.valid_bitmap, sizeof(ghcb->save.valid_bitmap)); in sev_es_sync_from_ghcb()
2470 vcpu->arch.regs[VCPU_REGS_RAX] = kvm_ghcb_get_rax_if_valid(svm, ghcb); in sev_es_sync_from_ghcb()
2471 vcpu->arch.regs[VCPU_REGS_RBX] = kvm_ghcb_get_rbx_if_valid(svm, ghcb); in sev_es_sync_from_ghcb()
2472 vcpu->arch.regs[VCPU_REGS_RCX] = kvm_ghcb_get_rcx_if_valid(svm, ghcb); in sev_es_sync_from_ghcb()
2473 vcpu->arch.regs[VCPU_REGS_RDX] = kvm_ghcb_get_rdx_if_valid(svm, ghcb); in sev_es_sync_from_ghcb()
2474 vcpu->arch.regs[VCPU_REGS_RSI] = kvm_ghcb_get_rsi_if_valid(svm, ghcb); in sev_es_sync_from_ghcb()
2476 svm->vmcb->save.cpl = kvm_ghcb_get_cpl_if_valid(svm, ghcb); in sev_es_sync_from_ghcb()
2478 if (kvm_ghcb_xcr0_is_valid(svm)) { in sev_es_sync_from_ghcb()
2489 svm->sev_es.sw_scratch = kvm_ghcb_get_sw_scratch_if_valid(svm, ghcb); in sev_es_sync_from_ghcb()
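
sev_es_sync_from_ghcb() deliberately copies the guest-written valid bitmap once (the BUILD_BUG_ON pins the two layouts to the same size) and then consumes only the registers whose valid bit is set, so the guest cannot make KVM read stale or unset GHCB fields. A sketch of that guarded-accessor pattern with a hypothetical two-field layout:

#include <stdint.h>
#include <stdio.h>

/* Sketch only: hypothetical field indices into the valid bitmap. */
enum ghcb_field { FIELD_RAX, FIELD_RCX };

struct ghcb_save {
    uint64_t rax, rcx;
    uint64_t valid_bitmap;      /* one bit per field, set by the guest */
};

static int field_is_valid(const struct ghcb_save *s, enum ghcb_field f)
{
    return (s->valid_bitmap >> f) & 1;
}

/* Mirror of the kvm_ghcb_get_*_if_valid() idea: 0 unless marked valid. */
static uint64_t get_rax_if_valid(const struct ghcb_save *s)
{
    return field_is_valid(s, FIELD_RAX) ? s->rax : 0;
}

int main(void)
{
    struct ghcb_save s = { .rax = 0x1234, .valid_bitmap = 1u << FIELD_RAX };

    printf("rax=%#llx rcx-valid=%d\n",
           (unsigned long long)get_rax_if_valid(&s),
           field_is_valid(&s, FIELD_RCX));
    return 0;
}
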
2500 static int sev_es_validate_vmgexit(struct vcpu_svm *svm) in sev_es_validate_vmgexit() argument
2502 struct vmcb_control_area *control = &svm->vmcb->control; in sev_es_validate_vmgexit()
2503 struct kvm_vcpu *vcpu = &svm->vcpu; in sev_es_validate_vmgexit()
2514 if (svm->sev_es.ghcb->ghcb_usage) { in sev_es_validate_vmgexit()
2521 if (!kvm_ghcb_sw_exit_code_is_valid(svm) || in sev_es_validate_vmgexit()
2522 !kvm_ghcb_sw_exit_info_1_is_valid(svm) || in sev_es_validate_vmgexit()
2523 !kvm_ghcb_sw_exit_info_2_is_valid(svm)) in sev_es_validate_vmgexit()
2530 if (!kvm_ghcb_rax_is_valid(svm)) in sev_es_validate_vmgexit()
2536 if (!kvm_ghcb_rcx_is_valid(svm)) in sev_es_validate_vmgexit()
2540 if (!kvm_ghcb_rax_is_valid(svm) || in sev_es_validate_vmgexit()
2541 !kvm_ghcb_rcx_is_valid(svm)) in sev_es_validate_vmgexit()
2544 if (!kvm_ghcb_xcr0_is_valid(svm)) in sev_es_validate_vmgexit()
2551 if (!kvm_ghcb_sw_scratch_is_valid(svm)) in sev_es_validate_vmgexit()
2555 if (!kvm_ghcb_rax_is_valid(svm)) in sev_es_validate_vmgexit()
2560 if (!kvm_ghcb_rcx_is_valid(svm)) in sev_es_validate_vmgexit()
2563 if (!kvm_ghcb_rax_is_valid(svm) || in sev_es_validate_vmgexit()
2564 !kvm_ghcb_rdx_is_valid(svm)) in sev_es_validate_vmgexit()
2569 if (!kvm_ghcb_rax_is_valid(svm) || in sev_es_validate_vmgexit()
2570 !kvm_ghcb_cpl_is_valid(svm)) in sev_es_validate_vmgexit()
2578 if (!kvm_ghcb_rax_is_valid(svm) || in sev_es_validate_vmgexit()
2579 !kvm_ghcb_rcx_is_valid(svm) || in sev_es_validate_vmgexit()
2580 !kvm_ghcb_rdx_is_valid(svm)) in sev_es_validate_vmgexit()
2584 if (!kvm_ghcb_rax_is_valid(svm) || in sev_es_validate_vmgexit()
2585 !kvm_ghcb_rcx_is_valid(svm)) in sev_es_validate_vmgexit()
2590 if (!kvm_ghcb_sw_scratch_is_valid(svm)) in sev_es_validate_vmgexit()
2608 svm->sev_es.ghcb->ghcb_usage); in sev_es_validate_vmgexit()
2615 dump_ghcb(svm); in sev_es_validate_vmgexit()
2618 ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 2); in sev_es_validate_vmgexit()
2619 ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, reason); in sev_es_validate_vmgexit()
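
sev_es_validate_vmgexit() enforces, per exit code, that every GHCB field the handler will consume was marked valid; on any miss it dumps the GHCB and reports failure to the guest via sw_exit_info_1 = 2 with a reason code in sw_exit_info_2. The shape is a switch with per-case required-bit checks; a compilable sketch (the field bits are illustrative, and the CPUID-leaf-0xd/XCR0 rule mirrors the checks visible in the listing):

#include <stdint.h>

/* Sketch only: illustrative exit code and field bits. */
enum { EXIT_CPUID = 0x72 };

#define BIT_RAX  (1ull << 0)
#define BIT_RCX  (1ull << 1)
#define BIT_XCR0 (1ull << 2)

struct vmgexit {
    uint64_t exit_code;
    uint64_t rax;
    uint64_t valid;
};

static int has(const struct vmgexit *v, uint64_t bits)
{
    return (v->valid & bits) == bits;
}

/* Return 0 iff every field the exit code consumes is present. */
static int validate_vmgexit(const struct vmgexit *v)
{
    switch (v->exit_code) {
    case EXIT_CPUID:
        /* CPUID needs leaf (RAX) and subleaf (RCX); leaf 0xd also
         * needs XCR0 to size the reported xsave area. */
        if (!has(v, BIT_RAX | BIT_RCX))
            return -1;
        if (v->rax == 0xd && !has(v, BIT_XCR0))
            return -1;
        return 0;
    default:
        return -1;                      /* unrecognized exit code */
    }
}

int main(void)
{
    struct vmgexit v = { EXIT_CPUID, 0, BIT_RAX | BIT_RCX };
    return validate_vmgexit(&v);
}
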
2625 void sev_es_unmap_ghcb(struct vcpu_svm *svm) in sev_es_unmap_ghcb() argument
2627 if (!svm->sev_es.ghcb) in sev_es_unmap_ghcb()
2630 if (svm->sev_es.ghcb_sa_free) { in sev_es_unmap_ghcb()
2636 if (svm->sev_es.ghcb_sa_sync) { in sev_es_unmap_ghcb()
2637 kvm_write_guest(svm->vcpu.kvm, in sev_es_unmap_ghcb()
2638 svm->sev_es.sw_scratch, in sev_es_unmap_ghcb()
2639 svm->sev_es.ghcb_sa, in sev_es_unmap_ghcb()
2640 svm->sev_es.ghcb_sa_len); in sev_es_unmap_ghcb()
2641 svm->sev_es.ghcb_sa_sync = false; in sev_es_unmap_ghcb()
2644 kvfree(svm->sev_es.ghcb_sa); in sev_es_unmap_ghcb()
2645 svm->sev_es.ghcb_sa = NULL; in sev_es_unmap_ghcb()
2646 svm->sev_es.ghcb_sa_free = false; in sev_es_unmap_ghcb()
2649 trace_kvm_vmgexit_exit(svm->vcpu.vcpu_id, svm->sev_es.ghcb); in sev_es_unmap_ghcb()
2651 sev_es_sync_to_ghcb(svm); in sev_es_unmap_ghcb()
2653 kvm_vcpu_unmap(&svm->vcpu, &svm->sev_es.ghcb_map, true); in sev_es_unmap_ghcb()
2654 svm->sev_es.ghcb = NULL; in sev_es_unmap_ghcb()
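
sev_es_unmap_ghcb() is the write-back half of the scratch handling: a separately allocated scratch buffer (ghcb_sa_free) that still owes data to the guest (ghcb_sa_sync) is first written back to the recorded sw_scratch address, then freed; only then is the shared register state synced into the GHCB and the page unmapped. A sketch of that teardown ordering with hypothetical types:

#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

/* Sketch only: stand-in types, not KVM's. */
struct scratch {
    void    *buf;       /* separately allocated copy of guest data */
    size_t   len;
    uint64_t gpa;       /* guest address recorded from sw_scratch  */
    bool     owned;     /* allocated by the host, must be freed    */
    bool     dirty;     /* guest still needs to see host writes    */
};

/* Stub standing in for kvm_write_guest(). */
static void write_guest(uint64_t gpa, const void *src, size_t len)
{
    (void)gpa; (void)src; (void)len;
}

static void teardown_scratch(struct scratch *s)
{
    if (!s->owned)
        return;                 /* scratch lived inside the mapped GHCB */

    if (s->dirty) {             /* write back before freeing */
        write_guest(s->gpa, s->buf, s->len);
        s->dirty = false;
    }
    free(s->buf);
    s->buf = NULL;
    s->owned = false;
}

int main(void)
{
    struct scratch s = { malloc(16), 16, 0x1000, true, true };

    teardown_scratch(&s);
    return s.buf != NULL;
}
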
2657 void pre_sev_run(struct vcpu_svm *svm, int cpu) in pre_sev_run() argument
2660 unsigned int asid = sev_get_asid(svm->vcpu.kvm); in pre_sev_run()
2663 svm->asid = asid; in pre_sev_run()
2671 if (sd->sev_vmcbs[asid] == svm->vmcb && in pre_sev_run()
2672 svm->vcpu.arch.last_vmentry_cpu == cpu) in pre_sev_run()
2675 sd->sev_vmcbs[asid] = svm->vmcb; in pre_sev_run()
2676 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID; in pre_sev_run()
2677 vmcb_mark_dirty(svm->vmcb, VMCB_ASID); in pre_sev_run()
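
pre_sev_run() assigns the guest's SEV ASID to the vCPU and then decides whether a TLB flush is needed: if this physical CPU last ran the same VMCB for this ASID and the vCPU last entered on this CPU, the ASID-tagged TLB entries are still valid and the flush is skipped; otherwise TLB_CONTROL_FLUSH_ASID is requested and the VMCB ASID field marked dirty. A sketch of that per-CPU memoization (structures hypothetical):

#include <stdbool.h>

#define MAX_ASID 16

/* Sketch only: stand-in types, not KVM's. */
struct vmcb { int dummy; };

/* Per-physical-CPU record of which VMCB last ran with each ASID. */
struct cpu_sev_data {
    struct vmcb *last_vmcb[MAX_ASID];
};

/* Return true if a TLB flush must be requested before VMRUN. */
static bool need_asid_flush(struct cpu_sev_data *sd, unsigned int asid,
                            struct vmcb *vmcb, int last_cpu, int this_cpu)
{
    /* Same VMCB on the same physical CPU: its ASID-tagged TLB
     * entries are still valid, so skip the flush. */
    if (sd->last_vmcb[asid] == vmcb && last_cpu == this_cpu)
        return false;

    sd->last_vmcb[asid] = vmcb;         /* remember for the next entry */
    return true;
}

int main(void)
{
    struct cpu_sev_data sd = { { 0 } };
    struct vmcb v;

    bool first  = need_asid_flush(&sd, 1, &v, 0, 0);    /* flush    */
    bool second = need_asid_flush(&sd, 1, &v, 0, 0);    /* no flush */
    return (first && !second) ? 0 : 1;
}
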
2681 static int setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len) in setup_vmgexit_scratch() argument
2683 struct vmcb_control_area *control = &svm->vmcb->control; in setup_vmgexit_scratch()
2688 scratch_gpa_beg = svm->sev_es.sw_scratch; in setup_vmgexit_scratch()
2719 scratch_va = (void *)svm->sev_es.ghcb; in setup_vmgexit_scratch()
2735 if (kvm_read_guest(svm->vcpu.kvm, scratch_gpa_beg, scratch_va, len)) { in setup_vmgexit_scratch()
2749 svm->sev_es.ghcb_sa_sync = sync; in setup_vmgexit_scratch()
2750 svm->sev_es.ghcb_sa_free = true; in setup_vmgexit_scratch()
2753 svm->sev_es.ghcb_sa = scratch_va; in setup_vmgexit_scratch()
2754 svm->sev_es.ghcb_sa_len = len; in setup_vmgexit_scratch()
2759 ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 2); in setup_vmgexit_scratch()
2760 ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, GHCB_ERR_INVALID_SCRATCH_AREA); in setup_vmgexit_scratch()
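
setup_vmgexit_scratch() resolves the guest's sw_scratch pointer one of two ways: if the scratch area lies inside the already-mapped GHCB it is used in place; otherwise a host buffer is allocated and the guest data copied in, with ghcb_sa_free/ghcb_sa_sync recording that it must later be freed and, for host-to-guest transfers, written back. A sketch of the two-path resolution (bounds and helpers hypothetical):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define GHCB_SIZE 4096

/* Sketch only: stand-in types and helpers, not KVM's. */
struct ghcb_ctx {
    uint64_t  ghcb_gpa;     /* guest-physical address of the GHCB      */
    void     *ghcb_hva;     /* host mapping of the GHCB page           */
    void     *sa;           /* resolved scratch area                   */
    size_t    sa_len;
    int       sa_owned;     /* allocated here; free + write back later */
};

/* Stub standing in for kvm_read_guest(). */
static int read_guest(uint64_t gpa, void *dst, size_t len)
{
    (void)gpa;
    memset(dst, 0, len);
    return 0;
}

static int setup_scratch(struct ghcb_ctx *c, uint64_t scratch_gpa, size_t len)
{
    uint64_t ghcb_end = c->ghcb_gpa + GHCB_SIZE;

    if (scratch_gpa >= c->ghcb_gpa && scratch_gpa + len <= ghcb_end) {
        /* Fast path: the scratch area lies inside the mapped GHCB. */
        c->sa = (char *)c->ghcb_hva + (scratch_gpa - c->ghcb_gpa);
        c->sa_owned = 0;
    } else {
        /* Slow path: copy the guest buffer into host memory. */
        c->sa = malloc(len);
        if (!c->sa || read_guest(scratch_gpa, c->sa, len))
            return -1;
        c->sa_owned = 1;
    }
    c->sa_len = len;
    return 0;
}

int main(void)
{
    static char page[GHCB_SIZE];
    struct ghcb_ctx c = { .ghcb_gpa = 0x1000, .ghcb_hva = page };

    return setup_scratch(&c, 0x1010, 64);   /* resolves in place */
}
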
2765 static void set_ghcb_msr_bits(struct vcpu_svm *svm, u64 value, u64 mask, in set_ghcb_msr_bits() argument
2768 svm->vmcb->control.ghcb_gpa &= ~(mask << pos); in set_ghcb_msr_bits()
2769 svm->vmcb->control.ghcb_gpa |= (value & mask) << pos; in set_ghcb_msr_bits()
2772 static u64 get_ghcb_msr_bits(struct vcpu_svm *svm, u64 mask, unsigned int pos) in get_ghcb_msr_bits() argument
2774 return (svm->vmcb->control.ghcb_gpa >> pos) & mask; in get_ghcb_msr_bits()
2777 static void set_ghcb_msr(struct vcpu_svm *svm, u64 value) in set_ghcb_msr() argument
2779 svm->vmcb->control.ghcb_gpa = value; in set_ghcb_msr()
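
set_ghcb_msr_bits() and get_ghcb_msr_bits() treat the 64-bit GHCB MSR as a packed bitfield: a field is named by (mask, pos), cleared with ~(mask << pos), and installed with (value & mask) << pos. The two helpers work unchanged outside the kernel; a runnable demo with an illustrative layout (a low 12-bit field plus a high 32-bit field):

#include <stdint.h>
#include <stdio.h>

/* The two helpers below match the listing's logic; the demo layout
 * (12-bit field at bit 0, 32-bit field at bit 32) is illustrative. */
static void set_msr_bits(uint64_t *msr, uint64_t value, uint64_t mask,
                         unsigned int pos)
{
    *msr &= ~(mask << pos);             /* clear the field       */
    *msr |= (value & mask) << pos;      /* install the new value */
}

static uint64_t get_msr_bits(uint64_t msr, uint64_t mask, unsigned int pos)
{
    return (msr >> pos) & mask;
}

int main(void)
{
    uint64_t msr = 0;

    set_msr_bits(&msr, 0x005, 0xfff, 0);
    set_msr_bits(&msr, 0xdeadbeef, 0xffffffff, 32);
    printf("msr=%#llx low=%#llx high=%#llx\n",
           (unsigned long long)msr,
           (unsigned long long)get_msr_bits(msr, 0xfff, 0),
           (unsigned long long)get_msr_bits(msr, 0xffffffff, 32));
    return 0;
}
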
2782 static int sev_handle_vmgexit_msr_protocol(struct vcpu_svm *svm) in sev_handle_vmgexit_msr_protocol() argument
2784 struct vmcb_control_area *control = &svm->vmcb->control; in sev_handle_vmgexit_msr_protocol()
2785 struct kvm_vcpu *vcpu = &svm->vcpu; in sev_handle_vmgexit_msr_protocol()
2791 trace_kvm_vmgexit_msr_protocol_enter(svm->vcpu.vcpu_id, in sev_handle_vmgexit_msr_protocol()
2796 set_ghcb_msr(svm, GHCB_MSR_SEV_INFO(GHCB_VERSION_MAX, in sev_handle_vmgexit_msr_protocol()
2803 cpuid_fn = get_ghcb_msr_bits(svm, in sev_handle_vmgexit_msr_protocol()
2817 cpuid_reg = get_ghcb_msr_bits(svm, in sev_handle_vmgexit_msr_protocol()
2829 set_ghcb_msr_bits(svm, cpuid_value, in sev_handle_vmgexit_msr_protocol()
2833 set_ghcb_msr_bits(svm, GHCB_MSR_CPUID_RESP, in sev_handle_vmgexit_msr_protocol()
2841 reason_set = get_ghcb_msr_bits(svm, in sev_handle_vmgexit_msr_protocol()
2844 reason_code = get_ghcb_msr_bits(svm, in sev_handle_vmgexit_msr_protocol()
2862 trace_kvm_vmgexit_msr_protocol_exit(svm->vcpu.vcpu_id, in sev_handle_vmgexit_msr_protocol()
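
sev_handle_vmgexit_msr_protocol() implements the pre-GHCB "MSR protocol": the low bits of the GHCB MSR select an operation (SEV_INFO, CPUID request, termination, ...), the bitfield helpers above extract the operands, KVM performs the operation, and the response is packed back into the same MSR. A round-trip sketch; the field positions and operation codes below are my reading of the GHCB spec and should be treated as assumptions, not copied constants:

#include <stdint.h>
#include <stdio.h>

/* Sketch only. Assumed layout, per my reading of the GHCB spec:
 * bits 0-11  info/operation code (0x004 CPUID request, 0x005 response)
 * bits 30-31 cpuid register index (eax/ebx/ecx/edx)
 * bits 32-63 cpuid function (request) or register value (response). */
#define INFO_MASK   0xfffull
#define CPUID_REQ   0x004ull
#define CPUID_RESP  0x005ull
#define REG_POS     30
#define REG_MASK    0x3ull
#define FUNC_POS    32
#define FUNC_MASK   0xffffffffull

static uint64_t handle_msr_protocol(uint64_t msr)
{
    if ((msr & INFO_MASK) != CPUID_REQ)
        return 0;                           /* only CPUID sketched here */

    uint32_t fn  = (uint32_t)((msr >> FUNC_POS) & FUNC_MASK);
    uint32_t reg = (uint32_t)((msr >> REG_POS) & REG_MASK);
    uint32_t val = fn ^ reg;                /* stand-in for real CPUID  */

    /* Pack the response: same register slot, value in the high half. */
    return CPUID_RESP | ((uint64_t)reg << REG_POS) |
           ((uint64_t)val << FUNC_POS);
}

int main(void)
{
    uint64_t req  = CPUID_REQ | (1ull << REG_POS) | (0x8000001full << FUNC_POS);
    uint64_t resp = handle_msr_protocol(req);

    printf("resp=%#llx\n", (unsigned long long)resp);
    return 0;
}
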
2870 struct vcpu_svm *svm = to_svm(vcpu); in sev_handle_vmgexit() local
2871 struct vmcb_control_area *control = &svm->vmcb->control; in sev_handle_vmgexit()
2878 return sev_handle_vmgexit_msr_protocol(svm); in sev_handle_vmgexit()
2887 if (kvm_vcpu_map(vcpu, ghcb_gpa >> PAGE_SHIFT, &svm->sev_es.ghcb_map)) { in sev_handle_vmgexit()
2896 svm->sev_es.ghcb = svm->sev_es.ghcb_map.hva; in sev_handle_vmgexit()
2898 trace_kvm_vmgexit_enter(vcpu->vcpu_id, svm->sev_es.ghcb); in sev_handle_vmgexit()
2900 sev_es_sync_from_ghcb(svm); in sev_handle_vmgexit()
2901 ret = sev_es_validate_vmgexit(svm); in sev_handle_vmgexit()
2905 ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 0); in sev_handle_vmgexit()
2906 ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, 0); in sev_handle_vmgexit()
2911 ret = setup_vmgexit_scratch(svm, true, control->exit_info_2); in sev_handle_vmgexit()
2918 svm->sev_es.ghcb_sa); in sev_handle_vmgexit()
2921 ret = setup_vmgexit_scratch(svm, false, control->exit_info_2); in sev_handle_vmgexit()
2928 svm->sev_es.ghcb_sa); in sev_handle_vmgexit()
2932 svm->nmi_masked = false; in sev_handle_vmgexit()
2949 ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, sev->ap_jump_table); in sev_handle_vmgexit()
2954 ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 2); in sev_handle_vmgexit()
2955 ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, GHCB_ERR_INVALID_INPUT); in sev_handle_vmgexit()
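
sev_handle_vmgexit() itself is the pipeline: fall back to the MSR protocol when no full GHCB is in use, map the GHCB page, sync the valid fields in, validate, zero sw_exit_info_1/2 so success is the default answer, then dispatch on the exit code (MMIO read/write through the scratch area, NMI complete, AP jump table, ...). A condensed dispatch sketch; the error value 2 in sw_exit_info_1 mirrors what the listing itself sets on failure:

#include <stdint.h>

/* Sketch only: condensed stand-in for the VMGEXIT dispatch. */
enum { EXIT_MMIO_READ = 1, EXIT_MMIO_WRITE = 2 };   /* illustrative codes */

struct ghcb_hdr {
    uint64_t exit_code, info1, info2;
};

static int dispatch_vmgexit(struct ghcb_hdr *g)
{
    /* Report success by default; handlers overwrite on failure. */
    g->info1 = 0;
    g->info2 = 0;

    switch (g->exit_code) {
    case EXIT_MMIO_READ:            /* data flows via the scratch area */
    case EXIT_MMIO_WRITE:
        return 0;
    default:
        g->info1 = 2;               /* malformed input, as in the listing */
        return -1;
    }
}

int main(void)
{
    struct ghcb_hdr g = { EXIT_MMIO_READ, ~0ull, ~0ull };

    return dispatch_vmgexit(&g);
}
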
2974 int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in) in sev_es_string_io() argument
2980 if (svm->vmcb->control.exit_info_2 > INT_MAX) in sev_es_string_io()
2983 count = svm->vmcb->control.exit_info_2; in sev_es_string_io()
2987 r = setup_vmgexit_scratch(svm, in, bytes); in sev_es_string_io()
2991 return kvm_sev_es_string_io(&svm->vcpu, size, port, svm->sev_es.ghcb_sa, in sev_es_string_io()
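
sev_es_string_io() derives the repetition count from guest-controlled exit_info_2 and needs count * size bytes of scratch; the INT_MAX check above, together with an overflow-checked multiply in the elided lines (check_mul_overflow(), as I recall), keeps the allocation size from wrapping. A sketch of that guard using the GCC/Clang builtin the kernel helper wraps:

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

/* Sketch only: guard a guest-controlled count * size computation. */
static int io_byte_count(uint64_t exit_info_2, unsigned int size,
                         unsigned int *bytes)
{
    unsigned int count;

    if (exit_info_2 > INT_MAX)          /* reject absurd rep counts */
        return -1;
    count = (unsigned int)exit_info_2;

    if (__builtin_mul_overflow(count, size, bytes))
        return -1;                      /* count * size wrapped */
    return 0;
}

int main(void)
{
    unsigned int bytes;

    if (io_byte_count(0x1000, 4, &bytes) == 0)
        printf("bytes=%u\n", bytes);

    /* INT_MAX reps of 8 bytes must be rejected as an overflow. */
    return io_byte_count(INT_MAX, 8, &bytes) ? 0 : 1;
}
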
2995 static void sev_es_vcpu_after_set_cpuid(struct vcpu_svm *svm) in sev_es_vcpu_after_set_cpuid() argument
2997 struct kvm_vcpu *vcpu = &svm->vcpu; in sev_es_vcpu_after_set_cpuid()
3003 set_msr_interception(vcpu, svm->msrpm, MSR_TSC_AUX, v_tsc_aux, v_tsc_aux); in sev_es_vcpu_after_set_cpuid()
3021 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_XSS, 1, 1); in sev_es_vcpu_after_set_cpuid()
3023 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_XSS, 0, 0); in sev_es_vcpu_after_set_cpuid()
3026 void sev_vcpu_after_set_cpuid(struct vcpu_svm *svm) in sev_vcpu_after_set_cpuid() argument
3028 struct kvm_vcpu *vcpu = &svm->vcpu; in sev_vcpu_after_set_cpuid()
3036 if (sev_es_guest(svm->vcpu.kvm)) in sev_vcpu_after_set_cpuid()
3037 sev_es_vcpu_after_set_cpuid(svm); in sev_vcpu_after_set_cpuid()
3040 static void sev_es_init_vmcb(struct vcpu_svm *svm) in sev_es_init_vmcb() argument
3042 struct vmcb *vmcb = svm->vmcb01.ptr; in sev_es_init_vmcb()
3043 struct kvm_vcpu *vcpu = &svm->vcpu; in sev_es_init_vmcb()
3045 svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ES_ENABLE; in sev_es_init_vmcb()
3054 if (svm->sev_es.vmsa) in sev_es_init_vmcb()
3055 svm->vmcb->control.vmsa_pa = __pa(svm->sev_es.vmsa); in sev_es_init_vmcb()
3058 svm_clr_intercept(svm, INTERCEPT_CR0_READ); in sev_es_init_vmcb()
3059 svm_clr_intercept(svm, INTERCEPT_CR4_READ); in sev_es_init_vmcb()
3060 svm_clr_intercept(svm, INTERCEPT_CR8_READ); in sev_es_init_vmcb()
3061 svm_clr_intercept(svm, INTERCEPT_CR0_WRITE); in sev_es_init_vmcb()
3062 svm_clr_intercept(svm, INTERCEPT_CR4_WRITE); in sev_es_init_vmcb()
3063 svm_clr_intercept(svm, INTERCEPT_CR8_WRITE); in sev_es_init_vmcb()
3065 svm_clr_intercept(svm, INTERCEPT_SELECTIVE_CR0); in sev_es_init_vmcb()
3068 svm_set_intercept(svm, TRAP_EFER_WRITE); in sev_es_init_vmcb()
3069 svm_set_intercept(svm, TRAP_CR0_WRITE); in sev_es_init_vmcb()
3070 svm_set_intercept(svm, TRAP_CR4_WRITE); in sev_es_init_vmcb()
3071 svm_set_intercept(svm, TRAP_CR8_WRITE); in sev_es_init_vmcb()
3077 recalc_intercepts(svm); in sev_es_init_vmcb()
3088 clr_exception_intercept(svm, DB_VECTOR); in sev_es_init_vmcb()
3092 svm_clr_intercept(svm, INTERCEPT_XSETBV); in sev_es_init_vmcb()
3095 set_msr_interception(vcpu, svm->msrpm, MSR_EFER, 1, 1); in sev_es_init_vmcb()
3096 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_CR_PAT, 1, 1); in sev_es_init_vmcb()
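
sev_es_init_vmcb() inverts the usual intercept setup: CR reads and writes can no longer be intercepted and emulated (the register state is encrypted), so those intercept bits are cleared and post-write TRAP_* intercepts are set instead, which report the new value to KVM without requiring instruction decode. A toy sketch of flipping intercept bits in a control word (bit names hypothetical):

#include <stdint.h>
#include <stdio.h>

/* Sketch only: hypothetical intercept word, one bit per event. */
enum { INTERCEPT_CR0_READ, INTERCEPT_CR0_WRITE, TRAP_CR0_WRITE };

static void clr_intercept(uint64_t *ctl, int bit) { *ctl &= ~(1ull << bit); }
static void set_intercept(uint64_t *ctl, int bit) { *ctl |=  (1ull << bit); }

int main(void)
{
    uint64_t ctl = ~0ull;               /* start fully intercepted */

    /* SEV-ES: CR accesses can't be emulated (state is encrypted),
     * so drop the CR intercepts and take post-write TRAP events
     * instead. */
    clr_intercept(&ctl, INTERCEPT_CR0_READ);
    clr_intercept(&ctl, INTERCEPT_CR0_WRITE);
    set_intercept(&ctl, TRAP_CR0_WRITE);

    printf("ctl=%#llx\n", (unsigned long long)ctl);
    return 0;
}
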
3099 void sev_init_vmcb(struct vcpu_svm *svm) in sev_init_vmcb() argument
3101 svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ENABLE; in sev_init_vmcb()
3102 clr_exception_intercept(svm, UD_VECTOR); in sev_init_vmcb()
3108 clr_exception_intercept(svm, GP_VECTOR); in sev_init_vmcb()
3110 if (sev_es_guest(svm->vcpu.kvm)) in sev_init_vmcb()
3111 sev_es_init_vmcb(svm); in sev_init_vmcb()
3114 void sev_es_vcpu_reset(struct vcpu_svm *svm) in sev_es_vcpu_reset() argument
3120 set_ghcb_msr(svm, GHCB_MSR_SEV_INFO(GHCB_VERSION_MAX, in sev_es_vcpu_reset()
3167 struct vcpu_svm *svm = to_svm(vcpu); in sev_vcpu_deliver_sipi_vector() local
3170 if (!svm->sev_es.received_first_sipi) { in sev_vcpu_deliver_sipi_vector()
3171 svm->sev_es.received_first_sipi = true; in sev_vcpu_deliver_sipi_vector()
3180 if (!svm->sev_es.ghcb) in sev_vcpu_deliver_sipi_vector()
3183 ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, 1); in sev_vcpu_deliver_sipi_vector()
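
sev_vcpu_deliver_sipi_vector() cannot rewrite the encrypted VMSA, so it only keeps a one-shot latch: the first SIPI is absorbed (the AP was launched already holding its startup state) and a later SIPI completes the guest's AP-reset-hold VMGEXIT by setting sw_exit_info_2 to 1, provided a GHCB is currently mapped. A sketch of the latch; the in_reset_hold flag is my simplification of the ghcb-mapped check:

#include <stdbool.h>
#include <stdio.h>

/* Sketch only: stand-in for the per-vCPU SEV-ES SIPI state. */
struct ap_state {
    bool received_first_sipi;
    bool in_reset_hold;         /* guest parked in AP-reset-hold */
};

/* Returns true if the SIPI should wake the parked AP. */
static bool deliver_sipi(struct ap_state *ap)
{
    if (!ap->received_first_sipi) {
        /* First SIPI: the launched VMSA already holds the right
         * state, so nothing to patch; just remember we saw it. */
        ap->received_first_sipi = true;
        return false;
    }
    return ap->in_reset_hold;   /* later SIPIs complete the hold */
}

int main(void)
{
    struct ap_state ap = { false, true };

    printf("%d %d\n", deliver_sipi(&ap), deliver_sipi(&ap));   /* 0 1 */
    return 0;
}
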