Lines Matching refs:arch (references to the arch member in arch/x86/kvm/x86.c; each entry lists the source line number, the matching line, and the enclosing function)

359 		vcpu->arch.apf.gfns[i] = ~0;  in kvm_async_pf_hash_reset()
476 return vcpu->arch.apic_base; in kvm_get_apic_base()
586 vcpu->arch.dr6 &= ~DR_TRAP_BITS; in kvm_deliver_exception_payload()
603 vcpu->arch.dr6 |= DR6_ACTIVE_LOW; in kvm_deliver_exception_payload()
604 vcpu->arch.dr6 |= ex->payload; in kvm_deliver_exception_payload()
605 vcpu->arch.dr6 ^= ex->payload & DR6_ACTIVE_LOW; in kvm_deliver_exception_payload()
613 vcpu->arch.dr6 &= ~BIT(12); in kvm_deliver_exception_payload()
616 vcpu->arch.cr2 = ex->payload; in kvm_deliver_exception_payload()
629 struct kvm_queued_exception *ex = &vcpu->arch.exception_vmexit; in kvm_queue_exception_vmexit()
669 if (!vcpu->arch.exception.pending && !vcpu->arch.exception.injected) { in kvm_multiple_exception()
680 vcpu->arch.exception.injected = true; in kvm_multiple_exception()
690 vcpu->arch.exception.pending = true; in kvm_multiple_exception()
691 vcpu->arch.exception.injected = false; in kvm_multiple_exception()
693 vcpu->arch.exception.has_error_code = has_error; in kvm_multiple_exception()
694 vcpu->arch.exception.vector = nr; in kvm_multiple_exception()
695 vcpu->arch.exception.error_code = error_code; in kvm_multiple_exception()
696 vcpu->arch.exception.has_payload = has_payload; in kvm_multiple_exception()
697 vcpu->arch.exception.payload = payload; in kvm_multiple_exception()
700 &vcpu->arch.exception); in kvm_multiple_exception()
705 prev_nr = vcpu->arch.exception.vector; in kvm_multiple_exception()
719 vcpu->arch.exception.injected = false; in kvm_multiple_exception()
720 vcpu->arch.exception.pending = false; in kvm_multiple_exception()
802 fault_mmu = fault->nested_page_fault ? vcpu->arch.mmu : in kvm_inject_emulated_page_fault()
803 vcpu->arch.walk_mmu; in kvm_inject_emulated_page_fault()
820 atomic_inc(&vcpu->arch.nmi_queued); in kvm_inject_nmi()
860 return vcpu->arch.reserved_gpa_bits | rsvd_bits(5, 8) | rsvd_bits(1, 2); in pdptr_rsvd_bits()
868 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in load_pdptrs()
907 vcpu->arch.pdptrs_from_userspace = false; in load_pdptrs()
984 if ((vcpu->arch.efer & EFER_LME) && !is_paging(vcpu) && in kvm_set_cr0()
995 if (!(vcpu->arch.efer & EFER_LME) && (cr0 & X86_CR0_PG) && in kvm_set_cr0()
1020 if (vcpu->arch.guest_state_protected) in kvm_load_guest_xsave_state()
1025 if (vcpu->arch.xcr0 != host_xcr0) in kvm_load_guest_xsave_state()
1026 xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0); in kvm_load_guest_xsave_state()
1029 vcpu->arch.ia32_xss != host_xss) in kvm_load_guest_xsave_state()
1030 wrmsrl(MSR_IA32_XSS, vcpu->arch.ia32_xss); in kvm_load_guest_xsave_state()
1034 vcpu->arch.pkru != vcpu->arch.host_pkru && in kvm_load_guest_xsave_state()
1035 ((vcpu->arch.xcr0 & XFEATURE_MASK_PKRU) || in kvm_load_guest_xsave_state()
1037 write_pkru(vcpu->arch.pkru); in kvm_load_guest_xsave_state()
1043 if (vcpu->arch.guest_state_protected) in kvm_load_host_xsave_state()
1047 ((vcpu->arch.xcr0 & XFEATURE_MASK_PKRU) || in kvm_load_host_xsave_state()
1049 vcpu->arch.pkru = rdpkru(); in kvm_load_host_xsave_state()
1050 if (vcpu->arch.pkru != vcpu->arch.host_pkru) in kvm_load_host_xsave_state()
1051 write_pkru(vcpu->arch.host_pkru); in kvm_load_host_xsave_state()
1056 if (vcpu->arch.xcr0 != host_xcr0) in kvm_load_host_xsave_state()
1060 vcpu->arch.ia32_xss != host_xss) in kvm_load_host_xsave_state()
1070 return vcpu->arch.guest_supported_xcr0 & XFEATURE_MASK_USER_DYNAMIC; in kvm_guest_supported_xfd()
1077 u64 old_xcr0 = vcpu->arch.xcr0; in __kvm_set_xcr()
1093 valid_bits = vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FP; in __kvm_set_xcr()
1112 vcpu->arch.xcr0 = xcr0; in __kvm_set_xcr()
1137 if (cr4 & vcpu->arch.cr4_guest_rsvd_bits) in __kvm_is_valid_cr4()
1225 struct kvm_mmu *mmu = vcpu->arch.mmu; in kvm_invalidate_pcid()
1296 vcpu->arch.cr3 = cr3; in kvm_set_cr3()
1322 vcpu->arch.cr8 = cr8; in kvm_set_cr8()
1332 return vcpu->arch.cr8; in kvm_get_cr8()
1342 vcpu->arch.eff_db[i] = vcpu->arch.db[i]; in kvm_update_dr0123()
1351 dr7 = vcpu->arch.guest_debug_dr7; in kvm_update_dr7()
1353 dr7 = vcpu->arch.dr7; in kvm_update_dr7()
1355 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_BP_ENABLED; in kvm_update_dr7()
1357 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_BP_ENABLED; in kvm_update_dr7()
1375 size_t size = ARRAY_SIZE(vcpu->arch.db); in kvm_set_dr()
1379 vcpu->arch.db[array_index_nospec(dr, size)] = val; in kvm_set_dr()
1381 vcpu->arch.eff_db[dr] = val; in kvm_set_dr()
1387 vcpu->arch.dr6 = (val & DR6_VOLATILE) | kvm_dr6_fixed(vcpu); in kvm_set_dr()
1393 vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1; in kvm_set_dr()
1404 size_t size = ARRAY_SIZE(vcpu->arch.db); in kvm_get_dr()
1408 *val = vcpu->arch.db[array_index_nospec(dr, size)]; in kvm_get_dr()
1412 *val = vcpu->arch.dr6; in kvm_get_dr()
1416 *val = vcpu->arch.dr7; in kvm_get_dr()
1757 u64 old_efer = vcpu->arch.efer; in set_efer()
1769 (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME)) in set_efer()
1774 efer |= vcpu->arch.efer & EFER_LMA; in set_efer()
1809 msr_filter = srcu_dereference(kvm->arch.msr_filter, &kvm->srcu); in kvm_msr_allowed()
2041 if (!(vcpu->kvm->arch.user_space_msr_mask & msr_reason)) in kvm_msr_user_space()
2050 vcpu->arch.complete_userspace_io = completion; in kvm_msr_user_space()
2162 if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(vcpu->arch.apic)) in handle_fastpath_set_x2apic_icr_irqoff()
2169 return kvm_x2apic_icr_write(vcpu->arch.apic, data); in handle_fastpath_set_x2apic_icr_irqoff()
2363 struct kvm_arch *ka = &vcpu->kvm->arch; in kvm_write_system_time()
2372 vcpu->arch.time = system_time; in kvm_write_system_time()
2377 kvm_gpc_activate(&vcpu->arch.pv_time, system_time & ~1ULL, in kvm_write_system_time()
2380 kvm_gpc_deactivate(&vcpu->arch.pv_time); in kvm_write_system_time()
2448 vcpu->arch.tsc_catchup = 1; in set_tsc_khz()
2449 vcpu->arch.tsc_always_catchup = 1; in set_tsc_khz()
2485 &vcpu->arch.virtual_tsc_shift, in kvm_set_tsc_khz()
2486 &vcpu->arch.virtual_tsc_mult); in kvm_set_tsc_khz()
2487 vcpu->arch.virtual_tsc_khz = user_tsc_khz; in kvm_set_tsc_khz()
2507 u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.this_tsc_nsec, in compute_guest_tsc()
2508 vcpu->arch.virtual_tsc_mult, in compute_guest_tsc()
2509 vcpu->arch.virtual_tsc_shift); in compute_guest_tsc()
2510 tsc += vcpu->arch.this_tsc_write; in compute_guest_tsc()
2525 struct kvm_arch *ka = &vcpu->kvm->arch; in kvm_track_tsc_matching()
2578 tsc = kvm_scale_tsc(rdtsc(), vcpu->arch.l1_tsc_scaling_ratio); in kvm_compute_l1_tsc_offset()
2585 return vcpu->arch.l1_tsc_offset + in kvm_read_l1_tsc()
2586 kvm_scale_tsc(host_tsc, vcpu->arch.l1_tsc_scaling_ratio); in kvm_read_l1_tsc()
2618 vcpu->arch.l1_tsc_offset, in kvm_vcpu_write_tsc_offset()
2621 vcpu->arch.l1_tsc_offset = l1_offset; in kvm_vcpu_write_tsc_offset()
2629 vcpu->arch.tsc_offset = kvm_calc_nested_tsc_offset( in kvm_vcpu_write_tsc_offset()
2634 vcpu->arch.tsc_offset = l1_offset; in kvm_vcpu_write_tsc_offset()
2641 vcpu->arch.l1_tsc_scaling_ratio = l1_multiplier; in kvm_vcpu_write_tsc_multiplier()
2645 vcpu->arch.tsc_scaling_ratio = kvm_calc_nested_tsc_multiplier( in kvm_vcpu_write_tsc_multiplier()
2649 vcpu->arch.tsc_scaling_ratio = l1_multiplier; in kvm_vcpu_write_tsc_multiplier()
2678 lockdep_assert_held(&kvm->arch.tsc_write_lock); in __kvm_synchronize_tsc()
2684 kvm->arch.last_tsc_nsec = ns; in __kvm_synchronize_tsc()
2685 kvm->arch.last_tsc_write = tsc; in __kvm_synchronize_tsc()
2686 kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz; in __kvm_synchronize_tsc()
2687 kvm->arch.last_tsc_offset = offset; in __kvm_synchronize_tsc()
2689 vcpu->arch.last_guest_tsc = tsc; in __kvm_synchronize_tsc()
2703 kvm->arch.cur_tsc_generation++; in __kvm_synchronize_tsc()
2704 kvm->arch.cur_tsc_nsec = ns; in __kvm_synchronize_tsc()
2705 kvm->arch.cur_tsc_write = tsc; in __kvm_synchronize_tsc()
2706 kvm->arch.cur_tsc_offset = offset; in __kvm_synchronize_tsc()
2707 kvm->arch.nr_vcpus_matched_tsc = 0; in __kvm_synchronize_tsc()
2708 } else if (vcpu->arch.this_tsc_generation != kvm->arch.cur_tsc_generation) { in __kvm_synchronize_tsc()
2709 kvm->arch.nr_vcpus_matched_tsc++; in __kvm_synchronize_tsc()
2713 vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation; in __kvm_synchronize_tsc()
2714 vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec; in __kvm_synchronize_tsc()
2715 vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write; in __kvm_synchronize_tsc()
2728 raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags); in kvm_synchronize_tsc()
2731 elapsed = ns - kvm->arch.last_tsc_nsec; in kvm_synchronize_tsc()
2733 if (vcpu->arch.virtual_tsc_khz) { in kvm_synchronize_tsc()
2742 u64 tsc_exp = kvm->arch.last_tsc_write + in kvm_synchronize_tsc()
2744 u64 tsc_hz = vcpu->arch.virtual_tsc_khz * 1000LL; in kvm_synchronize_tsc()
2762 vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) { in kvm_synchronize_tsc()
2764 offset = kvm->arch.cur_tsc_offset; in kvm_synchronize_tsc()
2774 raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags); in kvm_synchronize_tsc()
2780 u64 tsc_offset = vcpu->arch.l1_tsc_offset; in adjust_tsc_offset_guest()
2786 if (vcpu->arch.l1_tsc_scaling_ratio != kvm_caps.default_tsc_scaling_ratio) in adjust_tsc_offset_host()
2789 vcpu->arch.l1_tsc_scaling_ratio); in adjust_tsc_offset_host()
2957 struct kvm_arch *ka = &kvm->arch; in pvclock_update_vm_gtod_copy()
2961 lockdep_assert_held(&kvm->arch.tsc_write_lock); in pvclock_update_vm_gtod_copy()
2993 raw_spin_lock_irq(&kvm->arch.tsc_write_lock); in __kvm_start_pvclock_update()
2994 write_seqcount_begin(&kvm->arch.pvclock_sc); in __kvm_start_pvclock_update()
3007 struct kvm_arch *ka = &kvm->arch; in kvm_end_pvclock_update()
3048 struct kvm_arch *ka = &kvm->arch; in __get_kvmclock()
3083 struct kvm_arch *ka = &kvm->arch; in get_kvmclock()
3104 struct kvm_vcpu_arch *vcpu = &v->arch; in kvm_setup_guest_pvclock()
3153 struct kvm_vcpu_arch *vcpu = &v->arch; in kvm_guest_time_update()
3154 struct kvm_arch *ka = &v->kvm->arch; in kvm_guest_time_update()
3215 v->arch.l1_tsc_scaling_ratio); in kvm_guest_time_update()
3226 vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset; in kvm_guest_time_update()
3269 struct kvm *kvm = container_of(ka, struct kvm, arch); in kvmclock_update_fn()
3283 schedule_delayed_work(&kvm->arch.kvmclock_update_work, in kvm_gen_kvmclock_update()
3294 struct kvm *kvm = container_of(ka, struct kvm, arch); in kvmclock_sync_fn()
3299 schedule_delayed_work(&kvm->arch.kvmclock_update_work, 0); in kvmclock_sync_fn()
3300 schedule_delayed_work(&kvm->arch.kvmclock_sync_work, in kvmclock_sync_fn()
3321 return !!(vcpu->arch.msr_hwcr & BIT_ULL(18)); in can_set_mci_status()
3328 u64 mcg_cap = vcpu->arch.mcg_cap; in set_msr_mce()
3336 vcpu->arch.mcg_status = data; in set_msr_mce()
3344 vcpu->arch.mcg_ctl = data; in set_msr_mce()
3358 vcpu->arch.mci_ctl2_banks[offset] = data; in set_msr_mce()
3390 vcpu->arch.mce_banks[offset] = data; in set_msr_mce()
3402 return (vcpu->arch.apf.msr_en_val & mask) == mask; in kvm_pv_async_pf_enabled()
3424 vcpu->arch.apf.msr_en_val = data; in kvm_pv_enable_async_pf()
3432 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa, in kvm_pv_enable_async_pf()
3436 vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS); in kvm_pv_enable_async_pf()
3437 vcpu->arch.apf.delivery_as_pf_vmexit = data & KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT; in kvm_pv_enable_async_pf()
3453 vcpu->arch.apf.msr_int_val = data; in kvm_pv_enable_async_pf_int()
3455 vcpu->arch.apf.vec = data & KVM_ASYNC_PF_VEC_MASK; in kvm_pv_enable_async_pf_int()
3462 kvm_gpc_deactivate(&vcpu->arch.pv_time); in kvmclock_reset()
3463 vcpu->arch.time = 0; in kvmclock_reset()
3524 struct gfn_to_hva_cache *ghc = &vcpu->arch.st.cache; in record_steal_time()
3527 gpa_t gpa = vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS; in record_steal_time()
3536 if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) in record_steal_time()
3579 vcpu->arch.st.preempted = 0; in record_steal_time()
3593 vcpu->arch.st.preempted = 0; in record_steal_time()
3607 vcpu->arch.st.last_steal; in record_steal_time()
3608 vcpu->arch.st.last_steal = current->sched_info.run_delay; in record_steal_time()
3637 if (msr && msr == vcpu->kvm->arch.xen_hvm_config.msr) in kvm_set_msr_common()
3653 vcpu->arch.microcode_version = data; in kvm_set_msr_common()
3658 vcpu->arch.arch_capabilities = data; in kvm_set_msr_common()
3671 if (vcpu->arch.perf_capabilities == data) in kvm_set_msr_common()
3674 vcpu->arch.perf_capabilities = data; in kvm_set_msr_common()
3709 vcpu->arch.msr_hwcr = data; in kvm_set_msr_common()
3725 vcpu->arch.pat = data; in kvm_set_msr_common()
3740 s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr; in kvm_set_msr_common()
3747 vcpu->arch.ia32_tsc_adjust_msr = data; in kvm_set_msr_common()
3751 u64 old_val = vcpu->arch.ia32_misc_enable_msr; in kvm_set_msr_common()
3767 vcpu->arch.ia32_misc_enable_msr = data; in kvm_set_msr_common()
3770 vcpu->arch.ia32_misc_enable_msr = data; in kvm_set_msr_common()
3777 vcpu->arch.smbase = data; in kvm_set_msr_common()
3780 vcpu->arch.msr_ia32_power_ctl = data; in kvm_set_msr_common()
3786 u64 adj = kvm_compute_l1_tsc_offset(vcpu, data) - vcpu->arch.l1_tsc_offset; in kvm_set_msr_common()
3788 vcpu->arch.ia32_tsc_adjust_msr += adj; in kvm_set_msr_common()
3802 vcpu->arch.ia32_xss = data; in kvm_set_msr_common()
3808 vcpu->arch.smi_count = data; in kvm_set_msr_common()
3814 vcpu->kvm->arch.wall_clock = data; in kvm_set_msr_common()
3821 vcpu->kvm->arch.wall_clock = data; in kvm_set_msr_common()
3854 vcpu->arch.apf.pageready_pending = false; in kvm_set_msr_common()
3868 vcpu->arch.st.msr_val = data; in kvm_set_msr_common()
3892 vcpu->arch.msr_kvm_poll_control = data; in kvm_set_msr_common()
3942 vcpu->arch.osvw.length = data; in kvm_set_msr_common()
3947 vcpu->arch.osvw.status = data; in kvm_set_msr_common()
3954 vcpu->arch.msr_platform_info = data; in kvm_set_msr_common()
3961 vcpu->arch.msr_misc_features_enables = data; in kvm_set_msr_common()
3972 fpu_update_guest_xfd(&vcpu->arch.guest_fpu, data); in kvm_set_msr_common()
3982 vcpu->arch.guest_fpu.xfd_err = data; in kvm_set_msr_common()
4006 u64 mcg_cap = vcpu->arch.mcg_cap; in get_msr_mce()
4016 data = vcpu->arch.mcg_cap; in get_msr_mce()
4021 data = vcpu->arch.mcg_ctl; in get_msr_mce()
4024 data = vcpu->arch.mcg_status; in get_msr_mce()
4035 data = vcpu->arch.mci_ctl2_banks[offset]; in get_msr_mce()
4044 data = vcpu->arch.mce_banks[offset]; in get_msr_mce()
4096 msr_info->data = vcpu->arch.microcode_version; in kvm_get_msr_common()
4102 msr_info->data = vcpu->arch.arch_capabilities; in kvm_get_msr_common()
4108 msr_info->data = vcpu->arch.perf_capabilities; in kvm_get_msr_common()
4111 msr_info->data = vcpu->arch.msr_ia32_power_ctl; in kvm_get_msr_common()
4126 offset = vcpu->arch.l1_tsc_offset; in kvm_get_msr_common()
4127 ratio = vcpu->arch.l1_tsc_scaling_ratio; in kvm_get_msr_common()
4129 offset = vcpu->arch.tsc_offset; in kvm_get_msr_common()
4130 ratio = vcpu->arch.tsc_scaling_ratio; in kvm_get_msr_common()
4137 msr_info->data = vcpu->arch.pat; in kvm_get_msr_common()
4169 msr_info->data = (u64)vcpu->arch.ia32_tsc_adjust_msr; in kvm_get_msr_common()
4172 msr_info->data = vcpu->arch.ia32_misc_enable_msr; in kvm_get_msr_common()
4177 msr_info->data = vcpu->arch.smbase; in kvm_get_msr_common()
4180 msr_info->data = vcpu->arch.smi_count; in kvm_get_msr_common()
4189 msr_info->data = vcpu->arch.efer; in kvm_get_msr_common()
4195 msr_info->data = vcpu->kvm->arch.wall_clock; in kvm_get_msr_common()
4201 msr_info->data = vcpu->kvm->arch.wall_clock; in kvm_get_msr_common()
4207 msr_info->data = vcpu->arch.time; in kvm_get_msr_common()
4213 msr_info->data = vcpu->arch.time; in kvm_get_msr_common()
4219 msr_info->data = vcpu->arch.apf.msr_en_val; in kvm_get_msr_common()
4225 msr_info->data = vcpu->arch.apf.msr_int_val; in kvm_get_msr_common()
4237 msr_info->data = vcpu->arch.st.msr_val; in kvm_get_msr_common()
4243 msr_info->data = vcpu->arch.pv_eoi.msr_val; in kvm_get_msr_common()
4249 msr_info->data = vcpu->arch.msr_kvm_poll_control; in kvm_get_msr_common()
4264 msr_info->data = vcpu->arch.ia32_xss; in kvm_get_msr_common()
4307 msr_info->data = vcpu->arch.osvw.length; in kvm_get_msr_common()
4312 msr_info->data = vcpu->arch.osvw.status; in kvm_get_msr_common()
4316 !vcpu->kvm->arch.guest_can_read_msr_platform_info) in kvm_get_msr_common()
4318 msr_info->data = vcpu->arch.msr_platform_info; in kvm_get_msr_common()
4321 msr_info->data = vcpu->arch.msr_misc_features_enables; in kvm_get_msr_common()
4324 msr_info->data = vcpu->arch.msr_hwcr; in kvm_get_msr_common()
4332 msr_info->data = vcpu->arch.guest_fpu.fpstate->xfd; in kvm_get_msr_common()
4339 msr_info->data = vcpu->arch.guest_fpu.xfd_err; in kvm_get_msr_common()
4814 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); in kvm_arch_vcpu_load()
4823 vcpu->arch.host_pkru = read_pkru(); in kvm_arch_vcpu_load()
4826 if (unlikely(vcpu->arch.tsc_offset_adjustment)) { in kvm_arch_vcpu_load()
4827 adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment); in kvm_arch_vcpu_load()
4828 vcpu->arch.tsc_offset_adjustment = 0; in kvm_arch_vcpu_load()
4833 s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 : in kvm_arch_vcpu_load()
4834 rdtsc() - vcpu->arch.last_host_tsc; in kvm_arch_vcpu_load()
4840 vcpu->arch.last_guest_tsc); in kvm_arch_vcpu_load()
4842 vcpu->arch.tsc_catchup = 1; in kvm_arch_vcpu_load()
4852 if (!vcpu->kvm->arch.use_master_clock || vcpu->cpu == -1) in kvm_arch_vcpu_load()
4864 struct gfn_to_hva_cache *ghc = &vcpu->arch.st.cache; in kvm_steal_time_set_preempted()
4868 gpa_t gpa = vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS; in kvm_steal_time_set_preempted()
4877 if (!vcpu->arch.at_instruction_boundary) { in kvm_steal_time_set_preempted()
4883 if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) in kvm_steal_time_set_preempted()
4886 if (vcpu->arch.st.preempted) in kvm_steal_time_set_preempted()
4904 vcpu->arch.st.preempted = KVM_VCPU_PREEMPTED; in kvm_steal_time_set_preempted()
4914 if (!vcpu->arch.guest_state_protected) in kvm_arch_vcpu_put()
4915 vcpu->arch.preempted_in_kernel = !static_call(kvm_x86_get_cpl)(vcpu); in kvm_arch_vcpu_put()
4930 vcpu->arch.last_host_tsc = rdtsc(); in kvm_arch_vcpu_put()
5004 if (vcpu->arch.pending_external_vector != -1) in kvm_vcpu_ioctl_interrupt()
5007 vcpu->arch.pending_external_vector = irq->irq; in kvm_vcpu_ioctl_interrupt()
5024 vcpu->arch.tpr_access_reporting = !!tac->enabled; in vcpu_ioctl_tpr_access_reporting()
5040 vcpu->arch.mcg_cap = mcg_cap; in kvm_vcpu_ioctl_x86_setup_mce()
5043 vcpu->arch.mcg_ctl = ~(u64)0; in kvm_vcpu_ioctl_x86_setup_mce()
5046 vcpu->arch.mce_banks[bank*4] = ~(u64)0; in kvm_vcpu_ioctl_x86_setup_mce()
5048 vcpu->arch.mci_ctl2_banks[bank] = 0; in kvm_vcpu_ioctl_x86_setup_mce()
5077 u64 mcg_cap = vcpu->arch.mcg_cap; in kvm_vcpu_x86_set_ucna()
5082 vcpu->arch.mcg_status = mce->mcg_status; in kvm_vcpu_x86_set_ucna()
5085 !(vcpu->arch.mci_ctl2_banks[mce->bank] & MCI_CTL2_CMCI_EN)) in kvm_vcpu_x86_set_ucna()
5089 kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTCMCI); in kvm_vcpu_x86_set_ucna()
5097 u64 mcg_cap = vcpu->arch.mcg_cap; in kvm_vcpu_ioctl_x86_set_mce()
5099 u64 *banks = vcpu->arch.mce_banks; in kvm_vcpu_ioctl_x86_set_mce()
5114 vcpu->arch.mcg_ctl != ~(u64)0) in kvm_vcpu_ioctl_x86_set_mce()
5123 if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) || in kvm_vcpu_ioctl_x86_set_mce()
5132 vcpu->arch.mcg_status = mce->mcg_status; in kvm_vcpu_ioctl_x86_set_mce()
5166 if (vcpu->arch.exception_vmexit.pending && in kvm_vcpu_ioctl_x86_get_vcpu_events()
5167 !vcpu->arch.exception.pending && in kvm_vcpu_ioctl_x86_get_vcpu_events()
5168 !vcpu->arch.exception.injected) in kvm_vcpu_ioctl_x86_get_vcpu_events()
5169 ex = &vcpu->arch.exception_vmexit; in kvm_vcpu_ioctl_x86_get_vcpu_events()
5171 ex = &vcpu->arch.exception; in kvm_vcpu_ioctl_x86_get_vcpu_events()
5181 if (!vcpu->kvm->arch.exception_payload_enabled && in kvm_vcpu_ioctl_x86_get_vcpu_events()
5201 if (!vcpu->kvm->arch.exception_payload_enabled) in kvm_vcpu_ioctl_x86_get_vcpu_events()
5211 vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft; in kvm_vcpu_ioctl_x86_get_vcpu_events()
5212 events->interrupt.nr = vcpu->arch.interrupt.nr; in kvm_vcpu_ioctl_x86_get_vcpu_events()
5215 events->nmi.injected = vcpu->arch.nmi_injected; in kvm_vcpu_ioctl_x86_get_vcpu_events()
5223 events->smi.pending = vcpu->arch.smi_pending; in kvm_vcpu_ioctl_x86_get_vcpu_events()
5225 !!(vcpu->arch.hflags & HF_SMM_INSIDE_NMI_MASK); in kvm_vcpu_ioctl_x86_get_vcpu_events()
5232 if (vcpu->kvm->arch.exception_payload_enabled) in kvm_vcpu_ioctl_x86_get_vcpu_events()
5234 if (vcpu->kvm->arch.triple_fault_event) { in kvm_vcpu_ioctl_x86_get_vcpu_events()
5252 if (!vcpu->kvm->arch.exception_payload_enabled) in kvm_vcpu_ioctl_x86_set_vcpu_events()
5270 vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) in kvm_vcpu_ioctl_x86_set_vcpu_events()
5283 vcpu->arch.exception_from_userspace = events->exception.pending; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5285 vcpu->arch.exception_vmexit.pending = false; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5287 vcpu->arch.exception.injected = events->exception.injected; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5288 vcpu->arch.exception.pending = events->exception.pending; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5289 vcpu->arch.exception.vector = events->exception.nr; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5290 vcpu->arch.exception.has_error_code = events->exception.has_error_code; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5291 vcpu->arch.exception.error_code = events->exception.error_code; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5292 vcpu->arch.exception.has_payload = events->exception_has_payload; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5293 vcpu->arch.exception.payload = events->exception_payload; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5295 vcpu->arch.interrupt.injected = events->interrupt.injected; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5296 vcpu->arch.interrupt.nr = events->interrupt.nr; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5297 vcpu->arch.interrupt.soft = events->interrupt.soft; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5302 vcpu->arch.nmi_injected = events->nmi.injected; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5304 vcpu->arch.nmi_pending = 0; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5305 atomic_set(&vcpu->arch.nmi_queued, events->nmi.pending); in kvm_vcpu_ioctl_x86_set_vcpu_events()
5313 vcpu->arch.apic->sipi_vector = events->sipi_vector; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5317 if (!!(vcpu->arch.hflags & HF_SMM_MASK) != events->smi.smm) { in kvm_vcpu_ioctl_x86_set_vcpu_events()
5322 vcpu->arch.smi_pending = events->smi.pending; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5326 vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5328 vcpu->arch.hflags &= ~HF_SMM_INSIDE_NMI_MASK; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5339 set_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events); in kvm_vcpu_ioctl_x86_set_vcpu_events()
5341 clear_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events); in kvm_vcpu_ioctl_x86_set_vcpu_events()
5346 if (!vcpu->kvm->arch.triple_fault_event) in kvm_vcpu_ioctl_x86_set_vcpu_events()
5365 memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db)); in kvm_vcpu_ioctl_x86_get_debugregs()
5368 dbgregs->dr7 = vcpu->arch.dr7; in kvm_vcpu_ioctl_x86_get_debugregs()
5382 memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db)); in kvm_vcpu_ioctl_x86_set_debugregs()
5384 vcpu->arch.dr6 = dbgregs->dr6; in kvm_vcpu_ioctl_x86_set_debugregs()
5385 vcpu->arch.dr7 = dbgregs->dr7; in kvm_vcpu_ioctl_x86_set_debugregs()
5407 u64 supported_xcr0 = vcpu->arch.guest_supported_xcr0 | in kvm_vcpu_ioctl_x86_get_xsave2()
5410 if (fpstate_is_confidential(&vcpu->arch.guest_fpu)) in kvm_vcpu_ioctl_x86_get_xsave2()
5413 fpu_copy_guest_fpstate_to_uabi(&vcpu->arch.guest_fpu, state, size, in kvm_vcpu_ioctl_x86_get_xsave2()
5414 supported_xcr0, vcpu->arch.pkru); in kvm_vcpu_ioctl_x86_get_xsave2()
5427 if (fpstate_is_confidential(&vcpu->arch.guest_fpu)) in kvm_vcpu_ioctl_x86_set_xsave()
5430 return fpu_copy_uabi_to_guest_fpstate(&vcpu->arch.guest_fpu, in kvm_vcpu_ioctl_x86_set_xsave()
5433 &vcpu->arch.pkru); in kvm_vcpu_ioctl_x86_set_xsave()
5447 guest_xcrs->xcrs[0].value = vcpu->arch.xcr0; in kvm_vcpu_ioctl_x86_get_xcrs()
5481 if (!vcpu->arch.pv_time.active) in kvm_set_guest_paused()
5483 vcpu->arch.pvclock_set_guest_stopped_request = true; in kvm_set_guest_paused()
5516 if (put_user(vcpu->arch.l1_tsc_offset, uaddr)) in kvm_arch_tsc_get_attr()
5547 raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags); in kvm_arch_tsc_set_attr()
5549 matched = (vcpu->arch.virtual_tsc_khz && in kvm_arch_tsc_set_attr()
5550 kvm->arch.last_tsc_khz == vcpu->arch.virtual_tsc_khz && in kvm_arch_tsc_set_attr()
5551 kvm->arch.last_tsc_offset == offset); in kvm_arch_tsc_set_attr()
5553 tsc = kvm_scale_tsc(rdtsc(), vcpu->arch.l1_tsc_scaling_ratio) + offset; in kvm_arch_tsc_set_attr()
5557 raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags); in kvm_arch_tsc_set_attr()
5639 vcpu->arch.pv_cpuid.enforce = cap->args[0]; in kvm_vcpu_ioctl_enable_cap()
5640 if (vcpu->arch.pv_cpuid.enforce) in kvm_vcpu_ioctl_enable_cap()
5862 if (vcpu->arch.guest_fpu.uabi_size > sizeof(struct kvm_xsave)) in kvm_arch_vcpu_ioctl()
5879 int size = vcpu->arch.guest_fpu.uabi_size; in kvm_arch_vcpu_ioctl()
5892 int size = vcpu->arch.guest_fpu.uabi_size; in kvm_arch_vcpu_ioctl()
5953 r = vcpu->arch.virtual_tsc_khz; in kvm_arch_vcpu_ioctl()
6123 kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages; in kvm_vm_ioctl_set_nr_mmu_pages()
6131 struct kvm_pic *pic = kvm->arch.vpic; in kvm_vm_ioctl_get_irqchip()
6156 struct kvm_pic *pic = kvm->arch.vpic; in kvm_vm_ioctl_set_irqchip()
6186 struct kvm_kpit_state *kps = &kvm->arch.vpit->pit_state; in kvm_vm_ioctl_get_pit()
6199 struct kvm_pit *pit = kvm->arch.vpit; in kvm_vm_ioctl_set_pit()
6211 mutex_lock(&kvm->arch.vpit->pit_state.lock); in kvm_vm_ioctl_get_pit2()
6212 memcpy(ps->channels, &kvm->arch.vpit->pit_state.channels, in kvm_vm_ioctl_get_pit2()
6214 ps->flags = kvm->arch.vpit->pit_state.flags; in kvm_vm_ioctl_get_pit2()
6215 mutex_unlock(&kvm->arch.vpit->pit_state.lock); in kvm_vm_ioctl_get_pit2()
6225 struct kvm_pit *pit = kvm->arch.vpit; in kvm_vm_ioctl_set_pit2()
6245 struct kvm_pit *pit = kvm->arch.vpit; in kvm_vm_ioctl_reinject()
6301 kvm->arch.disabled_quirks = cap->args[0]; in kvm_vm_ioctl_enable_cap()
6319 kvm->arch.irqchip_mode = KVM_IRQCHIP_SPLIT; in kvm_vm_ioctl_enable_cap()
6320 kvm->arch.nr_reserved_ioapic_pins = cap->args[0]; in kvm_vm_ioctl_enable_cap()
6333 kvm->arch.x2apic_format = true; in kvm_vm_ioctl_enable_cap()
6335 kvm->arch.x2apic_broadcast_quirk_disabled = true; in kvm_vm_ioctl_enable_cap()
6345 kvm->arch.pause_in_guest = true; in kvm_vm_ioctl_enable_cap()
6357 kvm->arch.mwait_in_guest = true; in kvm_vm_ioctl_enable_cap()
6359 kvm->arch.hlt_in_guest = true; in kvm_vm_ioctl_enable_cap()
6361 kvm->arch.cstate_in_guest = true; in kvm_vm_ioctl_enable_cap()
6367 kvm->arch.guest_can_read_msr_platform_info = cap->args[0]; in kvm_vm_ioctl_enable_cap()
6371 kvm->arch.exception_payload_enabled = cap->args[0]; in kvm_vm_ioctl_enable_cap()
6375 kvm->arch.triple_fault_event = cap->args[0]; in kvm_vm_ioctl_enable_cap()
6382 kvm->arch.user_space_msr_mask = cap->args[0]; in kvm_vm_ioctl_enable_cap()
6396 kvm->arch.bus_lock_detection_enabled = true; in kvm_vm_ioctl_enable_cap()
6410 kvm->arch.sgx_provisioning_allowed = true; in kvm_vm_ioctl_enable_cap()
6435 kvm->arch.hypercall_exit_enabled = cap->args[0]; in kvm_vm_ioctl_enable_cap()
6442 kvm->arch.exit_on_emulation_error = cap->args[0]; in kvm_vm_ioctl_enable_cap()
6452 kvm->arch.enable_pmu = !(cap->args[0] & KVM_PMU_CAP_DISABLE); in kvm_vm_ioctl_enable_cap()
6463 if (kvm->arch.max_vcpu_ids == cap->args[0]) { in kvm_vm_ioctl_enable_cap()
6465 } else if (!kvm->arch.max_vcpu_ids) { in kvm_vm_ioctl_enable_cap()
6466 kvm->arch.max_vcpu_ids = cap->args[0]; in kvm_vm_ioctl_enable_cap()
6481 kvm->arch.notify_window = cap->args[0] >> 32; in kvm_vm_ioctl_enable_cap()
6482 kvm->arch.notify_vmexit_flags = (u32)cap->args[0]; in kvm_vm_ioctl_enable_cap()
6511 kvm->arch.disable_nx_huge_pages = true; in kvm_vm_ioctl_enable_cap()
6614 old_filter = rcu_replace_pointer(kvm->arch.msr_filter, new_filter, in kvm_vm_ioctl_set_msr_filter()
6691 if (!vcpu->arch.pv_time.active) in kvm_arch_suspend_notifier()
6731 struct kvm_arch *ka = &kvm->arch; in kvm_vm_ioctl_set_clock()
6842 kvm->arch.irqchip_mode = KVM_IRQCHIP_KERNEL; in kvm_arch_vm_ioctl()
6859 if (kvm->arch.vpit) in kvm_arch_vm_ioctl()
6862 kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags); in kvm_arch_vm_ioctl()
6863 if (kvm->arch.vpit) in kvm_arch_vm_ioctl()
6915 if (!kvm->arch.vpit) in kvm_arch_vm_ioctl()
6932 if (!kvm->arch.vpit) in kvm_arch_vm_ioctl()
6941 if (!kvm->arch.vpit) in kvm_arch_vm_ioctl()
6958 if (!kvm->arch.vpit) in kvm_arch_vm_ioctl()
6971 if (!kvm->arch.vpit) in kvm_arch_vm_ioctl()
6982 kvm->arch.bsp_vcpu_id = arg; in kvm_arch_vm_ioctl()
7043 WRITE_ONCE(kvm->arch.default_tsc_khz, user_tsc_khz); in kvm_arch_vm_ioctl()
7049 r = READ_ONCE(kvm->arch.default_tsc_khz); in kvm_arch_vm_ioctl()
7256 !kvm_iodevice_write(vcpu, &vcpu->arch.apic->dev, addr, n, v)) in vcpu_mmio_write()
7276 !kvm_iodevice_read(vcpu, &vcpu->arch.apic->dev, in vcpu_mmio_read()
7305 struct kvm_mmu *mmu = vcpu->arch.mmu; in translate_nested_gpa()
7320 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in kvm_mmu_gva_to_gpa_read()
7330 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in kvm_mmu_gva_to_gpa_write()
7342 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in kvm_mmu_gva_to_gpa_system()
7351 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in kvm_read_guest_virt_helper()
7384 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in kvm_fetch_guest_virt()
7443 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in kvm_write_guest_virt_helper()
7489 vcpu->arch.l1tf_flush_l1d = true; in kvm_write_guest_virt_system()
7547 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in vcpu_mmio_gva_to_gpa()
7557 !permission_fault(vcpu, vcpu->arch.walk_mmu, in vcpu_mmio_gva_to_gpa()
7558 vcpu->arch.mmio_access, 0, access))) { in vcpu_mmio_gva_to_gpa()
7559 *gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT | in vcpu_mmio_gva_to_gpa()
7667 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in emulator_read_write_onepage()
7875 WARN_ON_ONCE(vcpu->arch.pio.count); in emulator_pio_in_out()
7900 vcpu->arch.pio.port = port; in emulator_pio_in_out()
7901 vcpu->arch.pio.in = in; in emulator_pio_in_out()
7902 vcpu->arch.pio.count = count; in emulator_pio_in_out()
7903 vcpu->arch.pio.size = size; in emulator_pio_in_out()
7906 memset(vcpu->arch.pio_data, 0, size * count); in emulator_pio_in_out()
7908 memcpy(vcpu->arch.pio_data, data, size * count); in emulator_pio_in_out()
7931 int size = vcpu->arch.pio.size; in complete_emulator_pio_in()
7932 unsigned int count = vcpu->arch.pio.count; in complete_emulator_pio_in()
7933 memcpy(val, vcpu->arch.pio_data, size * count); in complete_emulator_pio_in()
7934 trace_kvm_pio(KVM_PIO_IN, vcpu->arch.pio.port, size, count, vcpu->arch.pio_data); in complete_emulator_pio_in()
7935 vcpu->arch.pio.count = 0; in complete_emulator_pio_in()
7943 if (vcpu->arch.pio.count) { in emulator_pio_in_emulated()
7991 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); in kvm_emulate_wbinvd_noskip()
7992 on_each_cpu_mask(vcpu->arch.wbinvd_dirty_mask, in kvm_emulate_wbinvd_noskip()
7995 cpumask_clear(vcpu->arch.wbinvd_dirty_mask); in kvm_emulate_wbinvd_noskip()
8043 value = vcpu->arch.cr2; in emulator_get_cr()
8072 vcpu->arch.cr2 = val; in emulator_set_cr()
8256 emul_to_vcpu(ctxt)->arch.halt_request = 1; in emulator_halt()
8407 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in inject_emulated_exception()
8430 vcpu->arch.emulate_ctxt = ctxt; in alloc_emulate_ctxt()
8437 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in init_emulate_ctxt()
8458 vcpu->arch.emulate_regs_need_sync_from_vcpu = false; in init_emulate_ctxt()
8463 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in kvm_inject_realmode_interrupt()
8536 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in prepare_emulation_ctxt_failure_exit()
8567 if (kvm->arch.exit_on_emulation_error || in handle_emulation_failure()
8596 if (!vcpu->arch.mmu->root_role.direct) { in reexecute_instruction()
8629 if (vcpu->arch.mmu->root_role.direct) { in reexecute_instruction()
8633 indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages; in reexecute_instruction()
8663 last_retry_eip = vcpu->arch.last_retry_eip; in retry_instruction()
8664 last_retry_addr = vcpu->arch.last_retry_addr; in retry_instruction()
8679 vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0; in retry_instruction()
8694 vcpu->arch.last_retry_eip = ctxt->eip; in retry_instruction()
8695 vcpu->arch.last_retry_addr = cr2_or_gpa; in retry_instruction()
8697 if (!vcpu->arch.mmu->root_role.direct) in retry_instruction()
8728 kvm_run->debug.arch.dr6 = DR6_BS | DR6_ACTIVE_LOW; in kvm_vcpu_do_singlestep()
8729 kvm_run->debug.arch.pc = kvm_get_linear_rip(vcpu); in kvm_vcpu_do_singlestep()
8730 kvm_run->debug.arch.exception = DB_VECTOR; in kvm_vcpu_do_singlestep()
8804 (vcpu->arch.guest_debug_dr7 & DR7_BP_EN_MASK)) { in kvm_vcpu_check_code_breakpoint()
8808 vcpu->arch.guest_debug_dr7, in kvm_vcpu_check_code_breakpoint()
8809 vcpu->arch.eff_db); in kvm_vcpu_check_code_breakpoint()
8812 kvm_run->debug.arch.dr6 = dr6 | DR6_ACTIVE_LOW; in kvm_vcpu_check_code_breakpoint()
8813 kvm_run->debug.arch.pc = eip; in kvm_vcpu_check_code_breakpoint()
8814 kvm_run->debug.arch.exception = DB_VECTOR; in kvm_vcpu_check_code_breakpoint()
8821 if (unlikely(vcpu->arch.dr7 & DR7_BP_EN_MASK) && in kvm_vcpu_check_code_breakpoint()
8825 vcpu->arch.dr7, in kvm_vcpu_check_code_breakpoint()
8826 vcpu->arch.db); in kvm_vcpu_check_code_breakpoint()
8882 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in x86_decode_emulated_instruction()
8900 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in x86_emulate_instruction()
8906 vcpu->arch.l1tf_flush_l1d = true; in x86_emulate_instruction()
8980 if (vcpu->arch.emulate_regs_need_sync_from_vcpu) { in x86_emulate_instruction()
8981 vcpu->arch.emulate_regs_need_sync_from_vcpu = false; in x86_emulate_instruction()
8991 if (vcpu->arch.mmu->root_role.direct) { in x86_emulate_instruction()
9017 } else if (vcpu->arch.pio.count) { in x86_emulate_instruction()
9018 if (!vcpu->arch.pio.in) { in x86_emulate_instruction()
9020 vcpu->arch.pio.count = 0; in x86_emulate_instruction()
9023 vcpu->arch.complete_userspace_io = complete_emulated_pio; in x86_emulate_instruction()
9032 vcpu->arch.complete_userspace_io = complete_emulated_mmio; in x86_emulate_instruction()
9033 } else if (vcpu->arch.complete_userspace_io) { in x86_emulate_instruction()
9045 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; in x86_emulate_instruction()
9073 vcpu->arch.emulate_regs_need_sync_to_vcpu = true; in x86_emulate_instruction()
9093 vcpu->arch.pio.count = 0; in complete_fast_pio_out_port_0x7e()
9099 vcpu->arch.pio.count = 0; in complete_fast_pio_out()
9101 if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip))) in complete_fast_pio_out()
9122 vcpu->arch.complete_userspace_io = in kvm_fast_pio_out()
9126 vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu); in kvm_fast_pio_out()
9127 vcpu->arch.complete_userspace_io = complete_fast_pio_out; in kvm_fast_pio_out()
9137 BUG_ON(vcpu->arch.pio.count != 1); in complete_fast_pio_in()
9139 if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip))) { in complete_fast_pio_in()
9140 vcpu->arch.pio.count = 0; in complete_fast_pio_in()
9145 val = (vcpu->arch.pio.size < 4) ? kvm_rax_read(vcpu) : 0; in complete_fast_pio_in()
9168 vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu); in kvm_fast_pio_in()
9169 vcpu->arch.complete_userspace_io = complete_fast_pio_in; in kvm_fast_pio_in()
9659 vcpu->arch.mp_state = state; in __kvm_emulate_halt()
9709 if (vcpu->arch.tsc_always_catchup) in kvm_pv_clock_pairing()
9753 return (READ_ONCE(kvm->arch.apicv_inhibit_reasons) == 0); in kvm_apicv_activated()
9759 ulong vm_reasons = READ_ONCE(vcpu->kvm->arch.apicv_inhibit_reasons); in kvm_vcpu_apicv_activated()
9779 unsigned long *inhibits = &kvm->arch.apicv_inhibit_reasons; in kvm_apicv_init()
9781 init_rwsem(&kvm->arch.apicv_update_lock); in kvm_apicv_init()
9801 map = rcu_dereference(vcpu->kvm->arch.apic_map); in kvm_sched_yield()
9904 if (!(vcpu->kvm->arch.hypercall_exit_enabled & (1 << KVM_HC_MAP_GPA_RANGE))) in kvm_emulate_hypercall()
9923 vcpu->arch.complete_userspace_io = complete_hypercall_exit; in kvm_emulate_hypercall()
9996 if (vcpu->arch.apic->apicv_active) in update_cr8_intercept()
9999 if (!vcpu->arch.apic->vapic_addr) in update_cr8_intercept()
10032 vcpu->arch.exception.has_error_code &= is_protmode(vcpu); in kvm_inject_exception()
10034 trace_kvm_inj_exception(vcpu->arch.exception.vector, in kvm_inject_exception()
10035 vcpu->arch.exception.has_error_code, in kvm_inject_exception()
10036 vcpu->arch.exception.error_code, in kvm_inject_exception()
10037 vcpu->arch.exception.injected); in kvm_inject_exception()
10120 if (vcpu->arch.exception.injected) in kvm_check_and_inject_events()
10124 else if (vcpu->arch.nmi_injected) in kvm_check_and_inject_events()
10126 else if (vcpu->arch.interrupt.injected) in kvm_check_and_inject_events()
10134 WARN_ON_ONCE(vcpu->arch.exception.injected && in kvm_check_and_inject_events()
10135 vcpu->arch.exception.pending); in kvm_check_and_inject_events()
10154 WARN_ON_ONCE(vcpu->arch.exception_vmexit.injected || in kvm_check_and_inject_events()
10155 vcpu->arch.exception_vmexit.pending); in kvm_check_and_inject_events()
10164 if (vcpu->arch.exception.pending) { in kvm_check_and_inject_events()
10175 if (exception_type(vcpu->arch.exception.vector) == EXCPT_FAULT) in kvm_check_and_inject_events()
10179 if (vcpu->arch.exception.vector == DB_VECTOR) { in kvm_check_and_inject_events()
10180 kvm_deliver_exception_payload(vcpu, &vcpu->arch.exception); in kvm_check_and_inject_events()
10181 if (vcpu->arch.dr7 & DR7_GD) { in kvm_check_and_inject_events()
10182 vcpu->arch.dr7 &= ~DR7_GD; in kvm_check_and_inject_events()
10189 vcpu->arch.exception.pending = false; in kvm_check_and_inject_events()
10190 vcpu->arch.exception.injected = true; in kvm_check_and_inject_events()
10211 if (vcpu->arch.smi_pending) { in kvm_check_and_inject_events()
10216 vcpu->arch.smi_pending = false; in kvm_check_and_inject_events()
10217 ++vcpu->arch.smi_count; in kvm_check_and_inject_events()
10225 if (vcpu->arch.nmi_pending) { in kvm_check_and_inject_events()
10230 --vcpu->arch.nmi_pending; in kvm_check_and_inject_events()
10231 vcpu->arch.nmi_injected = true; in kvm_check_and_inject_events()
10236 if (vcpu->arch.nmi_pending) in kvm_check_and_inject_events()
10274 WARN_ON_ONCE(vcpu->arch.exception.pending || in kvm_check_and_inject_events()
10275 vcpu->arch.exception_vmexit.pending); in kvm_check_and_inject_events()
10300 if (static_call(kvm_x86_get_nmi_mask)(vcpu) || vcpu->arch.nmi_injected) in process_nmi()
10312 vcpu->arch.nmi_pending += atomic_xchg(&vcpu->arch.nmi_queued, 0); in process_nmi()
10313 vcpu->arch.nmi_pending = min(vcpu->arch.nmi_pending, limit); in process_nmi()
10315 if (vcpu->arch.nmi_pending && in process_nmi()
10317 vcpu->arch.nmi_pending--; in process_nmi()
10319 if (vcpu->arch.nmi_pending) in process_nmi()
10326 return vcpu->arch.nmi_pending + in kvm_get_nr_pending_nmis()
10343 struct kvm_lapic *apic = vcpu->arch.apic; in __kvm_vcpu_update_apicv()
10349 down_read(&vcpu->kvm->arch.apicv_update_lock); in __kvm_vcpu_update_apicv()
10374 up_read(&vcpu->kvm->arch.apicv_update_lock); in __kvm_vcpu_update_apicv()
10394 if (apic_x2apic_mode(vcpu->arch.apic) && in kvm_vcpu_update_apicv()
10406 lockdep_assert_held_write(&kvm->arch.apicv_update_lock); in __kvm_set_or_clear_apicv_inhibit()
10411 old = new = kvm->arch.apicv_inhibit_reasons; in __kvm_set_or_clear_apicv_inhibit()
10429 kvm->arch.apicv_inhibit_reasons = new; in __kvm_set_or_clear_apicv_inhibit()
10438 kvm->arch.apicv_inhibit_reasons = new; in __kvm_set_or_clear_apicv_inhibit()
10448 down_write(&kvm->arch.apicv_update_lock); in kvm_set_or_clear_apicv_inhibit()
10450 up_write(&kvm->arch.apicv_update_lock); in kvm_set_or_clear_apicv_inhibit()
10459 bitmap_zero(vcpu->arch.ioapic_handled_vectors, 256); in vcpu_scan_ioapic()
10464 kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors); in vcpu_scan_ioapic()
10466 kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors); in vcpu_scan_ioapic()
10469 vcpu->arch.load_eoi_exitmap_pending = true; in vcpu_scan_ioapic()
10478 if (!kvm_apic_hw_enabled(vcpu->arch.apic)) in vcpu_load_eoi_exitmap()
10483 vcpu->arch.ioapic_handled_vectors, in vcpu_load_eoi_exitmap()
10490 vcpu, (u64 *)vcpu->arch.ioapic_handled_vectors); in vcpu_load_eoi_exitmap()
10601 vcpu->arch.apf.halted = true; in vcpu_enter_guest()
10618 BUG_ON(vcpu->arch.pending_ioapic_eoi > 255); in vcpu_enter_guest()
10619 if (test_bit(vcpu->arch.pending_ioapic_eoi, in vcpu_enter_guest()
10620 vcpu->arch.ioapic_handled_vectors)) { in vcpu_enter_guest()
10623 vcpu->arch.pending_ioapic_eoi; in vcpu_enter_guest()
10683 if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) { in vcpu_enter_guest()
10766 if (vcpu->arch.guest_fpu.xfd_err) in vcpu_enter_guest()
10767 wrmsrl(MSR_IA32_XFD_ERR, vcpu->arch.guest_fpu.xfd_err); in vcpu_enter_guest()
10769 if (unlikely(vcpu->arch.switch_db_regs)) { in vcpu_enter_guest()
10771 set_debugreg(vcpu->arch.eff_db[0], 0); in vcpu_enter_guest()
10772 set_debugreg(vcpu->arch.eff_db[1], 1); in vcpu_enter_guest()
10773 set_debugreg(vcpu->arch.eff_db[2], 2); in vcpu_enter_guest()
10774 set_debugreg(vcpu->arch.eff_db[3], 3); in vcpu_enter_guest()
10813 if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) { in vcpu_enter_guest()
10830 vcpu->arch.last_vmentry_cpu = vcpu->cpu; in vcpu_enter_guest()
10831 vcpu->arch.last_guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc()); in vcpu_enter_guest()
10841 if (vcpu->arch.xfd_no_write_intercept) in vcpu_enter_guest()
10846 if (vcpu->arch.guest_fpu.xfd_err) in vcpu_enter_guest()
10884 if (unlikely(vcpu->arch.tsc_always_catchup)) in vcpu_enter_guest()
10887 if (vcpu->arch.apic_attention) in vcpu_enter_guest()
10897 if (unlikely(vcpu->arch.apic_attention)) in vcpu_enter_guest()
10921 if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) in vcpu_block()
10952 switch(vcpu->arch.mp_state) { in vcpu_block()
10955 vcpu->arch.pv.pv_unhalted = false; in vcpu_block()
10956 vcpu->arch.mp_state = in vcpu_block()
10960 vcpu->arch.apf.halted = false; in vcpu_block()
10973 return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE && in kvm_vcpu_running()
10974 !vcpu->arch.apf.halted); in kvm_vcpu_running()
10982 vcpu->arch.l1tf_flush_l1d = true; in vcpu_run()
10991 vcpu->arch.at_instruction_boundary = false; in vcpu_run()
11035 BUG_ON(!vcpu->arch.pio.count); in complete_emulated_pio()
11099 vcpu->arch.complete_userspace_io = complete_emulated_mmio; in complete_emulated_mmio()
11107 fpu_swap_kvm_fpstate(&vcpu->arch.guest_fpu, true); in kvm_load_guest_fpu()
11114 fpu_swap_kvm_fpstate(&vcpu->arch.guest_fpu, false); in kvm_put_guest_fpu()
11121 struct kvm_queued_exception *ex = &vcpu->arch.exception; in kvm_arch_vcpu_ioctl_run()
11131 if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) { in kvm_arch_vcpu_ioctl_run()
11188 if (vcpu->arch.exception_from_userspace && is_guest_mode(vcpu) && in kvm_arch_vcpu_ioctl_run()
11197 vcpu->arch.exception_from_userspace = false; in kvm_arch_vcpu_ioctl_run()
11199 if (unlikely(vcpu->arch.complete_userspace_io)) { in kvm_arch_vcpu_ioctl_run()
11200 int (*cui)(struct kvm_vcpu *) = vcpu->arch.complete_userspace_io; in kvm_arch_vcpu_ioctl_run()
11201 vcpu->arch.complete_userspace_io = NULL; in kvm_arch_vcpu_ioctl_run()
11206 WARN_ON_ONCE(vcpu->arch.pio.count); in kvm_arch_vcpu_ioctl_run()
11235 if (vcpu->arch.emulate_regs_need_sync_to_vcpu) { in __get_regs()
11243 emulator_writeback_register_cache(vcpu->arch.emulate_ctxt); in __get_regs()
11244 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; in __get_regs()
11279 vcpu->arch.emulate_regs_need_sync_from_vcpu = true; in __set_regs()
11280 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; in __set_regs()
11304 vcpu->arch.exception.pending = false; in __set_regs()
11305 vcpu->arch.exception_vmexit.pending = false; in __set_regs()
11322 if (vcpu->arch.guest_state_protected) in __get_sregs_common()
11342 sregs->cr2 = vcpu->arch.cr2; in __get_sregs_common()
11349 sregs->efer = vcpu->arch.efer; in __get_sregs_common()
11357 if (vcpu->arch.guest_state_protected) in __get_sregs()
11360 if (vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft) in __get_sregs()
11361 set_bit(vcpu->arch.interrupt.nr, in __get_sregs()
11371 if (vcpu->arch.guest_state_protected) in __get_sregs2()
11404 if ((vcpu->arch.mp_state == KVM_MP_STATE_HALTED || in kvm_arch_vcpu_ioctl_get_mpstate()
11405 vcpu->arch.mp_state == KVM_MP_STATE_AP_RESET_HOLD) && in kvm_arch_vcpu_ioctl_get_mpstate()
11406 vcpu->arch.pv.pv_unhalted) in kvm_arch_vcpu_ioctl_get_mpstate()
11409 mp_state->mp_state = vcpu->arch.mp_state; in kvm_arch_vcpu_ioctl_get_mpstate()
11448 if ((!kvm_apic_init_sipi_allowed(vcpu) || vcpu->arch.smi_pending) && in kvm_arch_vcpu_ioctl_set_mpstate()
11454 vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED; in kvm_arch_vcpu_ioctl_set_mpstate()
11455 set_bit(KVM_APIC_SIPI, &vcpu->arch.apic->pending_events); in kvm_arch_vcpu_ioctl_set_mpstate()
11457 vcpu->arch.mp_state = mp_state->mp_state; in kvm_arch_vcpu_ioctl_set_mpstate()
11469 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in kvm_task_switch()
11529 if (vcpu->arch.guest_state_protected) in __set_sregs_common()
11539 vcpu->arch.cr2 = sregs->cr2; in __set_sregs_common()
11541 vcpu->arch.cr3 = sregs->cr3; in __set_sregs_common()
11547 *mmu_reset_needed |= vcpu->arch.efer != sregs->efer; in __set_sregs_common()
11552 vcpu->arch.cr0 = sregs->cr0; in __set_sregs_common()
11582 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; in __set_sregs_common()
11622 if (valid_pdptrs && (!pae || vcpu->arch.guest_state_protected)) in __set_sregs2()
11636 vcpu->arch.pdptrs_from_userspace = true; in __set_sregs2()
11663 down_write(&kvm->arch.apicv_update_lock); in kvm_arch_vcpu_guestdbg_update_apicv_inhibit()
11672 up_write(&kvm->arch.apicv_update_lock); in kvm_arch_vcpu_guestdbg_update_apicv_inhibit()
11681 if (vcpu->arch.guest_state_protected) in kvm_arch_vcpu_ioctl_set_guest_debug()
11708 vcpu->arch.eff_db[i] = dbg->arch.debugreg[i]; in kvm_arch_vcpu_ioctl_set_guest_debug()
11709 vcpu->arch.guest_debug_dr7 = dbg->arch.debugreg[7]; in kvm_arch_vcpu_ioctl_set_guest_debug()
11712 vcpu->arch.eff_db[i] = vcpu->arch.db[i]; in kvm_arch_vcpu_ioctl_set_guest_debug()
11717 vcpu->arch.singlestep_rip = kvm_get_linear_rip(vcpu); in kvm_arch_vcpu_ioctl_set_guest_debug()
11764 if (fpstate_is_confidential(&vcpu->arch.guest_fpu)) in kvm_arch_vcpu_ioctl_get_fpu()
11769 fxsave = &vcpu->arch.guest_fpu.fpstate->regs.fxsave; in kvm_arch_vcpu_ioctl_get_fpu()
11787 if (fpstate_is_confidential(&vcpu->arch.guest_fpu)) in kvm_arch_vcpu_ioctl_set_fpu()
11792 fxsave = &vcpu->arch.guest_fpu.fpstate->regs.fxsave; in kvm_arch_vcpu_ioctl_set_fpu()
11856 if (!kvm->arch.max_vcpu_ids) in kvm_arch_vcpu_precreate()
11857 kvm->arch.max_vcpu_ids = KVM_MAX_VCPU_IDS; in kvm_arch_vcpu_precreate()
11859 if (id >= kvm->arch.max_vcpu_ids) in kvm_arch_vcpu_precreate()
11870 vcpu->arch.last_vmentry_cpu = -1; in kvm_arch_vcpu_create()
11871 vcpu->arch.regs_avail = ~0; in kvm_arch_vcpu_create()
11872 vcpu->arch.regs_dirty = ~0; in kvm_arch_vcpu_create()
11874 kvm_gpc_init(&vcpu->arch.pv_time, vcpu->kvm, vcpu, KVM_HOST_USES_PFN); in kvm_arch_vcpu_create()
11877 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; in kvm_arch_vcpu_create()
11879 vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED; in kvm_arch_vcpu_create()
11901 vcpu->arch.apic->apicv_active = true; in kvm_arch_vcpu_create()
11912 vcpu->arch.pio_data = page_address(page); in kvm_arch_vcpu_create()
11914 vcpu->arch.mce_banks = kcalloc(KVM_MAX_MCE_BANKS * 4, sizeof(u64), in kvm_arch_vcpu_create()
11916 vcpu->arch.mci_ctl2_banks = kcalloc(KVM_MAX_MCE_BANKS, sizeof(u64), in kvm_arch_vcpu_create()
11918 if (!vcpu->arch.mce_banks || !vcpu->arch.mci_ctl2_banks) in kvm_arch_vcpu_create()
11920 vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS; in kvm_arch_vcpu_create()
11922 if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, in kvm_arch_vcpu_create()
11929 if (!fpu_alloc_guest_fpstate(&vcpu->arch.guest_fpu)) { in kvm_arch_vcpu_create()
11934 vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu); in kvm_arch_vcpu_create()
11935 vcpu->arch.reserved_gpa_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu); in kvm_arch_vcpu_create()
11937 vcpu->arch.pat = MSR_IA32_CR_PAT_DEFAULT; in kvm_arch_vcpu_create()
11941 vcpu->arch.perf_capabilities = kvm_caps.supported_perf_cap; in kvm_arch_vcpu_create()
11944 vcpu->arch.pending_external_vector = -1; in kvm_arch_vcpu_create()
11945 vcpu->arch.preempted_in_kernel = false; in kvm_arch_vcpu_create()
11948 vcpu->arch.hv_root_tdp = INVALID_PAGE; in kvm_arch_vcpu_create()
11955 vcpu->arch.arch_capabilities = kvm_get_arch_capabilities(); in kvm_arch_vcpu_create()
11956 vcpu->arch.msr_platform_info = MSR_PLATFORM_INFO_CPUID_FAULT; in kvm_arch_vcpu_create()
11960 kvm_set_tsc_khz(vcpu, vcpu->kvm->arch.default_tsc_khz); in kvm_arch_vcpu_create()
11967 fpu_free_guest_fpstate(&vcpu->arch.guest_fpu); in kvm_arch_vcpu_create()
11969 kmem_cache_free(x86_emulator_cache, vcpu->arch.emulate_ctxt); in kvm_arch_vcpu_create()
11971 free_cpumask_var(vcpu->arch.wbinvd_dirty_mask); in kvm_arch_vcpu_create()
11973 kfree(vcpu->arch.mce_banks); in kvm_arch_vcpu_create()
11974 kfree(vcpu->arch.mci_ctl2_banks); in kvm_arch_vcpu_create()
11975 free_page((unsigned long)vcpu->arch.pio_data); in kvm_arch_vcpu_create()
11994 vcpu->arch.msr_kvm_poll_control = 1; in kvm_arch_vcpu_postcreate()
11999 schedule_delayed_work(&kvm->arch.kvmclock_sync_work, in kvm_arch_vcpu_postcreate()
12011 kmem_cache_free(x86_emulator_cache, vcpu->arch.emulate_ctxt); in kvm_arch_vcpu_destroy()
12012 free_cpumask_var(vcpu->arch.wbinvd_dirty_mask); in kvm_arch_vcpu_destroy()
12013 fpu_free_guest_fpstate(&vcpu->arch.guest_fpu); in kvm_arch_vcpu_destroy()
12018 kfree(vcpu->arch.mce_banks); in kvm_arch_vcpu_destroy()
12019 kfree(vcpu->arch.mci_ctl2_banks); in kvm_arch_vcpu_destroy()
12024 free_page((unsigned long)vcpu->arch.pio_data); in kvm_arch_vcpu_destroy()
12025 kvfree(vcpu->arch.cpuid_entries); in kvm_arch_vcpu_destroy()
12058 vcpu->arch.hflags = 0; in kvm_vcpu_reset()
12060 vcpu->arch.smi_pending = 0; in kvm_vcpu_reset()
12061 vcpu->arch.smi_count = 0; in kvm_vcpu_reset()
12062 atomic_set(&vcpu->arch.nmi_queued, 0); in kvm_vcpu_reset()
12063 vcpu->arch.nmi_pending = 0; in kvm_vcpu_reset()
12064 vcpu->arch.nmi_injected = false; in kvm_vcpu_reset()
12068 memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db)); in kvm_vcpu_reset()
12070 vcpu->arch.dr6 = DR6_ACTIVE_LOW; in kvm_vcpu_reset()
12071 vcpu->arch.dr7 = DR7_FIXED_1; in kvm_vcpu_reset()
12074 vcpu->arch.cr2 = 0; in kvm_vcpu_reset()
12077 vcpu->arch.apf.msr_en_val = 0; in kvm_vcpu_reset()
12078 vcpu->arch.apf.msr_int_val = 0; in kvm_vcpu_reset()
12079 vcpu->arch.st.msr_val = 0; in kvm_vcpu_reset()
12085 vcpu->arch.apf.halted = false; in kvm_vcpu_reset()
12087 if (vcpu->arch.guest_fpu.fpstate && kvm_mpx_supported()) { in kvm_vcpu_reset()
12088 struct fpstate *fpstate = vcpu->arch.guest_fpu.fpstate; in kvm_vcpu_reset()
12106 vcpu->arch.smbase = 0x30000; in kvm_vcpu_reset()
12108 vcpu->arch.msr_misc_features_enables = 0; in kvm_vcpu_reset()
12109 vcpu->arch.ia32_misc_enable_msr = MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL | in kvm_vcpu_reset()
12117 memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs)); in kvm_vcpu_reset()
12135 vcpu->arch.cr3 = 0; in kvm_vcpu_reset()
12219 if (stable && vcpu->arch.last_host_tsc > local_tsc) { in kvm_arch_hardware_enable()
12221 if (vcpu->arch.last_host_tsc > max_tsc) in kvm_arch_hardware_enable()
12222 max_tsc = vcpu->arch.last_host_tsc; in kvm_arch_hardware_enable()
12268 kvm->arch.backwards_tsc_observed = true; in kvm_arch_hardware_enable()
12270 vcpu->arch.tsc_offset_adjustment += delta_cyc; in kvm_arch_hardware_enable()
12271 vcpu->arch.last_host_tsc = local_tsc; in kvm_arch_hardware_enable()
12281 kvm->arch.last_tsc_nsec = 0; in kvm_arch_hardware_enable()
12282 kvm->arch.last_tsc_write = 0; in kvm_arch_hardware_enable()
12297 return vcpu->kvm->arch.bsp_vcpu_id == vcpu->vcpu_id; in kvm_vcpu_is_reset_bsp()
12302 return (vcpu->arch.apic_base & MSR_IA32_APICBASE_BSP) != 0; in kvm_vcpu_is_bsp()
12312 vcpu->arch.l1tf_flush_l1d = true; in kvm_arch_sched_in()
12345 INIT_HLIST_HEAD(&kvm->arch.mask_notifier_list); in kvm_arch_init_vm()
12346 INIT_LIST_HEAD(&kvm->arch.assigned_dev_head); in kvm_arch_init_vm()
12347 atomic_set(&kvm->arch.noncoherent_dma_count, 0); in kvm_arch_init_vm()
12350 set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap); in kvm_arch_init_vm()
12353 &kvm->arch.irq_sources_bitmap); in kvm_arch_init_vm()
12355 raw_spin_lock_init(&kvm->arch.tsc_write_lock); in kvm_arch_init_vm()
12356 mutex_init(&kvm->arch.apic_map_lock); in kvm_arch_init_vm()
12357 seqcount_raw_spinlock_init(&kvm->arch.pvclock_sc, &kvm->arch.tsc_write_lock); in kvm_arch_init_vm()
12358 kvm->arch.kvmclock_offset = -get_kvmclock_base_ns(); in kvm_arch_init_vm()
12360 raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags); in kvm_arch_init_vm()
12362 raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags); in kvm_arch_init_vm()
12364 kvm->arch.default_tsc_khz = max_tsc_khz ? : tsc_khz; in kvm_arch_init_vm()
12365 kvm->arch.guest_can_read_msr_platform_info = true; in kvm_arch_init_vm()
12366 kvm->arch.enable_pmu = enable_pmu; in kvm_arch_init_vm()
12369 spin_lock_init(&kvm->arch.hv_root_tdp_lock); in kvm_arch_init_vm()
12370 kvm->arch.hv_root_tdp = INVALID_PAGE; in kvm_arch_init_vm()
12373 INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn); in kvm_arch_init_vm()
12374 INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn); in kvm_arch_init_vm()
12414 cancel_delayed_work_sync(&kvm->arch.kvmclock_sync_work); in kvm_arch_sync_events()
12415 cancel_delayed_work_sync(&kvm->arch.kvmclock_update_work); in kvm_arch_sync_events()
12517 kvm_free_msr_filter(srcu_dereference_check(kvm->arch.msr_filter, &kvm->srcu, 1)); in kvm_arch_destroy_vm()
12521 kvfree(rcu_dereference_check(kvm->arch.apic_map, 1)); in kvm_arch_destroy_vm()
12522 kfree(srcu_dereference_check(kvm->arch.pmu_event_filter, &kvm->srcu, 1)); in kvm_arch_destroy_vm()
12534 kvfree(slot->arch.rmap[i]); in memslot_rmap_free()
12535 slot->arch.rmap[i] = NULL; in memslot_rmap_free()
12546 kvfree(slot->arch.lpage_info[i - 1]); in kvm_arch_free_memslot()
12547 slot->arch.lpage_info[i - 1] = NULL; in kvm_arch_free_memslot()
12555 const int sz = sizeof(*slot->arch.rmap[0]); in memslot_rmap_alloc()
12562 if (slot->arch.rmap[i]) in memslot_rmap_alloc()
12565 slot->arch.rmap[i] = __vcalloc(lpages, sz, GFP_KERNEL_ACCOUNT); in memslot_rmap_alloc()
12566 if (!slot->arch.rmap[i]) { in memslot_rmap_alloc()
12586 memset(&slot->arch, 0, sizeof(slot->arch)); in kvm_alloc_memslot_metadata()
12606 slot->arch.lpage_info[i - 1] = linfo; in kvm_alloc_memslot_metadata()
12634 kvfree(slot->arch.lpage_info[i - 1]); in kvm_alloc_memslot_metadata()
12635 slot->arch.lpage_info[i - 1] = NULL; in kvm_alloc_memslot_metadata()
12676 memcpy(&new->arch, &old->arch, sizeof(old->arch)); in kvm_arch_prepare_memory_region()
12825 if (!kvm->arch.n_requested_mmu_pages && in kvm_arch_commit_memory_region()
12856 if (vcpu->arch.pv.pv_unhalted) in kvm_vcpu_has_events()
12863 (vcpu->arch.nmi_pending && in kvm_vcpu_has_events()
12869 (vcpu->arch.smi_pending && in kvm_vcpu_has_events()
12912 if (READ_ONCE(vcpu->arch.pv.pv_unhalted)) in kvm_arch_dy_runnable()
12927 if (vcpu->arch.guest_state_protected) in kvm_arch_vcpu_in_kernel()
12930 return vcpu->arch.preempted_in_kernel; in kvm_arch_vcpu_in_kernel()
12951 if (vcpu->arch.guest_state_protected) in kvm_get_linear_rip()
12981 kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip)) in __kvm_set_rflags()
13009 while (vcpu->arch.apf.gfns[key] != ~0) in kvm_add_async_pf_gfn()
13012 vcpu->arch.apf.gfns[key] = gfn; in kvm_add_async_pf_gfn()
13021 (vcpu->arch.apf.gfns[key] != gfn && in kvm_async_pf_gfn_slot()
13022 vcpu->arch.apf.gfns[key] != ~0); i++) in kvm_async_pf_gfn_slot()
13030 return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == gfn; in kvm_find_async_pf_gfn()
13039 if (WARN_ON_ONCE(vcpu->arch.apf.gfns[i] != gfn)) in kvm_del_async_pf_gfn()
13043 vcpu->arch.apf.gfns[i] = ~0; in kvm_del_async_pf_gfn()
13046 if (vcpu->arch.apf.gfns[j] == ~0) in kvm_del_async_pf_gfn()
13048 k = kvm_async_pf_hash_fn(vcpu->arch.apf.gfns[j]); in kvm_del_async_pf_gfn()
13055 vcpu->arch.apf.gfns[i] = vcpu->arch.apf.gfns[j]; in kvm_del_async_pf_gfn()
13064 return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &reason, in apf_put_user_notpresent()
13072 return kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.apf.data, in apf_put_user_ready()
13081 if (kvm_read_guest_offset_cached(vcpu->kvm, &vcpu->arch.apf.data, in apf_pageready_slot_free()
13094 if (vcpu->arch.apf.send_user_only && in kvm_can_deliver_async_pf()
13103 return vcpu->arch.apf.delivery_as_pf_vmexit; in kvm_can_deliver_async_pf()
13136 trace_kvm_async_pf_not_present(work->arch.token, work->cr2_or_gpa); in kvm_arch_async_page_not_present()
13137 kvm_add_async_pf_gfn(vcpu, work->arch.gfn); in kvm_arch_async_page_not_present()
13145 fault.address = work->arch.token; in kvm_arch_async_page_not_present()
13168 .vector = vcpu->arch.apf.vec in kvm_arch_async_page_present()
13172 work->arch.token = ~0; /* broadcast wakeup */ in kvm_arch_async_page_present()
13174 kvm_del_async_pf_gfn(vcpu, work->arch.gfn); in kvm_arch_async_page_present()
13175 trace_kvm_async_pf_ready(work->arch.token, work->cr2_or_gpa); in kvm_arch_async_page_present()
13179 !apf_put_user_ready(vcpu, work->arch.token)) { in kvm_arch_async_page_present()
13180 vcpu->arch.apf.pageready_pending = true; in kvm_arch_async_page_present()
13184 vcpu->arch.apf.halted = false; in kvm_arch_async_page_present()
13185 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; in kvm_arch_async_page_present()
13191 if (!vcpu->arch.apf.pageready_pending) in kvm_arch_async_page_present_queued()
13205 if (atomic_inc_return(&kvm->arch.assigned_device_count) == 1) in kvm_arch_start_assignment()
13212 atomic_dec(&kvm->arch.assigned_device_count); in kvm_arch_end_assignment()
13218 return raw_atomic_read(&kvm->arch.assigned_device_count); in kvm_arch_has_assigned_device()
13224 atomic_inc(&kvm->arch.noncoherent_dma_count); in kvm_arch_register_noncoherent_dma()
13230 atomic_dec(&kvm->arch.noncoherent_dma_count); in kvm_arch_unregister_noncoherent_dma()
13236 return atomic_read(&kvm->arch.noncoherent_dma_count); in kvm_arch_has_noncoherent_dma()
13309 return (vcpu->arch.msr_kvm_poll_control & 1) == 0; in kvm_arch_no_poll()
13342 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in kvm_fixup_and_inject_pf_error()
13361 vcpu->arch.walk_mmu->inject_page_fault(vcpu, &fault); in kvm_fixup_and_inject_pf_error()
13495 vcpu->arch.complete_userspace_io = complete_sev_es_emulated_mmio; in complete_sev_es_emulated_mmio()
13533 vcpu->arch.complete_userspace_io = complete_sev_es_emulated_mmio; in kvm_sev_es_mmio_write()
13571 vcpu->arch.complete_userspace_io = complete_sev_es_emulated_mmio; in kvm_sev_es_mmio_read()
13579 vcpu->arch.sev_pio_count -= count; in advance_sev_es_emulated_pio()
13580 vcpu->arch.sev_pio_data += count * size; in advance_sev_es_emulated_pio()
13588 int size = vcpu->arch.pio.size; in complete_sev_es_emulated_outs()
13589 int port = vcpu->arch.pio.port; in complete_sev_es_emulated_outs()
13591 vcpu->arch.pio.count = 0; in complete_sev_es_emulated_outs()
13592 if (vcpu->arch.sev_pio_count) in complete_sev_es_emulated_outs()
13602 min_t(unsigned int, PAGE_SIZE / size, vcpu->arch.sev_pio_count); in kvm_sev_es_outs()
13603 int ret = emulator_pio_out(vcpu, size, port, vcpu->arch.sev_pio_data, count); in kvm_sev_es_outs()
13611 if (!vcpu->arch.sev_pio_count) in kvm_sev_es_outs()
13615 vcpu->arch.complete_userspace_io = complete_sev_es_emulated_outs; in kvm_sev_es_outs()
13624 unsigned count = vcpu->arch.pio.count; in complete_sev_es_emulated_ins()
13625 int size = vcpu->arch.pio.size; in complete_sev_es_emulated_ins()
13626 int port = vcpu->arch.pio.port; in complete_sev_es_emulated_ins()
13628 complete_emulator_pio_in(vcpu, vcpu->arch.sev_pio_data); in complete_sev_es_emulated_ins()
13630 if (vcpu->arch.sev_pio_count) in complete_sev_es_emulated_ins()
13640 min_t(unsigned int, PAGE_SIZE / size, vcpu->arch.sev_pio_count); in kvm_sev_es_ins()
13641 if (!emulator_pio_in(vcpu, size, port, vcpu->arch.sev_pio_data, count)) in kvm_sev_es_ins()
13646 if (!vcpu->arch.sev_pio_count) in kvm_sev_es_ins()
13650 vcpu->arch.complete_userspace_io = complete_sev_es_emulated_ins; in kvm_sev_es_ins()
13658 vcpu->arch.sev_pio_data = data; in kvm_sev_es_string_io()
13659 vcpu->arch.sev_pio_count = count; in kvm_sev_es_string_io()