Lines matching refs:arch. Each entry shows the source line number, the matching line, and the enclosing function; all hits are in KVM's Xen emulation code.

39 struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;  in kvm_xen_shared_info_init()
75 BUILD_BUG_ON(offsetof(struct compat_shared_info, arch.wc_sec_hi) != 0x924); in kvm_xen_shared_info_init()
83 if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) { in kvm_xen_shared_info_init()
93 wc_sec_hi = &shinfo->arch.wc_sec_hi; in kvm_xen_shared_info_init()
118 if (atomic_read(&vcpu->arch.xen.timer_pending) > 0) { in kvm_xen_inject_timer_irqs()
123 e.port = vcpu->arch.xen.timer_virq; in kvm_xen_inject_timer_irqs()
128 vcpu->arch.xen.timer_expires = 0; in kvm_xen_inject_timer_irqs()
129 atomic_set(&vcpu->arch.xen.timer_pending, 0); in kvm_xen_inject_timer_irqs()
136 arch.xen.timer); in xen_timer_callback()
137 if (atomic_read(&vcpu->arch.xen.timer_pending)) in xen_timer_callback()
140 atomic_inc(&vcpu->arch.xen.timer_pending); in xen_timer_callback()
149 atomic_set(&vcpu->arch.xen.timer_pending, 0); in kvm_xen_start_timer()
150 vcpu->arch.xen.timer_expires = guest_abs; in kvm_xen_start_timer()
153 xen_timer_callback(&vcpu->arch.xen.timer); in kvm_xen_start_timer()
156 hrtimer_start(&vcpu->arch.xen.timer, in kvm_xen_start_timer()
164 hrtimer_cancel(&vcpu->arch.xen.timer); in kvm_xen_stop_timer()
165 vcpu->arch.xen.timer_expires = 0; in kvm_xen_stop_timer()
166 atomic_set(&vcpu->arch.xen.timer_pending, 0); in kvm_xen_stop_timer()
171 hrtimer_init(&vcpu->arch.xen.timer, CLOCK_MONOTONIC, in kvm_xen_init_timer()
173 vcpu->arch.xen.timer.function = xen_timer_callback; in kvm_xen_init_timer()
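
The kvm_xen_init_timer/kvm_xen_start_timer/kvm_xen_stop_timer hits above follow the usual one-shot hrtimer pattern. A minimal sketch of that pattern, using illustrative names of my own; the HRTIMER_MODE_ABS_HARD mode is an assumption, since it is not visible in these lines:

/* Sketch only: mirrors the hrtimer_init/start/cancel and pending-counter
 * usage seen in the listing, not the kernel's actual structures. */
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/atomic.h>
#include <linux/kernel.h>

struct demo_xen_timer {
	struct hrtimer timer;
	atomic_t pending;
};

static enum hrtimer_restart demo_timer_fn(struct hrtimer *t)
{
	struct demo_xen_timer *xt = container_of(t, struct demo_xen_timer, timer);

	atomic_inc(&xt->pending);	/* record the expiry; injection happens later */
	return HRTIMER_NORESTART;	/* one-shot, as in the references above */
}

static void demo_timer_init(struct demo_xen_timer *xt)
{
	hrtimer_init(&xt->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
	xt->timer.function = demo_timer_fn;
	atomic_set(&xt->pending, 0);
}

static void demo_timer_start(struct demo_xen_timer *xt, ktime_t abs_expiry)
{
	hrtimer_start(&xt->timer, abs_expiry, HRTIMER_MODE_ABS_HARD);
}

static void demo_timer_stop(struct demo_xen_timer *xt)
{
	hrtimer_cancel(&xt->timer);	/* waits for a running callback to finish */
	atomic_set(&xt->pending, 0);
}
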
178 struct kvm_vcpu_xen *vx = &v->arch.xen; in kvm_xen_update_runstate_guest()
248 if (IS_ENABLED(CONFIG_64BIT) && v->kvm->arch.xen.long_mode) { in kvm_xen_update_runstate_guest()
312 if (v->kvm->arch.xen.runstate_update_flag) in kvm_xen_update_runstate_guest()
376 if (v->kvm->arch.xen.runstate_update_flag) { in kvm_xen_update_runstate_guest()
445 struct kvm_vcpu_xen *vx = &v->arch.xen; in kvm_xen_update_runstate()
480 irq.vector = v->arch.xen.upcall_vector; in kvm_xen_inject_vcpu_vector()
499 unsigned long evtchn_pending_sel = READ_ONCE(v->arch.xen.evtchn_pending_sel); in kvm_xen_inject_pending_events()
500 struct gfn_to_pfn_cache *gpc = &v->arch.xen.vcpu_info_cache; in kvm_xen_inject_pending_events()
522 if (IS_ENABLED(CONFIG_64BIT) && v->kvm->arch.xen.long_mode) { in kvm_xen_inject_pending_events()
530 "+m" (v->arch.xen.evtchn_pending_sel) in kvm_xen_inject_pending_events()
542 "+m" (v->arch.xen.evtchn_pending_sel) in kvm_xen_inject_pending_events()
549 if (v->arch.xen.upcall_vector) in kvm_xen_inject_pending_events()
557 struct gfn_to_pfn_cache *gpc = &v->arch.xen.vcpu_info_cache; in __kvm_xen_has_interrupt()
614 mutex_lock(&kvm->arch.xen.xen_lock); in kvm_xen_hvm_set_attr()
615 kvm->arch.xen.long_mode = !!data->u.long_mode; in kvm_xen_hvm_set_attr()
616 mutex_unlock(&kvm->arch.xen.xen_lock); in kvm_xen_hvm_set_attr()
622 mutex_lock(&kvm->arch.xen.xen_lock); in kvm_xen_hvm_set_attr()
624 mutex_unlock(&kvm->arch.xen.xen_lock); in kvm_xen_hvm_set_attr()
631 mutex_lock(&kvm->arch.xen.xen_lock); in kvm_xen_hvm_set_attr()
632 kvm->arch.xen.upcall_vector = data->u.vector; in kvm_xen_hvm_set_attr()
633 mutex_unlock(&kvm->arch.xen.xen_lock); in kvm_xen_hvm_set_attr()
643 mutex_lock(&kvm->arch.xen.xen_lock); in kvm_xen_hvm_set_attr()
644 kvm->arch.xen.xen_version = data->u.xen_version; in kvm_xen_hvm_set_attr()
645 mutex_unlock(&kvm->arch.xen.xen_lock); in kvm_xen_hvm_set_attr()
654 mutex_lock(&kvm->arch.xen.xen_lock); in kvm_xen_hvm_set_attr()
655 kvm->arch.xen.runstate_update_flag = !!data->u.runstate_update_flag; in kvm_xen_hvm_set_attr()
656 mutex_unlock(&kvm->arch.xen.xen_lock); in kvm_xen_hvm_set_attr()
671 mutex_lock(&kvm->arch.xen.xen_lock); in kvm_xen_hvm_get_attr()
675 data->u.long_mode = kvm->arch.xen.long_mode; in kvm_xen_hvm_get_attr()
680 if (kvm->arch.xen.shinfo_cache.active) in kvm_xen_hvm_get_attr()
681 data->u.shared_info.gfn = gpa_to_gfn(kvm->arch.xen.shinfo_cache.gpa); in kvm_xen_hvm_get_attr()
688 data->u.vector = kvm->arch.xen.upcall_vector; in kvm_xen_hvm_get_attr()
693 data->u.xen_version = kvm->arch.xen.xen_version; in kvm_xen_hvm_get_attr()
702 data->u.runstate_update_flag = kvm->arch.xen.runstate_update_flag; in kvm_xen_hvm_get_attr()
710 mutex_unlock(&kvm->arch.xen.xen_lock); in kvm_xen_hvm_get_attr()
718 mutex_lock(&vcpu->kvm->arch.xen.xen_lock); in kvm_xen_vcpu_set_attr()
730 kvm_gpc_deactivate(&vcpu->arch.xen.vcpu_info_cache); in kvm_xen_vcpu_set_attr()
735 r = kvm_gpc_activate(&vcpu->arch.xen.vcpu_info_cache, in kvm_xen_vcpu_set_attr()
744 kvm_gpc_deactivate(&vcpu->arch.xen.vcpu_time_info_cache); in kvm_xen_vcpu_set_attr()
749 r = kvm_gpc_activate(&vcpu->arch.xen.vcpu_time_info_cache, in kvm_xen_vcpu_set_attr()
766 kvm_gpc_deactivate(&vcpu->arch.xen.runstate_cache); in kvm_xen_vcpu_set_attr()
767 kvm_gpc_deactivate(&vcpu->arch.xen.runstate2_cache); in kvm_xen_vcpu_set_attr()
776 if (IS_ENABLED(CONFIG_64BIT) && vcpu->kvm->arch.xen.long_mode) in kvm_xen_vcpu_set_attr()
783 r = kvm_gpc_activate(&vcpu->arch.xen.runstate_cache, in kvm_xen_vcpu_set_attr()
790 kvm_gpc_deactivate(&vcpu->arch.xen.runstate2_cache); in kvm_xen_vcpu_set_attr()
794 r = kvm_gpc_activate(&vcpu->arch.xen.runstate2_cache, in kvm_xen_vcpu_set_attr()
840 vcpu->arch.xen.current_runstate = data->u.runstate.state; in kvm_xen_vcpu_set_attr()
841 vcpu->arch.xen.runstate_entry_time = in kvm_xen_vcpu_set_attr()
843 vcpu->arch.xen.runstate_times[RUNSTATE_running] = in kvm_xen_vcpu_set_attr()
845 vcpu->arch.xen.runstate_times[RUNSTATE_runnable] = in kvm_xen_vcpu_set_attr()
847 vcpu->arch.xen.runstate_times[RUNSTATE_blocked] = in kvm_xen_vcpu_set_attr()
849 vcpu->arch.xen.runstate_times[RUNSTATE_offline] = in kvm_xen_vcpu_set_attr()
851 vcpu->arch.xen.last_steal = current->sched_info.run_delay; in kvm_xen_vcpu_set_attr()
876 (vcpu->arch.xen.runstate_entry_time + in kvm_xen_vcpu_set_attr()
882 vcpu->arch.xen.runstate_entry_time += in kvm_xen_vcpu_set_attr()
884 vcpu->arch.xen.runstate_times[RUNSTATE_running] += in kvm_xen_vcpu_set_attr()
886 vcpu->arch.xen.runstate_times[RUNSTATE_runnable] += in kvm_xen_vcpu_set_attr()
888 vcpu->arch.xen.runstate_times[RUNSTATE_blocked] += in kvm_xen_vcpu_set_attr()
890 vcpu->arch.xen.runstate_times[RUNSTATE_offline] += in kvm_xen_vcpu_set_attr()
895 else if (vcpu->arch.xen.runstate_cache.active) in kvm_xen_vcpu_set_attr()
904 vcpu->arch.xen.vcpu_id = data->u.vcpu_id; in kvm_xen_vcpu_set_attr()
916 if (!vcpu->arch.xen.timer.function) in kvm_xen_vcpu_set_attr()
921 vcpu->arch.xen.timer_virq = data->u.timer.port; in kvm_xen_vcpu_set_attr()
936 vcpu->arch.xen.upcall_vector = data->u.vector; in kvm_xen_vcpu_set_attr()
946 mutex_unlock(&vcpu->kvm->arch.xen.xen_lock); in kvm_xen_vcpu_set_attr()
954 mutex_lock(&vcpu->kvm->arch.xen.xen_lock); in kvm_xen_vcpu_get_attr()
958 if (vcpu->arch.xen.vcpu_info_cache.active) in kvm_xen_vcpu_get_attr()
959 data->u.gpa = vcpu->arch.xen.vcpu_info_cache.gpa; in kvm_xen_vcpu_get_attr()
966 if (vcpu->arch.xen.vcpu_time_info_cache.active) in kvm_xen_vcpu_get_attr()
967 data->u.gpa = vcpu->arch.xen.vcpu_time_info_cache.gpa; in kvm_xen_vcpu_get_attr()
978 if (vcpu->arch.xen.runstate_cache.active) { in kvm_xen_vcpu_get_attr()
979 data->u.gpa = vcpu->arch.xen.runstate_cache.gpa; in kvm_xen_vcpu_get_attr()
989 data->u.runstate.state = vcpu->arch.xen.current_runstate; in kvm_xen_vcpu_get_attr()
998 data->u.runstate.state = vcpu->arch.xen.current_runstate; in kvm_xen_vcpu_get_attr()
1000 vcpu->arch.xen.runstate_entry_time; in kvm_xen_vcpu_get_attr()
1002 vcpu->arch.xen.runstate_times[RUNSTATE_running]; in kvm_xen_vcpu_get_attr()
1004 vcpu->arch.xen.runstate_times[RUNSTATE_runnable]; in kvm_xen_vcpu_get_attr()
1006 vcpu->arch.xen.runstate_times[RUNSTATE_blocked]; in kvm_xen_vcpu_get_attr()
1008 vcpu->arch.xen.runstate_times[RUNSTATE_offline]; in kvm_xen_vcpu_get_attr()
1017 data->u.vcpu_id = vcpu->arch.xen.vcpu_id; in kvm_xen_vcpu_get_attr()
1022 data->u.timer.port = vcpu->arch.xen.timer_virq; in kvm_xen_vcpu_get_attr()
1024 data->u.timer.expires_ns = vcpu->arch.xen.timer_expires; in kvm_xen_vcpu_get_attr()
1029 data->u.vector = vcpu->arch.xen.upcall_vector; in kvm_xen_vcpu_get_attr()
1037 mutex_unlock(&vcpu->kvm->arch.xen.xen_lock); in kvm_xen_vcpu_get_attr()
1049 vcpu->kvm->arch.xen.long_mode = lm; in kvm_xen_write_hypercall_page()
1088 hva_t blob_addr = lm ? kvm->arch.xen_hvm_config.blob_addr_64 in kvm_xen_write_hypercall_page()
1089 : kvm->arch.xen_hvm_config.blob_addr_32; in kvm_xen_write_hypercall_page()
1090 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64 in kvm_xen_write_hypercall_page()
1091 : kvm->arch.xen_hvm_config.blob_size_32; in kvm_xen_write_hypercall_page()
1130 mutex_lock(&kvm->arch.xen.xen_lock); in kvm_xen_hvm_config()
1132 if (xhc->msr && !kvm->arch.xen_hvm_config.msr) in kvm_xen_hvm_config()
1134 else if (!xhc->msr && kvm->arch.xen_hvm_config.msr) in kvm_xen_hvm_config()
1137 memcpy(&kvm->arch.xen_hvm_config, xhc, sizeof(*xhc)); in kvm_xen_hvm_config()
1139 mutex_unlock(&kvm->arch.xen.xen_lock); in kvm_xen_hvm_config()
1153 if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.xen.hypercall_rip))) in kvm_xen_hypercall_complete_userspace()
1161 if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) in max_evtchn_port()
1171 struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache; in wait_pending_event()
1183 if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) { in wait_pending_event()
1214 !(vcpu->kvm->arch.xen_hvm_config.flags & KVM_XEN_HVM_CONFIG_EVTCHN_SEND)) in kvm_xen_schedop_poll()
1274 vcpu->arch.xen.poll_evtchn = port; in kvm_xen_schedop_poll()
1276 vcpu->arch.xen.poll_evtchn = -1; in kvm_xen_schedop_poll()
1278 set_bit(vcpu->vcpu_idx, vcpu->kvm->arch.xen.poll_mask); in kvm_xen_schedop_poll()
1281 vcpu->arch.mp_state = KVM_MP_STATE_HALTED; in kvm_xen_schedop_poll()
1284 mod_timer(&vcpu->arch.xen.poll_timer, in kvm_xen_schedop_poll()
1290 del_timer(&vcpu->arch.xen.poll_timer); in kvm_xen_schedop_poll()
1292 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; in kvm_xen_schedop_poll()
1295 vcpu->arch.xen.poll_evtchn = 0; in kvm_xen_schedop_poll()
1299 clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.xen.poll_mask); in kvm_xen_schedop_poll()
1308 struct kvm_vcpu *vcpu = from_timer(vcpu, t, arch.xen.poll_timer); in cancel_evtchn_poll()
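
The poll timer referenced here is a classic timer_list: from_timer() in its callback, mod_timer()/del_timer() around the halt in kvm_xen_schedop_poll, and timer_setup()/del_timer_sync() in the vCPU init/destroy hits further down. A minimal sketch of that pattern under illustrative names, with the wake-up mechanics elided:

/* Sketch only: shows the timer_setup/from_timer/mod_timer/del_timer cycle,
 * not the real poll bookkeeping. */
#include <linux/timer.h>
#include <linux/jiffies.h>

struct demo_poller {
	struct timer_list poll_timer;
	bool timed_out;
};

static void demo_poll_timeout(struct timer_list *t)
{
	struct demo_poller *p = from_timer(p, t, poll_timer);

	p->timed_out = true;	/* the real callback kicks the halted vCPU */
}

static void demo_poller_init(struct demo_poller *p)
{
	p->timed_out = false;
	timer_setup(&p->poll_timer, demo_poll_timeout, 0);
}

static void demo_poll_with_timeout(struct demo_poller *p, unsigned long timeout)
{
	mod_timer(&p->poll_timer, jiffies + timeout);	/* arm (or re-arm) */
	/* ... block until an event arrives or the timer fires ... */
	del_timer(&p->poll_timer);			/* disarm once done */
}
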
1350 if (vcpu->arch.xen.vcpu_id != vcpu_id) { in kvm_xen_hcall_vcpu_op()
1388 if (vcpu->arch.xen.vcpu_id != vcpu_id) { in kvm_xen_hcall_vcpu_op()
1480 if (params[0] == XENVER_version && vcpu->kvm->arch.xen.xen_version) { in kvm_xen_hypercall()
1481 r = vcpu->kvm->arch.xen.xen_version; in kvm_xen_hypercall()
1524 vcpu->arch.xen.hypercall_rip = kvm_get_linear_rip(vcpu); in kvm_xen_hypercall()
1525 vcpu->arch.complete_userspace_io = in kvm_xen_hypercall()
1533 int poll_evtchn = vcpu->arch.xen.poll_evtchn; in kvm_xen_check_poller()
1536 test_and_clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.xen.poll_mask)) { in kvm_xen_check_poller()
1554 struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache; in kvm_xen_set_evtchn_fast()
1572 if (!vcpu->arch.xen.vcpu_info_cache.active) in kvm_xen_set_evtchn_fast()
1586 if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) { in kvm_xen_set_evtchn_fast()
1614 gpc = &vcpu->arch.xen.vcpu_info_cache; in kvm_xen_set_evtchn_fast()
1622 if (!test_and_set_bit(port_word_bit, &vcpu->arch.xen.evtchn_pending_sel)) in kvm_xen_set_evtchn_fast()
1627 if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) { in kvm_xen_set_evtchn_fast()
1643 if (kick_vcpu && vcpu->arch.xen.upcall_vector) { in kvm_xen_set_evtchn_fast()
1682 mutex_lock(&kvm->arch.xen.xen_lock); in kvm_xen_set_evtchn()
1699 struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache; in kvm_xen_set_evtchn()
1711 mutex_unlock(&kvm->arch.xen.xen_lock); in kvm_xen_set_evtchn()
1827 mutex_lock(&kvm->arch.xen.xen_lock); in kvm_xen_eventfd_update()
1828 evtchnfd = idr_find(&kvm->arch.xen.evtchn_ports, port); in kvm_xen_eventfd_update()
1858 mutex_unlock(&kvm->arch.xen.xen_lock); in kvm_xen_eventfd_update()
1921 mutex_lock(&kvm->arch.xen.xen_lock); in kvm_xen_eventfd_assign()
1922 ret = idr_alloc(&kvm->arch.xen.evtchn_ports, evtchnfd, port, port + 1, in kvm_xen_eventfd_assign()
1924 mutex_unlock(&kvm->arch.xen.xen_lock); in kvm_xen_eventfd_assign()
1942 mutex_lock(&kvm->arch.xen.xen_lock); in kvm_xen_eventfd_deassign()
1943 evtchnfd = idr_remove(&kvm->arch.xen.evtchn_ports, port); in kvm_xen_eventfd_deassign()
1944 mutex_unlock(&kvm->arch.xen.xen_lock); in kvm_xen_eventfd_deassign()
1962 mutex_lock(&kvm->arch.xen.xen_lock); in kvm_xen_eventfd_reset()
1969 idr_for_each_entry(&kvm->arch.xen.evtchn_ports, evtchnfd, i) in kvm_xen_eventfd_reset()
1974 mutex_unlock(&kvm->arch.xen.xen_lock); in kvm_xen_eventfd_reset()
1979 idr_for_each_entry(&kvm->arch.xen.evtchn_ports, evtchnfd, i) { in kvm_xen_eventfd_reset()
1981 idr_remove(&kvm->arch.xen.evtchn_ports, evtchnfd->send_port); in kvm_xen_eventfd_reset()
1983 mutex_unlock(&kvm->arch.xen.xen_lock); in kvm_xen_eventfd_reset()
2036 evtchnfd = idr_find(&vcpu->kvm->arch.xen.evtchn_ports, send.port); in kvm_xen_hcall_evtchn_send()
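
The event-channel lines above manage ports with a plain IDR: idr_alloc() bound to a fixed port in kvm_xen_eventfd_assign, idr_find() for lookup, idr_remove() to unbind, and idr_for_each_entry() plus idr_destroy() in the teardown hits further down. A minimal sketch of that usage with an illustrative port table of my own:

/* Sketch only: fixed-port IDR usage as the call sites above suggest. */
#include <linux/idr.h>
#include <linux/slab.h>

static DEFINE_IDR(demo_ports);

/* Bind an entry to one specific port: idr_alloc() over [port, port + 1)
 * returns -ENOSPC if that port is already taken. */
static int demo_port_bind(int port, void *entry)
{
	return idr_alloc(&demo_ports, entry, port, port + 1, GFP_KERNEL);
}

static void *demo_port_lookup(int port)
{
	return idr_find(&demo_ports, port);
}

static void demo_port_unbind(int port)
{
	kfree(idr_remove(&demo_ports, port));	/* idr_remove() returns the entry */
}

static void demo_ports_destroy(void)
{
	void *entry;
	int port;

	idr_for_each_entry(&demo_ports, entry, port)
		kfree(idr_remove(&demo_ports, port));
	idr_destroy(&demo_ports);
}
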
2055 vcpu->arch.xen.vcpu_id = vcpu->vcpu_idx; in kvm_xen_init_vcpu()
2056 vcpu->arch.xen.poll_evtchn = 0; in kvm_xen_init_vcpu()
2058 timer_setup(&vcpu->arch.xen.poll_timer, cancel_evtchn_poll, 0); in kvm_xen_init_vcpu()
2060 kvm_gpc_init(&vcpu->arch.xen.runstate_cache, vcpu->kvm, NULL, in kvm_xen_init_vcpu()
2062 kvm_gpc_init(&vcpu->arch.xen.runstate2_cache, vcpu->kvm, NULL, in kvm_xen_init_vcpu()
2064 kvm_gpc_init(&vcpu->arch.xen.vcpu_info_cache, vcpu->kvm, NULL, in kvm_xen_init_vcpu()
2066 kvm_gpc_init(&vcpu->arch.xen.vcpu_time_info_cache, vcpu->kvm, NULL, in kvm_xen_init_vcpu()
2075 kvm_gpc_deactivate(&vcpu->arch.xen.runstate_cache); in kvm_xen_destroy_vcpu()
2076 kvm_gpc_deactivate(&vcpu->arch.xen.runstate2_cache); in kvm_xen_destroy_vcpu()
2077 kvm_gpc_deactivate(&vcpu->arch.xen.vcpu_info_cache); in kvm_xen_destroy_vcpu()
2078 kvm_gpc_deactivate(&vcpu->arch.xen.vcpu_time_info_cache); in kvm_xen_destroy_vcpu()
2080 del_timer_sync(&vcpu->arch.xen.poll_timer); in kvm_xen_destroy_vcpu()
2088 if (!vcpu->arch.xen.cpuid.base) in kvm_xen_update_tsc_info()
2091 function = vcpu->arch.xen.cpuid.base | XEN_CPUID_LEAF(3); in kvm_xen_update_tsc_info()
2092 if (function > vcpu->arch.xen.cpuid.limit) in kvm_xen_update_tsc_info()
2097 entry->ecx = vcpu->arch.hv_clock.tsc_to_system_mul; in kvm_xen_update_tsc_info()
2098 entry->edx = vcpu->arch.hv_clock.tsc_shift; in kvm_xen_update_tsc_info()
2103 entry->eax = vcpu->arch.hw_tsc_khz; in kvm_xen_update_tsc_info()
2108 mutex_init(&kvm->arch.xen.xen_lock); in kvm_xen_init_vm()
2109 idr_init(&kvm->arch.xen.evtchn_ports); in kvm_xen_init_vm()
2110 kvm_gpc_init(&kvm->arch.xen.shinfo_cache, kvm, NULL, KVM_HOST_USES_PFN); in kvm_xen_init_vm()
2118 kvm_gpc_deactivate(&kvm->arch.xen.shinfo_cache); in kvm_xen_destroy_vm()
2120 idr_for_each_entry(&kvm->arch.xen.evtchn_ports, evtchnfd, i) { in kvm_xen_destroy_vm()
2125 idr_destroy(&kvm->arch.xen.evtchn_ports); in kvm_xen_destroy_vm()
2127 if (kvm->arch.xen_hvm_config.msr) in kvm_xen_destroy_vm()
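
Many of the remaining hits drive gfn_to_pfn_cache objects through kvm_gpc_init()/kvm_gpc_activate()/kvm_gpc_deactivate(). A minimal sketch of that lifecycle as the call sites above suggest it; this is KVM-internal API, so the activate arguments (guest physical address and length) and the KVM_HOST_USES_PFN usage flag are assumptions tied to this kernel version:

/* Sketch only: init once, activate when the guest supplies an address,
 * deactivate on teardown; names here are illustrative. */
#include <linux/kvm_host.h>

static struct gfn_to_pfn_cache demo_cache;

static int demo_cache_setup(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
	/* One-time binding to the VM; nothing is mapped yet. */
	kvm_gpc_init(&demo_cache, kvm, NULL, KVM_HOST_USES_PFN);

	/* Map the guest page for direct host access; redone if the guest moves it. */
	return kvm_gpc_activate(&demo_cache, gpa, len);
}

static void demo_cache_teardown(void)
{
	/* Drops the mapping; harmless if the cache was never activated. */
	kvm_gpc_deactivate(&demo_cache);
}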