xref: /openbmc/linux/arch/x86/kvm/hyperv.c (revision db9cf24cea69773410f0049bdfa795d7c2bd0ea9)
120c8ccb1SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
2e83d5887SAndrey Smetanin /*
3e83d5887SAndrey Smetanin  * KVM Microsoft Hyper-V emulation
4e83d5887SAndrey Smetanin  *
5e83d5887SAndrey Smetanin  * derived from arch/x86/kvm/x86.c
6e83d5887SAndrey Smetanin  *
7e83d5887SAndrey Smetanin  * Copyright (C) 2006 Qumranet, Inc.
8e83d5887SAndrey Smetanin  * Copyright (C) 2008 Qumranet, Inc.
9e83d5887SAndrey Smetanin  * Copyright IBM Corporation, 2008
10e83d5887SAndrey Smetanin  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
11e83d5887SAndrey Smetanin  * Copyright (C) 2015 Andrey Smetanin <asmetanin@virtuozzo.com>
12e83d5887SAndrey Smetanin  *
13e83d5887SAndrey Smetanin  * Authors:
14e83d5887SAndrey Smetanin  *   Avi Kivity   <avi@qumranet.com>
15e83d5887SAndrey Smetanin  *   Yaniv Kamay  <yaniv@qumranet.com>
16e83d5887SAndrey Smetanin  *   Amit Shah    <amit.shah@qumranet.com>
17e83d5887SAndrey Smetanin  *   Ben-Ami Yassour <benami@il.ibm.com>
18e83d5887SAndrey Smetanin  *   Andrey Smetanin <asmetanin@virtuozzo.com>
19e83d5887SAndrey Smetanin  */
208d20bd63SSean Christopherson #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21e83d5887SAndrey Smetanin 
22e83d5887SAndrey Smetanin #include "x86.h"
23e83d5887SAndrey Smetanin #include "lapic.h"
245c919412SAndrey Smetanin #include "ioapic.h"
25f97f5a56SJon Doron #include "cpuid.h"
26e83d5887SAndrey Smetanin #include "hyperv.h"
27aee73823SVitaly Kuznetsov #include "mmu.h"
2879033bebSJoao Martins #include "xen.h"
29e83d5887SAndrey Smetanin 
30b2d8b167SVitaly Kuznetsov #include <linux/cpu.h>
31e83d5887SAndrey Smetanin #include <linux/kvm_host.h>
32765eaa0fSAndrey Smetanin #include <linux/highmem.h>
3332ef5517SIngo Molnar #include <linux/sched/cputime.h>
340823570fSVitaly Kuznetsov #include <linux/spinlock.h>
35faeb7833SRoman Kagan #include <linux/eventfd.h>
3632ef5517SIngo Molnar 
375c919412SAndrey Smetanin #include <asm/apicdef.h>
38c58a318fSVitaly Kuznetsov #include <asm/mshyperv.h>
39e83d5887SAndrey Smetanin #include <trace/events/kvm.h>
40e83d5887SAndrey Smetanin 
41e83d5887SAndrey Smetanin #include "trace.h"
4259508b30SPeter Xu #include "irq.h"
435974565bSSiddharth Chandrasekaran #include "fpu.h"
44e83d5887SAndrey Smetanin 
45ca7372acSVitaly Kuznetsov #define KVM_HV_MAX_SPARSE_VCPU_SET_BITS DIV_ROUND_UP(KVM_MAX_VCPUS, HV_VCPUS_PER_SPARSE_BANK)
46f21dd494SVitaly Kuznetsov 
/*
 * As per the Hyper-V TLFS, extended hypercalls start from 0x8001
 * (HvExtCallQueryCapabilities). The response to this hypercall is a 64-bit
 * value where each bit indicates which further extended hypercall is
 * available besides HvExtCallQueryCapabilities.
 *
 * 0x8001 - First extended hypercall, HvExtCallQueryCapabilities, no bit
 * assigned.
 *
 * 0x8002 - Bit 0
 * 0x8003 - Bit 1
 * ..
 * 0x8041 - Bit 63
 *
 * Therefore, HV_EXT_CALL_MAX = 0x8001 + 64
 */
63*db9cf24cSVipin Sharma #define HV_EXT_CALL_MAX (HV_EXT_CALL_QUERY_CAPABILITIES + 64)
64*db9cf24cSVipin Sharma 
658644f771SVitaly Kuznetsov static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer,
668644f771SVitaly Kuznetsov 				bool vcpu_kick);
678644f771SVitaly Kuznetsov 
685c919412SAndrey Smetanin static inline u64 synic_read_sint(struct kvm_vcpu_hv_synic *synic, int sint)
695c919412SAndrey Smetanin {
705c919412SAndrey Smetanin 	return atomic64_read(&synic->sint[sint]);
715c919412SAndrey Smetanin }
725c919412SAndrey Smetanin 
735c919412SAndrey Smetanin static inline int synic_get_sint_vector(u64 sint_value)
745c919412SAndrey Smetanin {
755c919412SAndrey Smetanin 	if (sint_value & HV_SYNIC_SINT_MASKED)
765c919412SAndrey Smetanin 		return -1;
775c919412SAndrey Smetanin 	return sint_value & HV_SYNIC_SINT_VECTOR_MASK;
785c919412SAndrey Smetanin }
795c919412SAndrey Smetanin 
805c919412SAndrey Smetanin static bool synic_has_vector_connected(struct kvm_vcpu_hv_synic *synic,
815c919412SAndrey Smetanin 				      int vector)
825c919412SAndrey Smetanin {
835c919412SAndrey Smetanin 	int i;
845c919412SAndrey Smetanin 
855c919412SAndrey Smetanin 	for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
865c919412SAndrey Smetanin 		if (synic_get_sint_vector(synic_read_sint(synic, i)) == vector)
875c919412SAndrey Smetanin 			return true;
885c919412SAndrey Smetanin 	}
895c919412SAndrey Smetanin 	return false;
905c919412SAndrey Smetanin }
915c919412SAndrey Smetanin 
925c919412SAndrey Smetanin static bool synic_has_vector_auto_eoi(struct kvm_vcpu_hv_synic *synic,
935c919412SAndrey Smetanin 				     int vector)
945c919412SAndrey Smetanin {
955c919412SAndrey Smetanin 	int i;
965c919412SAndrey Smetanin 	u64 sint_value;
975c919412SAndrey Smetanin 
985c919412SAndrey Smetanin 	for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
995c919412SAndrey Smetanin 		sint_value = synic_read_sint(synic, i);
1005c919412SAndrey Smetanin 		if (synic_get_sint_vector(sint_value) == vector &&
1015c919412SAndrey Smetanin 		    sint_value & HV_SYNIC_SINT_AUTO_EOI)
1025c919412SAndrey Smetanin 			return true;
1035c919412SAndrey Smetanin 	}
1045c919412SAndrey Smetanin 	return false;
1055c919412SAndrey Smetanin }
1065c919412SAndrey Smetanin 
/*
 * Recompute this vCPU's SynIC vector bitmaps after one SINT changed, and
 * update the VM-wide APICv inhibit that AutoEOI usage requires.
 *
 * @vector is either the old or the new vector of the SINT being updated;
 * the caller invokes this once for each.
 */
static void synic_update_vector(struct kvm_vcpu_hv_synic *synic,
				int vector)
{
	struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
	struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
	bool auto_eoi_old, auto_eoi_new;

	/* Vectors below the first valid one are never tracked. */
	if (vector < HV_SYNIC_FIRST_VALID_VECTOR)
		return;

	if (synic_has_vector_connected(synic, vector))
		__set_bit(vector, synic->vec_bitmap);
	else
		__clear_bit(vector, synic->vec_bitmap);

	/* Did this vCPU use any AutoEOI vector before the update... */
	auto_eoi_old = !bitmap_empty(synic->auto_eoi_bitmap, 256);

	if (synic_has_vector_auto_eoi(synic, vector))
		__set_bit(vector, synic->auto_eoi_bitmap);
	else
		__clear_bit(vector, synic->auto_eoi_bitmap);

	/* ...and does it use any afterwards? */
	auto_eoi_new = !bitmap_empty(synic->auto_eoi_bitmap, 256);

	/* No used/unused transition -> the VM-wide count is unchanged. */
	if (auto_eoi_old == auto_eoi_new)
		return;

	/* The inhibit only matters when APICv is enabled at all. */
	if (!enable_apicv)
		return;

	down_write(&vcpu->kvm->arch.apicv_update_lock);

	/* Count of vCPUs currently relying on AutoEOI, VM-wide. */
	if (auto_eoi_new)
		hv->synic_auto_eoi_used++;
	else
		hv->synic_auto_eoi_used--;

	/*
	 * Inhibit APICv if any vCPU is using SynIC's AutoEOI, which relies on
	 * the hypervisor to manually inject IRQs.
	 */
	__kvm_set_or_clear_apicv_inhibit(vcpu->kvm,
					 APICV_INHIBIT_REASON_HYPERV,
					 !!hv->synic_auto_eoi_used);

	up_write(&vcpu->kvm->arch.apicv_update_lock);
}
15498f65ad4SVitaly Kuznetsov 
/*
 * Program one SINT register and refresh the vector bitmaps for both the
 * previously programmed vector and the new one.  Returns 0 on success,
 * 1 to signal an error (#GP-style) to the caller.
 */
static int synic_set_sint(struct kvm_vcpu_hv_synic *synic, int sint,
			  u64 data, bool host)
{
	int vector, old_vector;
	bool masked;

	vector = data & HV_SYNIC_SINT_VECTOR_MASK;
	masked = data & HV_SYNIC_SINT_MASKED;

	/*
	 * Valid vectors are 16-255, however, nested Hyper-V attempts to write
	 * default '0x10000' value on boot and this should not #GP. We need to
	 * allow zero-initing the register from host as well.
	 */
	if (vector < HV_SYNIC_FIRST_VALID_VECTOR && !host && !masked)
		return 1;
	/*
	 * Guest may configure multiple SINTs to use the same vector, so
	 * we maintain a bitmap of vectors handled by synic, and a
	 * bitmap of vectors with auto-eoi behavior.  The bitmaps are
	 * updated here, and atomically queried on fast paths.
	 */
	old_vector = synic_read_sint(synic, sint) & HV_SYNIC_SINT_VECTOR_MASK;

	atomic64_set(&synic->sint[sint], data);

	/* The old vector may no longer be in use by any SINT... */
	synic_update_vector(synic, old_vector);

	/* ...while the new one now is. */
	synic_update_vector(synic, vector);

	/* Load SynIC vectors into EOI exit bitmap */
	kvm_make_request(KVM_REQ_SCAN_IOAPIC, hv_synic_to_vcpu(synic));
	return 0;
}
1895c919412SAndrey Smetanin 
190d3457c87SRoman Kagan static struct kvm_vcpu *get_vcpu_by_vpidx(struct kvm *kvm, u32 vpidx)
191d3457c87SRoman Kagan {
192d3457c87SRoman Kagan 	struct kvm_vcpu *vcpu = NULL;
19346808a4cSMarc Zyngier 	unsigned long i;
194d3457c87SRoman Kagan 
1959170200eSVitaly Kuznetsov 	if (vpidx >= KVM_MAX_VCPUS)
1969170200eSVitaly Kuznetsov 		return NULL;
1979170200eSVitaly Kuznetsov 
198d3457c87SRoman Kagan 	vcpu = kvm_get_vcpu(kvm, vpidx);
199f2bc14b6SVitaly Kuznetsov 	if (vcpu && kvm_hv_get_vpindex(vcpu) == vpidx)
200d3457c87SRoman Kagan 		return vcpu;
201d3457c87SRoman Kagan 	kvm_for_each_vcpu(i, vcpu, kvm)
202f2bc14b6SVitaly Kuznetsov 		if (kvm_hv_get_vpindex(vcpu) == vpidx)
203d3457c87SRoman Kagan 			return vcpu;
204d3457c87SRoman Kagan 	return NULL;
205d3457c87SRoman Kagan }
206d3457c87SRoman Kagan 
207d3457c87SRoman Kagan static struct kvm_vcpu_hv_synic *synic_get(struct kvm *kvm, u32 vpidx)
2085c919412SAndrey Smetanin {
2095c919412SAndrey Smetanin 	struct kvm_vcpu *vcpu;
2105c919412SAndrey Smetanin 	struct kvm_vcpu_hv_synic *synic;
2115c919412SAndrey Smetanin 
212d3457c87SRoman Kagan 	vcpu = get_vcpu_by_vpidx(kvm, vpidx);
213919f4ebcSWanpeng Li 	if (!vcpu || !to_hv_vcpu(vcpu))
2145c919412SAndrey Smetanin 		return NULL;
215e0121fa2SVitaly Kuznetsov 	synic = to_hv_synic(vcpu);
2165c919412SAndrey Smetanin 	return (synic->active) ? synic : NULL;
2175c919412SAndrey Smetanin }
2185c919412SAndrey Smetanin 
/*
 * A SINT has been acked: re-arm any synthetic timers that still have a
 * message pending for it, and notify the GSI routed to this SINT (if any).
 */
static void kvm_hv_notify_acked_sint(struct kvm_vcpu *vcpu, u32 sint)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	struct kvm_vcpu_hv_stimer *stimer;
	int gsi, idx;

	trace_kvm_hv_notify_acked_sint(vcpu->vcpu_id, sint);

	/* Try to deliver pending Hyper-V SynIC timers messages */
	for (idx = 0; idx < ARRAY_SIZE(hv_vcpu->stimer); idx++) {
		stimer = &hv_vcpu->stimer[idx];
		if (stimer->msg_pending && stimer->config.enable &&
		    !stimer->config.direct_mode &&
		    stimer->config.sintx == sint)
			stimer_mark_pending(stimer, false);
	}

	/* sint_to_gsi is read under irq_srcu; -1 means no GSI is routed. */
	idx = srcu_read_lock(&kvm->irq_srcu);
	gsi = atomic_read(&synic->sint_to_gsi[sint]);
	if (gsi != -1)
		kvm_notify_acked_gsi(kvm, gsi);
	srcu_read_unlock(&kvm->irq_srcu, idx);
}
2445c919412SAndrey Smetanin 
/*
 * Forward a guest-initiated SynIC MSR write to userspace via a
 * KVM_EXIT_HYPERV_SYNIC exit, carrying a snapshot of the SynIC state.
 * (Only called for !host writes; see synic_set_msr().)
 */
static void synic_exit(struct kvm_vcpu_hv_synic *synic, u32 msr)
{
	struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNIC;
	hv_vcpu->exit.u.synic.msr = msr;
	hv_vcpu->exit.u.synic.control = synic->control;
	hv_vcpu->exit.u.synic.evt_page = synic->evt_page;
	hv_vcpu->exit.u.synic.msg_page = synic->msg_page;

	kvm_make_request(KVM_REQ_HV_EXIT, vcpu);
}
258db397571SAndrey Smetanin 
/*
 * Handle a write to one of the per-vCPU SynIC MSRs.  Guest writes to
 * SCONTROL/SIEFP/SIMP are additionally forwarded to userspace via
 * synic_exit().  Returns 0 on success, non-zero to signal an error.
 */
static int synic_set_msr(struct kvm_vcpu_hv_synic *synic,
			 u32 msr, u64 data, bool host)
{
	struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
	int ret;

	/* Inactive SynIC: only allow the host to zero-init registers. */
	if (!synic->active && (!host || data))
		return 1;

	trace_kvm_hv_synic_set_msr(vcpu->vcpu_id, msr, data, host);

	ret = 0;
	switch (msr) {
	case HV_X64_MSR_SCONTROL:
		synic->control = data;
		if (!host)
			synic_exit(synic, msr);
		break;
	case HV_X64_MSR_SVERSION:
		/* SVERSION is read-only for the guest. */
		if (!host) {
			ret = 1;
			break;
		}
		synic->version = data;
		break;
	case HV_X64_MSR_SIEFP:
		/*
		 * Zero the event flags page on guest enable, unless
		 * userspace asked to preserve it (dont_zero_synic_pages).
		 */
		if ((data & HV_SYNIC_SIEFP_ENABLE) && !host &&
		    !synic->dont_zero_synic_pages)
			if (kvm_clear_guest(vcpu->kvm,
					    data & PAGE_MASK, PAGE_SIZE)) {
				ret = 1;
				break;
			}
		synic->evt_page = data;
		if (!host)
			synic_exit(synic, msr);
		break;
	case HV_X64_MSR_SIMP:
		/* Same zero-on-enable treatment for the message page. */
		if ((data & HV_SYNIC_SIMP_ENABLE) && !host &&
		    !synic->dont_zero_synic_pages)
			if (kvm_clear_guest(vcpu->kvm,
					    data & PAGE_MASK, PAGE_SIZE)) {
				ret = 1;
				break;
			}
		synic->msg_page = data;
		if (!host)
			synic_exit(synic, msr);
		break;
	case HV_X64_MSR_EOM: {
		int i;

		if (!synic->active)
			break;

		/* End-of-message: re-scan every SINT for acked work. */
		for (i = 0; i < ARRAY_SIZE(synic->sint); i++)
			kvm_hv_notify_acked_sint(vcpu, i);
		break;
	}
	case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
		ret = synic_set_sint(synic, msr - HV_X64_MSR_SINT0, data, host);
		break;
	default:
		ret = 1;
		break;
	}
	return ret;
}
3275c919412SAndrey Smetanin 
328f97f5a56SJon Doron static bool kvm_hv_is_syndbg_enabled(struct kvm_vcpu *vcpu)
329f97f5a56SJon Doron {
33010d7bf1eSVitaly Kuznetsov 	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
331f97f5a56SJon Doron 
33210d7bf1eSVitaly Kuznetsov 	return hv_vcpu->cpuid_cache.syndbg_cap_eax &
33310d7bf1eSVitaly Kuznetsov 		HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING;
334f97f5a56SJon Doron }
335f97f5a56SJon Doron 
336f97f5a56SJon Doron static int kvm_hv_syndbg_complete_userspace(struct kvm_vcpu *vcpu)
337f97f5a56SJon Doron {
33805f04ae4SVitaly Kuznetsov 	struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
339f97f5a56SJon Doron 
340f97f5a56SJon Doron 	if (vcpu->run->hyperv.u.syndbg.msr == HV_X64_MSR_SYNDBG_CONTROL)
341f97f5a56SJon Doron 		hv->hv_syndbg.control.status =
342f97f5a56SJon Doron 			vcpu->run->hyperv.u.syndbg.status;
343f97f5a56SJon Doron 	return 1;
344f97f5a56SJon Doron }
345f97f5a56SJon Doron 
/*
 * Forward a guest syndbg MSR write to userspace via a
 * KVM_EXIT_HYPERV_SYNDBG exit, with a snapshot of the syndbg control
 * state; completion is handled by kvm_hv_syndbg_complete_userspace().
 */
static void syndbg_exit(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_hv_syndbg *syndbg = to_hv_syndbg(vcpu);
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNDBG;
	hv_vcpu->exit.u.syndbg.msr = msr;
	hv_vcpu->exit.u.syndbg.control = syndbg->control.control;
	hv_vcpu->exit.u.syndbg.send_page = syndbg->control.send_page;
	hv_vcpu->exit.u.syndbg.recv_page = syndbg->control.recv_page;
	hv_vcpu->exit.u.syndbg.pending_page = syndbg->control.pending_page;
	vcpu->arch.complete_userspace_io =
			kvm_hv_syndbg_complete_userspace;

	kvm_make_request(KVM_REQ_HV_EXIT, vcpu);
}
362f97f5a56SJon Doron 
/*
 * Handle a write to one of the synthetic-debugger MSRs.  Guest writes
 * require the syndbg capability; CONTROL and PENDING_BUFFER writes are
 * forwarded to userspace.  Unknown MSRs are silently ignored.
 */
static int syndbg_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
{
	struct kvm_hv_syndbg *syndbg = to_hv_syndbg(vcpu);

	if (!kvm_hv_is_syndbg_enabled(vcpu) && !host)
		return 1;

	trace_kvm_hv_syndbg_set_msr(vcpu->vcpu_id,
				    to_hv_vcpu(vcpu)->vp_index, msr, data);
	switch (msr) {
	case HV_X64_MSR_SYNDBG_CONTROL:
		syndbg->control.control = data;
		if (!host)
			syndbg_exit(vcpu, msr);
		break;
	case HV_X64_MSR_SYNDBG_STATUS:
		syndbg->control.status = data;
		break;
	case HV_X64_MSR_SYNDBG_SEND_BUFFER:
		syndbg->control.send_page = data;
		break;
	case HV_X64_MSR_SYNDBG_RECV_BUFFER:
		syndbg->control.recv_page = data;
		break;
	case HV_X64_MSR_SYNDBG_PENDING_BUFFER:
		syndbg->control.pending_page = data;
		if (!host)
			syndbg_exit(vcpu, msr);
		break;
	case HV_X64_MSR_SYNDBG_OPTIONS:
		syndbg->options = data;
		break;
	default:
		break;
	}

	return 0;
}
401f97f5a56SJon Doron 
/*
 * Read one of the synthetic-debugger MSRs.  Guest reads require the
 * syndbg capability.  Unknown MSRs leave *pdata untouched and return
 * success.
 */
static int syndbg_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
{
	struct kvm_hv_syndbg *syndbg = to_hv_syndbg(vcpu);

	if (!kvm_hv_is_syndbg_enabled(vcpu) && !host)
		return 1;

	switch (msr) {
	case HV_X64_MSR_SYNDBG_CONTROL:
		*pdata = syndbg->control.control;
		break;
	case HV_X64_MSR_SYNDBG_STATUS:
		*pdata = syndbg->control.status;
		break;
	case HV_X64_MSR_SYNDBG_SEND_BUFFER:
		*pdata = syndbg->control.send_page;
		break;
	case HV_X64_MSR_SYNDBG_RECV_BUFFER:
		*pdata = syndbg->control.recv_page;
		break;
	case HV_X64_MSR_SYNDBG_PENDING_BUFFER:
		*pdata = syndbg->control.pending_page;
		break;
	case HV_X64_MSR_SYNDBG_OPTIONS:
		*pdata = syndbg->options;
		break;
	default:
		break;
	}

	trace_kvm_hv_syndbg_get_msr(vcpu->vcpu_id, kvm_hv_get_vpindex(vcpu), msr, *pdata);

	return 0;
}
436f97f5a56SJon Doron 
/*
 * Read one of the per-vCPU SynIC MSRs.  EOM always reads as 0.
 * Returns 0 on success; 1 for unknown MSRs, or for guest reads while
 * the SynIC is inactive.
 */
static int synic_get_msr(struct kvm_vcpu_hv_synic *synic, u32 msr, u64 *pdata,
			 bool host)
{
	int ret;

	if (!synic->active && !host)
		return 1;

	ret = 0;
	switch (msr) {
	case HV_X64_MSR_SCONTROL:
		*pdata = synic->control;
		break;
	case HV_X64_MSR_SVERSION:
		*pdata = synic->version;
		break;
	case HV_X64_MSR_SIEFP:
		*pdata = synic->evt_page;
		break;
	case HV_X64_MSR_SIMP:
		*pdata = synic->msg_page;
		break;
	case HV_X64_MSR_EOM:
		/* EOM is write-only in effect; reads yield 0. */
		*pdata = 0;
		break;
	case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
		*pdata = atomic64_read(&synic->sint[msr - HV_X64_MSR_SINT0]);
		break;
	default:
		ret = 1;
		break;
	}
	return ret;
}
4715c919412SAndrey Smetanin 
/*
 * Inject the vector programmed into @sint into this vCPU's local APIC
 * (fixed delivery, physical mode, DEST_SELF shorthand).
 *
 * Returns the APIC delivery result, -EINVAL for a bad SINT index or a
 * missing in-kernel LAPIC, or -ENOENT if the SINT is masked.
 */
static int synic_set_irq(struct kvm_vcpu_hv_synic *synic, u32 sint)
{
	struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
	struct kvm_lapic_irq irq;
	int ret, vector;

	if (KVM_BUG_ON(!lapic_in_kernel(vcpu), vcpu->kvm))
		return -EINVAL;

	if (sint >= ARRAY_SIZE(synic->sint))
		return -EINVAL;

	vector = synic_get_sint_vector(synic_read_sint(synic, sint));
	if (vector < 0)
		return -ENOENT;

	memset(&irq, 0, sizeof(irq));
	irq.shorthand = APIC_DEST_SELF;
	irq.dest_mode = APIC_DEST_PHYSICAL;
	irq.delivery_mode = APIC_DM_FIXED;
	irq.vector = vector;
	irq.level = 1;

	ret = kvm_irq_delivery_to_apic(vcpu->kvm, vcpu->arch.apic, &irq, NULL);
	trace_kvm_hv_synic_set_irq(vcpu->vcpu_id, sint, irq.vector, ret);
	return ret;
}
4995c919412SAndrey Smetanin 
500d3457c87SRoman Kagan int kvm_hv_synic_set_irq(struct kvm *kvm, u32 vpidx, u32 sint)
5015c919412SAndrey Smetanin {
5025c919412SAndrey Smetanin 	struct kvm_vcpu_hv_synic *synic;
5035c919412SAndrey Smetanin 
504d3457c87SRoman Kagan 	synic = synic_get(kvm, vpidx);
5055c919412SAndrey Smetanin 	if (!synic)
5065c919412SAndrey Smetanin 		return -EINVAL;
5075c919412SAndrey Smetanin 
5085c919412SAndrey Smetanin 	return synic_set_irq(synic, sint);
5095c919412SAndrey Smetanin }
5105c919412SAndrey Smetanin 
5115c919412SAndrey Smetanin void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector)
5125c919412SAndrey Smetanin {
513e0121fa2SVitaly Kuznetsov 	struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);
5145c919412SAndrey Smetanin 	int i;
5155c919412SAndrey Smetanin 
51618659a9cSAndrey Smetanin 	trace_kvm_hv_synic_send_eoi(vcpu->vcpu_id, vector);
5175c919412SAndrey Smetanin 
5185c919412SAndrey Smetanin 	for (i = 0; i < ARRAY_SIZE(synic->sint); i++)
5195c919412SAndrey Smetanin 		if (synic_get_sint_vector(synic_read_sint(synic, i)) == vector)
5205c919412SAndrey Smetanin 			kvm_hv_notify_acked_sint(vcpu, i);
5215c919412SAndrey Smetanin }
5225c919412SAndrey Smetanin 
523d3457c87SRoman Kagan static int kvm_hv_set_sint_gsi(struct kvm *kvm, u32 vpidx, u32 sint, int gsi)
5245c919412SAndrey Smetanin {
5255c919412SAndrey Smetanin 	struct kvm_vcpu_hv_synic *synic;
5265c919412SAndrey Smetanin 
527d3457c87SRoman Kagan 	synic = synic_get(kvm, vpidx);
5285c919412SAndrey Smetanin 	if (!synic)
5295c919412SAndrey Smetanin 		return -EINVAL;
5305c919412SAndrey Smetanin 
5315c919412SAndrey Smetanin 	if (sint >= ARRAY_SIZE(synic->sint_to_gsi))
5325c919412SAndrey Smetanin 		return -EINVAL;
5335c919412SAndrey Smetanin 
5345c919412SAndrey Smetanin 	atomic_set(&synic->sint_to_gsi[sint], gsi);
5355c919412SAndrey Smetanin 	return 0;
5365c919412SAndrey Smetanin }
5375c919412SAndrey Smetanin 
/*
 * Called when the IRQ routing table changes: rebuild the SINT->GSI
 * reverse mapping from every HV_SINT routing entry.
 */
void kvm_hv_irq_routing_update(struct kvm *kvm)
{
	struct kvm_irq_routing_table *irq_rt;
	struct kvm_kernel_irq_routing_entry *e;
	u32 gsi;

	/* Caller holds irq_lock or irq_srcu, per the dereference check. */
	irq_rt = srcu_dereference_check(kvm->irq_routing, &kvm->irq_srcu,
					lockdep_is_held(&kvm->irq_lock));

	for (gsi = 0; gsi < irq_rt->nr_rt_entries; gsi++) {
		hlist_for_each_entry(e, &irq_rt->map[gsi], link) {
			if (e->type == KVM_IRQ_ROUTING_HV_SINT)
				kvm_hv_set_sint_gsi(kvm, e->hv_sint.vcpu,
						    e->hv_sint.sint, gsi);
		}
	}
}
5555c919412SAndrey Smetanin 
5565c919412SAndrey Smetanin static void synic_init(struct kvm_vcpu_hv_synic *synic)
5575c919412SAndrey Smetanin {
5585c919412SAndrey Smetanin 	int i;
5595c919412SAndrey Smetanin 
5605c919412SAndrey Smetanin 	memset(synic, 0, sizeof(*synic));
5615c919412SAndrey Smetanin 	synic->version = HV_SYNIC_VERSION_1;
5625c919412SAndrey Smetanin 	for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
5635c919412SAndrey Smetanin 		atomic64_set(&synic->sint[i], HV_SYNIC_SINT_MASKED);
5645c919412SAndrey Smetanin 		atomic_set(&synic->sint_to_gsi[i], -1);
5655c919412SAndrey Smetanin 	}
5665c919412SAndrey Smetanin }
5675c919412SAndrey Smetanin 
/*
 * Return the Hyper-V time reference counter (100ns units), derived from
 * vCPU0's L1 TSC and the TSC page parameters when the TSC page is set.
 */
static u64 get_time_ref_counter(struct kvm *kvm)
{
	struct kvm_hv *hv = to_kvm_hv(kvm);
	struct kvm_vcpu *vcpu;
	u64 tsc;

	/*
	 * Fall back to get_kvmclock_ns() when TSC page hasn't been set up,
	 * is broken, disabled or being updated.
	 */
	if (hv->hv_tsc_page_status != HV_TSC_PAGE_SET)
		return div_u64(get_kvmclock_ns(kvm), 100);

	/* ref counter = tsc * scale >> 64 + offset, per the TSC page. */
	vcpu = kvm_get_vcpu(kvm, 0);
	tsc = kvm_read_l1_tsc(vcpu, rdtsc());
	return mul_u64_u64_shr(tsc, hv->tsc_ref.tsc_scale, 64)
		+ hv->tsc_ref.tsc_offset;
}
58693bf4172SAndrey Smetanin 
587f3b138c5SAndrey Smetanin static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer,
5881f4b34f8SAndrey Smetanin 				bool vcpu_kick)
5891f4b34f8SAndrey Smetanin {
590aafa97fdSVitaly Kuznetsov 	struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
5911f4b34f8SAndrey Smetanin 
5921f4b34f8SAndrey Smetanin 	set_bit(stimer->index,
593ef3f3980SVitaly Kuznetsov 		to_hv_vcpu(vcpu)->stimer_pending_bitmap);
5941f4b34f8SAndrey Smetanin 	kvm_make_request(KVM_REQ_HV_STIMER, vcpu);
5951f4b34f8SAndrey Smetanin 	if (vcpu_kick)
5961f4b34f8SAndrey Smetanin 		kvm_vcpu_kick(vcpu);
5971f4b34f8SAndrey Smetanin }
5981f4b34f8SAndrey Smetanin 
5991f4b34f8SAndrey Smetanin static void stimer_cleanup(struct kvm_vcpu_hv_stimer *stimer)
6001f4b34f8SAndrey Smetanin {
601aafa97fdSVitaly Kuznetsov 	struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
6021f4b34f8SAndrey Smetanin 
603aafa97fdSVitaly Kuznetsov 	trace_kvm_hv_stimer_cleanup(hv_stimer_to_vcpu(stimer)->vcpu_id,
604ac3e5fcaSAndrey Smetanin 				    stimer->index);
605ac3e5fcaSAndrey Smetanin 
606019b9781SAndrey Smetanin 	hrtimer_cancel(&stimer->timer);
6071f4b34f8SAndrey Smetanin 	clear_bit(stimer->index,
608ef3f3980SVitaly Kuznetsov 		  to_hv_vcpu(vcpu)->stimer_pending_bitmap);
6091f4b34f8SAndrey Smetanin 	stimer->msg_pending = false;
610f808495dSAndrey Smetanin 	stimer->exp_time = 0;
6111f4b34f8SAndrey Smetanin }
6121f4b34f8SAndrey Smetanin 
/*
 * hrtimer expiry handler: defer actual message delivery to vcpu context by
 * marking the timer pending and kicking the vcpu.  Never restarts itself;
 * periodic timers are re-armed from kvm_hv_process_stimers().
 */
6131f4b34f8SAndrey Smetanin static enum hrtimer_restart stimer_timer_callback(struct hrtimer *timer)
6141f4b34f8SAndrey Smetanin {
6151f4b34f8SAndrey Smetanin 	struct kvm_vcpu_hv_stimer *stimer;
6161f4b34f8SAndrey Smetanin 
6171f4b34f8SAndrey Smetanin 	stimer = container_of(timer, struct kvm_vcpu_hv_stimer, timer);
618aafa97fdSVitaly Kuznetsov 	trace_kvm_hv_stimer_callback(hv_stimer_to_vcpu(stimer)->vcpu_id,
619ac3e5fcaSAndrey Smetanin 				     stimer->index);
620f3b138c5SAndrey Smetanin 	stimer_mark_pending(stimer, true);
6211f4b34f8SAndrey Smetanin 
6221f4b34f8SAndrey Smetanin 	return HRTIMER_NORESTART;
6231f4b34f8SAndrey Smetanin }
6241f4b34f8SAndrey Smetanin 
625f808495dSAndrey Smetanin /*
626f808495dSAndrey Smetanin  * stimer_start() assumptions:
627f808495dSAndrey Smetanin  * a) stimer->count is not equal to 0
628f808495dSAndrey Smetanin  * b) stimer->config has HV_STIMER_ENABLE flag
629f808495dSAndrey Smetanin  */
6301f4b34f8SAndrey Smetanin static int stimer_start(struct kvm_vcpu_hv_stimer *stimer)
6311f4b34f8SAndrey Smetanin {
6321f4b34f8SAndrey Smetanin 	u64 time_now;
6331f4b34f8SAndrey Smetanin 	ktime_t ktime_now;
6341f4b34f8SAndrey Smetanin 
635aafa97fdSVitaly Kuznetsov 	time_now = get_time_ref_counter(hv_stimer_to_vcpu(stimer)->kvm);
6361f4b34f8SAndrey Smetanin 	ktime_now = ktime_get();
6371f4b34f8SAndrey Smetanin 
6386a058a1eSVitaly Kuznetsov 	if (stimer->config.periodic) {
639f808495dSAndrey Smetanin 		if (stimer->exp_time) {
640f808495dSAndrey Smetanin 			if (time_now >= stimer->exp_time) {
641f808495dSAndrey Smetanin 				u64 remainder;
6421f4b34f8SAndrey Smetanin 
				/*
				 * Missed one or more periods: advance exp_time
				 * to the next period boundary after time_now
				 * instead of queueing every missed tick.
				 */
643f808495dSAndrey Smetanin 				div64_u64_rem(time_now - stimer->exp_time,
644f808495dSAndrey Smetanin 					      stimer->count, &remainder);
645f808495dSAndrey Smetanin 				stimer->exp_time =
646f808495dSAndrey Smetanin 					time_now + (stimer->count - remainder);
647f808495dSAndrey Smetanin 			}
648f808495dSAndrey Smetanin 		} else
6491f4b34f8SAndrey Smetanin 			stimer->exp_time = time_now + stimer->count;
650f808495dSAndrey Smetanin 
651ac3e5fcaSAndrey Smetanin 		trace_kvm_hv_stimer_start_periodic(
652aafa97fdSVitaly Kuznetsov 					hv_stimer_to_vcpu(stimer)->vcpu_id,
653ac3e5fcaSAndrey Smetanin 					stimer->index,
654ac3e5fcaSAndrey Smetanin 					time_now, stimer->exp_time);
655ac3e5fcaSAndrey Smetanin 
		/* Time ref counter ticks are 100ns units; convert delta to ns. */
6561f4b34f8SAndrey Smetanin 		hrtimer_start(&stimer->timer,
657f808495dSAndrey Smetanin 			      ktime_add_ns(ktime_now,
658f808495dSAndrey Smetanin 					   100 * (stimer->exp_time - time_now)),
6591f4b34f8SAndrey Smetanin 			      HRTIMER_MODE_ABS);
6601f4b34f8SAndrey Smetanin 		return 0;
6611f4b34f8SAndrey Smetanin 	}
	/* One-shot: count is an absolute expiration time, not a period. */
6621f4b34f8SAndrey Smetanin 	stimer->exp_time = stimer->count;
6631f4b34f8SAndrey Smetanin 	if (time_now >= stimer->count) {
6641f4b34f8SAndrey Smetanin 		/*
6651f4b34f8SAndrey Smetanin 		 * Expire timer according to Hypervisor Top-Level Functional
6661f4b34f8SAndrey Smetanin 		 * specification v4(15.3.1):
6671f4b34f8SAndrey Smetanin 		 * "If a one shot is enabled and the specified count is in
6681f4b34f8SAndrey Smetanin 		 * the past, it will expire immediately."
6691f4b34f8SAndrey Smetanin 		 */
670f3b138c5SAndrey Smetanin 		stimer_mark_pending(stimer, false);
6711f4b34f8SAndrey Smetanin 		return 0;
6721f4b34f8SAndrey Smetanin 	}
6731f4b34f8SAndrey Smetanin 
674aafa97fdSVitaly Kuznetsov 	trace_kvm_hv_stimer_start_one_shot(hv_stimer_to_vcpu(stimer)->vcpu_id,
675ac3e5fcaSAndrey Smetanin 					   stimer->index,
676ac3e5fcaSAndrey Smetanin 					   time_now, stimer->count);
677ac3e5fcaSAndrey Smetanin 
6781f4b34f8SAndrey Smetanin 	hrtimer_start(&stimer->timer,
6791f4b34f8SAndrey Smetanin 		      ktime_add_ns(ktime_now, 100 * (stimer->count - time_now)),
6801f4b34f8SAndrey Smetanin 		      HRTIMER_MODE_ABS);
6811f4b34f8SAndrey Smetanin 	return 0;
6821f4b34f8SAndrey Smetanin }
6831f4b34f8SAndrey Smetanin 
/*
 * Handle a write to an HV_X64_MSR_STIMERn_CONFIG MSR.  Returns 0 on success,
 * 1 to signal #GP to the guest (SynIC inactive, or direct mode requested
 * without the CPUID feature bit while CPUID enforcement is on).
 */
6841f4b34f8SAndrey Smetanin static int stimer_set_config(struct kvm_vcpu_hv_stimer *stimer, u64 config,
6851f4b34f8SAndrey Smetanin 			     bool host)
6861f4b34f8SAndrey Smetanin {
6878644f771SVitaly Kuznetsov 	union hv_stimer_config new_config = {.as_uint64 = config},
6888644f771SVitaly Kuznetsov 		old_config = {.as_uint64 = stimer->config.as_uint64};
689aafa97fdSVitaly Kuznetsov 	struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
6901aa8a418SVitaly Kuznetsov 	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
691e0121fa2SVitaly Kuznetsov 	struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);
692dbcf3f96SVitaly Kuznetsov 
	/* Host may always clear the config; anything else needs an active SynIC. */
693b1e34d32SVitaly Kuznetsov 	if (!synic->active && (!host || config))
694dbcf3f96SVitaly Kuznetsov 		return 1;
6956a058a1eSVitaly Kuznetsov 
6961aa8a418SVitaly Kuznetsov 	if (unlikely(!host && hv_vcpu->enforce_cpuid && new_config.direct_mode &&
6971aa8a418SVitaly Kuznetsov 		     !(hv_vcpu->cpuid_cache.features_edx &
6981aa8a418SVitaly Kuznetsov 		       HV_STIMER_DIRECT_MODE_AVAILABLE)))
6991aa8a418SVitaly Kuznetsov 		return 1;
7001aa8a418SVitaly Kuznetsov 
701aafa97fdSVitaly Kuznetsov 	trace_kvm_hv_stimer_set_config(hv_stimer_to_vcpu(stimer)->vcpu_id,
702ac3e5fcaSAndrey Smetanin 				       stimer->index, config, host);
703ac3e5fcaSAndrey Smetanin 
704f3b138c5SAndrey Smetanin 	stimer_cleanup(stimer);
	/* A message-mode timer with SINTx == 0 has nowhere to deliver: disable. */
7058644f771SVitaly Kuznetsov 	if (old_config.enable &&
7068644f771SVitaly Kuznetsov 	    !new_config.direct_mode && new_config.sintx == 0)
7076a058a1eSVitaly Kuznetsov 		new_config.enable = 0;
7086a058a1eSVitaly Kuznetsov 	stimer->config.as_uint64 = new_config.as_uint64;
7098644f771SVitaly Kuznetsov 
710013cc6ebSVitaly Kuznetsov 	if (stimer->config.enable)
711f3b138c5SAndrey Smetanin 		stimer_mark_pending(stimer, false);
712013cc6ebSVitaly Kuznetsov 
7131f4b34f8SAndrey Smetanin 	return 0;
7141f4b34f8SAndrey Smetanin }
7151f4b34f8SAndrey Smetanin 
/*
 * Handle a write to an HV_X64_MSR_STIMERn_COUNT MSR.  Returns 0 on success,
 * 1 to signal #GP to the guest when the SynIC is inactive.  Per the TLFS a
 * zero count disables the timer; a non-zero write arms it if auto_enable.
 */
7161f4b34f8SAndrey Smetanin static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count,
7171f4b34f8SAndrey Smetanin 			    bool host)
7181f4b34f8SAndrey Smetanin {
719aafa97fdSVitaly Kuznetsov 	struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
720e0121fa2SVitaly Kuznetsov 	struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);
721dbcf3f96SVitaly Kuznetsov 
722b1e34d32SVitaly Kuznetsov 	if (!synic->active && (!host || count))
723dbcf3f96SVitaly Kuznetsov 		return 1;
724dbcf3f96SVitaly Kuznetsov 
725aafa97fdSVitaly Kuznetsov 	trace_kvm_hv_stimer_set_count(hv_stimer_to_vcpu(stimer)->vcpu_id,
726ac3e5fcaSAndrey Smetanin 				      stimer->index, count, host);
727ac3e5fcaSAndrey Smetanin 
7281f4b34f8SAndrey Smetanin 	stimer_cleanup(stimer);
729f3b138c5SAndrey Smetanin 	stimer->count = count;
7301f4b34f8SAndrey Smetanin 	if (stimer->count == 0)
7316a058a1eSVitaly Kuznetsov 		stimer->config.enable = 0;
7326a058a1eSVitaly Kuznetsov 	else if (stimer->config.auto_enable)
7336a058a1eSVitaly Kuznetsov 		stimer->config.enable = 1;
734013cc6ebSVitaly Kuznetsov 
735013cc6ebSVitaly Kuznetsov 	if (stimer->config.enable)
736f3b138c5SAndrey Smetanin 		stimer_mark_pending(stimer, false);
737013cc6ebSVitaly Kuznetsov 
7381f4b34f8SAndrey Smetanin 	return 0;
7391f4b34f8SAndrey Smetanin }
7401f4b34f8SAndrey Smetanin 
/* Read back the raw 64-bit STIMERn_CONFIG MSR value. */
7411f4b34f8SAndrey Smetanin static int stimer_get_config(struct kvm_vcpu_hv_stimer *stimer, u64 *pconfig)
7421f4b34f8SAndrey Smetanin {
7436a058a1eSVitaly Kuznetsov 	*pconfig = stimer->config.as_uint64;
7441f4b34f8SAndrey Smetanin 	return 0;
7451f4b34f8SAndrey Smetanin }
7461f4b34f8SAndrey Smetanin 
/* Read back the raw 64-bit STIMERn_COUNT MSR value. */
7471f4b34f8SAndrey Smetanin static int stimer_get_count(struct kvm_vcpu_hv_stimer *stimer, u64 *pcount)
7481f4b34f8SAndrey Smetanin {
7491f4b34f8SAndrey Smetanin 	*pcount = stimer->count;
7501f4b34f8SAndrey Smetanin 	return 0;
7511f4b34f8SAndrey Smetanin }
7521f4b34f8SAndrey Smetanin 
/*
 * Write @src_msg into the SINT's slot in the guest's SynIC message page and
 * raise the SINT interrupt.  Returns 0 on success (or when the slot is busy
 * and @no_retry is set, in which case the message is silently dropped),
 * -ENOENT if the message page is disabled, -EAGAIN if the slot is busy and
 * msg_pending was set for the guest to EOM, or a negative error from guest
 * memory access / interrupt injection.
 */
7531f4b34f8SAndrey Smetanin static int synic_deliver_msg(struct kvm_vcpu_hv_synic *synic, u32 sint,
7547deec5e0SRoman Kagan 			     struct hv_message *src_msg, bool no_retry)
7551f4b34f8SAndrey Smetanin {
756e0121fa2SVitaly Kuznetsov 	struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
7573a0e7731SRoman Kagan 	int msg_off = offsetof(struct hv_message_page, sint_message[sint]);
7583a0e7731SRoman Kagan 	gfn_t msg_page_gfn;
7593a0e7731SRoman Kagan 	struct hv_message_header hv_hdr;
7601f4b34f8SAndrey Smetanin 	int r;
7611f4b34f8SAndrey Smetanin 
7621f4b34f8SAndrey Smetanin 	if (!(synic->msg_page & HV_SYNIC_SIMP_ENABLE))
7631f4b34f8SAndrey Smetanin 		return -ENOENT;
7641f4b34f8SAndrey Smetanin 
7653a0e7731SRoman Kagan 	msg_page_gfn = synic->msg_page >> PAGE_SHIFT;
7661f4b34f8SAndrey Smetanin 
7673a0e7731SRoman Kagan 	/*
7683a0e7731SRoman Kagan 	 * Strictly following the spec-mandated ordering would assume setting
7693a0e7731SRoman Kagan 	 * .msg_pending before checking .message_type.  However, this function
7703a0e7731SRoman Kagan 	 * is only called in vcpu context so the entire update is atomic from
7713a0e7731SRoman Kagan 	 * guest POV and thus the exact order here doesn't matter.
7723a0e7731SRoman Kagan 	 */
7733a0e7731SRoman Kagan 	r = kvm_vcpu_read_guest_page(vcpu, msg_page_gfn, &hv_hdr.message_type,
7743a0e7731SRoman Kagan 				     msg_off + offsetof(struct hv_message,
7753a0e7731SRoman Kagan 							header.message_type),
7763a0e7731SRoman Kagan 				     sizeof(hv_hdr.message_type));
7773a0e7731SRoman Kagan 	if (r < 0)
7781f4b34f8SAndrey Smetanin 		return r;
7793a0e7731SRoman Kagan 
	/* Slot still holds a previous message the guest hasn't consumed. */
7803a0e7731SRoman Kagan 	if (hv_hdr.message_type != HVMSG_NONE) {
7817deec5e0SRoman Kagan 		if (no_retry)
7827deec5e0SRoman Kagan 			return 0;
7837deec5e0SRoman Kagan 
7843a0e7731SRoman Kagan 		hv_hdr.message_flags.msg_pending = 1;
7853a0e7731SRoman Kagan 		r = kvm_vcpu_write_guest_page(vcpu, msg_page_gfn,
7863a0e7731SRoman Kagan 					      &hv_hdr.message_flags,
7873a0e7731SRoman Kagan 					      msg_off +
7883a0e7731SRoman Kagan 					      offsetof(struct hv_message,
7893a0e7731SRoman Kagan 						       header.message_flags),
7903a0e7731SRoman Kagan 					      sizeof(hv_hdr.message_flags));
7913a0e7731SRoman Kagan 		if (r < 0)
7923a0e7731SRoman Kagan 			return r;
7933a0e7731SRoman Kagan 		return -EAGAIN;
7943a0e7731SRoman Kagan 	}
7953a0e7731SRoman Kagan 
7963a0e7731SRoman Kagan 	r = kvm_vcpu_write_guest_page(vcpu, msg_page_gfn, src_msg, msg_off,
7973a0e7731SRoman Kagan 				      sizeof(src_msg->header) +
7983a0e7731SRoman Kagan 				      src_msg->header.payload_size);
7993a0e7731SRoman Kagan 	if (r < 0)
8003a0e7731SRoman Kagan 		return r;
8013a0e7731SRoman Kagan 
8023a0e7731SRoman Kagan 	r = synic_set_irq(synic, sint);
8033a0e7731SRoman Kagan 	if (r < 0)
8043a0e7731SRoman Kagan 		return r;
	/* r == 0 means the interrupt could not be delivered to any CPU. */
8053a0e7731SRoman Kagan 	if (r == 0)
8063a0e7731SRoman Kagan 		return -EFAULT;
8073a0e7731SRoman Kagan 	return 0;
8081f4b34f8SAndrey Smetanin }
8091f4b34f8SAndrey Smetanin 
/*
 * Deliver the timer-expiry message for a message-mode stimer via its SynIC
 * SINT, stamping the expiration and delivery times into the payload first.
 */
8100cdeabb1SAndrey Smetanin static int stimer_send_msg(struct kvm_vcpu_hv_stimer *stimer)
8111f4b34f8SAndrey Smetanin {
812aafa97fdSVitaly Kuznetsov 	struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
8131f4b34f8SAndrey Smetanin 	struct hv_message *msg = &stimer->msg;
8141f4b34f8SAndrey Smetanin 	struct hv_timer_message_payload *payload =
8151f4b34f8SAndrey Smetanin 			(struct hv_timer_message_payload *)&msg->u.payload;
8161f4b34f8SAndrey Smetanin 
8177deec5e0SRoman Kagan 	/*
8187deec5e0SRoman Kagan 	 * To avoid piling up periodic ticks, don't retry message
8197deec5e0SRoman Kagan 	 * delivery for them (within "lazy" lost ticks policy).
8207deec5e0SRoman Kagan 	 */
8216a058a1eSVitaly Kuznetsov 	bool no_retry = stimer->config.periodic;
8227deec5e0SRoman Kagan 
8231f4b34f8SAndrey Smetanin 	payload->expiration_time = stimer->exp_time;
8241f4b34f8SAndrey Smetanin 	payload->delivery_time = get_time_ref_counter(vcpu->kvm);
825e0121fa2SVitaly Kuznetsov 	return synic_deliver_msg(to_hv_synic(vcpu),
8266a058a1eSVitaly Kuznetsov 				 stimer->config.sintx, msg,
8277deec5e0SRoman Kagan 				 no_retry);
8281f4b34f8SAndrey Smetanin }
8291f4b34f8SAndrey Smetanin 
/*
 * Deliver a direct-mode stimer expiry by injecting the configured APIC
 * vector.  Returns 0 on success, non-zero when the in-kernel LAPIC is
 * absent or the interrupt could not be set.
 */
8308644f771SVitaly Kuznetsov static int stimer_notify_direct(struct kvm_vcpu_hv_stimer *stimer)
8318644f771SVitaly Kuznetsov {
832aafa97fdSVitaly Kuznetsov 	struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
8338644f771SVitaly Kuznetsov 	struct kvm_lapic_irq irq = {
8348644f771SVitaly Kuznetsov 		.delivery_mode = APIC_DM_FIXED,
8358644f771SVitaly Kuznetsov 		.vector = stimer->config.apic_vector
8368644f771SVitaly Kuznetsov 	};
8378644f771SVitaly Kuznetsov 
838a073d7e3SWanpeng Li 	if (lapic_in_kernel(vcpu))
8398644f771SVitaly Kuznetsov 		return !kvm_apic_set_irq(vcpu, &irq, NULL);
840a073d7e3SWanpeng Li 	return 0;
8418644f771SVitaly Kuznetsov }
8428644f771SVitaly Kuznetsov 
/*
 * Handle an expired stimer in vcpu context: deliver via SynIC message or
 * direct APIC injection depending on config.direct_mode.  On successful
 * delivery clear msg_pending and, for one-shot timers, disable the timer;
 * on failure msg_pending stays set so delivery is retried later.
 */
8431f4b34f8SAndrey Smetanin static void stimer_expiration(struct kvm_vcpu_hv_stimer *stimer)
8441f4b34f8SAndrey Smetanin {
8458644f771SVitaly Kuznetsov 	int r, direct = stimer->config.direct_mode;
846ac3e5fcaSAndrey Smetanin 
8470cdeabb1SAndrey Smetanin 	stimer->msg_pending = true;
8488644f771SVitaly Kuznetsov 	if (!direct)
849ac3e5fcaSAndrey Smetanin 		r = stimer_send_msg(stimer);
8508644f771SVitaly Kuznetsov 	else
8518644f771SVitaly Kuznetsov 		r = stimer_notify_direct(stimer);
852aafa97fdSVitaly Kuznetsov 	trace_kvm_hv_stimer_expiration(hv_stimer_to_vcpu(stimer)->vcpu_id,
8538644f771SVitaly Kuznetsov 				       stimer->index, direct, r);
854ac3e5fcaSAndrey Smetanin 	if (!r) {
8550cdeabb1SAndrey Smetanin 		stimer->msg_pending = false;
8566a058a1eSVitaly Kuznetsov 		if (!(stimer->config.periodic))
8576a058a1eSVitaly Kuznetsov 			stimer->config.enable = 0;
8580cdeabb1SAndrey Smetanin 	}
8591f4b34f8SAndrey Smetanin }
8601f4b34f8SAndrey Smetanin 
/*
 * Process all stimers flagged in stimer_pending_bitmap (set by
 * stimer_mark_pending()); called in vcpu context on KVM_REQ_HV_STIMER.
 * Expired timers are delivered, still-enabled ones are (re)armed, and
 * disabled/zero-count ones are cleaned up.
 */
8611f4b34f8SAndrey Smetanin void kvm_hv_process_stimers(struct kvm_vcpu *vcpu)
8621f4b34f8SAndrey Smetanin {
863ef3f3980SVitaly Kuznetsov 	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
8641f4b34f8SAndrey Smetanin 	struct kvm_vcpu_hv_stimer *stimer;
865f3b138c5SAndrey Smetanin 	u64 time_now, exp_time;
8661f4b34f8SAndrey Smetanin 	int i;
8671f4b34f8SAndrey Smetanin 
	/* Nothing to do when Hyper-V emulation was never set up on this vcpu. */
868f2bc14b6SVitaly Kuznetsov 	if (!hv_vcpu)
869f2bc14b6SVitaly Kuznetsov 		return;
870f2bc14b6SVitaly Kuznetsov 
8711f4b34f8SAndrey Smetanin 	for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
8721f4b34f8SAndrey Smetanin 		if (test_and_clear_bit(i, hv_vcpu->stimer_pending_bitmap)) {
8731f4b34f8SAndrey Smetanin 			stimer = &hv_vcpu->stimer[i];
8746a058a1eSVitaly Kuznetsov 			if (stimer->config.enable) {
875f3b138c5SAndrey Smetanin 				exp_time = stimer->exp_time;
8760cdeabb1SAndrey Smetanin 
877f3b138c5SAndrey Smetanin 				if (exp_time) {
878f3b138c5SAndrey Smetanin 					time_now =
879f3b138c5SAndrey Smetanin 						get_time_ref_counter(vcpu->kvm);
880f3b138c5SAndrey Smetanin 					if (time_now >= exp_time)
881f3b138c5SAndrey Smetanin 						stimer_expiration(stimer);
882f3b138c5SAndrey Smetanin 				}
883f3b138c5SAndrey Smetanin 
				/* Re-check: stimer_expiration() may disable the timer. */
8846a058a1eSVitaly Kuznetsov 				if ((stimer->config.enable) &&
885f1ff89ecSRoman Kagan 				    stimer->count) {
886f1ff89ecSRoman Kagan 					if (!stimer->msg_pending)
8870cdeabb1SAndrey Smetanin 						stimer_start(stimer);
888f1ff89ecSRoman Kagan 				} else
8890cdeabb1SAndrey Smetanin 					stimer_cleanup(stimer);
8901f4b34f8SAndrey Smetanin 			}
8911f4b34f8SAndrey Smetanin 		}
8921f4b34f8SAndrey Smetanin }
8931f4b34f8SAndrey Smetanin 
/*
 * Tear down per-vcpu Hyper-V state: stop all stimers, free the kmalloc'ed
 * kvm_vcpu_hv and clear the back-pointer.  Safe to call when Hyper-V was
 * never initialized for this vcpu.
 */
8941f4b34f8SAndrey Smetanin void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu)
8951f4b34f8SAndrey Smetanin {
896ef3f3980SVitaly Kuznetsov 	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
8971f4b34f8SAndrey Smetanin 	int i;
8981f4b34f8SAndrey Smetanin 
899fc08b628SVitaly Kuznetsov 	if (!hv_vcpu)
900fc08b628SVitaly Kuznetsov 		return;
901fc08b628SVitaly Kuznetsov 
9021f4b34f8SAndrey Smetanin 	for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
9031f4b34f8SAndrey Smetanin 		stimer_cleanup(&hv_vcpu->stimer[i]);
9044592b7eaSVitaly Kuznetsov 
9054592b7eaSVitaly Kuznetsov 	kfree(hv_vcpu);
9064592b7eaSVitaly Kuznetsov 	vcpu->arch.hyperv = NULL;
9071f4b34f8SAndrey Smetanin }
9081f4b34f8SAndrey Smetanin 
/*
 * True iff the guest enabled both the Hyper-V VP assist page
 * (HV_X64_MSR_VP_ASSIST_PAGE) and KVM's PV EOI MSR on this vcpu.
 */
90972bbf935SLadi Prosek bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu)
91072bbf935SLadi Prosek {
9119ff5e030SVitaly Kuznetsov 	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
9129ff5e030SVitaly Kuznetsov 
913f2bc14b6SVitaly Kuznetsov 	if (!hv_vcpu)
914f2bc14b6SVitaly Kuznetsov 		return false;
915f2bc14b6SVitaly Kuznetsov 
9169ff5e030SVitaly Kuznetsov 	if (!(hv_vcpu->hv_vapic & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE))
91772bbf935SLadi Prosek 		return false;
91872bbf935SLadi Prosek 	return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
91972bbf935SLadi Prosek }
92072bbf935SLadi Prosek EXPORT_SYMBOL_GPL(kvm_hv_assist_page_enabled);
92172bbf935SLadi Prosek 
/*
 * Refresh the cached copy of the guest's VP assist page into
 * hv_vcpu->vp_assist_page.  Returns -EFAULT if the page is not enabled,
 * otherwise the result of the cached guest read.
 */
922b415d8d4SVitaly Kuznetsov int kvm_hv_get_assist_page(struct kvm_vcpu *vcpu)
92372bbf935SLadi Prosek {
924046f5756SVitaly Kuznetsov 	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
925046f5756SVitaly Kuznetsov 
926046f5756SVitaly Kuznetsov 	if (!hv_vcpu || !kvm_hv_assist_page_enabled(vcpu))
927b415d8d4SVitaly Kuznetsov 		return -EFAULT;
928046f5756SVitaly Kuznetsov 
929b415d8d4SVitaly Kuznetsov 	return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data,
930046f5756SVitaly Kuznetsov 				     &hv_vcpu->vp_assist_page, sizeof(struct hv_vp_assist_page));
93172bbf935SLadi Prosek }
93272bbf935SLadi Prosek EXPORT_SYMBOL_GPL(kvm_hv_get_assist_page);
93372bbf935SLadi Prosek 
/*
 * Pre-fill the stimer's HVMSG_TIMER_EXPIRED message template; the
 * expiration/delivery timestamps are filled in at send time by
 * stimer_send_msg().
 */
9341f4b34f8SAndrey Smetanin static void stimer_prepare_msg(struct kvm_vcpu_hv_stimer *stimer)
9351f4b34f8SAndrey Smetanin {
9361f4b34f8SAndrey Smetanin 	struct hv_message *msg = &stimer->msg;
9371f4b34f8SAndrey Smetanin 	struct hv_timer_message_payload *payload =
9381f4b34f8SAndrey Smetanin 			(struct hv_timer_message_payload *)&msg->u.payload;
9391f4b34f8SAndrey Smetanin 
9401f4b34f8SAndrey Smetanin 	memset(&msg->header, 0, sizeof(msg->header));
9411f4b34f8SAndrey Smetanin 	msg->header.message_type = HVMSG_TIMER_EXPIRED;
9421f4b34f8SAndrey Smetanin 	msg->header.payload_size = sizeof(*payload);
9431f4b34f8SAndrey Smetanin 
9441f4b34f8SAndrey Smetanin 	payload->timer_index = stimer->index;
9451f4b34f8SAndrey Smetanin 	payload->expiration_time = 0;
9461f4b34f8SAndrey Smetanin 	payload->delivery_time = 0;
9471f4b34f8SAndrey Smetanin }
9481f4b34f8SAndrey Smetanin 
/* Zero-init one stimer, set its index, hrtimer callback and message template. */
9491f4b34f8SAndrey Smetanin static void stimer_init(struct kvm_vcpu_hv_stimer *stimer, int timer_index)
9501f4b34f8SAndrey Smetanin {
9511f4b34f8SAndrey Smetanin 	memset(stimer, 0, sizeof(*stimer));
9521f4b34f8SAndrey Smetanin 	stimer->index = timer_index;
9531f4b34f8SAndrey Smetanin 	hrtimer_init(&stimer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
9541f4b34f8SAndrey Smetanin 	stimer->timer.function = stimer_timer_callback;
9551f4b34f8SAndrey Smetanin 	stimer_prepare_msg(stimer);
9561f4b34f8SAndrey Smetanin }
9571f4b34f8SAndrey Smetanin 
/*
 * Lazily allocate and initialize per-vcpu Hyper-V state (SynIC, stimers,
 * TLB flush FIFOs).  Idempotent: returns 0 immediately if already set up;
 * -ENOMEM on allocation failure.
 */
9583be29eb7SSean Christopherson int kvm_hv_vcpu_init(struct kvm_vcpu *vcpu)
9595c919412SAndrey Smetanin {
9601cac8d9fSSean Christopherson 	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
9611f4b34f8SAndrey Smetanin 	int i;
9621f4b34f8SAndrey Smetanin 
9631cac8d9fSSean Christopherson 	if (hv_vcpu)
9641cac8d9fSSean Christopherson 		return 0;
9651cac8d9fSSean Christopherson 
9664592b7eaSVitaly Kuznetsov 	hv_vcpu = kzalloc(sizeof(struct kvm_vcpu_hv), GFP_KERNEL_ACCOUNT);
9674592b7eaSVitaly Kuznetsov 	if (!hv_vcpu)
9684592b7eaSVitaly Kuznetsov 		return -ENOMEM;
9694592b7eaSVitaly Kuznetsov 
9704592b7eaSVitaly Kuznetsov 	vcpu->arch.hyperv = hv_vcpu;
9714592b7eaSVitaly Kuznetsov 	hv_vcpu->vcpu = vcpu;
9724592b7eaSVitaly Kuznetsov 
9731f4b34f8SAndrey Smetanin 	synic_init(&hv_vcpu->synic);
9741f4b34f8SAndrey Smetanin 
9751f4b34f8SAndrey Smetanin 	bitmap_zero(hv_vcpu->stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT);
9761f4b34f8SAndrey Smetanin 	for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
9771f4b34f8SAndrey Smetanin 		stimer_init(&hv_vcpu->stimer[i], i);
9784592b7eaSVitaly Kuznetsov 
	/* Default VP index mirrors the vcpu index; guest/host may override it. */
9794eeef242SSean Christopherson 	hv_vcpu->vp_index = vcpu->vcpu_idx;
980fc08b628SVitaly Kuznetsov 
98153ca765aSVitaly Kuznetsov 	for (i = 0; i < HV_NR_TLB_FLUSH_FIFOS; i++) {
98253ca765aSVitaly Kuznetsov 		INIT_KFIFO(hv_vcpu->tlb_flush_fifo[i].entries);
98353ca765aSVitaly Kuznetsov 		spin_lock_init(&hv_vcpu->tlb_flush_fifo[i].write_lock);
98453ca765aSVitaly Kuznetsov 	}
9850823570fSVitaly Kuznetsov 
986fc08b628SVitaly Kuznetsov 	return 0;
987d3457c87SRoman Kagan }
988d3457c87SRoman Kagan 
/*
 * Enable the SynIC on this vcpu (userspace ioctl path), initializing
 * per-vcpu Hyper-V state first if needed.  @dont_zero_synic_pages is a
 * userspace opt-out from clearing the message/event pages on enable.
 */
989efc479e6SRoman Kagan int kvm_hv_activate_synic(struct kvm_vcpu *vcpu, bool dont_zero_synic_pages)
9905c919412SAndrey Smetanin {
991fc08b628SVitaly Kuznetsov 	struct kvm_vcpu_hv_synic *synic;
992fc08b628SVitaly Kuznetsov 	int r;
993fc08b628SVitaly Kuznetsov 
994fc08b628SVitaly Kuznetsov 	r = kvm_hv_vcpu_init(vcpu);
995fc08b628SVitaly Kuznetsov 	if (r)
996fc08b628SVitaly Kuznetsov 		return r;
997fc08b628SVitaly Kuznetsov 
998fc08b628SVitaly Kuznetsov 	synic = to_hv_synic(vcpu);
999efc479e6SRoman Kagan 
1000efc479e6SRoman Kagan 	synic->active = true;
1001efc479e6SRoman Kagan 	synic->dont_zero_synic_pages = dont_zero_synic_pages;
100299b48eccSJon Doron 	synic->control = HV_SYNIC_CONTROL_ENABLE;
10035c919412SAndrey Smetanin 	return 0;
10045c919412SAndrey Smetanin }
10055c919412SAndrey Smetanin 
/*
 * Returns true for Hyper-V MSRs whose state is per-VM (partition-wide)
 * rather than per-vcpu; callers use this to pick the right storage/lock.
 */
1006e83d5887SAndrey Smetanin static bool kvm_hv_msr_partition_wide(u32 msr)
1007e83d5887SAndrey Smetanin {
1008e83d5887SAndrey Smetanin 	bool r = false;
1009e83d5887SAndrey Smetanin 
1010e83d5887SAndrey Smetanin 	switch (msr) {
1011e83d5887SAndrey Smetanin 	case HV_X64_MSR_GUEST_OS_ID:
1012e83d5887SAndrey Smetanin 	case HV_X64_MSR_HYPERCALL:
1013e83d5887SAndrey Smetanin 	case HV_X64_MSR_REFERENCE_TSC:
1014e83d5887SAndrey Smetanin 	case HV_X64_MSR_TIME_REF_COUNT:
1015e7d9513bSAndrey Smetanin 	case HV_X64_MSR_CRASH_CTL:
1016e7d9513bSAndrey Smetanin 	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
1017e516cebbSAndrey Smetanin 	case HV_X64_MSR_RESET:
1018a2e164e7SVitaly Kuznetsov 	case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
1019a2e164e7SVitaly Kuznetsov 	case HV_X64_MSR_TSC_EMULATION_CONTROL:
1020a2e164e7SVitaly Kuznetsov 	case HV_X64_MSR_TSC_EMULATION_STATUS:
10212be1bd3aSVitaly Kuznetsov 	case HV_X64_MSR_TSC_INVARIANT_CONTROL:
1022f97f5a56SJon Doron 	case HV_X64_MSR_SYNDBG_OPTIONS:
1023f97f5a56SJon Doron 	case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
1024e83d5887SAndrey Smetanin 		r = true;
1025e83d5887SAndrey Smetanin 		break;
1026e83d5887SAndrey Smetanin 	}
1027e83d5887SAndrey Smetanin 
1028e83d5887SAndrey Smetanin 	return r;
1029e83d5887SAndrey Smetanin }
1030e83d5887SAndrey Smetanin 
/*
 * Read crash parameter MSR @index (HV_X64_MSR_CRASH_P0..P4).  The
 * array_index_nospec() masks the index against speculative out-of-bounds
 * reads (Spectre v1).
 */
103105f04ae4SVitaly Kuznetsov static int kvm_hv_msr_get_crash_data(struct kvm *kvm, u32 index, u64 *pdata)
1032e7d9513bSAndrey Smetanin {
103305f04ae4SVitaly Kuznetsov 	struct kvm_hv *hv = to_kvm_hv(kvm);
103486187937SMarios Pomonis 	size_t size = ARRAY_SIZE(hv->hv_crash_param);
1035e7d9513bSAndrey Smetanin 
103686187937SMarios Pomonis 	if (WARN_ON_ONCE(index >= size))
1037e7d9513bSAndrey Smetanin 		return -EINVAL;
1038e7d9513bSAndrey Smetanin 
103986187937SMarios Pomonis 	*pdata = hv->hv_crash_param[array_index_nospec(index, size)];
1040e7d9513bSAndrey Smetanin 	return 0;
1041e7d9513bSAndrey Smetanin }
1042e7d9513bSAndrey Smetanin 
/* Read the partition-wide HV_X64_MSR_CRASH_CTL value. */
104305f04ae4SVitaly Kuznetsov static int kvm_hv_msr_get_crash_ctl(struct kvm *kvm, u64 *pdata)
1044e7d9513bSAndrey Smetanin {
104505f04ae4SVitaly Kuznetsov 	struct kvm_hv *hv = to_kvm_hv(kvm);
1046e7d9513bSAndrey Smetanin 
1047e7d9513bSAndrey Smetanin 	*pdata = hv->hv_crash_ctl;
1048e7d9513bSAndrey Smetanin 	return 0;
1049e7d9513bSAndrey Smetanin }
1050e7d9513bSAndrey Smetanin 
/*
 * Write HV_X64_MSR_CRASH_CTL; only the CRASH_NOTIFY bit is kept, all other
 * bits are silently dropped.
 */
105105f04ae4SVitaly Kuznetsov static int kvm_hv_msr_set_crash_ctl(struct kvm *kvm, u64 data)
1052e7d9513bSAndrey Smetanin {
105305f04ae4SVitaly Kuznetsov 	struct kvm_hv *hv = to_kvm_hv(kvm);
1054e7d9513bSAndrey Smetanin 
1055a4987defSVitaly Kuznetsov 	hv->hv_crash_ctl = data & HV_CRASH_CTL_CRASH_NOTIFY;
1056e7d9513bSAndrey Smetanin 
1057e7d9513bSAndrey Smetanin 	return 0;
1058e7d9513bSAndrey Smetanin }
1059e7d9513bSAndrey Smetanin 
/*
 * Write crash parameter MSR @index (HV_X64_MSR_CRASH_P0..P4), with the same
 * bounds check and speculation hardening as the read path.
 */
106005f04ae4SVitaly Kuznetsov static int kvm_hv_msr_set_crash_data(struct kvm *kvm, u32 index, u64 data)
1061e7d9513bSAndrey Smetanin {
106205f04ae4SVitaly Kuznetsov 	struct kvm_hv *hv = to_kvm_hv(kvm);
106386187937SMarios Pomonis 	size_t size = ARRAY_SIZE(hv->hv_crash_param);
1064e7d9513bSAndrey Smetanin 
106586187937SMarios Pomonis 	if (WARN_ON_ONCE(index >= size))
1066e7d9513bSAndrey Smetanin 		return -EINVAL;
1067e7d9513bSAndrey Smetanin 
106886187937SMarios Pomonis 	hv->hv_crash_param[array_index_nospec(index, size)] = data;
1069e7d9513bSAndrey Smetanin 	return 0;
1070e7d9513bSAndrey Smetanin }
1071e7d9513bSAndrey Smetanin 
1072095cf55dSPaolo Bonzini /*
1073095cf55dSPaolo Bonzini  * The kvmclock and Hyper-V TSC page use similar formulas, and converting
1074095cf55dSPaolo Bonzini  * between them is possible:
1075095cf55dSPaolo Bonzini  *
1076095cf55dSPaolo Bonzini  * kvmclock formula:
1077095cf55dSPaolo Bonzini  *    nsec = (ticks - tsc_timestamp) * tsc_to_system_mul * 2^(tsc_shift-32)
1078095cf55dSPaolo Bonzini  *           + system_time
1079095cf55dSPaolo Bonzini  *
1080095cf55dSPaolo Bonzini  * Hyper-V formula:
1081095cf55dSPaolo Bonzini  *    nsec/100 = ticks * scale / 2^64 + offset
1082095cf55dSPaolo Bonzini  *
1083095cf55dSPaolo Bonzini  * When tsc_timestamp = system_time = 0, offset is zero in the Hyper-V formula.
1084095cf55dSPaolo Bonzini  * By dividing the kvmclock formula by 100 and equating what's left we get:
1085095cf55dSPaolo Bonzini  *    ticks * scale / 2^64 = ticks * tsc_to_system_mul * 2^(tsc_shift-32) / 100
1086095cf55dSPaolo Bonzini  *            scale / 2^64 =         tsc_to_system_mul * 2^(tsc_shift-32) / 100
1087095cf55dSPaolo Bonzini  *            scale        =         tsc_to_system_mul * 2^(32+tsc_shift) / 100
1088095cf55dSPaolo Bonzini  *
1089095cf55dSPaolo Bonzini  * Now expand the kvmclock formula and divide by 100:
1090095cf55dSPaolo Bonzini  *    nsec = ticks * tsc_to_system_mul * 2^(tsc_shift-32)
1091095cf55dSPaolo Bonzini  *           - tsc_timestamp * tsc_to_system_mul * 2^(tsc_shift-32)
1092095cf55dSPaolo Bonzini  *           + system_time
1093095cf55dSPaolo Bonzini  *    nsec/100 = ticks * tsc_to_system_mul * 2^(tsc_shift-32) / 100
1094095cf55dSPaolo Bonzini  *               - tsc_timestamp * tsc_to_system_mul * 2^(tsc_shift-32) / 100
1095095cf55dSPaolo Bonzini  *               + system_time / 100
1096095cf55dSPaolo Bonzini  *
1097095cf55dSPaolo Bonzini  * Replace tsc_to_system_mul * 2^(tsc_shift-32) / 100 by scale / 2^64:
1098095cf55dSPaolo Bonzini  *    nsec/100 = ticks * scale / 2^64
1099095cf55dSPaolo Bonzini  *               - tsc_timestamp * scale / 2^64
1100095cf55dSPaolo Bonzini  *               + system_time / 100
1101095cf55dSPaolo Bonzini  *
1102095cf55dSPaolo Bonzini  * Equate with the Hyper-V formula so that ticks * scale / 2^64 cancels out:
1103095cf55dSPaolo Bonzini  *    offset = system_time / 100 - tsc_timestamp * scale / 2^64
1104095cf55dSPaolo Bonzini  *
1105095cf55dSPaolo Bonzini  * These two equivalencies are implemented in this function.
1106095cf55dSPaolo Bonzini  */
/*
 * Convert kvmclock parameters to Hyper-V TSC page scale/offset (see the
 * derivation in the comment above).  Returns false when the conversion is
 * not possible (unstable TSC or scale overflow); the caller then keeps the
 * guest on the time reference counter instead.
 */
1107095cf55dSPaolo Bonzini static bool compute_tsc_page_parameters(struct pvclock_vcpu_time_info *hv_clock,
11087357b1dfSMichael Kelley 					struct ms_hyperv_tsc_page *tsc_ref)
1109095cf55dSPaolo Bonzini {
1110095cf55dSPaolo Bonzini 	u64 max_mul;
1111095cf55dSPaolo Bonzini 
1112095cf55dSPaolo Bonzini 	if (!(hv_clock->flags & PVCLOCK_TSC_STABLE_BIT))
1113095cf55dSPaolo Bonzini 		return false;
1114095cf55dSPaolo Bonzini 
1115095cf55dSPaolo Bonzini 	/*
1116095cf55dSPaolo Bonzini 	 * check if scale would overflow, if so we use the time ref counter
1117095cf55dSPaolo Bonzini 	 *    tsc_to_system_mul * 2^(tsc_shift+32) / 100 >= 2^64
1118095cf55dSPaolo Bonzini 	 *    tsc_to_system_mul / 100 >= 2^(32-tsc_shift)
1119095cf55dSPaolo Bonzini 	 *    tsc_to_system_mul >= 100 * 2^(32-tsc_shift)
1120095cf55dSPaolo Bonzini 	 */
1121095cf55dSPaolo Bonzini 	max_mul = 100ull << (32 - hv_clock->tsc_shift);
1122095cf55dSPaolo Bonzini 	if (hv_clock->tsc_to_system_mul >= max_mul)
1123095cf55dSPaolo Bonzini 		return false;
1124095cf55dSPaolo Bonzini 
1125095cf55dSPaolo Bonzini 	/*
1126095cf55dSPaolo Bonzini 	 * Otherwise compute the scale and offset according to the formulas
1127095cf55dSPaolo Bonzini 	 * derived above.
1128095cf55dSPaolo Bonzini 	 */
1129095cf55dSPaolo Bonzini 	tsc_ref->tsc_scale =
1130095cf55dSPaolo Bonzini 		mul_u64_u32_div(1ULL << (32 + hv_clock->tsc_shift),
1131095cf55dSPaolo Bonzini 				hv_clock->tsc_to_system_mul,
1132095cf55dSPaolo Bonzini 				100);
1133095cf55dSPaolo Bonzini 
1134095cf55dSPaolo Bonzini 	tsc_ref->tsc_offset = hv_clock->system_time;
1135095cf55dSPaolo Bonzini 	do_div(tsc_ref->tsc_offset, 100);
1136095cf55dSPaolo Bonzini 	tsc_ref->tsc_offset -=
1137095cf55dSPaolo Bonzini 		mul_u64_u64_shr(hv_clock->tsc_timestamp, tsc_ref->tsc_scale, 64);
1138095cf55dSPaolo Bonzini 	return true;
1139095cf55dSPaolo Bonzini }
1140095cf55dSPaolo Bonzini 
11410469f2f7SVitaly Kuznetsov /*
11420469f2f7SVitaly Kuznetsov  * Don't touch TSC page values if the guest has opted for TSC emulation after
11430469f2f7SVitaly Kuznetsov  * migration. KVM doesn't fully support reenlightenment notifications and TSC
11440469f2f7SVitaly Kuznetsov  * access emulation and Hyper-V is known to expect the values in TSC page to
11450469f2f7SVitaly Kuznetsov  * stay constant before TSC access emulation is disabled from guest side
11460469f2f7SVitaly Kuznetsov  * (HV_X64_MSR_TSC_EMULATION_STATUS). KVM userspace is expected to preserve TSC
11470469f2f7SVitaly Kuznetsov  * frequency and guest visible TSC value across migration (and prevent it when
11480469f2f7SVitaly Kuznetsov  * TSC scaling is unsupported).
11490469f2f7SVitaly Kuznetsov  */
/* Returns true when an in-place TSC page update would be unsafe (see above). */
11500469f2f7SVitaly Kuznetsov static inline bool tsc_page_update_unsafe(struct kvm_hv *hv)
11510469f2f7SVitaly Kuznetsov {
11520469f2f7SVitaly Kuznetsov 	return (hv->hv_tsc_page_status != HV_TSC_PAGE_GUEST_CHANGED) &&
11530469f2f7SVitaly Kuznetsov 		hv->hv_tsc_emulation_control;
11540469f2f7SVitaly Kuznetsov }
11550469f2f7SVitaly Kuznetsov 
/*
 * (Re)populate the guest's Hyper-V reference TSC page from the current
 * kvmclock parameters.  The sequence field is written in three guest-visible
 * phases separated by smp_wmb() barriers: first zeroed (forcing the guest
 * onto the time ref counter MSR), then the scale/offset body, then the new
 * non-zero sequence.  Any guest-memory failure marks the page BROKEN.
 */
1156095cf55dSPaolo Bonzini void kvm_hv_setup_tsc_page(struct kvm *kvm,
1157095cf55dSPaolo Bonzini 			   struct pvclock_vcpu_time_info *hv_clock)
1158095cf55dSPaolo Bonzini {
115905f04ae4SVitaly Kuznetsov 	struct kvm_hv *hv = to_kvm_hv(kvm);
1160095cf55dSPaolo Bonzini 	u32 tsc_seq;
1161095cf55dSPaolo Bonzini 	u64 gfn;
1162095cf55dSPaolo Bonzini 
1163095cf55dSPaolo Bonzini 	BUILD_BUG_ON(sizeof(tsc_seq) != sizeof(hv->tsc_ref.tsc_sequence));
11647357b1dfSMichael Kelley 	BUILD_BUG_ON(offsetof(struct ms_hyperv_tsc_page, tsc_sequence) != 0);
1165095cf55dSPaolo Bonzini 
116605f04ae4SVitaly Kuznetsov 	mutex_lock(&hv->hv_lock);
116742dcbe7dSVitaly Kuznetsov 
	/* Only proceed when an update was explicitly requested/needed. */
116842dcbe7dSVitaly Kuznetsov 	if (hv->hv_tsc_page_status == HV_TSC_PAGE_BROKEN ||
116942dcbe7dSVitaly Kuznetsov 	    hv->hv_tsc_page_status == HV_TSC_PAGE_SET ||
117042dcbe7dSVitaly Kuznetsov 	    hv->hv_tsc_page_status == HV_TSC_PAGE_UNSET)
117142dcbe7dSVitaly Kuznetsov 		goto out_unlock;
117242dcbe7dSVitaly Kuznetsov 
11733f5ad8beSPaolo Bonzini 	if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
11743f5ad8beSPaolo Bonzini 		goto out_unlock;
11753f5ad8beSPaolo Bonzini 
1176095cf55dSPaolo Bonzini 	gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
1177095cf55dSPaolo Bonzini 	/*
1178095cf55dSPaolo Bonzini 	 * Because the TSC parameters only vary when there is a
1179095cf55dSPaolo Bonzini 	 * change in the master clock, do not bother with caching.
1180095cf55dSPaolo Bonzini 	 */
1181095cf55dSPaolo Bonzini 	if (unlikely(kvm_read_guest(kvm, gfn_to_gpa(gfn),
1182095cf55dSPaolo Bonzini 				    &tsc_seq, sizeof(tsc_seq))))
1183cc9cfddbSVitaly Kuznetsov 		goto out_err;
1184095cf55dSPaolo Bonzini 
	/*
	 * Unsafe to rewrite in place (guest relies on TSC emulation after
	 * migration): just re-read the existing page contents into the cache.
	 */
11850469f2f7SVitaly Kuznetsov 	if (tsc_seq && tsc_page_update_unsafe(hv)) {
11860469f2f7SVitaly Kuznetsov 		if (kvm_read_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref)))
11870469f2f7SVitaly Kuznetsov 			goto out_err;
11880469f2f7SVitaly Kuznetsov 
11890469f2f7SVitaly Kuznetsov 		hv->hv_tsc_page_status = HV_TSC_PAGE_SET;
11900469f2f7SVitaly Kuznetsov 		goto out_unlock;
11910469f2f7SVitaly Kuznetsov 	}
11920469f2f7SVitaly Kuznetsov 
1193095cf55dSPaolo Bonzini 	/*
1194095cf55dSPaolo Bonzini 	 * While we're computing and writing the parameters, force the
1195095cf55dSPaolo Bonzini 	 * guest to use the time reference count MSR.
1196095cf55dSPaolo Bonzini 	 */
1197095cf55dSPaolo Bonzini 	hv->tsc_ref.tsc_sequence = 0;
1198095cf55dSPaolo Bonzini 	if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
1199095cf55dSPaolo Bonzini 			    &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
1200cc9cfddbSVitaly Kuznetsov 		goto out_err;
1201095cf55dSPaolo Bonzini 
1202095cf55dSPaolo Bonzini 	if (!compute_tsc_page_parameters(hv_clock, &hv->tsc_ref))
1203cc9cfddbSVitaly Kuznetsov 		goto out_err;
1204095cf55dSPaolo Bonzini 
1205095cf55dSPaolo Bonzini 	/* Ensure sequence is zero before writing the rest of the struct.  */
1206095cf55dSPaolo Bonzini 	smp_wmb();
1207095cf55dSPaolo Bonzini 	if (kvm_write_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref)))
1208cc9cfddbSVitaly Kuznetsov 		goto out_err;
1209095cf55dSPaolo Bonzini 
1210095cf55dSPaolo Bonzini 	/*
1211095cf55dSPaolo Bonzini 	 * Now switch to the TSC page mechanism by writing the sequence.
1212095cf55dSPaolo Bonzini 	 */
1213095cf55dSPaolo Bonzini 	tsc_seq++;
	/* 0 and 0xFFFFFFFF are invalid sequence values per the TSC page ABI. */
1214095cf55dSPaolo Bonzini 	if (tsc_seq == 0xFFFFFFFF || tsc_seq == 0)
1215095cf55dSPaolo Bonzini 		tsc_seq = 1;
1216095cf55dSPaolo Bonzini 
1217095cf55dSPaolo Bonzini 	/* Write the struct entirely before the non-zero sequence.  */
1218095cf55dSPaolo Bonzini 	smp_wmb();
1219095cf55dSPaolo Bonzini 
1220095cf55dSPaolo Bonzini 	hv->tsc_ref.tsc_sequence = tsc_seq;
1221cc9cfddbSVitaly Kuznetsov 	if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
1222cc9cfddbSVitaly Kuznetsov 			    &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
1223cc9cfddbSVitaly Kuznetsov 		goto out_err;
1224cc9cfddbSVitaly Kuznetsov 
1225cc9cfddbSVitaly Kuznetsov 	hv->hv_tsc_page_status = HV_TSC_PAGE_SET;
1226cc9cfddbSVitaly Kuznetsov 	goto out_unlock;
1227cc9cfddbSVitaly Kuznetsov 
1228cc9cfddbSVitaly Kuznetsov out_err:
1229cc9cfddbSVitaly Kuznetsov 	hv->hv_tsc_page_status = HV_TSC_PAGE_BROKEN;
12303f5ad8beSPaolo Bonzini out_unlock:
123105f04ae4SVitaly Kuznetsov 	mutex_unlock(&hv->hv_lock);
1232095cf55dSPaolo Bonzini }
1233095cf55dSPaolo Bonzini 
123442dcbe7dSVitaly Kuznetsov void kvm_hv_request_tsc_page_update(struct kvm *kvm)
1235e880c6eaSVitaly Kuznetsov {
1236e880c6eaSVitaly Kuznetsov 	struct kvm_hv *hv = to_kvm_hv(kvm);
1237e880c6eaSVitaly Kuznetsov 
1238e880c6eaSVitaly Kuznetsov 	mutex_lock(&hv->hv_lock);
1239e880c6eaSVitaly Kuznetsov 
124042dcbe7dSVitaly Kuznetsov 	if (hv->hv_tsc_page_status == HV_TSC_PAGE_SET &&
124142dcbe7dSVitaly Kuznetsov 	    !tsc_page_update_unsafe(hv))
124242dcbe7dSVitaly Kuznetsov 		hv->hv_tsc_page_status = HV_TSC_PAGE_HOST_CHANGED;
1243e880c6eaSVitaly Kuznetsov 
1244e880c6eaSVitaly Kuznetsov 	mutex_unlock(&hv->hv_lock);
1245e880c6eaSVitaly Kuznetsov }
1246e880c6eaSVitaly Kuznetsov 
1247b4128000SVitaly Kuznetsov static bool hv_check_msr_access(struct kvm_vcpu_hv *hv_vcpu, u32 msr)
1248b4128000SVitaly Kuznetsov {
12491561c2cbSVitaly Kuznetsov 	if (!hv_vcpu->enforce_cpuid)
12501561c2cbSVitaly Kuznetsov 		return true;
12511561c2cbSVitaly Kuznetsov 
12521561c2cbSVitaly Kuznetsov 	switch (msr) {
12531561c2cbSVitaly Kuznetsov 	case HV_X64_MSR_GUEST_OS_ID:
12541561c2cbSVitaly Kuznetsov 	case HV_X64_MSR_HYPERCALL:
12551561c2cbSVitaly Kuznetsov 		return hv_vcpu->cpuid_cache.features_eax &
12561561c2cbSVitaly Kuznetsov 			HV_MSR_HYPERCALL_AVAILABLE;
1257b80a92ffSVitaly Kuznetsov 	case HV_X64_MSR_VP_RUNTIME:
1258b80a92ffSVitaly Kuznetsov 		return hv_vcpu->cpuid_cache.features_eax &
1259b80a92ffSVitaly Kuznetsov 			HV_MSR_VP_RUNTIME_AVAILABLE;
1260c2b32867SVitaly Kuznetsov 	case HV_X64_MSR_TIME_REF_COUNT:
1261c2b32867SVitaly Kuznetsov 		return hv_vcpu->cpuid_cache.features_eax &
1262c2b32867SVitaly Kuznetsov 			HV_MSR_TIME_REF_COUNT_AVAILABLE;
1263d2ac25d4SVitaly Kuznetsov 	case HV_X64_MSR_VP_INDEX:
1264d2ac25d4SVitaly Kuznetsov 		return hv_vcpu->cpuid_cache.features_eax &
1265d2ac25d4SVitaly Kuznetsov 			HV_MSR_VP_INDEX_AVAILABLE;
1266679008e4SVitaly Kuznetsov 	case HV_X64_MSR_RESET:
1267679008e4SVitaly Kuznetsov 		return hv_vcpu->cpuid_cache.features_eax &
1268679008e4SVitaly Kuznetsov 			HV_MSR_RESET_AVAILABLE;
1269a1ec661cSVitaly Kuznetsov 	case HV_X64_MSR_REFERENCE_TSC:
1270a1ec661cSVitaly Kuznetsov 		return hv_vcpu->cpuid_cache.features_eax &
1271a1ec661cSVitaly Kuznetsov 			HV_MSR_REFERENCE_TSC_AVAILABLE;
12729e2715caSVitaly Kuznetsov 	case HV_X64_MSR_SCONTROL:
12739e2715caSVitaly Kuznetsov 	case HV_X64_MSR_SVERSION:
12749e2715caSVitaly Kuznetsov 	case HV_X64_MSR_SIEFP:
12759e2715caSVitaly Kuznetsov 	case HV_X64_MSR_SIMP:
12769e2715caSVitaly Kuznetsov 	case HV_X64_MSR_EOM:
12779e2715caSVitaly Kuznetsov 	case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
12789e2715caSVitaly Kuznetsov 		return hv_vcpu->cpuid_cache.features_eax &
12799e2715caSVitaly Kuznetsov 			HV_MSR_SYNIC_AVAILABLE;
1280eba60ddaSVitaly Kuznetsov 	case HV_X64_MSR_STIMER0_CONFIG:
1281eba60ddaSVitaly Kuznetsov 	case HV_X64_MSR_STIMER1_CONFIG:
1282eba60ddaSVitaly Kuznetsov 	case HV_X64_MSR_STIMER2_CONFIG:
1283eba60ddaSVitaly Kuznetsov 	case HV_X64_MSR_STIMER3_CONFIG:
1284eba60ddaSVitaly Kuznetsov 	case HV_X64_MSR_STIMER0_COUNT:
1285eba60ddaSVitaly Kuznetsov 	case HV_X64_MSR_STIMER1_COUNT:
1286eba60ddaSVitaly Kuznetsov 	case HV_X64_MSR_STIMER2_COUNT:
1287eba60ddaSVitaly Kuznetsov 	case HV_X64_MSR_STIMER3_COUNT:
1288eba60ddaSVitaly Kuznetsov 		return hv_vcpu->cpuid_cache.features_eax &
1289eba60ddaSVitaly Kuznetsov 			HV_MSR_SYNTIMER_AVAILABLE;
1290978b5747SVitaly Kuznetsov 	case HV_X64_MSR_EOI:
1291978b5747SVitaly Kuznetsov 	case HV_X64_MSR_ICR:
1292978b5747SVitaly Kuznetsov 	case HV_X64_MSR_TPR:
1293978b5747SVitaly Kuznetsov 	case HV_X64_MSR_VP_ASSIST_PAGE:
1294978b5747SVitaly Kuznetsov 		return hv_vcpu->cpuid_cache.features_eax &
1295978b5747SVitaly Kuznetsov 			HV_MSR_APIC_ACCESS_AVAILABLE;
1296978b5747SVitaly Kuznetsov 		break;
12979442f3bdSVitaly Kuznetsov 	case HV_X64_MSR_TSC_FREQUENCY:
12989442f3bdSVitaly Kuznetsov 	case HV_X64_MSR_APIC_FREQUENCY:
12999442f3bdSVitaly Kuznetsov 		return hv_vcpu->cpuid_cache.features_eax &
13009442f3bdSVitaly Kuznetsov 			HV_ACCESS_FREQUENCY_MSRS;
1301234d01baSVitaly Kuznetsov 	case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
1302234d01baSVitaly Kuznetsov 	case HV_X64_MSR_TSC_EMULATION_CONTROL:
1303234d01baSVitaly Kuznetsov 	case HV_X64_MSR_TSC_EMULATION_STATUS:
1304234d01baSVitaly Kuznetsov 		return hv_vcpu->cpuid_cache.features_eax &
1305234d01baSVitaly Kuznetsov 			HV_ACCESS_REENLIGHTENMENT;
13062be1bd3aSVitaly Kuznetsov 	case HV_X64_MSR_TSC_INVARIANT_CONTROL:
13072be1bd3aSVitaly Kuznetsov 		return hv_vcpu->cpuid_cache.features_eax &
13082be1bd3aSVitaly Kuznetsov 			HV_ACCESS_TSC_INVARIANT;
13090a19c899SVitaly Kuznetsov 	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
13100a19c899SVitaly Kuznetsov 	case HV_X64_MSR_CRASH_CTL:
13110a19c899SVitaly Kuznetsov 		return hv_vcpu->cpuid_cache.features_edx &
13120a19c899SVitaly Kuznetsov 			HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE;
131317b6d517SVitaly Kuznetsov 	case HV_X64_MSR_SYNDBG_OPTIONS:
131417b6d517SVitaly Kuznetsov 	case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
131517b6d517SVitaly Kuznetsov 		return hv_vcpu->cpuid_cache.features_edx &
131617b6d517SVitaly Kuznetsov 			HV_FEATURE_DEBUG_MSRS_AVAILABLE;
13171561c2cbSVitaly Kuznetsov 	default:
13181561c2cbSVitaly Kuznetsov 		break;
13191561c2cbSVitaly Kuznetsov 	}
13201561c2cbSVitaly Kuznetsov 
1321d66bfa36SVitaly Kuznetsov 	return false;
1322b4128000SVitaly Kuznetsov }
1323b4128000SVitaly Kuznetsov 
1324e7d9513bSAndrey Smetanin static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
1325e7d9513bSAndrey Smetanin 			     bool host)
1326e83d5887SAndrey Smetanin {
1327e83d5887SAndrey Smetanin 	struct kvm *kvm = vcpu->kvm;
132805f04ae4SVitaly Kuznetsov 	struct kvm_hv *hv = to_kvm_hv(kvm);
1329e83d5887SAndrey Smetanin 
1330b4128000SVitaly Kuznetsov 	if (unlikely(!host && !hv_check_msr_access(to_hv_vcpu(vcpu), msr)))
1331b4128000SVitaly Kuznetsov 		return 1;
1332b4128000SVitaly Kuznetsov 
1333e83d5887SAndrey Smetanin 	switch (msr) {
1334e83d5887SAndrey Smetanin 	case HV_X64_MSR_GUEST_OS_ID:
1335e83d5887SAndrey Smetanin 		hv->hv_guest_os_id = data;
1336e83d5887SAndrey Smetanin 		/* setting guest os id to zero disables hypercall page */
1337e83d5887SAndrey Smetanin 		if (!hv->hv_guest_os_id)
1338e83d5887SAndrey Smetanin 			hv->hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
1339e83d5887SAndrey Smetanin 		break;
1340e83d5887SAndrey Smetanin 	case HV_X64_MSR_HYPERCALL: {
134179033bebSJoao Martins 		u8 instructions[9];
134279033bebSJoao Martins 		int i = 0;
134379033bebSJoao Martins 		u64 addr;
1344e83d5887SAndrey Smetanin 
1345e83d5887SAndrey Smetanin 		/* if guest os id is not set hypercall should remain disabled */
1346e83d5887SAndrey Smetanin 		if (!hv->hv_guest_os_id)
1347e83d5887SAndrey Smetanin 			break;
1348e83d5887SAndrey Smetanin 		if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
1349e83d5887SAndrey Smetanin 			hv->hv_hypercall = data;
1350e83d5887SAndrey Smetanin 			break;
1351e83d5887SAndrey Smetanin 		}
135279033bebSJoao Martins 
135379033bebSJoao Martins 		/*
135479033bebSJoao Martins 		 * If Xen and Hyper-V hypercalls are both enabled, disambiguate
135579033bebSJoao Martins 		 * the same way Xen itself does, by setting the bit 31 of EAX
135679033bebSJoao Martins 		 * which is RsvdZ in the 32-bit Hyper-V hypercall ABI and just
135779033bebSJoao Martins 		 * going to be clobbered on 64-bit.
135879033bebSJoao Martins 		 */
135979033bebSJoao Martins 		if (kvm_xen_hypercall_enabled(kvm)) {
136079033bebSJoao Martins 			/* orl $0x80000000, %eax */
136179033bebSJoao Martins 			instructions[i++] = 0x0d;
136279033bebSJoao Martins 			instructions[i++] = 0x00;
136379033bebSJoao Martins 			instructions[i++] = 0x00;
136479033bebSJoao Martins 			instructions[i++] = 0x00;
136579033bebSJoao Martins 			instructions[i++] = 0x80;
136679033bebSJoao Martins 		}
136779033bebSJoao Martins 
136879033bebSJoao Martins 		/* vmcall/vmmcall */
136979033bebSJoao Martins 		static_call(kvm_x86_patch_hypercall)(vcpu, instructions + i);
137079033bebSJoao Martins 		i += 3;
137179033bebSJoao Martins 
137279033bebSJoao Martins 		/* ret */
137379033bebSJoao Martins 		((unsigned char *)instructions)[i++] = 0xc3;
137479033bebSJoao Martins 
137579033bebSJoao Martins 		addr = data & HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_MASK;
137679033bebSJoao Martins 		if (kvm_vcpu_write_guest(vcpu, addr, instructions, i))
1377e83d5887SAndrey Smetanin 			return 1;
1378e83d5887SAndrey Smetanin 		hv->hv_hypercall = data;
1379e83d5887SAndrey Smetanin 		break;
1380e83d5887SAndrey Smetanin 	}
1381095cf55dSPaolo Bonzini 	case HV_X64_MSR_REFERENCE_TSC:
1382e83d5887SAndrey Smetanin 		hv->hv_tsc_page = data;
1383cc9cfddbSVitaly Kuznetsov 		if (hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE) {
1384cc9cfddbSVitaly Kuznetsov 			if (!host)
1385cc9cfddbSVitaly Kuznetsov 				hv->hv_tsc_page_status = HV_TSC_PAGE_GUEST_CHANGED;
1386cc9cfddbSVitaly Kuznetsov 			else
1387cc9cfddbSVitaly Kuznetsov 				hv->hv_tsc_page_status = HV_TSC_PAGE_HOST_CHANGED;
1388095cf55dSPaolo Bonzini 			kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
1389cc9cfddbSVitaly Kuznetsov 		} else {
1390cc9cfddbSVitaly Kuznetsov 			hv->hv_tsc_page_status = HV_TSC_PAGE_UNSET;
1391cc9cfddbSVitaly Kuznetsov 		}
1392e83d5887SAndrey Smetanin 		break;
1393e7d9513bSAndrey Smetanin 	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
139405f04ae4SVitaly Kuznetsov 		return kvm_hv_msr_set_crash_data(kvm,
1395e7d9513bSAndrey Smetanin 						 msr - HV_X64_MSR_CRASH_P0,
1396e7d9513bSAndrey Smetanin 						 data);
1397e7d9513bSAndrey Smetanin 	case HV_X64_MSR_CRASH_CTL:
139805f04ae4SVitaly Kuznetsov 		if (host)
139905f04ae4SVitaly Kuznetsov 			return kvm_hv_msr_set_crash_ctl(kvm, data);
140005f04ae4SVitaly Kuznetsov 
140105f04ae4SVitaly Kuznetsov 		if (data & HV_CRASH_CTL_CRASH_NOTIFY) {
140205f04ae4SVitaly Kuznetsov 			vcpu_debug(vcpu, "hv crash (0x%llx 0x%llx 0x%llx 0x%llx 0x%llx)\n",
140305f04ae4SVitaly Kuznetsov 				   hv->hv_crash_param[0],
140405f04ae4SVitaly Kuznetsov 				   hv->hv_crash_param[1],
140505f04ae4SVitaly Kuznetsov 				   hv->hv_crash_param[2],
140605f04ae4SVitaly Kuznetsov 				   hv->hv_crash_param[3],
140705f04ae4SVitaly Kuznetsov 				   hv->hv_crash_param[4]);
140805f04ae4SVitaly Kuznetsov 
140905f04ae4SVitaly Kuznetsov 			/* Send notification about crash to user space */
141005f04ae4SVitaly Kuznetsov 			kvm_make_request(KVM_REQ_HV_CRASH, vcpu);
141105f04ae4SVitaly Kuznetsov 		}
141205f04ae4SVitaly Kuznetsov 		break;
1413e516cebbSAndrey Smetanin 	case HV_X64_MSR_RESET:
1414e516cebbSAndrey Smetanin 		if (data == 1) {
1415e516cebbSAndrey Smetanin 			vcpu_debug(vcpu, "hyper-v reset requested\n");
1416e516cebbSAndrey Smetanin 			kvm_make_request(KVM_REQ_HV_RESET, vcpu);
1417e516cebbSAndrey Smetanin 		}
1418e516cebbSAndrey Smetanin 		break;
1419a2e164e7SVitaly Kuznetsov 	case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
1420a2e164e7SVitaly Kuznetsov 		hv->hv_reenlightenment_control = data;
1421a2e164e7SVitaly Kuznetsov 		break;
1422a2e164e7SVitaly Kuznetsov 	case HV_X64_MSR_TSC_EMULATION_CONTROL:
1423a2e164e7SVitaly Kuznetsov 		hv->hv_tsc_emulation_control = data;
1424a2e164e7SVitaly Kuznetsov 		break;
1425a2e164e7SVitaly Kuznetsov 	case HV_X64_MSR_TSC_EMULATION_STATUS:
1426d2547cf5SVitaly Kuznetsov 		if (data && !host)
1427d2547cf5SVitaly Kuznetsov 			return 1;
1428d2547cf5SVitaly Kuznetsov 
1429a2e164e7SVitaly Kuznetsov 		hv->hv_tsc_emulation_status = data;
1430a2e164e7SVitaly Kuznetsov 		break;
143144883f01SPaolo Bonzini 	case HV_X64_MSR_TIME_REF_COUNT:
143244883f01SPaolo Bonzini 		/* read-only, but still ignore it if host-initiated */
143344883f01SPaolo Bonzini 		if (!host)
143444883f01SPaolo Bonzini 			return 1;
143544883f01SPaolo Bonzini 		break;
14362be1bd3aSVitaly Kuznetsov 	case HV_X64_MSR_TSC_INVARIANT_CONTROL:
14372be1bd3aSVitaly Kuznetsov 		/* Only bit 0 is supported */
14382be1bd3aSVitaly Kuznetsov 		if (data & ~HV_EXPOSE_INVARIANT_TSC)
14392be1bd3aSVitaly Kuznetsov 			return 1;
14402be1bd3aSVitaly Kuznetsov 
14412be1bd3aSVitaly Kuznetsov 		/* The feature can't be disabled from the guest */
14422be1bd3aSVitaly Kuznetsov 		if (!host && hv->hv_invtsc_control && !data)
14432be1bd3aSVitaly Kuznetsov 			return 1;
14442be1bd3aSVitaly Kuznetsov 
14452be1bd3aSVitaly Kuznetsov 		hv->hv_invtsc_control = data;
14462be1bd3aSVitaly Kuznetsov 		break;
1447f97f5a56SJon Doron 	case HV_X64_MSR_SYNDBG_OPTIONS:
1448f97f5a56SJon Doron 	case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
1449f97f5a56SJon Doron 		return syndbg_set_msr(vcpu, msr, data, host);
1450e83d5887SAndrey Smetanin 	default:
14512f9f5cddSMiaohe Lin 		vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n",
1452e83d5887SAndrey Smetanin 			    msr, data);
1453e83d5887SAndrey Smetanin 		return 1;
1454e83d5887SAndrey Smetanin 	}
1455e83d5887SAndrey Smetanin 	return 0;
1456e83d5887SAndrey Smetanin }
1457e83d5887SAndrey Smetanin 
14589eec50b8SAndrey Smetanin /* Calculate cpu time spent by current task in 100ns units */
14599eec50b8SAndrey Smetanin static u64 current_task_runtime_100ns(void)
14609eec50b8SAndrey Smetanin {
14615613fda9SFrederic Weisbecker 	u64 utime, stime;
14629eec50b8SAndrey Smetanin 
14639eec50b8SAndrey Smetanin 	task_cputime_adjusted(current, &utime, &stime);
14645613fda9SFrederic Weisbecker 
14655613fda9SFrederic Weisbecker 	return div_u64(utime + stime, 100);
14669eec50b8SAndrey Smetanin }
14679eec50b8SAndrey Smetanin 
14689eec50b8SAndrey Smetanin static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
1469e83d5887SAndrey Smetanin {
14709ff5e030SVitaly Kuznetsov 	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
1471e83d5887SAndrey Smetanin 
1472b4128000SVitaly Kuznetsov 	if (unlikely(!host && !hv_check_msr_access(hv_vcpu, msr)))
1473b4128000SVitaly Kuznetsov 		return 1;
1474b4128000SVitaly Kuznetsov 
1475e83d5887SAndrey Smetanin 	switch (msr) {
147687ee613dSVitaly Kuznetsov 	case HV_X64_MSR_VP_INDEX: {
147705f04ae4SVitaly Kuznetsov 		struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
147887ee613dSVitaly Kuznetsov 		u32 new_vp_index = (u32)data;
147987ee613dSVitaly Kuznetsov 
148087ee613dSVitaly Kuznetsov 		if (!host || new_vp_index >= KVM_MAX_VCPUS)
1481d3457c87SRoman Kagan 			return 1;
148287ee613dSVitaly Kuznetsov 
148387ee613dSVitaly Kuznetsov 		if (new_vp_index == hv_vcpu->vp_index)
148487ee613dSVitaly Kuznetsov 			return 0;
148587ee613dSVitaly Kuznetsov 
148687ee613dSVitaly Kuznetsov 		/*
148787ee613dSVitaly Kuznetsov 		 * The VP index is initialized to vcpu_index by
148887ee613dSVitaly Kuznetsov 		 * kvm_hv_vcpu_postcreate so they initially match.  Now the
148987ee613dSVitaly Kuznetsov 		 * VP index is changing, adjust num_mismatched_vp_indexes if
149087ee613dSVitaly Kuznetsov 		 * it now matches or no longer matches vcpu_idx.
149187ee613dSVitaly Kuznetsov 		 */
14924eeef242SSean Christopherson 		if (hv_vcpu->vp_index == vcpu->vcpu_idx)
149387ee613dSVitaly Kuznetsov 			atomic_inc(&hv->num_mismatched_vp_indexes);
14944eeef242SSean Christopherson 		else if (new_vp_index == vcpu->vcpu_idx)
149587ee613dSVitaly Kuznetsov 			atomic_dec(&hv->num_mismatched_vp_indexes);
149687ee613dSVitaly Kuznetsov 
149787ee613dSVitaly Kuznetsov 		hv_vcpu->vp_index = new_vp_index;
1498d3457c87SRoman Kagan 		break;
149987ee613dSVitaly Kuznetsov 	}
1500d4abc577SLadi Prosek 	case HV_X64_MSR_VP_ASSIST_PAGE: {
1501e83d5887SAndrey Smetanin 		u64 gfn;
1502e83d5887SAndrey Smetanin 		unsigned long addr;
1503e83d5887SAndrey Smetanin 
1504d4abc577SLadi Prosek 		if (!(data & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE)) {
15051779a39fSVitaly Kuznetsov 			hv_vcpu->hv_vapic = data;
150677c3323fSVitaly Kuznetsov 			if (kvm_lapic_set_pv_eoi(vcpu, 0, 0))
1507e83d5887SAndrey Smetanin 				return 1;
1508e83d5887SAndrey Smetanin 			break;
1509e83d5887SAndrey Smetanin 		}
1510d4abc577SLadi Prosek 		gfn = data >> HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT;
1511e83d5887SAndrey Smetanin 		addr = kvm_vcpu_gfn_to_hva(vcpu, gfn);
1512e83d5887SAndrey Smetanin 		if (kvm_is_error_hva(addr))
1513e83d5887SAndrey Smetanin 			return 1;
151412e0c618SVitaly Kuznetsov 
151512e0c618SVitaly Kuznetsov 		/*
151667b0ae43SMiaohe Lin 		 * Clear apic_assist portion of struct hv_vp_assist_page
151712e0c618SVitaly Kuznetsov 		 * only, there can be valuable data in the rest which needs
151812e0c618SVitaly Kuznetsov 		 * to be preserved e.g. on migration.
151912e0c618SVitaly Kuznetsov 		 */
15209eb41c52SAl Viro 		if (__put_user(0, (u32 __user *)addr))
1521e83d5887SAndrey Smetanin 			return 1;
15221779a39fSVitaly Kuznetsov 		hv_vcpu->hv_vapic = data;
1523e83d5887SAndrey Smetanin 		kvm_vcpu_mark_page_dirty(vcpu, gfn);
152477c3323fSVitaly Kuznetsov 		if (kvm_lapic_set_pv_eoi(vcpu,
152572bbf935SLadi Prosek 					    gfn_to_gpa(gfn) | KVM_MSR_ENABLED,
152672bbf935SLadi Prosek 					    sizeof(struct hv_vp_assist_page)))
1527e83d5887SAndrey Smetanin 			return 1;
1528e83d5887SAndrey Smetanin 		break;
1529e83d5887SAndrey Smetanin 	}
1530e83d5887SAndrey Smetanin 	case HV_X64_MSR_EOI:
1531e83d5887SAndrey Smetanin 		return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
1532e83d5887SAndrey Smetanin 	case HV_X64_MSR_ICR:
1533e83d5887SAndrey Smetanin 		return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
1534e83d5887SAndrey Smetanin 	case HV_X64_MSR_TPR:
1535e83d5887SAndrey Smetanin 		return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
15369eec50b8SAndrey Smetanin 	case HV_X64_MSR_VP_RUNTIME:
15379eec50b8SAndrey Smetanin 		if (!host)
15389eec50b8SAndrey Smetanin 			return 1;
15391779a39fSVitaly Kuznetsov 		hv_vcpu->runtime_offset = data - current_task_runtime_100ns();
15409eec50b8SAndrey Smetanin 		break;
15415c919412SAndrey Smetanin 	case HV_X64_MSR_SCONTROL:
15425c919412SAndrey Smetanin 	case HV_X64_MSR_SVERSION:
15435c919412SAndrey Smetanin 	case HV_X64_MSR_SIEFP:
15445c919412SAndrey Smetanin 	case HV_X64_MSR_SIMP:
15455c919412SAndrey Smetanin 	case HV_X64_MSR_EOM:
15465c919412SAndrey Smetanin 	case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
1547e0121fa2SVitaly Kuznetsov 		return synic_set_msr(to_hv_synic(vcpu), msr, data, host);
15481f4b34f8SAndrey Smetanin 	case HV_X64_MSR_STIMER0_CONFIG:
15491f4b34f8SAndrey Smetanin 	case HV_X64_MSR_STIMER1_CONFIG:
15501f4b34f8SAndrey Smetanin 	case HV_X64_MSR_STIMER2_CONFIG:
15511f4b34f8SAndrey Smetanin 	case HV_X64_MSR_STIMER3_CONFIG: {
15521f4b34f8SAndrey Smetanin 		int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2;
15531f4b34f8SAndrey Smetanin 
1554aafa97fdSVitaly Kuznetsov 		return stimer_set_config(to_hv_stimer(vcpu, timer_index),
15551f4b34f8SAndrey Smetanin 					 data, host);
15561f4b34f8SAndrey Smetanin 	}
15571f4b34f8SAndrey Smetanin 	case HV_X64_MSR_STIMER0_COUNT:
15581f4b34f8SAndrey Smetanin 	case HV_X64_MSR_STIMER1_COUNT:
15591f4b34f8SAndrey Smetanin 	case HV_X64_MSR_STIMER2_COUNT:
15601f4b34f8SAndrey Smetanin 	case HV_X64_MSR_STIMER3_COUNT: {
15611f4b34f8SAndrey Smetanin 		int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2;
15621f4b34f8SAndrey Smetanin 
1563aafa97fdSVitaly Kuznetsov 		return stimer_set_count(to_hv_stimer(vcpu, timer_index),
15641f4b34f8SAndrey Smetanin 					data, host);
15651f4b34f8SAndrey Smetanin 	}
156644883f01SPaolo Bonzini 	case HV_X64_MSR_TSC_FREQUENCY:
156744883f01SPaolo Bonzini 	case HV_X64_MSR_APIC_FREQUENCY:
156844883f01SPaolo Bonzini 		/* read-only, but still ignore it if host-initiated */
156944883f01SPaolo Bonzini 		if (!host)
157044883f01SPaolo Bonzini 			return 1;
157144883f01SPaolo Bonzini 		break;
1572e83d5887SAndrey Smetanin 	default:
15732f9f5cddSMiaohe Lin 		vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n",
1574e83d5887SAndrey Smetanin 			    msr, data);
1575e83d5887SAndrey Smetanin 		return 1;
1576e83d5887SAndrey Smetanin 	}
1577e83d5887SAndrey Smetanin 
1578e83d5887SAndrey Smetanin 	return 0;
1579e83d5887SAndrey Smetanin }
1580e83d5887SAndrey Smetanin 
1581f97f5a56SJon Doron static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
1582f97f5a56SJon Doron 			     bool host)
1583e83d5887SAndrey Smetanin {
1584e83d5887SAndrey Smetanin 	u64 data = 0;
1585e83d5887SAndrey Smetanin 	struct kvm *kvm = vcpu->kvm;
158605f04ae4SVitaly Kuznetsov 	struct kvm_hv *hv = to_kvm_hv(kvm);
1587e83d5887SAndrey Smetanin 
1588b4128000SVitaly Kuznetsov 	if (unlikely(!host && !hv_check_msr_access(to_hv_vcpu(vcpu), msr)))
1589b4128000SVitaly Kuznetsov 		return 1;
1590b4128000SVitaly Kuznetsov 
1591e83d5887SAndrey Smetanin 	switch (msr) {
1592e83d5887SAndrey Smetanin 	case HV_X64_MSR_GUEST_OS_ID:
1593e83d5887SAndrey Smetanin 		data = hv->hv_guest_os_id;
1594e83d5887SAndrey Smetanin 		break;
1595e83d5887SAndrey Smetanin 	case HV_X64_MSR_HYPERCALL:
1596e83d5887SAndrey Smetanin 		data = hv->hv_hypercall;
1597e83d5887SAndrey Smetanin 		break;
159893bf4172SAndrey Smetanin 	case HV_X64_MSR_TIME_REF_COUNT:
159993bf4172SAndrey Smetanin 		data = get_time_ref_counter(kvm);
1600e83d5887SAndrey Smetanin 		break;
1601e83d5887SAndrey Smetanin 	case HV_X64_MSR_REFERENCE_TSC:
1602e83d5887SAndrey Smetanin 		data = hv->hv_tsc_page;
1603e83d5887SAndrey Smetanin 		break;
1604e7d9513bSAndrey Smetanin 	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
160505f04ae4SVitaly Kuznetsov 		return kvm_hv_msr_get_crash_data(kvm,
1606e7d9513bSAndrey Smetanin 						 msr - HV_X64_MSR_CRASH_P0,
1607e7d9513bSAndrey Smetanin 						 pdata);
1608e7d9513bSAndrey Smetanin 	case HV_X64_MSR_CRASH_CTL:
160905f04ae4SVitaly Kuznetsov 		return kvm_hv_msr_get_crash_ctl(kvm, pdata);
1610e516cebbSAndrey Smetanin 	case HV_X64_MSR_RESET:
1611e516cebbSAndrey Smetanin 		data = 0;
1612e516cebbSAndrey Smetanin 		break;
1613a2e164e7SVitaly Kuznetsov 	case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
1614a2e164e7SVitaly Kuznetsov 		data = hv->hv_reenlightenment_control;
1615a2e164e7SVitaly Kuznetsov 		break;
1616a2e164e7SVitaly Kuznetsov 	case HV_X64_MSR_TSC_EMULATION_CONTROL:
1617a2e164e7SVitaly Kuznetsov 		data = hv->hv_tsc_emulation_control;
1618a2e164e7SVitaly Kuznetsov 		break;
1619a2e164e7SVitaly Kuznetsov 	case HV_X64_MSR_TSC_EMULATION_STATUS:
1620a2e164e7SVitaly Kuznetsov 		data = hv->hv_tsc_emulation_status;
1621a2e164e7SVitaly Kuznetsov 		break;
16222be1bd3aSVitaly Kuznetsov 	case HV_X64_MSR_TSC_INVARIANT_CONTROL:
16232be1bd3aSVitaly Kuznetsov 		data = hv->hv_invtsc_control;
16242be1bd3aSVitaly Kuznetsov 		break;
1625f97f5a56SJon Doron 	case HV_X64_MSR_SYNDBG_OPTIONS:
1626f97f5a56SJon Doron 	case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
1627f97f5a56SJon Doron 		return syndbg_get_msr(vcpu, msr, pdata, host);
1628e83d5887SAndrey Smetanin 	default:
1629e83d5887SAndrey Smetanin 		vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
1630e83d5887SAndrey Smetanin 		return 1;
1631e83d5887SAndrey Smetanin 	}
1632e83d5887SAndrey Smetanin 
1633e83d5887SAndrey Smetanin 	*pdata = data;
1634e83d5887SAndrey Smetanin 	return 0;
1635e83d5887SAndrey Smetanin }
1636e83d5887SAndrey Smetanin 
163744883f01SPaolo Bonzini static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
163844883f01SPaolo Bonzini 			  bool host)
1639e83d5887SAndrey Smetanin {
1640e83d5887SAndrey Smetanin 	u64 data = 0;
16419ff5e030SVitaly Kuznetsov 	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
1642e83d5887SAndrey Smetanin 
1643b4128000SVitaly Kuznetsov 	if (unlikely(!host && !hv_check_msr_access(hv_vcpu, msr)))
1644b4128000SVitaly Kuznetsov 		return 1;
1645b4128000SVitaly Kuznetsov 
1646e83d5887SAndrey Smetanin 	switch (msr) {
1647d3457c87SRoman Kagan 	case HV_X64_MSR_VP_INDEX:
16481779a39fSVitaly Kuznetsov 		data = hv_vcpu->vp_index;
1649e83d5887SAndrey Smetanin 		break;
1650e83d5887SAndrey Smetanin 	case HV_X64_MSR_EOI:
1651e83d5887SAndrey Smetanin 		return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
1652e83d5887SAndrey Smetanin 	case HV_X64_MSR_ICR:
1653e83d5887SAndrey Smetanin 		return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata);
1654e83d5887SAndrey Smetanin 	case HV_X64_MSR_TPR:
1655e83d5887SAndrey Smetanin 		return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
1656d4abc577SLadi Prosek 	case HV_X64_MSR_VP_ASSIST_PAGE:
16571779a39fSVitaly Kuznetsov 		data = hv_vcpu->hv_vapic;
1658e83d5887SAndrey Smetanin 		break;
16599eec50b8SAndrey Smetanin 	case HV_X64_MSR_VP_RUNTIME:
16601779a39fSVitaly Kuznetsov 		data = current_task_runtime_100ns() + hv_vcpu->runtime_offset;
16619eec50b8SAndrey Smetanin 		break;
16625c919412SAndrey Smetanin 	case HV_X64_MSR_SCONTROL:
16635c919412SAndrey Smetanin 	case HV_X64_MSR_SVERSION:
16645c919412SAndrey Smetanin 	case HV_X64_MSR_SIEFP:
16655c919412SAndrey Smetanin 	case HV_X64_MSR_SIMP:
16665c919412SAndrey Smetanin 	case HV_X64_MSR_EOM:
16675c919412SAndrey Smetanin 	case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
1668e0121fa2SVitaly Kuznetsov 		return synic_get_msr(to_hv_synic(vcpu), msr, pdata, host);
16691f4b34f8SAndrey Smetanin 	case HV_X64_MSR_STIMER0_CONFIG:
16701f4b34f8SAndrey Smetanin 	case HV_X64_MSR_STIMER1_CONFIG:
16711f4b34f8SAndrey Smetanin 	case HV_X64_MSR_STIMER2_CONFIG:
16721f4b34f8SAndrey Smetanin 	case HV_X64_MSR_STIMER3_CONFIG: {
16731f4b34f8SAndrey Smetanin 		int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2;
16741f4b34f8SAndrey Smetanin 
1675aafa97fdSVitaly Kuznetsov 		return stimer_get_config(to_hv_stimer(vcpu, timer_index),
16761f4b34f8SAndrey Smetanin 					 pdata);
16771f4b34f8SAndrey Smetanin 	}
16781f4b34f8SAndrey Smetanin 	case HV_X64_MSR_STIMER0_COUNT:
16791f4b34f8SAndrey Smetanin 	case HV_X64_MSR_STIMER1_COUNT:
16801f4b34f8SAndrey Smetanin 	case HV_X64_MSR_STIMER2_COUNT:
16811f4b34f8SAndrey Smetanin 	case HV_X64_MSR_STIMER3_COUNT: {
16821f4b34f8SAndrey Smetanin 		int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2;
16831f4b34f8SAndrey Smetanin 
1684aafa97fdSVitaly Kuznetsov 		return stimer_get_count(to_hv_stimer(vcpu, timer_index),
16851f4b34f8SAndrey Smetanin 					pdata);
16861f4b34f8SAndrey Smetanin 	}
168772c139baSLadi Prosek 	case HV_X64_MSR_TSC_FREQUENCY:
168872c139baSLadi Prosek 		data = (u64)vcpu->arch.virtual_tsc_khz * 1000;
168972c139baSLadi Prosek 		break;
169072c139baSLadi Prosek 	case HV_X64_MSR_APIC_FREQUENCY:
169172c139baSLadi Prosek 		data = APIC_BUS_FREQUENCY;
169272c139baSLadi Prosek 		break;
1693e83d5887SAndrey Smetanin 	default:
1694e83d5887SAndrey Smetanin 		vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
1695e83d5887SAndrey Smetanin 		return 1;
1696e83d5887SAndrey Smetanin 	}
1697e83d5887SAndrey Smetanin 	*pdata = data;
1698e83d5887SAndrey Smetanin 	return 0;
1699e83d5887SAndrey Smetanin }
1700e83d5887SAndrey Smetanin 
1701e7d9513bSAndrey Smetanin int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
1702e83d5887SAndrey Smetanin {
170305f04ae4SVitaly Kuznetsov 	struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
170405f04ae4SVitaly Kuznetsov 
17058f014550SVitaly Kuznetsov 	if (!host && !vcpu->arch.hyperv_enabled)
17068f014550SVitaly Kuznetsov 		return 1;
17078f014550SVitaly Kuznetsov 
1708fc08b628SVitaly Kuznetsov 	if (kvm_hv_vcpu_init(vcpu))
1709fc08b628SVitaly Kuznetsov 		return 1;
1710fc08b628SVitaly Kuznetsov 
1711e83d5887SAndrey Smetanin 	if (kvm_hv_msr_partition_wide(msr)) {
1712e83d5887SAndrey Smetanin 		int r;
1713e83d5887SAndrey Smetanin 
171405f04ae4SVitaly Kuznetsov 		mutex_lock(&hv->hv_lock);
1715e7d9513bSAndrey Smetanin 		r = kvm_hv_set_msr_pw(vcpu, msr, data, host);
171605f04ae4SVitaly Kuznetsov 		mutex_unlock(&hv->hv_lock);
1717e83d5887SAndrey Smetanin 		return r;
1718e83d5887SAndrey Smetanin 	} else
17199eec50b8SAndrey Smetanin 		return kvm_hv_set_msr(vcpu, msr, data, host);
1720e83d5887SAndrey Smetanin }
1721e83d5887SAndrey Smetanin 
172244883f01SPaolo Bonzini int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
1723e83d5887SAndrey Smetanin {
172405f04ae4SVitaly Kuznetsov 	struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
172505f04ae4SVitaly Kuznetsov 
17268f014550SVitaly Kuznetsov 	if (!host && !vcpu->arch.hyperv_enabled)
17278f014550SVitaly Kuznetsov 		return 1;
17288f014550SVitaly Kuznetsov 
1729fc08b628SVitaly Kuznetsov 	if (kvm_hv_vcpu_init(vcpu))
1730fc08b628SVitaly Kuznetsov 		return 1;
1731fc08b628SVitaly Kuznetsov 
1732e83d5887SAndrey Smetanin 	if (kvm_hv_msr_partition_wide(msr)) {
1733e83d5887SAndrey Smetanin 		int r;
1734e83d5887SAndrey Smetanin 
173505f04ae4SVitaly Kuznetsov 		mutex_lock(&hv->hv_lock);
1736f97f5a56SJon Doron 		r = kvm_hv_get_msr_pw(vcpu, msr, pdata, host);
173705f04ae4SVitaly Kuznetsov 		mutex_unlock(&hv->hv_lock);
1738e83d5887SAndrey Smetanin 		return r;
1739e83d5887SAndrey Smetanin 	} else
174044883f01SPaolo Bonzini 		return kvm_hv_get_msr(vcpu, msr, pdata, host);
1741e83d5887SAndrey Smetanin }
1742e83d5887SAndrey Smetanin 
17439c52f6b3SSean Christopherson static void sparse_set_to_vcpu_mask(struct kvm *kvm, u64 *sparse_banks,
17449c52f6b3SSean Christopherson 				    u64 valid_bank_mask, unsigned long *vcpu_mask)
1745c7012676SVitaly Kuznetsov {
174605f04ae4SVitaly Kuznetsov 	struct kvm_hv *hv = to_kvm_hv(kvm);
17479c52f6b3SSean Christopherson 	bool has_mismatch = atomic_read(&hv->num_mismatched_vp_indexes);
17489c52f6b3SSean Christopherson 	u64 vp_bitmap[KVM_HV_MAX_SPARSE_VCPU_SET_BITS];
1749f21dd494SVitaly Kuznetsov 	struct kvm_vcpu *vcpu;
175046808a4cSMarc Zyngier 	int bank, sbank = 0;
175146808a4cSMarc Zyngier 	unsigned long i;
17529c52f6b3SSean Christopherson 	u64 *bitmap;
1753c7012676SVitaly Kuznetsov 
17549c52f6b3SSean Christopherson 	BUILD_BUG_ON(sizeof(vp_bitmap) >
17559c52f6b3SSean Christopherson 		     sizeof(*vcpu_mask) * BITS_TO_LONGS(KVM_MAX_VCPUS));
17569c52f6b3SSean Christopherson 
17579c52f6b3SSean Christopherson 	/*
17589c52f6b3SSean Christopherson 	 * If vp_index == vcpu_idx for all vCPUs, fill vcpu_mask directly, else
17599c52f6b3SSean Christopherson 	 * fill a temporary buffer and manually test each vCPU's VP index.
17609c52f6b3SSean Christopherson 	 */
17619c52f6b3SSean Christopherson 	if (likely(!has_mismatch))
17629c52f6b3SSean Christopherson 		bitmap = (u64 *)vcpu_mask;
17639c52f6b3SSean Christopherson 	else
17649c52f6b3SSean Christopherson 		bitmap = vp_bitmap;
17659c52f6b3SSean Christopherson 
17669c52f6b3SSean Christopherson 	/*
17679c52f6b3SSean Christopherson 	 * Each set of 64 VPs is packed into sparse_banks, with valid_bank_mask
17689c52f6b3SSean Christopherson 	 * having a '1' for each bank that exists in sparse_banks.  Sets must
17699c52f6b3SSean Christopherson 	 * be in ascending order, i.e. bank0..bankN.
17709c52f6b3SSean Christopherson 	 */
17719c52f6b3SSean Christopherson 	memset(bitmap, 0, sizeof(vp_bitmap));
1772f21dd494SVitaly Kuznetsov 	for_each_set_bit(bank, (unsigned long *)&valid_bank_mask,
1773f21dd494SVitaly Kuznetsov 			 KVM_HV_MAX_SPARSE_VCPU_SET_BITS)
17749c52f6b3SSean Christopherson 		bitmap[bank] = sparse_banks[sbank++];
1775c7012676SVitaly Kuznetsov 
17769c52f6b3SSean Christopherson 	if (likely(!has_mismatch))
17779c52f6b3SSean Christopherson 		return;
1778c7012676SVitaly Kuznetsov 
17799c52f6b3SSean Christopherson 	bitmap_zero(vcpu_mask, KVM_MAX_VCPUS);
1780f21dd494SVitaly Kuznetsov 	kvm_for_each_vcpu(i, vcpu, kvm) {
1781f2bc14b6SVitaly Kuznetsov 		if (test_bit(kvm_hv_get_vpindex(vcpu), (unsigned long *)vp_bitmap))
17829c52f6b3SSean Christopherson 			__set_bit(i, vcpu_mask);
1783f21dd494SVitaly Kuznetsov 	}
1784c7012676SVitaly Kuznetsov }
1785c7012676SVitaly Kuznetsov 
1786b6c2c22fSVitaly Kuznetsov static bool hv_is_vp_in_sparse_set(u32 vp_id, u64 valid_bank_mask, u64 sparse_banks[])
1787b6c2c22fSVitaly Kuznetsov {
1788b6c2c22fSVitaly Kuznetsov 	int valid_bit_nr = vp_id / HV_VCPUS_PER_SPARSE_BANK;
1789b6c2c22fSVitaly Kuznetsov 	unsigned long sbank;
1790b6c2c22fSVitaly Kuznetsov 
1791b6c2c22fSVitaly Kuznetsov 	if (!test_bit(valid_bit_nr, (unsigned long *)&valid_bank_mask))
1792b6c2c22fSVitaly Kuznetsov 		return false;
1793b6c2c22fSVitaly Kuznetsov 
1794b6c2c22fSVitaly Kuznetsov 	/*
1795b6c2c22fSVitaly Kuznetsov 	 * The index into the sparse bank is the number of preceding bits in
1796b6c2c22fSVitaly Kuznetsov 	 * the valid mask.  Optimize for VMs with <64 vCPUs by skipping the
1797b6c2c22fSVitaly Kuznetsov 	 * fancy math if there can't possibly be preceding bits.
1798b6c2c22fSVitaly Kuznetsov 	 */
1799b6c2c22fSVitaly Kuznetsov 	if (valid_bit_nr)
1800b6c2c22fSVitaly Kuznetsov 		sbank = hweight64(valid_bank_mask & GENMASK_ULL(valid_bit_nr - 1, 0));
1801b6c2c22fSVitaly Kuznetsov 	else
1802b6c2c22fSVitaly Kuznetsov 		sbank = 0;
1803b6c2c22fSVitaly Kuznetsov 
1804b6c2c22fSVitaly Kuznetsov 	return test_bit(vp_id % HV_VCPUS_PER_SPARSE_BANK,
1805b6c2c22fSVitaly Kuznetsov 			(unsigned long *)&sparse_banks[sbank]);
1806b6c2c22fSVitaly Kuznetsov }
1807b6c2c22fSVitaly Kuznetsov 
/* Parsed state of an in-flight Hyper-V hypercall. */
struct kvm_hv_hcall {
	/* Hypercall input data */
	u64 param;	/* raw hypercall input value */
	u64 ingpa;	/* input GPA (slow calls) or first input register (fast calls) */
	u64 outgpa;	/* output GPA (slow calls) or second input register (fast calls) */
	u16 code;	/* hypercall call code */
	u16 var_cnt;	/* number of 8-byte entries in the variable-size header */
	u16 rep_cnt;	/* total repetition count for 'rep' hypercalls */
	u16 rep_idx;	/* starting repetition index */
	bool fast;	/* input passed via registers instead of memory */
	bool rep;	/* this is a 'rep' hypercall */
	sse128_t xmm[HV_HYPERCALL_MAX_XMM_REGISTERS];	/* XMM input for 'fast' calls */

	/*
	 * Current read offset when KVM reads hypercall input data gradually,
	 * either offset in bytes from 'ingpa' for regular hypercalls or the
	 * number of already consumed 'XMM halves' for 'fast' hypercalls.
	 */
	union {
		gpa_t data_offset;
		int consumed_xmm_halves;
	};
};
1831bd38b320SSiddharth Chandrasekaran 
183256b5354fSSean Christopherson 
/*
 * Read up to @cnt_cap of the @orig_cnt requested u64 entries of hypercall
 * input into @data, either from the XMM registers stashed in @hc ('fast'
 * hypercalls) or from guest memory at 'ingpa' + the current read offset.
 *
 * Returns 0 on success, HV_STATUS_INVALID_HYPERCALL_INPUT when a fast call
 * requests more halves than the XMM registers hold, or the result of
 * kvm_read_guest() for the slow path.
 */
static int kvm_hv_get_hc_data(struct kvm *kvm, struct kvm_hv_hcall *hc,
			      u16 orig_cnt, u16 cnt_cap, u64 *data)
{
	/*
	 * Preserve the original count when ignoring entries via a "cap", KVM
	 * still needs to validate the guest input (though the non-XMM path
	 * punts on the checks).
	 */
	u16 cnt = min(orig_cnt, cnt_cap);
	int i, j;

	if (hc->fast) {
		/*
		 * Each XMM holds two sparse banks, but do not count halves that
		 * have already been consumed for hypercall parameters.
		 */
		if (orig_cnt > 2 * HV_HYPERCALL_MAX_XMM_REGISTERS - hc->consumed_xmm_halves)
			return HV_STATUS_INVALID_HYPERCALL_INPUT;

		/* Odd halves come from the high lane, even from the low lane. */
		for (i = 0; i < cnt; i++) {
			j = i + hc->consumed_xmm_halves;
			if (j % 2)
				data[i] = sse128_hi(hc->xmm[j / 2]);
			else
				data[i] = sse128_lo(hc->xmm[j / 2]);
		}
		return 0;
	}

	return kvm_read_guest(kvm, hc->ingpa + hc->data_offset, data,
			      cnt * sizeof(*data));
}
186556b5354fSSean Christopherson 
/*
 * Read the sparse VP set banks from the hypercall's variable-size header
 * into @sparse_banks, rejecting counts beyond what the TLFS permits.
 */
static u64 kvm_get_sparse_vp_set(struct kvm *kvm, struct kvm_hv_hcall *hc,
				 u64 *sparse_banks)
{
	if (hc->var_cnt > HV_MAX_SPARSE_VCPU_BANKS)
		return -EINVAL;

	/* Cap var_cnt to ignore banks that cannot contain a legal VP index. */
	return kvm_hv_get_hc_data(kvm, hc, hc->var_cnt, KVM_HV_MAX_SPARSE_VCPU_SET_BITS,
				  sparse_banks);
}
1876a0dd008fSSean Christopherson 
/* Read all 'rep_cnt' TLB flush entries from the hypercall input into @entries. */
static int kvm_hv_get_tlb_flush_entries(struct kvm *kvm, struct kvm_hv_hcall *hc, u64 entries[])
{
	return kvm_hv_get_hc_data(kvm, hc, hc->rep_cnt, hc->rep_cnt, entries);
}
188126097086SVitaly Kuznetsov 
/*
 * Queue @count TLB flush entries onto the target vCPU's flush fifo.  If the
 * entries don't all fit (one slot is always kept free), a single 'flush all'
 * entry is queued instead and the consumer falls back to a full flush.
 */
static void hv_tlb_flush_enqueue(struct kvm_vcpu *vcpu,
				 struct kvm_vcpu_hv_tlb_flush_fifo *tlb_flush_fifo,
				 u64 *entries, int count)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	u64 flush_all_entry = KVM_HV_TLB_FLUSHALL_ENTRY;

	/* Nothing to queue for vCPUs without Hyper-V context. */
	if (!hv_vcpu)
		return;

	spin_lock(&tlb_flush_fifo->write_lock);

	/*
	 * All entries should fit on the fifo leaving one free for 'flush all'
	 * entry in case another request comes in. In case there's not enough
	 * space, just put 'flush all' entry there.
	 */
	if (count && entries && count < kfifo_avail(&tlb_flush_fifo->entries)) {
		WARN_ON(kfifo_in(&tlb_flush_fifo->entries, entries, count) != count);
		goto out_unlock;
	}

	/*
	 * Note: full fifo always contains 'flush all' entry, no need to check the
	 * return value.
	 */
	kfifo_in(&tlb_flush_fifo->entries, &flush_all_entry, 1);

out_unlock:
	spin_unlock(&tlb_flush_fifo->write_lock);
}
19130823570fSVitaly Kuznetsov 
/*
 * Drain the vCPU's TLB flush fifo and flush each queued GVA range.  Returns
 * 0 on success, or a negative error telling the caller to perform a full
 * TLB flush instead: -EINVAL when fine-grained flushing isn't possible,
 * -ENOSPC when a 'flush all' entry was found on the fifo.
 */
int kvm_hv_vcpu_flush_tlb(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv_tlb_flush_fifo *tlb_flush_fifo;
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	u64 entries[KVM_HV_TLB_FLUSH_FIFO_SIZE];
	int i, j, count;
	gva_t gva;

	/* Fine-grained flush requires TDP and an initialized Hyper-V vCPU. */
	if (!tdp_enabled || !hv_vcpu)
		return -EINVAL;

	/* Pick the L1 or L2 fifo depending on the current guest mode. */
	tlb_flush_fifo = kvm_hv_get_tlb_flush_fifo(vcpu, is_guest_mode(vcpu));

	count = kfifo_out(&tlb_flush_fifo->entries, entries, KVM_HV_TLB_FLUSH_FIFO_SIZE);

	for (i = 0; i < count; i++) {
		if (entries[i] == KVM_HV_TLB_FLUSHALL_ENTRY)
			goto out_flush_all;

		/*
		 * Lower 12 bits of 'address' encode the number of additional
		 * pages to flush.
		 */
		gva = entries[i] & PAGE_MASK;
		for (j = 0; j < (entries[i] & ~PAGE_MASK) + 1; j++)
			static_call(kvm_x86_flush_tlb_gva)(vcpu, gva + j * PAGE_SIZE);

		++vcpu->stat.tlb_flush;
	}
	return 0;

out_flush_all:
	/* Discard any remaining entries; they're subsumed by the full flush. */
	kfifo_reset_out(&tlb_flush_fifo->entries);

	/* Fall back to full flush. */
	return -ENOSPC;
}
19510823570fSVitaly Kuznetsov 
/*
 * Handle the HVCALL_FLUSH_VIRTUAL_ADDRESS_{SPACE,LIST}{,_EX} hypercalls:
 * parse the input (memory-based or 'fast'/XMM-based), queue the requested
 * flush entries onto the target vCPUs' fifos and kick those vCPUs with
 * KVM_REQ_HV_TLB_FLUSH.  Handles requests from L1 as well as 'direct' TLB
 * flush requests issued by an L2 guest.
 */
static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	u64 *sparse_banks = hv_vcpu->sparse_banks;
	struct kvm *kvm = vcpu->kvm;
	struct hv_tlb_flush_ex flush_ex;
	struct hv_tlb_flush flush;
	DECLARE_BITMAP(vcpu_mask, KVM_MAX_VCPUS);
	struct kvm_vcpu_hv_tlb_flush_fifo *tlb_flush_fifo;
	/*
	 * Normally, there can be no more than 'KVM_HV_TLB_FLUSH_FIFO_SIZE'
	 * entries on the TLB flush fifo. The last entry, however, needs to be
	 * always left free for 'flush all' entry which gets placed when
	 * there is not enough space to put all the requested entries.
	 */
	u64 __tlb_flush_entries[KVM_HV_TLB_FLUSH_FIFO_SIZE - 1];
	u64 *tlb_flush_entries;
	u64 valid_bank_mask;
	struct kvm_vcpu *v;
	unsigned long i;
	bool all_cpus;

	/*
	 * The Hyper-V TLFS doesn't allow more than HV_MAX_SPARSE_VCPU_BANKS
	 * sparse banks. Fail the build if KVM's max allowed number of
	 * vCPUs (>4096) exceeds this limit.
	 */
	BUILD_BUG_ON(KVM_HV_MAX_SPARSE_VCPU_SET_BITS > HV_MAX_SPARSE_VCPU_BANKS);

	/*
	 * 'Slow' hypercall's first parameter is the address in guest's memory
	 * where hypercall parameters are placed. This is either a GPA or a
	 * nested GPA when KVM is handling the call from L2 ('direct' TLB
	 * flush).  Translate the address here so the memory can be uniformly
	 * read with kvm_read_guest().
	 */
	if (!hc->fast && is_guest_mode(vcpu)) {
		hc->ingpa = translate_nested_gpa(vcpu, hc->ingpa, 0, NULL);
		if (unlikely(hc->ingpa == INVALID_GPA))
			return HV_STATUS_INVALID_HYPERCALL_INPUT;
	}

	if (hc->code == HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST ||
	    hc->code == HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE) {
		/* Non-EX flavors: the VP set is a plain 64-bit processor mask. */
		if (hc->fast) {
			flush.address_space = hc->ingpa;
			flush.flags = hc->outgpa;
			flush.processor_mask = sse128_lo(hc->xmm[0]);
			hc->consumed_xmm_halves = 1;
		} else {
			if (unlikely(kvm_read_guest(kvm, hc->ingpa,
						    &flush, sizeof(flush))))
				return HV_STATUS_INVALID_HYPERCALL_INPUT;
			hc->data_offset = sizeof(flush);
		}

		trace_kvm_hv_flush_tlb(flush.processor_mask,
				       flush.address_space, flush.flags,
				       is_guest_mode(vcpu));

		valid_bank_mask = BIT_ULL(0);
		sparse_banks[0] = flush.processor_mask;

		/*
		 * Work around possible WS2012 bug: it sends hypercalls
		 * with processor_mask = 0x0 and HV_FLUSH_ALL_PROCESSORS clear,
		 * while also expecting us to flush something and crashing if
		 * we don't. Let's treat processor_mask == 0 same as
		 * HV_FLUSH_ALL_PROCESSORS.
		 */
		all_cpus = (flush.flags & HV_FLUSH_ALL_PROCESSORS) ||
			flush.processor_mask == 0;
	} else {
		/* EX flavors: the VP set is a sparse set with a variable header. */
		if (hc->fast) {
			flush_ex.address_space = hc->ingpa;
			flush_ex.flags = hc->outgpa;
			memcpy(&flush_ex.hv_vp_set,
			       &hc->xmm[0], sizeof(hc->xmm[0]));
			hc->consumed_xmm_halves = 2;
		} else {
			if (unlikely(kvm_read_guest(kvm, hc->ingpa, &flush_ex,
						    sizeof(flush_ex))))
				return HV_STATUS_INVALID_HYPERCALL_INPUT;
			hc->data_offset = sizeof(flush_ex);
		}

		trace_kvm_hv_flush_tlb_ex(flush_ex.hv_vp_set.valid_bank_mask,
					  flush_ex.hv_vp_set.format,
					  flush_ex.address_space,
					  flush_ex.flags, is_guest_mode(vcpu));

		valid_bank_mask = flush_ex.hv_vp_set.valid_bank_mask;
		all_cpus = flush_ex.hv_vp_set.format !=
			HV_GENERIC_SET_SPARSE_4K;

		/* The bank count must match the number of banks provided. */
		if (hc->var_cnt != hweight64(valid_bank_mask))
			return HV_STATUS_INVALID_HYPERCALL_INPUT;

		if (!all_cpus) {
			/* An empty sparse set targets no vCPUs; trivially done. */
			if (!hc->var_cnt)
				goto ret_success;

			if (kvm_get_sparse_vp_set(kvm, hc, sparse_banks))
				return HV_STATUS_INVALID_HYPERCALL_INPUT;
		}

		/*
		 * Hyper-V TLFS doesn't explicitly forbid non-empty sparse vCPU
		 * banks (and, thus, non-zero 'var_cnt') for the 'all vCPUs'
		 * case (HV_GENERIC_SET_ALL).  Always adjust data_offset and
		 * consumed_xmm_halves to make sure TLB flush entries are read
		 * from the correct offset.
		 */
		if (hc->fast)
			hc->consumed_xmm_halves += hc->var_cnt;
		else
			hc->data_offset += hc->var_cnt * sizeof(sparse_banks[0]);
	}

	/*
	 * 'SPACE' flavors and oversized 'LIST' requests get no per-address
	 * entries (NULL == flush everything); otherwise read the GVA list.
	 */
	if (hc->code == HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE ||
	    hc->code == HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX ||
	    hc->rep_cnt > ARRAY_SIZE(__tlb_flush_entries)) {
		tlb_flush_entries = NULL;
	} else {
		if (kvm_hv_get_tlb_flush_entries(kvm, hc, __tlb_flush_entries))
			return HV_STATUS_INVALID_HYPERCALL_INPUT;
		tlb_flush_entries = __tlb_flush_entries;
	}

	/*
	 * vcpu->arch.cr3 may not be up-to-date for running vCPUs so we can't
	 * analyze it here, flush TLB regardless of the specified address space.
	 */
	if (all_cpus && !is_guest_mode(vcpu)) {
		/* L1 request targeting every vCPU. */
		kvm_for_each_vcpu(i, v, kvm) {
			tlb_flush_fifo = kvm_hv_get_tlb_flush_fifo(v, false);
			hv_tlb_flush_enqueue(v, tlb_flush_fifo,
					     tlb_flush_entries, hc->rep_cnt);
		}

		kvm_make_all_cpus_request(kvm, KVM_REQ_HV_TLB_FLUSH);
	} else if (!is_guest_mode(vcpu)) {
		/* L1 request targeting the sparse VP set only. */
		sparse_set_to_vcpu_mask(kvm, sparse_banks, valid_bank_mask, vcpu_mask);

		for_each_set_bit(i, vcpu_mask, KVM_MAX_VCPUS) {
			v = kvm_get_vcpu(kvm, i);
			if (!v)
				continue;
			tlb_flush_fifo = kvm_hv_get_tlb_flush_fifo(v, false);
			hv_tlb_flush_enqueue(v, tlb_flush_fifo,
					     tlb_flush_entries, hc->rep_cnt);
		}

		kvm_make_vcpus_request_mask(kvm, KVM_REQ_HV_TLB_FLUSH, vcpu_mask);
	} else {
		/* L2 'direct' flush: match target L2 vCPUs by nested VM/VP id. */
		struct kvm_vcpu_hv *hv_v;

		bitmap_zero(vcpu_mask, KVM_MAX_VCPUS);

		kvm_for_each_vcpu(i, v, kvm) {
			hv_v = to_hv_vcpu(v);

			/*
			 * The following check races with nested vCPUs entering/exiting
			 * and/or migrating between L1's vCPUs, however the only case when
			 * KVM *must* flush the TLB is when the target L2 vCPU keeps
			 * running on the same L1 vCPU from the moment of the request until
			 * kvm_hv_flush_tlb() returns. TLB is fully flushed in all other
			 * cases, e.g. when the target L2 vCPU migrates to a different L1
			 * vCPU or when the corresponding L1 vCPU temporary switches to a
			 * different L2 vCPU while the request is being processed.
			 */
			if (!hv_v || hv_v->nested.vm_id != hv_vcpu->nested.vm_id)
				continue;

			if (!all_cpus &&
			    !hv_is_vp_in_sparse_set(hv_v->nested.vp_id, valid_bank_mask,
						    sparse_banks))
				continue;

			__set_bit(i, vcpu_mask);
			tlb_flush_fifo = kvm_hv_get_tlb_flush_fifo(v, true);
			hv_tlb_flush_enqueue(v, tlb_flush_fifo,
					     tlb_flush_entries, hc->rep_cnt);
		}

		kvm_make_vcpus_request_mask(kvm, KVM_REQ_HV_TLB_FLUSH, vcpu_mask);
	}

ret_success:
	/* We always do full TLB flush, set 'Reps completed' = 'Rep Count' */
	return (u64)HV_STATUS_SUCCESS |
		((u64)hc->rep_cnt << HV_HYPERCALL_REP_COMP_OFFSET);
}
2146e2f11f42SVitaly Kuznetsov 
2147b6c2c22fSVitaly Kuznetsov static void kvm_hv_send_ipi_to_many(struct kvm *kvm, u32 vector,
2148b6c2c22fSVitaly Kuznetsov 				    u64 *sparse_banks, u64 valid_bank_mask)
2149f21dd494SVitaly Kuznetsov {
2150f21dd494SVitaly Kuznetsov 	struct kvm_lapic_irq irq = {
2151f21dd494SVitaly Kuznetsov 		.delivery_mode = APIC_DM_FIXED,
2152f21dd494SVitaly Kuznetsov 		.vector = vector
2153f21dd494SVitaly Kuznetsov 	};
2154f21dd494SVitaly Kuznetsov 	struct kvm_vcpu *vcpu;
215546808a4cSMarc Zyngier 	unsigned long i;
2156f21dd494SVitaly Kuznetsov 
2157f21dd494SVitaly Kuznetsov 	kvm_for_each_vcpu(i, vcpu, kvm) {
2158b6c2c22fSVitaly Kuznetsov 		if (sparse_banks &&
2159b6c2c22fSVitaly Kuznetsov 		    !hv_is_vp_in_sparse_set(kvm_hv_get_vpindex(vcpu),
2160b6c2c22fSVitaly Kuznetsov 					    valid_bank_mask, sparse_banks))
2161f21dd494SVitaly Kuznetsov 			continue;
2162f21dd494SVitaly Kuznetsov 
2163f21dd494SVitaly Kuznetsov 		/* We fail only when APIC is disabled */
2164f21dd494SVitaly Kuznetsov 		kvm_apic_set_irq(vcpu, &irq, NULL);
2165f21dd494SVitaly Kuznetsov 	}
2166f21dd494SVitaly Kuznetsov }
2167f21dd494SVitaly Kuznetsov 
/*
 * Handle the HVCALL_SEND_IPI{,_EX} hypercalls: parse the vector and target
 * VP set from memory-based or 'fast' (register/XMM) input and deliver the
 * IPI to the requested vCPUs.
 */
static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	u64 *sparse_banks = hv_vcpu->sparse_banks;
	struct kvm *kvm = vcpu->kvm;
	struct hv_send_ipi_ex send_ipi_ex;
	struct hv_send_ipi send_ipi;
	u64 valid_bank_mask;
	u32 vector;
	bool all_cpus;

	if (hc->code == HVCALL_SEND_IPI) {
		/* Non-EX flavor: the VP set is a plain 64-bit CPU mask. */
		if (!hc->fast) {
			if (unlikely(kvm_read_guest(kvm, hc->ingpa, &send_ipi,
						    sizeof(send_ipi))))
				return HV_STATUS_INVALID_HYPERCALL_INPUT;
			sparse_banks[0] = send_ipi.cpu_mask;
			vector = send_ipi.vector;
		} else {
			/* 'reserved' part of hv_send_ipi should be 0 */
			if (unlikely(hc->ingpa >> 32 != 0))
				return HV_STATUS_INVALID_HYPERCALL_INPUT;
			sparse_banks[0] = hc->outgpa;
			vector = (u32)hc->ingpa;
		}
		all_cpus = false;
		valid_bank_mask = BIT_ULL(0);

		trace_kvm_hv_send_ipi(vector, sparse_banks[0]);
	} else {
		/* EX flavor: the VP set is a sparse set with a variable header. */
		if (!hc->fast) {
			if (unlikely(kvm_read_guest(kvm, hc->ingpa, &send_ipi_ex,
						    sizeof(send_ipi_ex))))
				return HV_STATUS_INVALID_HYPERCALL_INPUT;
		} else {
			send_ipi_ex.vector = (u32)hc->ingpa;
			send_ipi_ex.vp_set.format = hc->outgpa;
			send_ipi_ex.vp_set.valid_bank_mask = sse128_lo(hc->xmm[0]);
		}

		trace_kvm_hv_send_ipi_ex(send_ipi_ex.vector,
					 send_ipi_ex.vp_set.format,
					 send_ipi_ex.vp_set.valid_bank_mask);

		vector = send_ipi_ex.vector;
		valid_bank_mask = send_ipi_ex.vp_set.valid_bank_mask;
		all_cpus = send_ipi_ex.vp_set.format == HV_GENERIC_SET_ALL;

		/* The bank count must match the number of banks provided. */
		if (hc->var_cnt != hweight64(valid_bank_mask))
			return HV_STATUS_INVALID_HYPERCALL_INPUT;

		/* 'all vCPUs' needs no sparse banks, only vector validation. */
		if (all_cpus)
			goto check_and_send_ipi;

		/* An empty sparse set targets no vCPUs; trivially done. */
		if (!hc->var_cnt)
			goto ret_success;

		if (!hc->fast)
			hc->data_offset = offsetof(struct hv_send_ipi_ex,
						   vp_set.bank_contents);
		else
			hc->consumed_xmm_halves = 1;

		if (kvm_get_sparse_vp_set(kvm, hc, sparse_banks))
			return HV_STATUS_INVALID_HYPERCALL_INPUT;
	}

check_and_send_ipi:
	/* Only vectors in the TLFS-permitted range may be sent this way. */
	if ((vector < HV_IPI_LOW_VECTOR) || (vector > HV_IPI_HIGH_VECTOR))
		return HV_STATUS_INVALID_HYPERCALL_INPUT;

	if (all_cpus)
		kvm_hv_send_ipi_to_many(kvm, vector, NULL, 0);
	else
		kvm_hv_send_ipi_to_many(kvm, vector, sparse_banks, valid_bank_mask);

ret_success:
	return HV_STATUS_SUCCESS;
}
2247214ff83dSVitaly Kuznetsov 
/*
 * Record whether Hyper-V is enabled for the vCPU and (re)populate the
 * cache of Hyper-V CPUID leaves (features, enlightenments, syndbg and
 * nested features) in 'hv_vcpu->cpuid_cache'.
 */
void kvm_hv_set_cpuid(struct kvm_vcpu *vcpu, bool hyperv_enabled)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	struct kvm_cpuid_entry2 *entry;

	vcpu->arch.hyperv_enabled = hyperv_enabled;

	if (!hv_vcpu) {
		/*
		 * KVM should have already allocated kvm_vcpu_hv if Hyper-V is
		 * enabled in CPUID.
		 */
		WARN_ON_ONCE(vcpu->arch.hyperv_enabled);
		return;
	}

	/* Drop stale cached values before repopulating from CPUID. */
	memset(&hv_vcpu->cpuid_cache, 0, sizeof(hv_vcpu->cpuid_cache));

	if (!vcpu->arch.hyperv_enabled)
		return;

	entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_FEATURES);
	if (entry) {
		hv_vcpu->cpuid_cache.features_eax = entry->eax;
		hv_vcpu->cpuid_cache.features_ebx = entry->ebx;
		hv_vcpu->cpuid_cache.features_edx = entry->edx;
	}

	entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_ENLIGHTMENT_INFO);
	if (entry) {
		hv_vcpu->cpuid_cache.enlightenments_eax = entry->eax;
		hv_vcpu->cpuid_cache.enlightenments_ebx = entry->ebx;
	}

	entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES);
	if (entry)
		hv_vcpu->cpuid_cache.syndbg_cap_eax = entry->eax;

	entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_NESTED_FEATURES);
	if (entry) {
		hv_vcpu->cpuid_cache.nested_eax = entry->eax;
		hv_vcpu->cpuid_cache.nested_ebx = entry->ebx;
	}
}
22928f014550SVitaly Kuznetsov 
2293644f7067SVitaly Kuznetsov int kvm_hv_set_enforce_cpuid(struct kvm_vcpu *vcpu, bool enforce)
2294644f7067SVitaly Kuznetsov {
2295644f7067SVitaly Kuznetsov 	struct kvm_vcpu_hv *hv_vcpu;
2296644f7067SVitaly Kuznetsov 	int ret = 0;
2297644f7067SVitaly Kuznetsov 
2298644f7067SVitaly Kuznetsov 	if (!to_hv_vcpu(vcpu)) {
2299644f7067SVitaly Kuznetsov 		if (enforce) {
2300644f7067SVitaly Kuznetsov 			ret = kvm_hv_vcpu_init(vcpu);
2301644f7067SVitaly Kuznetsov 			if (ret)
2302644f7067SVitaly Kuznetsov 				return ret;
2303644f7067SVitaly Kuznetsov 		} else {
2304644f7067SVitaly Kuznetsov 			return 0;
2305644f7067SVitaly Kuznetsov 		}
2306644f7067SVitaly Kuznetsov 	}
2307644f7067SVitaly Kuznetsov 
2308644f7067SVitaly Kuznetsov 	hv_vcpu = to_hv_vcpu(vcpu);
2309644f7067SVitaly Kuznetsov 	hv_vcpu->enforce_cpuid = enforce;
2310644f7067SVitaly Kuznetsov 
2311644f7067SVitaly Kuznetsov 	return ret;
2312644f7067SVitaly Kuznetsov }
2313644f7067SVitaly Kuznetsov 
231483326e43SAndrey Smetanin static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
231583326e43SAndrey Smetanin {
231683326e43SAndrey Smetanin 	bool longmode;
231783326e43SAndrey Smetanin 
2318b5aead00STom Lendacky 	longmode = is_64_bit_hypercall(vcpu);
231983326e43SAndrey Smetanin 	if (longmode)
2320de3cd117SSean Christopherson 		kvm_rax_write(vcpu, result);
232183326e43SAndrey Smetanin 	else {
2322de3cd117SSean Christopherson 		kvm_rdx_write(vcpu, result >> 32);
2323de3cd117SSean Christopherson 		kvm_rax_write(vcpu, result & 0xffffffff);
232483326e43SAndrey Smetanin 	}
232583326e43SAndrey Smetanin }
232683326e43SAndrey Smetanin 
2327696ca779SRadim Krčmář static int kvm_hv_hypercall_complete(struct kvm_vcpu *vcpu, u64 result)
2328696ca779SRadim Krčmář {
2329c58a318fSVitaly Kuznetsov 	u32 tlb_lock_count = 0;
2330c58a318fSVitaly Kuznetsov 	int ret;
2331c58a318fSVitaly Kuznetsov 
2332c58a318fSVitaly Kuznetsov 	if (hv_result_success(result) && is_guest_mode(vcpu) &&
2333c58a318fSVitaly Kuznetsov 	    kvm_hv_is_tlb_flush_hcall(vcpu) &&
2334c58a318fSVitaly Kuznetsov 	    kvm_read_guest(vcpu->kvm, to_hv_vcpu(vcpu)->nested.pa_page_gpa,
2335c58a318fSVitaly Kuznetsov 			   &tlb_lock_count, sizeof(tlb_lock_count)))
2336c58a318fSVitaly Kuznetsov 		result = HV_STATUS_INVALID_HYPERCALL_INPUT;
2337c58a318fSVitaly Kuznetsov 
2338f5714bbbSVitaly Kuznetsov 	trace_kvm_hv_hypercall_done(result);
2339696ca779SRadim Krčmář 	kvm_hv_hypercall_set_result(vcpu, result);
2340696ca779SRadim Krčmář 	++vcpu->stat.hypercalls;
2341c58a318fSVitaly Kuznetsov 
2342c58a318fSVitaly Kuznetsov 	ret = kvm_skip_emulated_instruction(vcpu);
2343c58a318fSVitaly Kuznetsov 
2344c58a318fSVitaly Kuznetsov 	if (tlb_lock_count)
2345c58a318fSVitaly Kuznetsov 		kvm_x86_ops.nested_ops->hv_inject_synthetic_vmexit_post_tlb_flush(vcpu);
2346c58a318fSVitaly Kuznetsov 
2347c58a318fSVitaly Kuznetsov 	return ret;
2348696ca779SRadim Krčmář }
2349696ca779SRadim Krčmář 
235083326e43SAndrey Smetanin static int kvm_hv_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
235183326e43SAndrey Smetanin {
2352696ca779SRadim Krčmář 	return kvm_hv_hypercall_complete(vcpu, vcpu->run->hyperv.u.hcall.result);
235383326e43SAndrey Smetanin }
235483326e43SAndrey Smetanin 
2355bd38b320SSiddharth Chandrasekaran static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
2356faeb7833SRoman Kagan {
235705f04ae4SVitaly Kuznetsov 	struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
2358faeb7833SRoman Kagan 	struct eventfd_ctx *eventfd;
2359faeb7833SRoman Kagan 
2360bd38b320SSiddharth Chandrasekaran 	if (unlikely(!hc->fast)) {
2361faeb7833SRoman Kagan 		int ret;
2362bd38b320SSiddharth Chandrasekaran 		gpa_t gpa = hc->ingpa;
2363faeb7833SRoman Kagan 
2364bd38b320SSiddharth Chandrasekaran 		if ((gpa & (__alignof__(hc->ingpa) - 1)) ||
2365bd38b320SSiddharth Chandrasekaran 		    offset_in_page(gpa) + sizeof(hc->ingpa) > PAGE_SIZE)
2366faeb7833SRoman Kagan 			return HV_STATUS_INVALID_ALIGNMENT;
2367faeb7833SRoman Kagan 
2368bd38b320SSiddharth Chandrasekaran 		ret = kvm_vcpu_read_guest(vcpu, gpa,
2369bd38b320SSiddharth Chandrasekaran 					  &hc->ingpa, sizeof(hc->ingpa));
2370faeb7833SRoman Kagan 		if (ret < 0)
2371faeb7833SRoman Kagan 			return HV_STATUS_INVALID_ALIGNMENT;
2372faeb7833SRoman Kagan 	}
2373faeb7833SRoman Kagan 
2374faeb7833SRoman Kagan 	/*
2375faeb7833SRoman Kagan 	 * Per spec, bits 32-47 contain the extra "flag number".  However, we
2376faeb7833SRoman Kagan 	 * have no use for it, and in all known usecases it is zero, so just
2377faeb7833SRoman Kagan 	 * report lookup failure if it isn't.
2378faeb7833SRoman Kagan 	 */
2379bd38b320SSiddharth Chandrasekaran 	if (hc->ingpa & 0xffff00000000ULL)
2380faeb7833SRoman Kagan 		return HV_STATUS_INVALID_PORT_ID;
2381faeb7833SRoman Kagan 	/* remaining bits are reserved-zero */
2382bd38b320SSiddharth Chandrasekaran 	if (hc->ingpa & ~KVM_HYPERV_CONN_ID_MASK)
2383faeb7833SRoman Kagan 		return HV_STATUS_INVALID_HYPERCALL_INPUT;
2384faeb7833SRoman Kagan 
2385452a68d0SPaolo Bonzini 	/* the eventfd is protected by vcpu->kvm->srcu, but conn_to_evt isn't */
2386452a68d0SPaolo Bonzini 	rcu_read_lock();
2387bd38b320SSiddharth Chandrasekaran 	eventfd = idr_find(&hv->conn_to_evt, hc->ingpa);
2388452a68d0SPaolo Bonzini 	rcu_read_unlock();
2389faeb7833SRoman Kagan 	if (!eventfd)
2390faeb7833SRoman Kagan 		return HV_STATUS_INVALID_PORT_ID;
2391faeb7833SRoman Kagan 
2392faeb7833SRoman Kagan 	eventfd_signal(eventfd, 1);
2393faeb7833SRoman Kagan 	return HV_STATUS_SUCCESS;
2394faeb7833SRoman Kagan }
2395faeb7833SRoman Kagan 
23965974565bSSiddharth Chandrasekaran static bool is_xmm_fast_hypercall(struct kvm_hv_hcall *hc)
23975974565bSSiddharth Chandrasekaran {
23985974565bSSiddharth Chandrasekaran 	switch (hc->code) {
23995974565bSSiddharth Chandrasekaran 	case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST:
24005974565bSSiddharth Chandrasekaran 	case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE:
24015974565bSSiddharth Chandrasekaran 	case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX:
24025974565bSSiddharth Chandrasekaran 	case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX:
240347d3e5cdSVitaly Kuznetsov 	case HVCALL_SEND_IPI_EX:
24045974565bSSiddharth Chandrasekaran 		return true;
24055974565bSSiddharth Chandrasekaran 	}
24065974565bSSiddharth Chandrasekaran 
24075974565bSSiddharth Chandrasekaran 	return false;
24085974565bSSiddharth Chandrasekaran }
24095974565bSSiddharth Chandrasekaran 
24105974565bSSiddharth Chandrasekaran static void kvm_hv_hypercall_read_xmm(struct kvm_hv_hcall *hc)
24115974565bSSiddharth Chandrasekaran {
24125974565bSSiddharth Chandrasekaran 	int reg;
24135974565bSSiddharth Chandrasekaran 
24145974565bSSiddharth Chandrasekaran 	kvm_fpu_get();
24155974565bSSiddharth Chandrasekaran 	for (reg = 0; reg < HV_HYPERCALL_MAX_XMM_REGISTERS; reg++)
24165974565bSSiddharth Chandrasekaran 		_kvm_read_sse_reg(reg, &hc->xmm[reg]);
24175974565bSSiddharth Chandrasekaran 	kvm_fpu_put();
24185974565bSSiddharth Chandrasekaran }
24195974565bSSiddharth Chandrasekaran 
24204ad81a91SVitaly Kuznetsov static bool hv_check_hypercall_access(struct kvm_vcpu_hv *hv_vcpu, u16 code)
24214ad81a91SVitaly Kuznetsov {
242234ef7d7bSVitaly Kuznetsov 	if (!hv_vcpu->enforce_cpuid)
242334ef7d7bSVitaly Kuznetsov 		return true;
242434ef7d7bSVitaly Kuznetsov 
242534ef7d7bSVitaly Kuznetsov 	switch (code) {
242634ef7d7bSVitaly Kuznetsov 	case HVCALL_NOTIFY_LONG_SPIN_WAIT:
242734ef7d7bSVitaly Kuznetsov 		return hv_vcpu->cpuid_cache.enlightenments_ebx &&
242834ef7d7bSVitaly Kuznetsov 			hv_vcpu->cpuid_cache.enlightenments_ebx != U32_MAX;
24294f532b7fSVitaly Kuznetsov 	case HVCALL_POST_MESSAGE:
24304f532b7fSVitaly Kuznetsov 		return hv_vcpu->cpuid_cache.features_ebx & HV_POST_MESSAGES;
2431a60b3c59SVitaly Kuznetsov 	case HVCALL_SIGNAL_EVENT:
2432a60b3c59SVitaly Kuznetsov 		return hv_vcpu->cpuid_cache.features_ebx & HV_SIGNAL_EVENTS;
2433a921cf83SVitaly Kuznetsov 	case HVCALL_POST_DEBUG_DATA:
2434a921cf83SVitaly Kuznetsov 	case HVCALL_RETRIEVE_DEBUG_DATA:
2435a921cf83SVitaly Kuznetsov 	case HVCALL_RESET_DEBUG_SESSION:
2436a921cf83SVitaly Kuznetsov 		/*
2437a921cf83SVitaly Kuznetsov 		 * Return 'true' when SynDBG is disabled so the resulting code
2438a921cf83SVitaly Kuznetsov 		 * will be HV_STATUS_INVALID_HYPERCALL_CODE.
2439a921cf83SVitaly Kuznetsov 		 */
2440a921cf83SVitaly Kuznetsov 		return !kvm_hv_is_syndbg_enabled(hv_vcpu->vcpu) ||
2441a921cf83SVitaly Kuznetsov 			hv_vcpu->cpuid_cache.features_ebx & HV_DEBUGGING;
2442bb53ecb4SVitaly Kuznetsov 	case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX:
2443bb53ecb4SVitaly Kuznetsov 	case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX:
2444445caed0SVitaly Kuznetsov 		if (!(hv_vcpu->cpuid_cache.enlightenments_eax &
2445445caed0SVitaly Kuznetsov 		      HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
2446445caed0SVitaly Kuznetsov 			return false;
2447445caed0SVitaly Kuznetsov 		fallthrough;
2448bb53ecb4SVitaly Kuznetsov 	case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST:
2449bb53ecb4SVitaly Kuznetsov 	case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE:
2450bb53ecb4SVitaly Kuznetsov 		return hv_vcpu->cpuid_cache.enlightenments_eax &
2451bb53ecb4SVitaly Kuznetsov 			HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED;
2452d264eb3cSVitaly Kuznetsov 	case HVCALL_SEND_IPI_EX:
2453445caed0SVitaly Kuznetsov 		if (!(hv_vcpu->cpuid_cache.enlightenments_eax &
2454445caed0SVitaly Kuznetsov 		      HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
2455445caed0SVitaly Kuznetsov 			return false;
2456445caed0SVitaly Kuznetsov 		fallthrough;
2457d264eb3cSVitaly Kuznetsov 	case HVCALL_SEND_IPI:
2458d264eb3cSVitaly Kuznetsov 		return hv_vcpu->cpuid_cache.enlightenments_eax &
2459d264eb3cSVitaly Kuznetsov 			HV_X64_CLUSTER_IPI_RECOMMENDED;
2460*db9cf24cSVipin Sharma 	case HV_EXT_CALL_QUERY_CAPABILITIES ... HV_EXT_CALL_MAX:
2461*db9cf24cSVipin Sharma 		return hv_vcpu->cpuid_cache.features_ebx &
2462*db9cf24cSVipin Sharma 			HV_ENABLE_EXTENDED_HYPERCALLS;
246334ef7d7bSVitaly Kuznetsov 	default:
246434ef7d7bSVitaly Kuznetsov 		break;
246534ef7d7bSVitaly Kuznetsov 	}
246634ef7d7bSVitaly Kuznetsov 
24674ad81a91SVitaly Kuznetsov 	return true;
24684ad81a91SVitaly Kuznetsov }
24694ad81a91SVitaly Kuznetsov 
2470e83d5887SAndrey Smetanin int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
2471e83d5887SAndrey Smetanin {
24724e62aa96SVitaly Kuznetsov 	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
2473bd38b320SSiddharth Chandrasekaran 	struct kvm_hv_hcall hc;
2474bd38b320SSiddharth Chandrasekaran 	u64 ret = HV_STATUS_SUCCESS;
2475e83d5887SAndrey Smetanin 
2476e83d5887SAndrey Smetanin 	/*
2477e83d5887SAndrey Smetanin 	 * hypercall generates UD from non zero cpl and real mode
2478e83d5887SAndrey Smetanin 	 * per HYPER-V spec
2479e83d5887SAndrey Smetanin 	 */
2480b3646477SJason Baron 	if (static_call(kvm_x86_get_cpl)(vcpu) != 0 || !is_protmode(vcpu)) {
2481e83d5887SAndrey Smetanin 		kvm_queue_exception(vcpu, UD_VECTOR);
24820d9c055eSAndrey Smetanin 		return 1;
2483e83d5887SAndrey Smetanin 	}
2484e83d5887SAndrey Smetanin 
2485f4e4805eSArnd Bergmann #ifdef CONFIG_X86_64
2486b5aead00STom Lendacky 	if (is_64_bit_hypercall(vcpu)) {
2487bd38b320SSiddharth Chandrasekaran 		hc.param = kvm_rcx_read(vcpu);
2488bd38b320SSiddharth Chandrasekaran 		hc.ingpa = kvm_rdx_read(vcpu);
2489bd38b320SSiddharth Chandrasekaran 		hc.outgpa = kvm_r8_read(vcpu);
2490f4e4805eSArnd Bergmann 	} else
2491f4e4805eSArnd Bergmann #endif
2492f4e4805eSArnd Bergmann 	{
2493bd38b320SSiddharth Chandrasekaran 		hc.param = ((u64)kvm_rdx_read(vcpu) << 32) |
2494de3cd117SSean Christopherson 			    (kvm_rax_read(vcpu) & 0xffffffff);
2495bd38b320SSiddharth Chandrasekaran 		hc.ingpa = ((u64)kvm_rbx_read(vcpu) << 32) |
2496de3cd117SSean Christopherson 			    (kvm_rcx_read(vcpu) & 0xffffffff);
2497bd38b320SSiddharth Chandrasekaran 		hc.outgpa = ((u64)kvm_rdi_read(vcpu) << 32) |
2498de3cd117SSean Christopherson 			     (kvm_rsi_read(vcpu) & 0xffffffff);
2499e83d5887SAndrey Smetanin 	}
2500e83d5887SAndrey Smetanin 
2501bd38b320SSiddharth Chandrasekaran 	hc.code = hc.param & 0xffff;
2502bd1ba573SSean Christopherson 	hc.var_cnt = (hc.param & HV_HYPERCALL_VARHEAD_MASK) >> HV_HYPERCALL_VARHEAD_OFFSET;
2503bd38b320SSiddharth Chandrasekaran 	hc.fast = !!(hc.param & HV_HYPERCALL_FAST_BIT);
2504bd38b320SSiddharth Chandrasekaran 	hc.rep_cnt = (hc.param >> HV_HYPERCALL_REP_COMP_OFFSET) & 0xfff;
2505bd38b320SSiddharth Chandrasekaran 	hc.rep_idx = (hc.param >> HV_HYPERCALL_REP_START_OFFSET) & 0xfff;
2506bd38b320SSiddharth Chandrasekaran 	hc.rep = !!(hc.rep_cnt || hc.rep_idx);
2507e83d5887SAndrey Smetanin 
2508bd1ba573SSean Christopherson 	trace_kvm_hv_hypercall(hc.code, hc.fast, hc.var_cnt, hc.rep_cnt,
2509bd1ba573SSean Christopherson 			       hc.rep_idx, hc.ingpa, hc.outgpa);
2510e83d5887SAndrey Smetanin 
25114e62aa96SVitaly Kuznetsov 	if (unlikely(!hv_check_hypercall_access(hv_vcpu, hc.code))) {
25124ad81a91SVitaly Kuznetsov 		ret = HV_STATUS_ACCESS_DENIED;
25134ad81a91SVitaly Kuznetsov 		goto hypercall_complete;
25144ad81a91SVitaly Kuznetsov 	}
25154ad81a91SVitaly Kuznetsov 
2516413af660SSean Christopherson 	if (unlikely(hc.param & HV_HYPERCALL_RSVD_MASK)) {
2517413af660SSean Christopherson 		ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
2518413af660SSean Christopherson 		goto hypercall_complete;
2519413af660SSean Christopherson 	}
2520413af660SSean Christopherson 
25214e62aa96SVitaly Kuznetsov 	if (hc.fast && is_xmm_fast_hypercall(&hc)) {
25224e62aa96SVitaly Kuznetsov 		if (unlikely(hv_vcpu->enforce_cpuid &&
25234e62aa96SVitaly Kuznetsov 			     !(hv_vcpu->cpuid_cache.features_edx &
25244e62aa96SVitaly Kuznetsov 			       HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE))) {
25254e62aa96SVitaly Kuznetsov 			kvm_queue_exception(vcpu, UD_VECTOR);
25264e62aa96SVitaly Kuznetsov 			return 1;
25274e62aa96SVitaly Kuznetsov 		}
25284e62aa96SVitaly Kuznetsov 
25292e2f1e8dSVitaly Kuznetsov 		kvm_hv_hypercall_read_xmm(&hc);
25304e62aa96SVitaly Kuznetsov 	}
25312e2f1e8dSVitaly Kuznetsov 
2532bd38b320SSiddharth Chandrasekaran 	switch (hc.code) {
25338ed6d767SAndrey Smetanin 	case HVCALL_NOTIFY_LONG_SPIN_WAIT:
253440421f38SSean Christopherson 		if (unlikely(hc.rep || hc.var_cnt)) {
253556b9ae78SVitaly Kuznetsov 			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
253656b9ae78SVitaly Kuznetsov 			break;
253756b9ae78SVitaly Kuznetsov 		}
2538de63ad4cSLongpeng(Mike) 		kvm_vcpu_on_spin(vcpu, true);
2539e83d5887SAndrey Smetanin 		break;
254083326e43SAndrey Smetanin 	case HVCALL_SIGNAL_EVENT:
254140421f38SSean Christopherson 		if (unlikely(hc.rep || hc.var_cnt)) {
254256b9ae78SVitaly Kuznetsov 			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
254356b9ae78SVitaly Kuznetsov 			break;
254456b9ae78SVitaly Kuznetsov 		}
2545bd38b320SSiddharth Chandrasekaran 		ret = kvm_hvcall_signal_event(vcpu, &hc);
2546d32ef547SDan Carpenter 		if (ret != HV_STATUS_INVALID_PORT_ID)
2547faeb7833SRoman Kagan 			break;
2548df561f66SGustavo A. R. Silva 		fallthrough;	/* maybe userspace knows this conn_id */
2549faeb7833SRoman Kagan 	case HVCALL_POST_MESSAGE:
2550a2b5c3c0SPaolo Bonzini 		/* don't bother userspace if it has no way to handle it */
255140421f38SSean Christopherson 		if (unlikely(hc.rep || hc.var_cnt || !to_hv_synic(vcpu)->active)) {
255256b9ae78SVitaly Kuznetsov 			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
2553a2b5c3c0SPaolo Bonzini 			break;
2554a2b5c3c0SPaolo Bonzini 		}
25551a9df326SVipin Sharma 		goto hypercall_userspace_exit;
2556e2f11f42SVitaly Kuznetsov 	case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST:
2557c0f1eaebSPaolo Bonzini 		if (unlikely(hc.var_cnt)) {
2558e2f11f42SVitaly Kuznetsov 			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
2559e2f11f42SVitaly Kuznetsov 			break;
2560e2f11f42SVitaly Kuznetsov 		}
2561c0f1eaebSPaolo Bonzini 		fallthrough;
2562c7012676SVitaly Kuznetsov 	case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX:
25635974565bSSiddharth Chandrasekaran 		if (unlikely(!hc.rep_cnt || hc.rep_idx)) {
2564c7012676SVitaly Kuznetsov 			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
2565c7012676SVitaly Kuznetsov 			break;
2566c7012676SVitaly Kuznetsov 		}
256782c1ead0SVitaly Kuznetsov 		ret = kvm_hv_flush_tlb(vcpu, &hc);
2568c7012676SVitaly Kuznetsov 		break;
256982c1ead0SVitaly Kuznetsov 	case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE:
2570c0f1eaebSPaolo Bonzini 		if (unlikely(hc.var_cnt)) {
2571c0f1eaebSPaolo Bonzini 			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
2572c0f1eaebSPaolo Bonzini 			break;
2573c0f1eaebSPaolo Bonzini 		}
2574c0f1eaebSPaolo Bonzini 		fallthrough;
2575c7012676SVitaly Kuznetsov 	case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX:
25765974565bSSiddharth Chandrasekaran 		if (unlikely(hc.rep)) {
2577c7012676SVitaly Kuznetsov 			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
2578c7012676SVitaly Kuznetsov 			break;
2579c7012676SVitaly Kuznetsov 		}
258082c1ead0SVitaly Kuznetsov 		ret = kvm_hv_flush_tlb(vcpu, &hc);
2581e2f11f42SVitaly Kuznetsov 		break;
2582214ff83dSVitaly Kuznetsov 	case HVCALL_SEND_IPI:
2583c0f1eaebSPaolo Bonzini 		if (unlikely(hc.var_cnt)) {
2584214ff83dSVitaly Kuznetsov 			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
2585214ff83dSVitaly Kuznetsov 			break;
2586214ff83dSVitaly Kuznetsov 		}
2587c0f1eaebSPaolo Bonzini 		fallthrough;
2588214ff83dSVitaly Kuznetsov 	case HVCALL_SEND_IPI_EX:
258947d3e5cdSVitaly Kuznetsov 		if (unlikely(hc.rep)) {
2590214ff83dSVitaly Kuznetsov 			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
2591214ff83dSVitaly Kuznetsov 			break;
2592214ff83dSVitaly Kuznetsov 		}
259350e523ddSVitaly Kuznetsov 		ret = kvm_hv_send_ipi(vcpu, &hc);
2594214ff83dSVitaly Kuznetsov 		break;
2595b187038bSJon Doron 	case HVCALL_POST_DEBUG_DATA:
2596b187038bSJon Doron 	case HVCALL_RETRIEVE_DEBUG_DATA:
2597bd38b320SSiddharth Chandrasekaran 		if (unlikely(hc.fast)) {
2598b187038bSJon Doron 			ret = HV_STATUS_INVALID_PARAMETER;
2599b187038bSJon Doron 			break;
2600b187038bSJon Doron 		}
2601b187038bSJon Doron 		fallthrough;
2602b187038bSJon Doron 	case HVCALL_RESET_DEBUG_SESSION: {
2603f69b55efSVitaly Kuznetsov 		struct kvm_hv_syndbg *syndbg = to_hv_syndbg(vcpu);
2604b187038bSJon Doron 
2605b187038bSJon Doron 		if (!kvm_hv_is_syndbg_enabled(vcpu)) {
2606b187038bSJon Doron 			ret = HV_STATUS_INVALID_HYPERCALL_CODE;
2607b187038bSJon Doron 			break;
2608b187038bSJon Doron 		}
2609b187038bSJon Doron 
2610b187038bSJon Doron 		if (!(syndbg->options & HV_X64_SYNDBG_OPTION_USE_HCALLS)) {
2611b187038bSJon Doron 			ret = HV_STATUS_OPERATION_DENIED;
2612b187038bSJon Doron 			break;
2613b187038bSJon Doron 		}
26141a9df326SVipin Sharma 		goto hypercall_userspace_exit;
2615b187038bSJon Doron 	}
2616*db9cf24cSVipin Sharma 	case HV_EXT_CALL_QUERY_CAPABILITIES ... HV_EXT_CALL_MAX:
2617*db9cf24cSVipin Sharma 		if (unlikely(hc.fast)) {
2618*db9cf24cSVipin Sharma 			ret = HV_STATUS_INVALID_PARAMETER;
2619*db9cf24cSVipin Sharma 			break;
2620*db9cf24cSVipin Sharma 		}
2621*db9cf24cSVipin Sharma 		goto hypercall_userspace_exit;
2622e83d5887SAndrey Smetanin 	default:
2623d32ef547SDan Carpenter 		ret = HV_STATUS_INVALID_HYPERCALL_CODE;
2624e83d5887SAndrey Smetanin 		break;
2625e83d5887SAndrey Smetanin 	}
2626e83d5887SAndrey Smetanin 
26274ad81a91SVitaly Kuznetsov hypercall_complete:
2628696ca779SRadim Krčmář 	return kvm_hv_hypercall_complete(vcpu, ret);
26291a9df326SVipin Sharma 
26301a9df326SVipin Sharma hypercall_userspace_exit:
26311a9df326SVipin Sharma 	vcpu->run->exit_reason = KVM_EXIT_HYPERV;
26321a9df326SVipin Sharma 	vcpu->run->hyperv.type = KVM_EXIT_HYPERV_HCALL;
26331a9df326SVipin Sharma 	vcpu->run->hyperv.u.hcall.input = hc.param;
26341a9df326SVipin Sharma 	vcpu->run->hyperv.u.hcall.params[0] = hc.ingpa;
26351a9df326SVipin Sharma 	vcpu->run->hyperv.u.hcall.params[1] = hc.outgpa;
26361a9df326SVipin Sharma 	vcpu->arch.complete_userspace_io = kvm_hv_hypercall_complete_userspace;
26371a9df326SVipin Sharma 	return 0;
2638e83d5887SAndrey Smetanin }
2639cbc0236aSRoman Kagan 
2640cbc0236aSRoman Kagan void kvm_hv_init_vm(struct kvm *kvm)
2641cbc0236aSRoman Kagan {
264205f04ae4SVitaly Kuznetsov 	struct kvm_hv *hv = to_kvm_hv(kvm);
264305f04ae4SVitaly Kuznetsov 
264405f04ae4SVitaly Kuznetsov 	mutex_init(&hv->hv_lock);
264505f04ae4SVitaly Kuznetsov 	idr_init(&hv->conn_to_evt);
2646cbc0236aSRoman Kagan }
2647cbc0236aSRoman Kagan 
2648cbc0236aSRoman Kagan void kvm_hv_destroy_vm(struct kvm *kvm)
2649cbc0236aSRoman Kagan {
265005f04ae4SVitaly Kuznetsov 	struct kvm_hv *hv = to_kvm_hv(kvm);
2651faeb7833SRoman Kagan 	struct eventfd_ctx *eventfd;
2652faeb7833SRoman Kagan 	int i;
2653faeb7833SRoman Kagan 
265405f04ae4SVitaly Kuznetsov 	idr_for_each_entry(&hv->conn_to_evt, eventfd, i)
2655faeb7833SRoman Kagan 		eventfd_ctx_put(eventfd);
265605f04ae4SVitaly Kuznetsov 	idr_destroy(&hv->conn_to_evt);
2657faeb7833SRoman Kagan }
2658faeb7833SRoman Kagan 
2659faeb7833SRoman Kagan static int kvm_hv_eventfd_assign(struct kvm *kvm, u32 conn_id, int fd)
2660faeb7833SRoman Kagan {
266105f04ae4SVitaly Kuznetsov 	struct kvm_hv *hv = to_kvm_hv(kvm);
2662faeb7833SRoman Kagan 	struct eventfd_ctx *eventfd;
2663faeb7833SRoman Kagan 	int ret;
2664faeb7833SRoman Kagan 
2665faeb7833SRoman Kagan 	eventfd = eventfd_ctx_fdget(fd);
2666faeb7833SRoman Kagan 	if (IS_ERR(eventfd))
2667faeb7833SRoman Kagan 		return PTR_ERR(eventfd);
2668faeb7833SRoman Kagan 
2669faeb7833SRoman Kagan 	mutex_lock(&hv->hv_lock);
2670faeb7833SRoman Kagan 	ret = idr_alloc(&hv->conn_to_evt, eventfd, conn_id, conn_id + 1,
2671254272ceSBen Gardon 			GFP_KERNEL_ACCOUNT);
2672faeb7833SRoman Kagan 	mutex_unlock(&hv->hv_lock);
2673faeb7833SRoman Kagan 
2674faeb7833SRoman Kagan 	if (ret >= 0)
2675faeb7833SRoman Kagan 		return 0;
2676faeb7833SRoman Kagan 
2677faeb7833SRoman Kagan 	if (ret == -ENOSPC)
2678faeb7833SRoman Kagan 		ret = -EEXIST;
2679faeb7833SRoman Kagan 	eventfd_ctx_put(eventfd);
2680faeb7833SRoman Kagan 	return ret;
2681faeb7833SRoman Kagan }
2682faeb7833SRoman Kagan 
2683faeb7833SRoman Kagan static int kvm_hv_eventfd_deassign(struct kvm *kvm, u32 conn_id)
2684faeb7833SRoman Kagan {
268505f04ae4SVitaly Kuznetsov 	struct kvm_hv *hv = to_kvm_hv(kvm);
2686faeb7833SRoman Kagan 	struct eventfd_ctx *eventfd;
2687faeb7833SRoman Kagan 
2688faeb7833SRoman Kagan 	mutex_lock(&hv->hv_lock);
2689faeb7833SRoman Kagan 	eventfd = idr_remove(&hv->conn_to_evt, conn_id);
2690faeb7833SRoman Kagan 	mutex_unlock(&hv->hv_lock);
2691faeb7833SRoman Kagan 
2692faeb7833SRoman Kagan 	if (!eventfd)
2693faeb7833SRoman Kagan 		return -ENOENT;
2694faeb7833SRoman Kagan 
2695faeb7833SRoman Kagan 	synchronize_srcu(&kvm->srcu);
2696faeb7833SRoman Kagan 	eventfd_ctx_put(eventfd);
2697faeb7833SRoman Kagan 	return 0;
2698faeb7833SRoman Kagan }
2699faeb7833SRoman Kagan 
2700faeb7833SRoman Kagan int kvm_vm_ioctl_hv_eventfd(struct kvm *kvm, struct kvm_hyperv_eventfd *args)
2701faeb7833SRoman Kagan {
2702faeb7833SRoman Kagan 	if ((args->flags & ~KVM_HYPERV_EVENTFD_DEASSIGN) ||
2703faeb7833SRoman Kagan 	    (args->conn_id & ~KVM_HYPERV_CONN_ID_MASK))
2704faeb7833SRoman Kagan 		return -EINVAL;
2705faeb7833SRoman Kagan 
2706faeb7833SRoman Kagan 	if (args->flags == KVM_HYPERV_EVENTFD_DEASSIGN)
2707faeb7833SRoman Kagan 		return kvm_hv_eventfd_deassign(kvm, args->conn_id);
2708faeb7833SRoman Kagan 	return kvm_hv_eventfd_assign(kvm, args->conn_id, args->fd);
2709cbc0236aSRoman Kagan }
27102bc39970SVitaly Kuznetsov 
2711c21d54f0SVitaly Kuznetsov int kvm_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
27122bc39970SVitaly Kuznetsov 		     struct kvm_cpuid_entry2 __user *entries)
27132bc39970SVitaly Kuznetsov {
2714ea152987SVitaly Kuznetsov 	uint16_t evmcs_ver = 0;
27152bc39970SVitaly Kuznetsov 	struct kvm_cpuid_entry2 cpuid_entries[] = {
27162bc39970SVitaly Kuznetsov 		{ .function = HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS },
27172bc39970SVitaly Kuznetsov 		{ .function = HYPERV_CPUID_INTERFACE },
27182bc39970SVitaly Kuznetsov 		{ .function = HYPERV_CPUID_VERSION },
27192bc39970SVitaly Kuznetsov 		{ .function = HYPERV_CPUID_FEATURES },
27202bc39970SVitaly Kuznetsov 		{ .function = HYPERV_CPUID_ENLIGHTMENT_INFO },
27212bc39970SVitaly Kuznetsov 		{ .function = HYPERV_CPUID_IMPLEMENT_LIMITS },
2722f97f5a56SJon Doron 		{ .function = HYPERV_CPUID_SYNDBG_VENDOR_AND_MAX_FUNCTIONS },
2723f97f5a56SJon Doron 		{ .function = HYPERV_CPUID_SYNDBG_INTERFACE },
2724f97f5a56SJon Doron 		{ .function = HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES	},
27252bc39970SVitaly Kuznetsov 		{ .function = HYPERV_CPUID_NESTED_FEATURES },
27262bc39970SVitaly Kuznetsov 	};
27272bc39970SVitaly Kuznetsov 	int i, nent = ARRAY_SIZE(cpuid_entries);
27282bc39970SVitaly Kuznetsov 
272933b22172SPaolo Bonzini 	if (kvm_x86_ops.nested_ops->get_evmcs_version)
273033b22172SPaolo Bonzini 		evmcs_ver = kvm_x86_ops.nested_ops->get_evmcs_version(vcpu);
2731ea152987SVitaly Kuznetsov 
27322bc39970SVitaly Kuznetsov 	if (cpuid->nent < nent)
27332bc39970SVitaly Kuznetsov 		return -E2BIG;
27342bc39970SVitaly Kuznetsov 
27352bc39970SVitaly Kuznetsov 	if (cpuid->nent > nent)
27362bc39970SVitaly Kuznetsov 		cpuid->nent = nent;
27372bc39970SVitaly Kuznetsov 
27382bc39970SVitaly Kuznetsov 	for (i = 0; i < nent; i++) {
27392bc39970SVitaly Kuznetsov 		struct kvm_cpuid_entry2 *ent = &cpuid_entries[i];
27402bc39970SVitaly Kuznetsov 		u32 signature[3];
27412bc39970SVitaly Kuznetsov 
27422bc39970SVitaly Kuznetsov 		switch (ent->function) {
27432bc39970SVitaly Kuznetsov 		case HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS:
27442bc39970SVitaly Kuznetsov 			memcpy(signature, "Linux KVM Hv", 12);
27452bc39970SVitaly Kuznetsov 
2746f97f5a56SJon Doron 			ent->eax = HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES;
27472bc39970SVitaly Kuznetsov 			ent->ebx = signature[0];
27482bc39970SVitaly Kuznetsov 			ent->ecx = signature[1];
27492bc39970SVitaly Kuznetsov 			ent->edx = signature[2];
27502bc39970SVitaly Kuznetsov 			break;
27512bc39970SVitaly Kuznetsov 
27522bc39970SVitaly Kuznetsov 		case HYPERV_CPUID_INTERFACE:
27538f014550SVitaly Kuznetsov 			ent->eax = HYPERV_CPUID_SIGNATURE_EAX;
27542bc39970SVitaly Kuznetsov 			break;
27552bc39970SVitaly Kuznetsov 
27562bc39970SVitaly Kuznetsov 		case HYPERV_CPUID_VERSION:
27572bc39970SVitaly Kuznetsov 			/*
27582bc39970SVitaly Kuznetsov 			 * We implement some Hyper-V 2016 functions so let's use
27592bc39970SVitaly Kuznetsov 			 * this version.
27602bc39970SVitaly Kuznetsov 			 */
27612bc39970SVitaly Kuznetsov 			ent->eax = 0x00003839;
27622bc39970SVitaly Kuznetsov 			ent->ebx = 0x000A0000;
27632bc39970SVitaly Kuznetsov 			break;
27642bc39970SVitaly Kuznetsov 
27652bc39970SVitaly Kuznetsov 		case HYPERV_CPUID_FEATURES:
2766dfc53baaSJoseph Salisbury 			ent->eax |= HV_MSR_VP_RUNTIME_AVAILABLE;
27672bc39970SVitaly Kuznetsov 			ent->eax |= HV_MSR_TIME_REF_COUNT_AVAILABLE;
2768dfc53baaSJoseph Salisbury 			ent->eax |= HV_MSR_SYNIC_AVAILABLE;
27692bc39970SVitaly Kuznetsov 			ent->eax |= HV_MSR_SYNTIMER_AVAILABLE;
2770dfc53baaSJoseph Salisbury 			ent->eax |= HV_MSR_APIC_ACCESS_AVAILABLE;
2771dfc53baaSJoseph Salisbury 			ent->eax |= HV_MSR_HYPERCALL_AVAILABLE;
2772dfc53baaSJoseph Salisbury 			ent->eax |= HV_MSR_VP_INDEX_AVAILABLE;
2773dfc53baaSJoseph Salisbury 			ent->eax |= HV_MSR_RESET_AVAILABLE;
27742bc39970SVitaly Kuznetsov 			ent->eax |= HV_MSR_REFERENCE_TSC_AVAILABLE;
2775dfc53baaSJoseph Salisbury 			ent->eax |= HV_ACCESS_FREQUENCY_MSRS;
2776dfc53baaSJoseph Salisbury 			ent->eax |= HV_ACCESS_REENLIGHTENMENT;
27772be1bd3aSVitaly Kuznetsov 			ent->eax |= HV_ACCESS_TSC_INVARIANT;
27782bc39970SVitaly Kuznetsov 
2779dfc53baaSJoseph Salisbury 			ent->ebx |= HV_POST_MESSAGES;
2780dfc53baaSJoseph Salisbury 			ent->ebx |= HV_SIGNAL_EVENTS;
2781*db9cf24cSVipin Sharma 			ent->ebx |= HV_ENABLE_EXTENDED_HYPERCALLS;
27822bc39970SVitaly Kuznetsov 
2783d8f5537aSSiddharth Chandrasekaran 			ent->edx |= HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE;
27842bc39970SVitaly Kuznetsov 			ent->edx |= HV_FEATURE_FREQUENCY_MSRS_AVAILABLE;
27852bc39970SVitaly Kuznetsov 			ent->edx |= HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE;
2786a073d7e3SWanpeng Li 
2787039aeb9dSLinus Torvalds 			ent->ebx |= HV_DEBUGGING;
2788f97f5a56SJon Doron 			ent->edx |= HV_X64_GUEST_DEBUGGING_AVAILABLE;
2789f97f5a56SJon Doron 			ent->edx |= HV_FEATURE_DEBUG_MSRS_AVAILABLE;
2790f84fcb66SVitaly Kuznetsov 			ent->edx |= HV_FEATURE_EXT_GVA_RANGES_FLUSH;
2791f97f5a56SJon Doron 
2792a073d7e3SWanpeng Li 			/*
2793a073d7e3SWanpeng Li 			 * Direct Synthetic timers only make sense with in-kernel
2794a073d7e3SWanpeng Li 			 * LAPIC
2795a073d7e3SWanpeng Li 			 */
2796c21d54f0SVitaly Kuznetsov 			if (!vcpu || lapic_in_kernel(vcpu))
27972bc39970SVitaly Kuznetsov 				ent->edx |= HV_STIMER_DIRECT_MODE_AVAILABLE;
27982bc39970SVitaly Kuznetsov 
27992bc39970SVitaly Kuznetsov 			break;
28002bc39970SVitaly Kuznetsov 
28012bc39970SVitaly Kuznetsov 		case HYPERV_CPUID_ENLIGHTMENT_INFO:
28022bc39970SVitaly Kuznetsov 			ent->eax |= HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED;
28032bc39970SVitaly Kuznetsov 			ent->eax |= HV_X64_APIC_ACCESS_RECOMMENDED;
28042bc39970SVitaly Kuznetsov 			ent->eax |= HV_X64_RELAXED_TIMING_RECOMMENDED;
28052bc39970SVitaly Kuznetsov 			ent->eax |= HV_X64_CLUSTER_IPI_RECOMMENDED;
28062bc39970SVitaly Kuznetsov 			ent->eax |= HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED;
2807f1adceafSVitaly Kuznetsov 			if (evmcs_ver)
28082bc39970SVitaly Kuznetsov 				ent->eax |= HV_X64_ENLIGHTENED_VMCS_RECOMMENDED;
2809b2d8b167SVitaly Kuznetsov 			if (!cpu_smt_possible())
2810b2d8b167SVitaly Kuznetsov 				ent->eax |= HV_X64_NO_NONARCH_CORESHARING;
28110f250a64SVitaly Kuznetsov 
28120f250a64SVitaly Kuznetsov 			ent->eax |= HV_DEPRECATING_AEOI_RECOMMENDED;
28132bc39970SVitaly Kuznetsov 			/*
28142bc39970SVitaly Kuznetsov 			 * Default number of spinlock retry attempts, matches
28152bc39970SVitaly Kuznetsov 			 * HyperV 2016.
28162bc39970SVitaly Kuznetsov 			 */
28172bc39970SVitaly Kuznetsov 			ent->ebx = 0x00000FFF;
28182bc39970SVitaly Kuznetsov 
28192bc39970SVitaly Kuznetsov 			break;
28202bc39970SVitaly Kuznetsov 
28212bc39970SVitaly Kuznetsov 		case HYPERV_CPUID_IMPLEMENT_LIMITS:
28222bc39970SVitaly Kuznetsov 			/* Maximum number of virtual processors */
28232bc39970SVitaly Kuznetsov 			ent->eax = KVM_MAX_VCPUS;
28242bc39970SVitaly Kuznetsov 			/*
28252bc39970SVitaly Kuznetsov 			 * Maximum number of logical processors, matches
28262bc39970SVitaly Kuznetsov 			 * HyperV 2016.
28272bc39970SVitaly Kuznetsov 			 */
28282bc39970SVitaly Kuznetsov 			ent->ebx = 64;
28292bc39970SVitaly Kuznetsov 
28302bc39970SVitaly Kuznetsov 			break;
28312bc39970SVitaly Kuznetsov 
28322bc39970SVitaly Kuznetsov 		case HYPERV_CPUID_NESTED_FEATURES:
28332bc39970SVitaly Kuznetsov 			ent->eax = evmcs_ver;
2834f4de6a1fSVitaly Kuznetsov 			ent->eax |= HV_X64_NESTED_DIRECT_FLUSH;
2835502d2bf5SVitaly Kuznetsov 			ent->eax |= HV_X64_NESTED_MSR_BITMAP;
28364da77090SVitaly Kuznetsov 			ent->ebx |= HV_X64_NESTED_EVMCS1_PERF_GLOBAL_CTRL;
28372bc39970SVitaly Kuznetsov 			break;
28382bc39970SVitaly Kuznetsov 
2839f97f5a56SJon Doron 		case HYPERV_CPUID_SYNDBG_VENDOR_AND_MAX_FUNCTIONS:
2840f97f5a56SJon Doron 			memcpy(signature, "Linux KVM Hv", 12);
2841f97f5a56SJon Doron 
2842f97f5a56SJon Doron 			ent->eax = 0;
2843f97f5a56SJon Doron 			ent->ebx = signature[0];
2844f97f5a56SJon Doron 			ent->ecx = signature[1];
2845f97f5a56SJon Doron 			ent->edx = signature[2];
2846f97f5a56SJon Doron 			break;
2847f97f5a56SJon Doron 
2848f97f5a56SJon Doron 		case HYPERV_CPUID_SYNDBG_INTERFACE:
2849f97f5a56SJon Doron 			memcpy(signature, "VS#1\0\0\0\0\0\0\0\0", 12);
2850f97f5a56SJon Doron 			ent->eax = signature[0];
2851f97f5a56SJon Doron 			break;
2852f97f5a56SJon Doron 
2853f97f5a56SJon Doron 		case HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES:
2854f97f5a56SJon Doron 			ent->eax |= HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING;
2855f97f5a56SJon Doron 			break;
2856f97f5a56SJon Doron 
28572bc39970SVitaly Kuznetsov 		default:
28582bc39970SVitaly Kuznetsov 			break;
28592bc39970SVitaly Kuznetsov 		}
28602bc39970SVitaly Kuznetsov 	}
28612bc39970SVitaly Kuznetsov 
28622bc39970SVitaly Kuznetsov 	if (copy_to_user(entries, cpuid_entries,
28632bc39970SVitaly Kuznetsov 			 nent * sizeof(struct kvm_cpuid_entry2)))
28642bc39970SVitaly Kuznetsov 		return -EFAULT;
28652bc39970SVitaly Kuznetsov 
28662bc39970SVitaly Kuznetsov 	return 0;
28672bc39970SVitaly Kuznetsov }
2868