xref: /openbmc/linux/arch/x86/kvm/hyperv.c (revision e83d58874ba1de74c13d3c6b05f95a023c860d25)
/*
 * KVM Microsoft Hyper-V emulation
 *
 * derived from arch/x86/kvm/x86.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 * Copyright (C) 2015 Andrey Smetanin <asmetanin@virtuozzo.com>
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Amit Shah    <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 *   Andrey Smetanin <asmetanin@virtuozzo.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "x86.h"
#include "lapic.h"
#include "hyperv.h"

#include <linux/kvm_host.h>
#include <trace/events/kvm.h>

#include "trace.h"

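/*
 * Hyper-V synthetic MSRs fall into two groups: "partition wide" MSRs,
 * backed by per-VM state in struct kvm_hv, and per-vcpu MSRs, backed by
 * struct kvm_vcpu_hv.  This predicate tells the common entry points
 * below which group a given MSR belongs to.
 */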
static bool kvm_hv_msr_partition_wide(u32 msr)
{
	bool r = false;

	switch (msr) {
	case HV_X64_MSR_GUEST_OS_ID:
	case HV_X64_MSR_HYPERCALL:
	case HV_X64_MSR_REFERENCE_TSC:
	case HV_X64_MSR_TIME_REF_COUNT:
		r = true;
		break;
	}

	return r;
}

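/*
 * Handle a write to a partition-wide MSR.  Called from
 * kvm_hv_set_msr_common() with kvm->lock held, because the state being
 * modified is shared by all vcpus of the VM.
 */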
static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_hv *hv = &kvm->arch.hyperv;

	switch (msr) {
	case HV_X64_MSR_GUEST_OS_ID:
		hv->hv_guest_os_id = data;
		/* setting guest os id to zero disables hypercall page */
		if (!hv->hv_guest_os_id)
			hv->hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
		break;
	case HV_X64_MSR_HYPERCALL: {
		u64 gfn;
		unsigned long addr;
		u8 instructions[4];

		/* if guest os id is not set hypercall should remain disabled */
		if (!hv->hv_guest_os_id)
			break;
		if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
			hv->hv_hypercall = data;
			break;
		}
		gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT;
		addr = gfn_to_hva(kvm, gfn);
		if (kvm_is_error_hva(addr))
			return 1;
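		/*
		 * Build the 4-byte hypercall page contents: the
		 * vendor-specific VMCALL/VMMCALL sequence emitted by
		 * kvm_x86_ops->patch_hypercall(), followed by a RET so
		 * the guest can CALL into the page.
		 */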
		kvm_x86_ops->patch_hypercall(vcpu, instructions);
		((unsigned char *)instructions)[3] = 0xc3; /* ret */
		if (__copy_to_user((void __user *)addr, instructions, 4))
			return 1;
		hv->hv_hypercall = data;
		mark_page_dirty(kvm, gfn);
		break;
	}
	case HV_X64_MSR_REFERENCE_TSC: {
		u64 gfn;
		HV_REFERENCE_TSC_PAGE tsc_ref;

		memset(&tsc_ref, 0, sizeof(tsc_ref));
		hv->hv_tsc_page = data;
		if (!(data & HV_X64_MSR_TSC_REFERENCE_ENABLE))
			break;
		gfn = data >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
		if (kvm_write_guest(
				kvm,
				gfn << HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT,
				&tsc_ref, sizeof(tsc_ref)))
			return 1;
		mark_page_dirty(kvm, gfn);
		break;
	}
	default:
		vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n",
			    msr, data);
		return 1;
	}
	return 0;
}

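/*
 * Handle a write to a per-vcpu Hyper-V MSR.  Only vcpu-local state is
 * touched here, so no VM-wide locking is required.
 */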
static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;

	switch (msr) {
	case HV_X64_MSR_APIC_ASSIST_PAGE: {
		u64 gfn;
		unsigned long addr;

		if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) {
			hv->hv_vapic = data;
			if (kvm_lapic_enable_pv_eoi(vcpu, 0))
				return 1;
			break;
		}
		gfn = data >> HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT;
		addr = kvm_vcpu_gfn_to_hva(vcpu, gfn);
		if (kvm_is_error_hva(addr))
			return 1;
		if (__clear_user((void __user *)addr, PAGE_SIZE))
			return 1;
		hv->hv_vapic = data;
		kvm_vcpu_mark_page_dirty(vcpu, gfn);
		if (kvm_lapic_enable_pv_eoi(vcpu,
					    gfn_to_gpa(gfn) | KVM_MSR_ENABLED))
			return 1;
		break;
	}
	case HV_X64_MSR_EOI:
		return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
	case HV_X64_MSR_ICR:
		return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
	case HV_X64_MSR_TPR:
		return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
	default:
		vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n",
			    msr, data);
		return 1;
	}

	return 0;
}

static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	u64 data = 0;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_hv *hv = &kvm->arch.hyperv;

	switch (msr) {
	case HV_X64_MSR_GUEST_OS_ID:
		data = hv->hv_guest_os_id;
		break;
	case HV_X64_MSR_HYPERCALL:
		data = hv->hv_hypercall;
		break;
	case HV_X64_MSR_TIME_REF_COUNT: {
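		/*
		 * The reference time counter is defined in 100ns units,
		 * so convert the nanosecond-resolution kvmclock time by
		 * dividing by 100.
		 */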
		data =
		     div_u64(get_kernel_ns() + kvm->arch.kvmclock_offset, 100);
		break;
	}
	case HV_X64_MSR_REFERENCE_TSC:
		data = hv->hv_tsc_page;
		break;
	default:
		vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
		return 1;
	}

	*pdata = data;
	return 0;
}

static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	u64 data = 0;
	struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;

	switch (msr) {
	case HV_X64_MSR_VP_INDEX: {
		int r;
		struct kvm_vcpu *v;

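		/*
		 * The virtual processor index reported to the guest is
		 * simply this vcpu's position in the VM's vcpu array.
		 */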
		kvm_for_each_vcpu(r, v, vcpu->kvm) {
			if (v == vcpu) {
				data = r;
				break;
			}
		}
		break;
	}
	case HV_X64_MSR_EOI:
		return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
	case HV_X64_MSR_ICR:
		return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata);
	case HV_X64_MSR_TPR:
		return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
	case HV_X64_MSR_APIC_ASSIST_PAGE:
		data = hv->hv_vapic;
		break;
	default:
		vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
		return 1;
	}
	*pdata = data;
	return 0;
}

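/*
 * Common entry point for Hyper-V MSR writes.  Partition-wide MSRs modify
 * VM-wide state and are therefore serialized under kvm->lock; per-vcpu
 * MSRs are dispatched directly to kvm_hv_set_msr().
 */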
int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	if (kvm_hv_msr_partition_wide(msr)) {
		int r;

		mutex_lock(&vcpu->kvm->lock);
		r = kvm_hv_set_msr_pw(vcpu, msr, data);
		mutex_unlock(&vcpu->kvm->lock);
		return r;
	} else
		return kvm_hv_set_msr(vcpu, msr, data);
}

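/*
 * Common entry point for Hyper-V MSR reads; same partition-wide vs
 * per-vcpu split and locking as the write path above.
 */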
int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	if (kvm_hv_msr_partition_wide(msr)) {
		int r;

		mutex_lock(&vcpu->kvm->lock);
		r = kvm_hv_get_msr_pw(vcpu, msr, pdata);
		mutex_unlock(&vcpu->kvm->lock);
		return r;
	} else
		return kvm_hv_get_msr(vcpu, msr, pdata);
}

bool kvm_hv_hypercall_enabled(struct kvm *kvm)
{
	return kvm->arch.hyperv.hv_hypercall & HV_X64_MSR_HYPERCALL_ENABLE;
}

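/*
 * Emulate a guest Hyper-V hypercall.  The hypercall input value is read
 * from EDX:EAX in 32-bit mode or RCX in 64-bit mode; it packs the call
 * code in bits 15:0, the "fast" flag in bit 16, the rep count in bits
 * 43:32 and the rep start index in bits 59:48.  The result is returned
 * in EDX:EAX or RAX respectively.
 */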
int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
{
	u64 param, ingpa, outgpa, ret;
	uint16_t code, rep_idx, rep_cnt, res = HV_STATUS_SUCCESS, rep_done = 0;
	bool fast, longmode;

	/*
	 * A hypercall from a non-zero CPL or from real mode generates a
	 * #UD, per the Hyper-V spec.
	 */
	if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 0;
	}

	longmode = is_64_bit_mode(vcpu);

	if (!longmode) {
		param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) |
			(kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff);
		ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) |
			(kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff);
		outgpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) |
			(kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff);
	}
#ifdef CONFIG_X86_64
	else {
		param = kvm_register_read(vcpu, VCPU_REGS_RCX);
		ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX);
		outgpa = kvm_register_read(vcpu, VCPU_REGS_R8);
	}
#endif

	code = param & 0xffff;
	fast = (param >> 16) & 0x1;
	rep_cnt = (param >> 32) & 0xfff;
	rep_idx = (param >> 48) & 0xfff;

	trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa);

	switch (code) {
	case HV_X64_HV_NOTIFY_LONG_SPIN_WAIT:
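		/*
		 * The calling vcpu has been spinning on a lock for a long
		 * time; yield so other vcpus can make progress.
		 */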
		kvm_vcpu_on_spin(vcpu);
		break;
	default:
		res = HV_STATUS_INVALID_HYPERCALL_CODE;
		break;
	}

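	/*
	 * Pack the status code and the number of completed reps into the
	 * hypercall result and return it in the mode-appropriate
	 * registers.
	 */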
	ret = res | (((u64)rep_done & 0xfff) << 32);
	if (longmode) {
		kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
	} else {
		kvm_register_write(vcpu, VCPU_REGS_RDX, ret >> 32);
		kvm_register_write(vcpu, VCPU_REGS_RAX, ret & 0xffffffff);
	}

	return 1;
}