// SPDX-License-Identifier: GPL-2.0
/*
 * Tests for MSR_IA32_TSC and MSR_IA32_TSC_ADJUST.
 *
 * Copyright (C) 2020, Red Hat, Inc.
 */
#include <stdio.h>
#include <string.h>
#include "kvm_util.h"
#include "processor.h"

#define VCPU_ID 0

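/*
 * All values in this test are expressed in multiples of UNITY (2^30
 * cycles).  The TSC keeps counting between a WRMSR and the RDMSR (or
 * host-side read) that checks it, so readings are passed through ROUND(),
 * which rounds to the nearest multiple of UNITY before comparing;
 * e.g. ROUND(UNITY + 0x1234) == UNITY.  HOST_ADJUST is the offset the
 * host writes into MSR_IA32_TSC mid-test, and GUEST_STEP is the value
 * the guest writes at each stage.
 */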
#define UNITY                  (1ull << 30)
#define HOST_ADJUST            (UNITY * 64)
#define GUEST_STEP             (UNITY * 4)
#define ROUND(x)               ((x + UNITY / 2) & -UNITY)
#define rounded_rdmsr(x)       ROUND(rdmsr(x))
#define rounded_host_rdmsr(x)  ROUND(vcpu_get_msr(vm, VCPU_ID, x))

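/*
 * Guest-side equality check.  On mismatch it issues a 4-argument
 * UCALL_ABORT whose payload (expression string, line number and the two
 * observed values) is decoded by the UCALL_ABORT case in run_vcpu().
 */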
#define GUEST_ASSERT_EQ(a, b) do {				\
	__typeof(a) _a = (a);					\
	__typeof(b) _b = (b);					\
	if (_a != _b)						\
		ucall(UCALL_ABORT, 4,				\
		      "Failed guest assert: "			\
		      #a " == " #b, __LINE__, _a, _b);		\
} while (0)

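/*
 * Guest side of the test.  A guest write to either MSR_IA32_TSC or
 * MSR_IA32_TSC_ADJUST is expected to be reflected in the other MSR as
 * well, while the offset that the host programs at stage 3 shows up in
 * MSR_IA32_TSC only, until the guest's own write to MSR_IA32_TSC at
 * stage 5 folds it into MSR_IA32_TSC_ADJUST.
 */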
static void guest_code(void)
{
	u64 val = 0;

	GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC), val);
	GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC_ADJUST), val);

	/* Guest: writes to MSR_IA32_TSC affect both MSRs.  */
	val = 1ull * GUEST_STEP;
	wrmsr(MSR_IA32_TSC, val);
	GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC), val);
	GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC_ADJUST), val);

	/* Guest: writes to MSR_IA32_TSC_ADJUST affect both MSRs.  */
	GUEST_SYNC(2);
	val = 2ull * GUEST_STEP;
	wrmsr(MSR_IA32_TSC_ADJUST, val);
	GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC), val);
	GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC_ADJUST), val);

	/* Host: setting the TSC offset.  */
	GUEST_SYNC(3);
	GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val);
	GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC_ADJUST), val);

	/*
	 * Guest: writes to MSR_IA32_TSC_ADJUST do not destroy the
	 * host-side offset and affect both MSRs.
	 */
	GUEST_SYNC(4);
	val = 3ull * GUEST_STEP;
	wrmsr(MSR_IA32_TSC_ADJUST, val);
	GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val);
	GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC_ADJUST), val);

	/*
	 * Guest: writes to MSR_IA32_TSC affect both MSRs, so the host-side
	 * offset is now visible in MSR_IA32_TSC_ADJUST.
	 */
	GUEST_SYNC(5);
	val = 4ull * GUEST_STEP;
	wrmsr(MSR_IA32_TSC, val);
	GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC), val);
	GUEST_ASSERT_EQ(rounded_rdmsr(MSR_IA32_TSC_ADJUST), val - HOST_ADJUST);

	GUEST_DONE();
}

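/*
 * Run the vCPU until its next ucall.  Stages 1-4 are expected to stop at
 * the guest's GUEST_SYNC(stage + 1); the last stage stops at GUEST_DONE.
 * A UCALL_ABORT reports a failed GUEST_ASSERT_EQ with the guest line
 * number and the two mismatched values; anything else fails the test.
 */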
static void run_vcpu(struct kvm_vm *vm, uint32_t vcpuid, int stage)
{
	struct ucall uc;

	vcpu_args_set(vm, vcpuid, 1, vcpuid);

	vcpu_ioctl(vm, vcpuid, KVM_RUN, NULL);

	switch (get_ucall(vm, vcpuid, &uc)) {
	case UCALL_SYNC:
		TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") &&
			    uc.args[1] == stage + 1,
			    "Stage %d: Unexpected register values vmexit, got %lx",
			    stage + 1, (ulong)uc.args[1]);
		return;
	case UCALL_DONE:
		return;
	case UCALL_ABORT:
		TEST_ASSERT(false, "%s at %s:%ld\n"
			    "\tvalues: %#lx, %#lx", (const char *)uc.args[0],
			    __FILE__, uc.args[1], uc.args[2], uc.args[3]);
	default:
		TEST_ASSERT(false, "Unexpected exit: %s",
			    exit_reason_str(vcpu_state(vm, vcpuid)->exit_reason));
	}
}

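/*
 * Host side of the test: step the guest through its stages and check
 * after each one that both MSRs also read back the expected (rounded)
 * values through vcpu_get_msr().  Host-initiated writes to both MSRs
 * are exercised around stage 3.
 */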
int main(void)
{
	struct kvm_vm *vm;
	uint64_t val;

	vm = vm_create_default(VCPU_ID, 0, guest_code);

	val = 0;
	ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), val);
	ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);

	/* Guest: writes to MSR_IA32_TSC affect both MSRs.  */
	run_vcpu(vm, VCPU_ID, 1);
	val = 1ull * GUEST_STEP;
	ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), val);
	ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);

	/* Guest: writes to MSR_IA32_TSC_ADJUST affect both MSRs.  */
	run_vcpu(vm, VCPU_ID, 2);
	val = 2ull * GUEST_STEP;
	ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), val);
	ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);

	/*
	 * Host: writes to MSR_IA32_TSC set the host-side offset
	 * and therefore do not change MSR_IA32_TSC_ADJUST.
	 */
	vcpu_set_msr(vm, VCPU_ID, MSR_IA32_TSC, HOST_ADJUST + val);
	ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val);
	ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);
	run_vcpu(vm, VCPU_ID, 3);

	/* Host: writes to MSR_IA32_TSC_ADJUST do not modify the TSC.  */
	vcpu_set_msr(vm, VCPU_ID, MSR_IA32_TSC_ADJUST, UNITY * 123456);
	ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val);
	ASSERT_EQ(vcpu_get_msr(vm, VCPU_ID, MSR_IA32_TSC_ADJUST), UNITY * 123456);

	/* Restore previous value.  */
	vcpu_set_msr(vm, VCPU_ID, MSR_IA32_TSC_ADJUST, val);
	ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val);
	ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);

	/*
	 * Guest: writes to MSR_IA32_TSC_ADJUST do not destroy the
	 * host-side offset and affect both MSRs.
	 */
	run_vcpu(vm, VCPU_ID, 4);
	val = 3ull * GUEST_STEP;
	ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val);
	ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);

	/*
	 * Guest: writes to MSR_IA32_TSC affect both MSRs, so the host-side
	 * offset is now visible in MSR_IA32_TSC_ADJUST.
	 */
	run_vcpu(vm, VCPU_ID, 5);
	val = 4ull * GUEST_STEP;
	ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), val);
	ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val - HOST_ADJUST);

	kvm_vm_free(vm);

	return 0;
}