// SPDX-License-Identifier: GPL-2.0-only
/*
 * VMX-preemption timer test
 *
 * Copyright (C) 2020, Google, LLC.
 *
 * Test to ensure that VM-Enter after migration doesn't incorrectly
 * restart the timer with the full timer value instead of the partially
 * decayed timer value.
 */
#define _GNU_SOURCE /* for program_invocation_short_name */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>

#include "test_util.h"

#include "kvm_util.h"
#include "processor.h"
#include "vmx.h"

#define PREEMPTION_TIMER_VALUE			100000000ull
#define PREEMPTION_TIMER_VALUE_THRESHOLD1	 80000000ull

u32 vmx_pt_rate;
bool l2_save_restore_done;
static u64 l2_vmx_pt_start;
volatile u64 l2_vmx_pt_finish;

union vmx_basic basic;
union vmx_ctrl_msr ctrl_pin_rev;
union vmx_ctrl_msr ctrl_exit_rev;

void l2_guest_code(void)
{
	u64 vmx_pt_delta;

	vmcall();

	/* Round the TSC down to the nearest preemption timer tick. */
	l2_vmx_pt_start = (rdtsc() >> vmx_pt_rate) << vmx_pt_rate;

	/*
	 * Wait until the 1st threshold has passed
	 */
	do {
		l2_vmx_pt_finish = rdtsc();
		vmx_pt_delta = (l2_vmx_pt_finish - l2_vmx_pt_start) >>
				vmx_pt_rate;
	} while (vmx_pt_delta < PREEMPTION_TIMER_VALUE_THRESHOLD1);

	/*
	 * Force L2 through a save and restore cycle
	 */
	GUEST_SYNC(1);

	l2_save_restore_done = 1;

	/*
	 * Now wait for the preemption timer to fire and
	 * exit to L1
	 */
	while ((l2_vmx_pt_finish = rdtsc()))
		;
}

void l1_guest_code(struct vmx_pages *vmx_pages)
{
#define L2_GUEST_STACK_SIZE 64
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
	u64 l1_vmx_pt_start;
	u64 l1_vmx_pt_finish;
	u64 l1_tsc_deadline, l2_tsc_deadline;

	GUEST_ASSERT(vmx_pages->vmcs_gpa);
	GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
	GUEST_ASSERT(load_vmcs(vmx_pages));
	GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);

	prepare_vmcs(vmx_pages, l2_guest_code,
		     &l2_guest_stack[L2_GUEST_STACK_SIZE]);

	/*
	 * Check for preemption timer support
	 */
	basic.val = rdmsr(MSR_IA32_VMX_BASIC);
	ctrl_pin_rev.val = rdmsr(basic.ctrl ? MSR_IA32_VMX_TRUE_PINBASED_CTLS
			: MSR_IA32_VMX_PINBASED_CTLS);
	ctrl_exit_rev.val = rdmsr(basic.ctrl ? MSR_IA32_VMX_TRUE_EXIT_CTLS
			: MSR_IA32_VMX_EXIT_CTLS);

	if (!(ctrl_pin_rev.clr & PIN_BASED_VMX_PREEMPTION_TIMER) ||
	    !(ctrl_exit_rev.clr & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER))
		return;
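	/*
	 * Launch L2 and wait for its first exit; l2_guest_code() starts
	 * with a vmcall() so that L1 regains control here and can arm the
	 * preemption timer before resuming L2.
	 */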
	GUEST_ASSERT(!vmlaunch());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
	vmwrite(GUEST_RIP, vmreadz(GUEST_RIP) + vmreadz(VM_EXIT_INSTRUCTION_LEN));

	/*
	 * Turn on the pin-based control and resume the guest
	 */
	GUEST_ASSERT(!vmwrite(PIN_BASED_VM_EXEC_CONTROL,
			      vmreadz(PIN_BASED_VM_EXEC_CONTROL) |
			      PIN_BASED_VMX_PREEMPTION_TIMER));

	GUEST_ASSERT(!vmwrite(VMX_PREEMPTION_TIMER_VALUE,
			      PREEMPTION_TIMER_VALUE));

	/*
	 * Bits 4:0 of IA32_VMX_MISC report the rate at which the preemption
	 * timer counts down relative to the TSC, i.e. the timer decrements
	 * once every 2^vmx_pt_rate TSC cycles.
	 */
	vmx_pt_rate = rdmsr(MSR_IA32_VMX_MISC) & 0x1F;

	l2_save_restore_done = 0;

	l1_vmx_pt_start = (rdtsc() >> vmx_pt_rate) << vmx_pt_rate;

	GUEST_ASSERT(!vmresume());

	l1_vmx_pt_finish = rdtsc();

	/*
	 * Ensure the exit from L2 happens after L2 goes through
	 * save and restore
	 */
	GUEST_ASSERT(l2_save_restore_done);

	/*
	 * Ensure the exit from L2 is due to preemption timer expiry
	 */
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_PREEMPTION_TIMER);

	l1_tsc_deadline = l1_vmx_pt_start +
		(PREEMPTION_TIMER_VALUE << vmx_pt_rate);

	l2_tsc_deadline = l2_vmx_pt_start +
		(PREEMPTION_TIMER_VALUE << vmx_pt_rate);

	/*
	 * Sync with the host and pass the L1 and L2 timer expiry times and
	 * TSC deadlines so that the host can verify they are as expected
	 */
	GUEST_SYNC_ARGS(2, l1_vmx_pt_finish, l1_tsc_deadline,
		l2_vmx_pt_finish, l2_tsc_deadline);
}

void guest_code(struct vmx_pages *vmx_pages)
{
	if (vmx_pages)
		l1_guest_code(vmx_pages);

	GUEST_DONE();
}

int main(int argc, char *argv[])
{
	vm_vaddr_t vmx_pages_gva = 0;

	struct kvm_regs regs1, regs2;
	struct kvm_vm *vm;
	struct kvm_run *run;
	struct kvm_vcpu *vcpu;
	struct kvm_x86_state *state;
	struct ucall uc;
	int stage;

	/*
	 * AMD currently does not implement any VMX features, so for now we
	 * just early out.
	 */
	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));

	TEST_REQUIRE(kvm_has_cap(KVM_CAP_NESTED_STATE));

	/* Create VM */
	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
	run = vcpu->run;

	vcpu_regs_get(vcpu, &regs1);

	vcpu_alloc_vmx(vm, &vmx_pages_gva);
	vcpu_args_set(vcpu, 1, vmx_pages_gva);

	for (stage = 1;; stage++) {
		vcpu_run(vcpu);
		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
			    "Stage %d: unexpected exit reason: %u (%s)",
			    stage, run->exit_reason,
			    exit_reason_str(run->exit_reason));

		switch (get_ucall(vcpu, &uc)) {
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT(uc);
			/* NOT REACHED */
		case UCALL_SYNC:
			break;
		case UCALL_DONE:
			goto done;
		default:
			TEST_FAIL("Unknown ucall %lu", uc.cmd);
		}

		/* UCALL_SYNC is handled here. */
		TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") &&
			    uc.args[1] == stage,
			    "Stage %d: Unexpected sync ucall, got %lx",
			    stage, (ulong)uc.args[1]);

		/*
		 * If this is stage 2, verify that the preemption timer
		 * expired when expected:
		 * from L1's perspective, the timer must not have
		 * expired too early;
		 * from L2's perspective, the timer must not have
		 * expired too late.
		 */
		if (stage == 2) {
			pr_info("Stage %d: L1 PT expiry TSC (%lu), L1 TSC deadline (%lu)\n",
				stage, uc.args[2], uc.args[3]);

			pr_info("Stage %d: L2 PT expiry TSC (%lu), L2 TSC deadline (%lu)\n",
				stage, uc.args[4], uc.args[5]);

			TEST_ASSERT(uc.args[2] >= uc.args[3],
				    "Stage %d: L1 PT expiry TSC (%lu) < L1 TSC deadline (%lu)",
				    stage, uc.args[2], uc.args[3]);

			TEST_ASSERT(uc.args[4] < uc.args[5],
				    "Stage %d: L2 PT expiry TSC (%lu) > L2 TSC deadline (%lu)",
				    stage, uc.args[4], uc.args[5]);
		}
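		/*
		 * Stage 1 arrives here with L2's preemption timer partially
		 * decayed; the save/restore cycle below is the "migration"
		 * after which VM-Enter must not restart the timer with the
		 * full timer value.
		 */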
		state = vcpu_save_state(vcpu);
		memset(&regs1, 0, sizeof(regs1));
		vcpu_regs_get(vcpu, &regs1);

		kvm_vm_release(vm);

		/* Restore state in a new VM. */
		vcpu = vm_recreate_with_one_vcpu(vm);
		vcpu_load_state(vcpu, state);
		run = vcpu->run;
		kvm_x86_state_cleanup(state);

		memset(&regs2, 0, sizeof(regs2));
		vcpu_regs_get(vcpu, &regs2);
		TEST_ASSERT(!memcmp(&regs1, &regs2, sizeof(regs2)),
			    "Unexpected register values after vcpu_load_state; rdi: %lx rsi: %lx",
			    (ulong) regs2.rdi, (ulong) regs2.rsi);
	}

done:
	kvm_vm_free(vm);
}