// SPDX-License-Identifier: GPL-2.0-only
/*
 * vmx_tsc_adjust_test
 *
 * Copyright (C) 2018, Google LLC.
 *
 * IA32_TSC_ADJUST test
 *
 * According to the SDM, "if an execution of WRMSR to the
 * IA32_TIME_STAMP_COUNTER MSR adds (or subtracts) value X from the TSC,
 * the logical processor also adds (or subtracts) value X from the
 * IA32_TSC_ADJUST MSR."
 *
 * Note that when L1 doesn't intercept writes to IA32_TSC, a
 * WRMSR(IA32_TSC) from L2 sets L1's TSC value, not L2's perceived TSC
 * value.
 *
 * This test verifies that this unusual case is handled correctly.
 */

#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "vmx.h"

#include <string.h>
#include <sys/ioctl.h>

#include "kselftest.h"

/* Fallback for toolchain headers that do not define this MSR index. */
#ifndef MSR_IA32_TSC_ADJUST
#define MSR_IA32_TSC_ADJUST 0x3b
#endif

#define PAGE_SIZE 4096
#define VCPU_ID 5

/* Delta subtracted from the TSC via WRMSR(IA32_TSC) at each guest stage. */
#define TSC_ADJUST_VALUE (1ll << 32)
/* TSC offset programmed into the L2 VMCS; negative so L2's TSC lags L1's. */
#define TSC_OFFSET_VALUE -(1ll << 48)

/* I/O port numbers the guest uses to signal the host (ucall protocol). */
enum {
	PORT_ABORT = 0x1000,
	PORT_REPORT,
	PORT_DONE,
};

/* Indices of the per-vCPU VMX support pages. */
enum {
	VMXON_PAGE = 0,
	VMCS_PAGE,
	MSR_BITMAP_PAGE,

	NUM_VMX_PAGES,
};

/* kvm_msrs header followed by exactly one MSR entry, for KVM_SET_MSRS. */
struct kvm_single_msr {
	struct kvm_msrs header;
	struct kvm_msr_entry entry;
} __attribute__((packed));

/* The virtual machine object.
*/ 61 static struct kvm_vm *vm; 62 63 static void check_ia32_tsc_adjust(int64_t max) 64 { 65 int64_t adjust; 66 67 adjust = rdmsr(MSR_IA32_TSC_ADJUST); 68 GUEST_SYNC(adjust); 69 GUEST_ASSERT(adjust <= max); 70 } 71 72 static void l2_guest_code(void) 73 { 74 uint64_t l1_tsc = rdtsc() - TSC_OFFSET_VALUE; 75 76 wrmsr(MSR_IA32_TSC, l1_tsc - TSC_ADJUST_VALUE); 77 check_ia32_tsc_adjust(-2 * TSC_ADJUST_VALUE); 78 79 /* Exit to L1 */ 80 __asm__ __volatile__("vmcall"); 81 } 82 83 static void l1_guest_code(struct vmx_pages *vmx_pages) 84 { 85 #define L2_GUEST_STACK_SIZE 64 86 unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE]; 87 uint32_t control; 88 uintptr_t save_cr3; 89 90 GUEST_ASSERT(rdtsc() < TSC_ADJUST_VALUE); 91 wrmsr(MSR_IA32_TSC, rdtsc() - TSC_ADJUST_VALUE); 92 check_ia32_tsc_adjust(-1 * TSC_ADJUST_VALUE); 93 94 GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages)); 95 GUEST_ASSERT(load_vmcs(vmx_pages)); 96 97 /* Prepare the VMCS for L2 execution. */ 98 prepare_vmcs(vmx_pages, l2_guest_code, 99 &l2_guest_stack[L2_GUEST_STACK_SIZE]); 100 control = vmreadz(CPU_BASED_VM_EXEC_CONTROL); 101 control |= CPU_BASED_USE_MSR_BITMAPS | CPU_BASED_USE_TSC_OFFSETING; 102 vmwrite(CPU_BASED_VM_EXEC_CONTROL, control); 103 vmwrite(TSC_OFFSET, TSC_OFFSET_VALUE); 104 105 /* Jump into L2. First, test failure to load guest CR3. 
*/ 106 save_cr3 = vmreadz(GUEST_CR3); 107 vmwrite(GUEST_CR3, -1ull); 108 GUEST_ASSERT(!vmlaunch()); 109 GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == 110 (EXIT_REASON_FAILED_VMENTRY | EXIT_REASON_INVALID_STATE)); 111 check_ia32_tsc_adjust(-1 * TSC_ADJUST_VALUE); 112 vmwrite(GUEST_CR3, save_cr3); 113 114 GUEST_ASSERT(!vmlaunch()); 115 GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL); 116 117 check_ia32_tsc_adjust(-2 * TSC_ADJUST_VALUE); 118 119 GUEST_DONE(); 120 } 121 122 static void report(int64_t val) 123 { 124 printf("IA32_TSC_ADJUST is %ld (%lld * TSC_ADJUST_VALUE + %lld).\n", 125 val, val / TSC_ADJUST_VALUE, val % TSC_ADJUST_VALUE); 126 } 127 128 int main(int argc, char *argv[]) 129 { 130 vm_vaddr_t vmx_pages_gva; 131 struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1); 132 133 if (!(entry->ecx & CPUID_VMX)) { 134 fprintf(stderr, "nested VMX not enabled, skipping test\n"); 135 exit(KSFT_SKIP); 136 } 137 138 vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code); 139 vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid()); 140 141 /* Allocate VMX pages and shared descriptors (vmx_pages). */ 142 vcpu_alloc_vmx(vm, &vmx_pages_gva); 143 vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva); 144 145 for (;;) { 146 volatile struct kvm_run *run = vcpu_state(vm, VCPU_ID); 147 struct ucall uc; 148 149 vcpu_run(vm, VCPU_ID); 150 TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, 151 "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n", 152 run->exit_reason, 153 exit_reason_str(run->exit_reason)); 154 155 switch (get_ucall(vm, VCPU_ID, &uc)) { 156 case UCALL_ABORT: 157 TEST_ASSERT(false, "%s", (const char *)uc.args[0]); 158 /* NOT REACHED */ 159 case UCALL_SYNC: 160 report(uc.args[1]); 161 break; 162 case UCALL_DONE: 163 goto done; 164 default: 165 TEST_ASSERT(false, "Unknown ucall 0x%x.", uc.cmd); 166 } 167 } 168 169 kvm_vm_free(vm); 170 done: 171 return 0; 172 } 173