// SPDX-License-Identifier: GPL-2.0-only
#define _GNU_SOURCE /* for program_invocation_short_name */

#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "vmx.h"
#include "svm_util.h"

#define L2_GUEST_STACK_SIZE 256

/*
 * Arbitrary, never shoved into KVM/hardware, just need to avoid conflict with
 * the "real" exceptions used, #SS/#GP/#DF (12/13/8).
 */
#define FAKE_TRIPLE_FAULT_VECTOR 0xaa

/* Arbitrary 32-bit error code injected by this test. */
#define SS_ERROR_CODE 0xdeadbeef

/*
 * Bit '0' is set on Intel if the exception occurs while delivering a previous
 * event/exception. AMD's wording is ambiguous, but presumably the bit is set
 * if the exception occurs while delivering an external event, e.g. NMI or INTR,
 * but not for exceptions that occur when delivering other exceptions or
 * software interrupts.
 *
 * Note, Intel's name for it, "External event", is misleading and much more
 * aligned with AMD's behavior, but the SDM is quite clear on its behavior.
 */
#define ERROR_CODE_EXT_FLAG BIT(0)

/*
 * Bit '1' is set if the fault occurred when looking up a descriptor in the
 * IDT, which is the case here as the IDT is empty/NULL.
 */
#define ERROR_CODE_IDT_FLAG BIT(1)

/*
 * The #GP that occurs when vectoring #SS should show the index into the IDT
 * for #SS, plus have the "IDT flag" set.
 */
#define GP_ERROR_CODE_AMD ((SS_VECTOR * 8) | ERROR_CODE_IDT_FLAG)
#define GP_ERROR_CODE_INTEL ((SS_VECTOR * 8) | ERROR_CODE_IDT_FLAG | ERROR_CODE_EXT_FLAG)

/*
 * Intel and AMD both shove '0' into the error code on #DF, regardless of what
 * led to the double fault.
 */
#define DF_ERROR_CODE 0

#define INTERCEPT_SS		(BIT_ULL(SS_VECTOR))
#define INTERCEPT_SS_DF		(INTERCEPT_SS | BIT_ULL(DF_VECTOR))
#define INTERCEPT_SS_GP_DF	(INTERCEPT_SS_DF | BIT_ULL(GP_VECTOR))

static void l2_ss_pending_test(void)
{
	GUEST_SYNC(SS_VECTOR);
}

static void l2_ss_injected_gp_test(void)
{
	GUEST_SYNC(GP_VECTOR);
}

static void l2_ss_injected_df_test(void)
{
	GUEST_SYNC(DF_VECTOR);
}

static void l2_ss_injected_tf_test(void)
{
	GUEST_SYNC(FAKE_TRIPLE_FAULT_VECTOR);
}

static void svm_run_l2(struct svm_test_data *svm, void *l2_code, int vector,
		       uint32_t error_code)
{
	struct vmcb *vmcb = svm->vmcb;
	struct vmcb_control_area *ctrl = &vmcb->control;

	vmcb->save.rip = (u64)l2_code;
	run_guest(vmcb, svm->vmcb_gpa);

	if (vector == FAKE_TRIPLE_FAULT_VECTOR)
		return;

	GUEST_ASSERT_EQ(ctrl->exit_code, (SVM_EXIT_EXCP_BASE + vector));
	GUEST_ASSERT_EQ(ctrl->exit_info_1, error_code);
}

static void l1_svm_code(struct svm_test_data *svm)
{
	struct vmcb_control_area *ctrl = &svm->vmcb->control;
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];

	generic_svm_setup(svm, NULL, &l2_guest_stack[L2_GUEST_STACK_SIZE]);
	svm->vmcb->save.idtr.limit = 0;
	ctrl->intercept |= BIT_ULL(INTERCEPT_SHUTDOWN);

	ctrl->intercept_exceptions = INTERCEPT_SS_GP_DF;
	svm_run_l2(svm, l2_ss_pending_test, SS_VECTOR, SS_ERROR_CODE);
	svm_run_l2(svm, l2_ss_injected_gp_test, GP_VECTOR, GP_ERROR_CODE_AMD);

	ctrl->intercept_exceptions = INTERCEPT_SS_DF;
	svm_run_l2(svm, l2_ss_injected_df_test, DF_VECTOR, DF_ERROR_CODE);

	ctrl->intercept_exceptions = INTERCEPT_SS;
	svm_run_l2(svm, l2_ss_injected_tf_test, FAKE_TRIPLE_FAULT_VECTOR, 0);
	GUEST_ASSERT_EQ(ctrl->exit_code, SVM_EXIT_SHUTDOWN);

	GUEST_DONE();
}

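/*
 * Enter L2 at @l2_code via VMLAUNCH (first entry, i.e. the pending #SS run)
 * or VMRESUME (all subsequent entries), then verify that the resulting
 * exception VM-Exit matches the expected vector and error code. The fake
 * triple fault run is instead verified by the caller.
 */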
static void vmx_run_l2(void *l2_code, int vector, uint32_t error_code)
{
	GUEST_ASSERT(!vmwrite(GUEST_RIP, (u64)l2_code));

	GUEST_ASSERT_EQ(vector == SS_VECTOR ? vmlaunch() : vmresume(), 0);

	if (vector == FAKE_TRIPLE_FAULT_VECTOR)
		return;

	GUEST_ASSERT_EQ(vmreadz(VM_EXIT_REASON), EXIT_REASON_EXCEPTION_NMI);
	GUEST_ASSERT_EQ((vmreadz(VM_EXIT_INTR_INFO) & 0xff), vector);
	GUEST_ASSERT_EQ(vmreadz(VM_EXIT_INTR_ERROR_CODE), error_code);
}

static void l1_vmx_code(struct vmx_pages *vmx)
{
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];

	GUEST_ASSERT_EQ(prepare_for_vmx_operation(vmx), true);

	GUEST_ASSERT_EQ(load_vmcs(vmx), true);

	prepare_vmcs(vmx, NULL, &l2_guest_stack[L2_GUEST_STACK_SIZE]);
	GUEST_ASSERT_EQ(vmwrite(GUEST_IDTR_LIMIT, 0), 0);

	/*
	 * VMX disallows injecting an exception with error_code[31:16] != 0,
	 * and hardware will never generate a VM-Exit with bits 31:16 set.
	 * KVM should likewise truncate the "bad" userspace value.
	 */
	GUEST_ASSERT_EQ(vmwrite(EXCEPTION_BITMAP, INTERCEPT_SS_GP_DF), 0);
	vmx_run_l2(l2_ss_pending_test, SS_VECTOR, (u16)SS_ERROR_CODE);
	vmx_run_l2(l2_ss_injected_gp_test, GP_VECTOR, GP_ERROR_CODE_INTEL);

	GUEST_ASSERT_EQ(vmwrite(EXCEPTION_BITMAP, INTERCEPT_SS_DF), 0);
	vmx_run_l2(l2_ss_injected_df_test, DF_VECTOR, DF_ERROR_CODE);

	GUEST_ASSERT_EQ(vmwrite(EXCEPTION_BITMAP, INTERCEPT_SS), 0);
	vmx_run_l2(l2_ss_injected_tf_test, FAKE_TRIPLE_FAULT_VECTOR, 0);
	GUEST_ASSERT_EQ(vmreadz(VM_EXIT_REASON), EXIT_REASON_TRIPLE_FAULT);

	GUEST_DONE();
}

static void __attribute__((__flatten__)) l1_guest_code(void *test_data)
{
	if (this_cpu_has(X86_FEATURE_SVM))
		l1_svm_code(test_data);
	else
		l1_vmx_code(test_data);
}

static void assert_ucall_vector(struct kvm_vcpu *vcpu, int vector)
{
	struct kvm_run *run = vcpu->run;
	struct ucall uc;

	TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
		    "Unexpected exit reason: %u (%s),\n",
		    run->exit_reason, exit_reason_str(run->exit_reason));

	switch (get_ucall(vcpu, &uc)) {
	case UCALL_SYNC:
		TEST_ASSERT(vector == uc.args[1],
			    "Expected L2 to ask for %d, got %ld", vector, uc.args[1]);
		break;
	case UCALL_DONE:
		TEST_ASSERT(vector == -1,
			    "Expected L2 to ask for %d, L2 says it's done", vector);
		break;
	case UCALL_ABORT:
		TEST_FAIL("%s at %s:%ld (0x%lx != 0x%lx)",
			  (const char *)uc.args[0], __FILE__, uc.args[1],
			  uc.args[2], uc.args[3]);
		break;
	default:
		TEST_FAIL("Expected L2 to ask for %d, got unexpected ucall %lu", vector, uc.cmd);
	}
}

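/*
 * Queue a #SS with an arbitrary error code for L2 via KVM_SET_VCPU_EVENTS,
 * either as a pending exception (not yet delivered, and thus subject to L1's
 * exception intercepts) or as an injected exception (delivery already in
 * progress, and thus expected to bypass interception).
 */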
static void queue_ss_exception(struct kvm_vcpu *vcpu, bool inject)
{
	struct kvm_vcpu_events events;

	vcpu_events_get(vcpu, &events);

	TEST_ASSERT(!events.exception.pending,
		    "Vector %d unexpectedly pending", events.exception.nr);
	TEST_ASSERT(!events.exception.injected,
		    "Vector %d unexpectedly injected", events.exception.nr);

	events.flags = KVM_VCPUEVENT_VALID_PAYLOAD;
	events.exception.pending = !inject;
	events.exception.injected = inject;
	events.exception.nr = SS_VECTOR;
	events.exception.has_error_code = true;
	events.exception.error_code = SS_ERROR_CODE;
	vcpu_events_set(vcpu, &events);
}

/*
 * Verify that KVM_{G,S}ET_VCPU_EVENTS play nice with pending vs. injected
 * exceptions when an exception is being queued for L2. Specifically, verify
 * that KVM honors L1 exception intercept controls when a #SS is
 * pending/injected, triggers a #GP on vectoring the #SS, morphs to #DF if #GP
 * isn't intercepted by L1, and finally causes (nested) SHUTDOWN if #DF isn't
 * intercepted by L1.
 */
int main(int argc, char *argv[])
{
	vm_vaddr_t nested_test_data_gva;
	struct kvm_vcpu_events events;
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;

	TEST_REQUIRE(kvm_has_cap(KVM_CAP_EXCEPTION_PAYLOAD));
	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SVM) || kvm_cpu_has(X86_FEATURE_VMX));

	vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
	vm_enable_cap(vm, KVM_CAP_EXCEPTION_PAYLOAD, -2ul);

	if (kvm_cpu_has(X86_FEATURE_SVM))
		vcpu_alloc_svm(vm, &nested_test_data_gva);
	else
		vcpu_alloc_vmx(vm, &nested_test_data_gva);

	vcpu_args_set(vcpu, 1, nested_test_data_gva);

	/* Run L1 => L2. L2 should sync and request #SS. */
	vcpu_run(vcpu);
	assert_ucall_vector(vcpu, SS_VECTOR);

	/* Pend #SS and request immediate exit. #SS should still be pending. */
	queue_ss_exception(vcpu, false);
	vcpu->run->immediate_exit = true;
	vcpu_run_complete_io(vcpu);

	/* Verify the pending #SS comes back out the same as it went in. */
	vcpu_events_get(vcpu, &events);
	ASSERT_EQ(events.flags & KVM_VCPUEVENT_VALID_PAYLOAD,
		  KVM_VCPUEVENT_VALID_PAYLOAD);
	ASSERT_EQ(events.exception.pending, true);
	ASSERT_EQ(events.exception.nr, SS_VECTOR);
	ASSERT_EQ(events.exception.has_error_code, true);
	ASSERT_EQ(events.exception.error_code, SS_ERROR_CODE);

	/*
	 * Run for real with the pending #SS, L1 should get a VM-Exit due to
	 * #SS interception and re-enter L2 to request #GP (via injected #SS).
	 */
	vcpu->run->immediate_exit = false;
	vcpu_run(vcpu);
	assert_ucall_vector(vcpu, GP_VECTOR);

	/*
	 * Inject #SS, the #SS should bypass interception and cause #GP, which
	 * L1 should intercept before KVM morphs it to #DF. L1 should then
	 * disable #GP interception and run L2 to request #DF (via #SS => #GP).
	 */
	queue_ss_exception(vcpu, true);
	vcpu_run(vcpu);
	assert_ucall_vector(vcpu, DF_VECTOR);

	/*
	 * Inject #SS, the #SS should bypass interception and cause #GP, which
	 * L1 is no longer intercepting, and so should morph to #DF and trigger
	 * a #DF VM-Exit. L1 should then disable #DF interception and run L2
	 * to request a fake triple fault (via #SS => #GP => #DF).
	 */
	queue_ss_exception(vcpu, true);
	vcpu_run(vcpu);
	assert_ucall_vector(vcpu, FAKE_TRIPLE_FAULT_VECTOR);

	/*
	 * Inject #SS yet again. L1 is not intercepting #GP or #DF, and so
	 * should see nested TRIPLE_FAULT / SHUTDOWN and signal that it's done.
	 */
	queue_ss_exception(vcpu, true);
	vcpu_run(vcpu);
	assert_ucall_vector(vcpu, -1);

	kvm_vm_free(vm);
}