// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018, Red Hat, Inc.
 *
 * Tests for SMM.
 */
#define _GNU_SOURCE /* for program_invocation_short_name */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>

#include "test_util.h"

#include "kvm_util.h"

#include "vmx.h"
#include "svm_util.h"

#define SMRAM_SIZE 65536
#define SMRAM_MEMSLOT ((1 << 16) | 1)
#define SMRAM_PAGES (SMRAM_SIZE / PAGE_SIZE)
#define SMRAM_GPA 0x1000000
#define SMRAM_STAGE 0xfe

#define STR(x) #x
#define XSTR(s) STR(s)

#define SYNC_PORT 0xe
#define DONE 0xff

/*
 * This is compiled as normal 64-bit code; the SMI handler, however, runs in
 * real-address mode.  To keep things simple we limit ourselves to a
 * mode-independent subset of asm here.
 * The SMI handler always reports back the fixed stage SMRAM_STAGE.
 */
uint8_t smi_handler[] = {
	0xb0, SMRAM_STAGE,    /* mov $SMRAM_STAGE, %al */
	0xe4, SYNC_PORT,      /* in $SYNC_PORT, %al */
	0x0f, 0xaa,           /* rsm */
};

/*
 * The IN to SYNC_PORT triggers a KVM_EXIT_IO exit to the host, which reads
 * the current stage out of %al (via vcpu_regs_get()) before resuming.
 */
static inline void sync_with_host(uint64_t phase)
{
	asm volatile("in $" XSTR(SYNC_PORT)", %%al \n"
		     : "+a" (phase));
}

static void self_smi(void)
{
	x2apic_write_reg(APIC_ICR,
			 APIC_DEST_SELF | APIC_INT_ASSERT | APIC_DM_SMI);
}

static void l2_guest_code(void)
{
	sync_with_host(8);

	sync_with_host(10);

	vmcall();
}

static void guest_code(void *arg)
{
	#define L2_GUEST_STACK_SIZE 64
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
	uint64_t apicbase = rdmsr(MSR_IA32_APICBASE);
	struct svm_test_data *svm = arg;
	struct vmx_pages *vmx_pages = arg;

	sync_with_host(1);

	/* Enable x2APIC; self_smi() relies on MSR-based ICR writes. */
	wrmsr(MSR_IA32_APICBASE, apicbase | X2APIC_ENABLE);

	sync_with_host(2);

	self_smi();

	sync_with_host(4);

	if (arg) {
		if (this_cpu_has(X86_FEATURE_SVM)) {
			generic_svm_setup(svm, l2_guest_code,
					  &l2_guest_stack[L2_GUEST_STACK_SIZE]);
		} else {
			GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
			GUEST_ASSERT(load_vmcs(vmx_pages));
			prepare_vmcs(vmx_pages, l2_guest_code,
				     &l2_guest_stack[L2_GUEST_STACK_SIZE]);
		}

		sync_with_host(5);

		self_smi();

		sync_with_host(7);

		if (this_cpu_has(X86_FEATURE_SVM)) {
			run_guest(svm->vmcb, svm->vmcb_gpa);
			run_guest(svm->vmcb, svm->vmcb_gpa);
		} else {
			vmlaunch();
			vmresume();
		}

		/* Stages 8-11 are eaten by SMM (SMRAM_STAGE reported instead) */
		sync_with_host(12);
	}

	sync_with_host(DONE);
}

/* Queue a pending SMI on the vCPU via KVM_SET_VCPU_EVENTS. */
void inject_smi(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_events events;

	vcpu_events_get(vcpu, &events);

	events.smi.pending = 1;
	events.flags |= KVM_VCPUEVENT_VALID_SMM;

	vcpu_events_set(vcpu, &events);
}

int main(int argc, char *argv[])
{
	vm_vaddr_t nested_gva = 0;

	struct kvm_vcpu *vcpu;
	struct kvm_regs regs;
	struct kvm_vm *vm;
	struct kvm_run *run;
	struct kvm_x86_state *state;
	int stage, stage_reported;

	TEST_REQUIRE(kvm_has_cap(KVM_CAP_X86_SMM));

	/* Create VM */
	vm = vm_create_with_one_vcpu(&vcpu, guest_code);

	run = vcpu->run;

	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, SMRAM_GPA,
				    SMRAM_MEMSLOT, SMRAM_PAGES, 0);
	TEST_ASSERT(vm_phy_pages_alloc(vm, SMRAM_PAGES, SMRAM_GPA, SMRAM_MEMSLOT)
		    == SMRAM_GPA, "could not allocate guest physical addresses?");

	/* The SMI handler entry point lives at SMBASE + 0x8000. */
	memset(addr_gpa2hva(vm, SMRAM_GPA), 0x0, SMRAM_SIZE);
	memcpy(addr_gpa2hva(vm, SMRAM_GPA) + 0x8000, smi_handler,
	       sizeof(smi_handler));

	vcpu_set_msr(vcpu, MSR_IA32_SMBASE, SMRAM_GPA);

	if (kvm_has_cap(KVM_CAP_NESTED_STATE)) {
		if (kvm_cpu_has(X86_FEATURE_SVM))
			vcpu_alloc_svm(vm, &nested_gva);
		else if (kvm_cpu_has(X86_FEATURE_VMX))
			vcpu_alloc_vmx(vm, &nested_gva);
	}

	if (!nested_gva)
		pr_info("will skip the nested virtualization part of the SMM test\n");

	vcpu_args_set(vcpu, 1, nested_gva);

	/* Step through the sync stages, exercising save/restore at each one. */
	for (stage = 1;; stage++) {
		vcpu_run(vcpu);
		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
			    "Stage %d: unexpected exit reason: %u (%s)\n",
			    stage, run->exit_reason,
			    exit_reason_str(run->exit_reason));

		memset(&regs, 0, sizeof(regs));
		vcpu_regs_get(vcpu, &regs);

		stage_reported = regs.rax & 0xff;

		if (stage_reported == DONE)
			goto done;

		TEST_ASSERT(stage_reported == stage ||
			    stage_reported == SMRAM_STAGE,
			    "Unexpected stage: expected #%x, got #%x",
			    stage, stage_reported);

		/*
		 * Enter SMM during L2 execution and check that we correctly
		 * return from it. Do not perform save/restore while in SMM yet.
		 */
		if (stage == 8) {
			inject_smi(vcpu);
			continue;
		}

		/*
		 * Perform save/restore while the guest is in SMM triggered
		 * during L2 execution.
		 */
		if (stage == 10)
			inject_smi(vcpu);

		state = vcpu_save_state(vcpu);
		kvm_vm_release(vm);

		vcpu = vm_recreate_with_one_vcpu(vm);
		vcpu_load_state(vcpu, state);
		run = vcpu->run;
		kvm_x86_state_cleanup(state);
	}

done:
	kvm_vm_free(vm);
}