// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018, Red Hat, Inc.
 *
 * Tests for SMM.
 */
#define _GNU_SOURCE /* for program_invocation_short_name */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>

#include "test_util.h"

#include "kvm_util.h"

#include "vmx.h"
#include "svm_util.h"

#define VCPU_ID 1

#define PAGE_SIZE 4096

#define SMRAM_SIZE 65536
#define SMRAM_MEMSLOT ((1 << 16) | 1)
#define SMRAM_PAGES (SMRAM_SIZE / PAGE_SIZE)
#define SMRAM_GPA 0x1000000
#define SMRAM_STAGE 0xfe

#define STR(x) #x
#define XSTR(s) STR(s)

#define SYNC_PORT 0xe
#define DONE 0xff

/*
 * This is compiled as normal 64-bit code, however, the SMI handler is
 * executed in real-address mode. To stay simple we're limiting ourselves
 * to a mode-independent subset of asm here.
 * The SMI handler always reports back the fixed stage SMRAM_STAGE.
 */
uint8_t smi_handler[] = {
	0xb0, SMRAM_STAGE,    /* mov $SMRAM_STAGE, %al */
	0xe4, SYNC_PORT,      /* in $SYNC_PORT, %al */
	0x0f, 0xaa,           /* rsm */
};

/* Report the current stage to the host via a port I/O exit. */
static inline void sync_with_host(uint64_t phase)
{
	asm volatile("in $" XSTR(SYNC_PORT)", %%al \n"
		     : "+a" (phase));
}

/* Send an SMI to the vCPU itself through the local x2APIC. */
static void self_smi(void)
{
	x2apic_write_reg(APIC_ICR,
			 APIC_DEST_SELF | APIC_INT_ASSERT | APIC_DM_SMI);
}

static void l2_guest_code(void)
{
	sync_with_host(8);

	sync_with_host(10);

	vmcall();
}

static void guest_code(void *arg)
{
#define L2_GUEST_STACK_SIZE 64
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
	uint64_t apicbase = rdmsr(MSR_IA32_APICBASE);
	struct svm_test_data *svm = arg;
	struct vmx_pages *vmx_pages = arg;

	sync_with_host(1);

	wrmsr(MSR_IA32_APICBASE, apicbase | X2APIC_ENABLE);

	sync_with_host(2);

	/* Stage 3 is eaten by SMM (SMRAM_STAGE reported instead). */
	self_smi();

	sync_with_host(4);

	if (arg) {
		if (cpu_has_svm()) {
			generic_svm_setup(svm, l2_guest_code,
					  &l2_guest_stack[L2_GUEST_STACK_SIZE]);
		} else {
			GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
			GUEST_ASSERT(load_vmcs(vmx_pages));
			prepare_vmcs(vmx_pages, l2_guest_code,
				     &l2_guest_stack[L2_GUEST_STACK_SIZE]);
		}

		sync_with_host(5);

		/* Stage 6 is eaten by SMM (SMRAM_STAGE reported instead). */
		self_smi();

		sync_with_host(7);

		if (cpu_has_svm()) {
			run_guest(svm->vmcb, svm->vmcb_gpa);
			run_guest(svm->vmcb, svm->vmcb_gpa);
		} else {
			vmlaunch();
			vmresume();
		}

		/* Stages 8-11 are eaten by SMM (SMRAM_STAGE reported instead) */
		sync_with_host(12);
	}

	sync_with_host(DONE);
}

/* Make an SMI pending on the vCPU via KVM_SET_VCPU_EVENTS. */
void inject_smi(struct kvm_vm *vm)
{
	struct kvm_vcpu_events events;

	vcpu_events_get(vm, VCPU_ID, &events);

	events.smi.pending = 1;
	events.flags |= KVM_VCPUEVENT_VALID_SMM;

	vcpu_events_set(vm, VCPU_ID, &events);
}

int main(int argc, char *argv[])
{
	vm_vaddr_t nested_gva = 0;

	struct kvm_regs regs;
	struct kvm_vm *vm;
	struct kvm_run *run;
	struct kvm_x86_state *state;
	int stage, stage_reported;

	/* Create VM */
	vm = vm_create_default(VCPU_ID, 0, guest_code);

	run = vcpu_state(vm, VCPU_ID);

	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, SMRAM_GPA,
				    SMRAM_MEMSLOT, SMRAM_PAGES, 0);
	TEST_ASSERT(vm_phy_pages_alloc(vm, SMRAM_PAGES, SMRAM_GPA, SMRAM_MEMSLOT)
		    == SMRAM_GPA, "could not allocate guest physical addresses");

	memset(addr_gpa2hva(vm, SMRAM_GPA), 0x0, SMRAM_SIZE);
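	/*
	 * The SMI handler's entry point is architecturally fixed at
	 * SMBASE + 0x8000, so copy the handler to that offset within
	 * SMRAM; SMBASE itself is pointed at SMRAM_GPA just below.
	 */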
	memcpy(addr_gpa2hva(vm, SMRAM_GPA) + 0x8000, smi_handler,
	       sizeof(smi_handler));

	vcpu_set_msr(vm, VCPU_ID, MSR_IA32_SMBASE, SMRAM_GPA);

	if (kvm_check_cap(KVM_CAP_NESTED_STATE)) {
		if (nested_svm_supported())
			vcpu_alloc_svm(vm, &nested_gva);
		else if (nested_vmx_supported())
			vcpu_alloc_vmx(vm, &nested_gva);
	}

	if (!nested_gva)
		pr_info("will skip the nested (VMX/SVM) part of the SMM test\n");

	vcpu_args_set(vm, VCPU_ID, 1, nested_gva);

	for (stage = 1;; stage++) {
		_vcpu_run(vm, VCPU_ID);
		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
			    "Stage %d: unexpected exit reason: %u (%s)\n",
			    stage, run->exit_reason,
			    exit_reason_str(run->exit_reason));

		memset(&regs, 0, sizeof(regs));
		vcpu_regs_get(vm, VCPU_ID, &regs);

		stage_reported = regs.rax & 0xff;

		if (stage_reported == DONE)
			goto done;

		TEST_ASSERT(stage_reported == stage ||
			    stage_reported == SMRAM_STAGE,
			    "Unexpected stage: expected #%x, got #%x",
			    stage, stage_reported);

		/*
		 * Enter SMM during L2 execution and check that we correctly
		 * return from it. Do not perform save/restore while in SMM yet.
		 */
		if (stage == 8) {
			inject_smi(vm);
			continue;
		}

		/*
		 * Perform save/restore while the guest is in SMM triggered
		 * during L2 execution.
		 */
		if (stage == 10)
			inject_smi(vm);

		/*
		 * Save the vCPU state, tear the VM down, recreate and restore
		 * it to verify that (nested) SMM state survives a
		 * save/restore cycle.
		 */
		state = vcpu_save_state(vm, VCPU_ID);
		kvm_vm_release(vm);
		kvm_vm_restart(vm, O_RDWR);
		vm_vcpu_add(vm, VCPU_ID);
		vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
		vcpu_load_state(vm, VCPU_ID, state);
		run = vcpu_state(vm, VCPU_ID);
		kvm_x86_state_cleanup(state);
	}

done:
	kvm_vm_free(vm);
}