/*
 * Copyright (C) 2016 Veertu Inc,
 * Copyright (C) 2017 Google Inc,
 * Based on Veertu vddh/vmm/vmx.h
 *
 * Interfaces to Hypervisor.framework to read/write X86 registers and VMCS.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef VMX_H
#define VMX_H

#include <stdint.h>
#include <Hypervisor/hv.h>
#include <Hypervisor/hv_vmx.h>
#include "vmcs.h"
#include "cpu.h"
#include "x86.h"

#include "exec/address-spaces.h"

/* read GPR */
static inline uint64_t rreg(hv_vcpuid_t vcpu, hv_x86_reg_t reg)
{
    uint64_t v;

    if (hv_vcpu_read_register(vcpu, reg, &v)) {
        abort();
    }

    return v;
}

/* write GPR */
static inline void wreg(hv_vcpuid_t vcpu, hv_x86_reg_t reg, uint64_t v)
{
    if (hv_vcpu_write_register(vcpu, reg, v)) {
        abort();
    }
}

/* read VMCS field */
static inline uint64_t rvmcs(hv_vcpuid_t vcpu, uint32_t field)
{
    uint64_t v;

    hv_vmx_vcpu_read_vmcs(vcpu, field, &v);

    return v;
}

/* write VMCS field */
static inline void wvmcs(hv_vcpuid_t vcpu, uint32_t field, uint64_t v)
{
    hv_vmx_vcpu_write_vmcs(vcpu, field, v);
}

/*
 * Desired control word constrained by hardware/hypervisor capabilities:
 * the low 32 bits of 'cap' are the bits that must be set, the high 32 bits
 * are the bits that are allowed to be set.
 */
static inline uint64_t cap2ctrl(uint64_t cap, uint64_t ctrl)
{
    return (ctrl | (cap & 0xffffffff)) & (cap >> 32);
}

#define VM_ENTRY_GUEST_LMA (1LL << 9)

#define AR_TYPE_ACCESSES_MASK 1
#define AR_TYPE_READABLE_MASK (1 << 1)
#define AR_TYPE_WRITEABLE_MASK (1 << 2)
#define AR_TYPE_CODE_MASK (1 << 3)
#define AR_TYPE_MASK 0x0f
#define AR_TYPE_BUSY_64_TSS 11
#define AR_TYPE_BUSY_32_TSS 11
#define AR_TYPE_BUSY_16_TSS 3
#define AR_TYPE_LDT 2

static void enter_long_mode(hv_vcpuid_t vcpu, uint64_t cr0, uint64_t efer)
{
    uint64_t entry_ctls;

    efer |= MSR_EFER_LMA;
    wvmcs(vcpu, VMCS_GUEST_IA32_EFER, efer);
    entry_ctls = rvmcs(vcpu, VMCS_ENTRY_CTLS);
    wvmcs(vcpu, VMCS_ENTRY_CTLS, entry_ctls | VM_ENTRY_GUEST_LMA);

    /* a long-mode guest requires TR to be typed as a busy 64-bit TSS */
    uint64_t guest_tr_ar = rvmcs(vcpu, VMCS_GUEST_TR_ACCESS_RIGHTS);
    if ((efer & MSR_EFER_LME) &&
        (guest_tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_64_TSS) {
        wvmcs(vcpu, VMCS_GUEST_TR_ACCESS_RIGHTS,
              (guest_tr_ar & ~AR_TYPE_MASK) | AR_TYPE_BUSY_64_TSS);
    }
}

static void exit_long_mode(hv_vcpuid_t vcpu, uint64_t cr0, uint64_t efer)
{
    uint64_t entry_ctls;

    entry_ctls = rvmcs(vcpu, VMCS_ENTRY_CTLS);
    wvmcs(vcpu, VMCS_ENTRY_CTLS, entry_ctls & ~VM_ENTRY_GUEST_LMA);

    efer &= ~MSR_EFER_LMA;
    wvmcs(vcpu, VMCS_GUEST_IA32_EFER, efer);
}

static inline void macvm_set_cr0(hv_vcpuid_t vcpu, uint64_t cr0)
{
    int i;
    uint64_t pdpte[4] = {0, 0, 0, 0};
    uint64_t efer = rvmcs(vcpu, VMCS_GUEST_IA32_EFER);
    uint64_t old_cr0 = rvmcs(vcpu, VMCS_GUEST_CR0);

    /* legacy PAE paging: load the four PDPTEs from the table at CR3 */
    if ((cr0 & CR0_PG) && (rvmcs(vcpu, VMCS_GUEST_CR4) & CR4_PAE) &&
        !(efer & MSR_EFER_LME)) {
        address_space_rw(&address_space_memory,
                         rvmcs(vcpu, VMCS_GUEST_CR3) & ~0x1f,
                         MEMTXATTRS_UNSPECIFIED,
                         (uint8_t *)pdpte, 32, 0);
    }

    for (i = 0; i < 4; i++) {
        wvmcs(vcpu, VMCS_GUEST_PDPTE0 + i * 2, pdpte[i]);
    }

    wvmcs(vcpu, VMCS_CR0_MASK, CR0_CD | CR0_NE | CR0_PG);
    wvmcs(vcpu, VMCS_CR0_SHADOW, cr0);

    cr0 &= ~CR0_CD;
    wvmcs(vcpu, VMCS_GUEST_CR0, cr0 | CR0_NE | CR0_ET);

    if (efer & MSR_EFER_LME) {
        if (!(old_cr0 & CR0_PG) && (cr0 & CR0_PG)) {
            enter_long_mode(vcpu, cr0, efer);
        }
        if (/*(old_cr0 & CR0_PG) &&*/ !(cr0 & CR0_PG)) {
            exit_long_mode(vcpu, cr0, efer);
        }
    }

    hv_vcpu_invalidate_tlb(vcpu);
    hv_vcpu_flush(vcpu);
}

static inline void macvm_set_cr4(hv_vcpuid_t vcpu, uint64_t cr4)
{
    uint64_t guest_cr4 = cr4 | CR4_VMXE;

    wvmcs(vcpu, VMCS_GUEST_CR4, guest_cr4);
    wvmcs(vcpu, VMCS_CR4_SHADOW, cr4);

    hv_vcpu_invalidate_tlb(vcpu);
    hv_vcpu_flush(vcpu);
}

static inline void macvm_set_rip(CPUState *cpu, uint64_t rip)
{
    uint64_t val;

    /* BUG: should take overlap into consideration */
    wreg(cpu->hvf_fd, HV_X86_RIP, rip);

    /*
     * After moving RIP forward, clear any STI/MOV-SS blocking left in the
     * guest interruptibility state.
     */
    val = rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY);
    if (val & (VMCS_INTERRUPTIBILITY_STI_BLOCKING |
               VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)) {
        wvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY,
              val & ~(VMCS_INTERRUPTIBILITY_STI_BLOCKING |
                      VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING));
    }
}

static inline void vmx_clear_nmi_blocking(CPUState *cpu)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;

    env->hflags2 &= ~HF2_NMI_MASK;
    uint32_t gi = (uint32_t)rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY);
    gi &= ~VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
    wvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY, gi);
}

static inline void vmx_set_nmi_blocking(CPUState *cpu)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;

    env->hflags2 |= HF2_NMI_MASK;
    uint32_t gi = (uint32_t)rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY);
    gi |= VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
    wvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY, gi);
}

static inline void vmx_set_nmi_window_exiting(CPUState *cpu)
{
    uint64_t val;

    val = rvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS);
    wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS, val |
          VMCS_PRI_PROC_BASED_CTLS_NMI_WINDOW_EXITING);
}

static inline void vmx_clear_nmi_window_exiting(CPUState *cpu)
{
    uint64_t val;

    val = rvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS);
    wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS, val &
          ~VMCS_PRI_PROC_BASED_CTLS_NMI_WINDOW_EXITING);
}

#endif
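
/*
 * Illustrative usage sketch (not part of the original header): cap2ctrl() is
 * meant for programming VMCS control fields from the VMX capability values
 * reported by Hypervisor.framework, where the low 32 bits of the capability
 * word are the must-be-1 bits and the high 32 bits are the may-be-1 bits.
 * Assuming a hypothetical "desired" pin-based control mask, a caller might
 * do something like:
 *
 *     uint64_t cap, desired = 0;
 *     if (hv_vmx_read_capability(HV_VMX_CAP_PINBASED, &cap)) {
 *         abort();
 *     }
 *     wvmcs(vcpu, VMCS_PIN_BASED_CTLS, cap2ctrl(cap, desired));
 */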