/*
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (C) 2016 Veertu Inc,
 * Copyright (C) 2017 Google Inc,
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"

#include "qemu-common.h"
#include "x86hvf.h"
#include "vmx.h"
#include "vmcs.h"
#include "cpu.h"
#include "x86_descr.h"
#include "x86_decode.h"
#include "sysemu/hw_accel.h"

#include "hw/i386/apic_internal.h"

#include <Hypervisor/hv.h>
#include <Hypervisor/hv_vmx.h>

/* Pack a QEMU SegmentCache into the VMX segment-descriptor layout
 * (selector, base, limit and access-rights word). */
void hvf_set_segment(struct CPUState *cpu, struct vmx_segment *vmx_seg,
                     SegmentCache *qseg, bool is_tr)
{
    vmx_seg->sel = qseg->selector;
    vmx_seg->base = qseg->base;
    vmx_seg->limit = qseg->limit;

    if (!qseg->selector && !x86_is_real(cpu) && !is_tr) {
        /* the TR register is usable after processor reset despite
         * having a null selector; any other segment with a null
         * selector is marked unusable */
        vmx_seg->ar = 1 << 16;
        return;
    }
    vmx_seg->ar = (qseg->flags >> DESC_TYPE_SHIFT) & 0xf;
    vmx_seg->ar |= ((qseg->flags >> DESC_G_SHIFT) & 1) << 15;
    vmx_seg->ar |= ((qseg->flags >> DESC_B_SHIFT) & 1) << 14;
    vmx_seg->ar |= ((qseg->flags >> DESC_L_SHIFT) & 1) << 13;
    vmx_seg->ar |= ((qseg->flags >> DESC_AVL_SHIFT) & 1) << 12;
    vmx_seg->ar |= ((qseg->flags >> DESC_P_SHIFT) & 1) << 7;
    vmx_seg->ar |= ((qseg->flags >> DESC_DPL_SHIFT) & 3) << 5;
    vmx_seg->ar |= ((qseg->flags >> DESC_S_SHIFT) & 1) << 4;
}

/* Unpack a VMX segment descriptor back into a QEMU SegmentCache. */
void hvf_get_segment(SegmentCache *qseg, struct vmx_segment *vmx_seg)
{
    qseg->limit = vmx_seg->limit;
    qseg->base = vmx_seg->base;
    qseg->selector = vmx_seg->sel;
    qseg->flags = ((vmx_seg->ar & 0xf) << DESC_TYPE_SHIFT) |
                  (((vmx_seg->ar >> 4) & 1) << DESC_S_SHIFT) |
                  (((vmx_seg->ar >> 5) & 3) << DESC_DPL_SHIFT) |
                  (((vmx_seg->ar >> 7) & 1) << DESC_P_SHIFT) |
                  (((vmx_seg->ar >> 12) & 1) << DESC_AVL_SHIFT) |
                  (((vmx_seg->ar >> 13) & 1) << DESC_L_SHIFT) |
                  (((vmx_seg->ar >> 14) & 1) << DESC_B_SHIFT) |
                  (((vmx_seg->ar >> 15) & 1) << DESC_G_SHIFT);
}

/* Push the guest's FPU/XSAVE state into the HVF vCPU. */
void hvf_put_xsave(CPUState *cpu_state)
{
    void *xsave = X86_CPU(cpu_state)->env.xsave_buf;
    uint32_t xsave_len = X86_CPU(cpu_state)->env.xsave_buf_len;

    x86_cpu_xsave_all_areas(X86_CPU(cpu_state), xsave, xsave_len);

    if (hv_vcpu_write_fpstate(cpu_state->hvf->fd, xsave, xsave_len)) {
        abort();
    }
}

/* Write descriptor tables, control registers and segment registers
 * out to the VMCS. */
static void hvf_put_segments(CPUState *cpu_state)
{
    CPUX86State *env = &X86_CPU(cpu_state)->env;
    struct vmx_segment seg;

    wvmcs(cpu_state->hvf->fd, VMCS_GUEST_IDTR_LIMIT, env->idt.limit);
    wvmcs(cpu_state->hvf->fd, VMCS_GUEST_IDTR_BASE, env->idt.base);

    wvmcs(cpu_state->hvf->fd, VMCS_GUEST_GDTR_LIMIT, env->gdt.limit);
    wvmcs(cpu_state->hvf->fd, VMCS_GUEST_GDTR_BASE, env->gdt.base);

    /* wvmcs(cpu_state->hvf->fd, VMCS_GUEST_CR2, env->cr[2]); */
    wvmcs(cpu_state->hvf->fd, VMCS_GUEST_CR3, env->cr[3]);
    vmx_update_tpr(cpu_state);
    wvmcs(cpu_state->hvf->fd, VMCS_GUEST_IA32_EFER, env->efer);

    macvm_set_cr4(cpu_state->hvf->fd, env->cr[4]);
    macvm_set_cr0(cpu_state->hvf->fd, env->cr[0]);

    hvf_set_segment(cpu_state, &seg, &env->segs[R_CS], false);
    vmx_write_segment_descriptor(cpu_state, &seg, R_CS);

    hvf_set_segment(cpu_state, &seg, &env->segs[R_DS], false);
    vmx_write_segment_descriptor(cpu_state, &seg, R_DS);

    hvf_set_segment(cpu_state, &seg, &env->segs[R_ES], false);
    vmx_write_segment_descriptor(cpu_state, &seg, R_ES);

    hvf_set_segment(cpu_state, &seg, &env->segs[R_SS], false);
    vmx_write_segment_descriptor(cpu_state, &seg, R_SS);

    hvf_set_segment(cpu_state, &seg, &env->segs[R_FS], false);
    vmx_write_segment_descriptor(cpu_state, &seg, R_FS);

    hvf_set_segment(cpu_state, &seg, &env->segs[R_GS], false);
    vmx_write_segment_descriptor(cpu_state, &seg, R_GS);

    hvf_set_segment(cpu_state, &seg, &env->tr, true);
    vmx_write_segment_descriptor(cpu_state, &seg, R_TR);

    hvf_set_segment(cpu_state, &seg, &env->ldt, false);
    vmx_write_segment_descriptor(cpu_state, &seg, R_LDTR);
}

/* Write the sysenter/syscall and FS/GS base MSRs to the vCPU. */
void hvf_put_msrs(CPUState *cpu_state)
{
    CPUX86State *env = &X86_CPU(cpu_state)->env;

    hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_IA32_SYSENTER_CS,
                      env->sysenter_cs);
    hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_IA32_SYSENTER_ESP,
                      env->sysenter_esp);
    hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_IA32_SYSENTER_EIP,
                      env->sysenter_eip);

    hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_STAR, env->star);

#ifdef TARGET_X86_64
    hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_CSTAR, env->cstar);
    hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_KERNELGSBASE, env->kernelgsbase);
    hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_FMASK, env->fmask);
    hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_LSTAR, env->lstar);
#endif

    hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_GSBASE, env->segs[R_GS].base);
    hv_vcpu_write_msr(cpu_state->hvf->fd, MSR_FSBASE, env->segs[R_FS].base);
}

/* Read the FPU/XSAVE state back from the HVF vCPU. */
void hvf_get_xsave(CPUState *cpu_state)
{
    void *xsave = X86_CPU(cpu_state)->env.xsave_buf;
    uint32_t xsave_len = X86_CPU(cpu_state)->env.xsave_buf_len;

    if (hv_vcpu_read_fpstate(cpu_state->hvf->fd, xsave, xsave_len)) {
        abort();
    }

    x86_cpu_xrstor_all_areas(X86_CPU(cpu_state), xsave, xsave_len);
}

/* Read descriptor tables, control registers and segment registers
 * back from the VMCS. */
static void hvf_get_segments(CPUState *cpu_state)
{
    CPUX86State *env = &X86_CPU(cpu_state)->env;

    struct vmx_segment seg;

    env->interrupt_injected = -1;

    vmx_read_segment_descriptor(cpu_state, &seg, R_CS);
    hvf_get_segment(&env->segs[R_CS], &seg);

    vmx_read_segment_descriptor(cpu_state, &seg, R_DS);
    hvf_get_segment(&env->segs[R_DS], &seg);

    vmx_read_segment_descriptor(cpu_state, &seg, R_ES);
    hvf_get_segment(&env->segs[R_ES], &seg);

    vmx_read_segment_descriptor(cpu_state, &seg, R_FS);
    hvf_get_segment(&env->segs[R_FS], &seg);

    vmx_read_segment_descriptor(cpu_state, &seg, R_GS);
    hvf_get_segment(&env->segs[R_GS], &seg);

    vmx_read_segment_descriptor(cpu_state, &seg, R_SS);
    hvf_get_segment(&env->segs[R_SS], &seg);

    vmx_read_segment_descriptor(cpu_state, &seg, R_TR);
    hvf_get_segment(&env->tr, &seg);

    vmx_read_segment_descriptor(cpu_state, &seg, R_LDTR);
    hvf_get_segment(&env->ldt, &seg);

    env->idt.limit = rvmcs(cpu_state->hvf->fd, VMCS_GUEST_IDTR_LIMIT);
    env->idt.base = rvmcs(cpu_state->hvf->fd, VMCS_GUEST_IDTR_BASE);
    env->gdt.limit = rvmcs(cpu_state->hvf->fd, VMCS_GUEST_GDTR_LIMIT);
    env->gdt.base = rvmcs(cpu_state->hvf->fd, VMCS_GUEST_GDTR_BASE);

    env->cr[0] = rvmcs(cpu_state->hvf->fd, VMCS_GUEST_CR0);
    env->cr[2] = 0;
    env->cr[3] = rvmcs(cpu_state->hvf->fd, VMCS_GUEST_CR3);
    env->cr[4] = rvmcs(cpu_state->hvf->fd, VMCS_GUEST_CR4);

    env->efer = rvmcs(cpu_state->hvf->fd, VMCS_GUEST_IA32_EFER);
}

/* Read the sysenter/syscall MSRs and the guest TSC back from the vCPU. */
void hvf_get_msrs(CPUState *cpu_state)
{
    CPUX86State *env = &X86_CPU(cpu_state)->env;
    uint64_t tmp;

    hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_IA32_SYSENTER_CS, &tmp);
    env->sysenter_cs = tmp;

    hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_IA32_SYSENTER_ESP, &tmp);
    env->sysenter_esp = tmp;

    hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_IA32_SYSENTER_EIP, &tmp);
    env->sysenter_eip = tmp;

    hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_STAR, &env->star);

#ifdef TARGET_X86_64
    hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_CSTAR, &env->cstar);
    hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_KERNELGSBASE, &env->kernelgsbase);
    hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_FMASK, &env->fmask);
    hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_LSTAR, &env->lstar);
#endif

    hv_vcpu_read_msr(cpu_state->hvf->fd, MSR_IA32_APICBASE, &tmp);

    env->tsc = rdtscp() + rvmcs(cpu_state->hvf->fd, VMCS_TSC_OFFSET);
}

/* Copy the complete QEMU-side CPU state into the HVF vCPU. */
int hvf_put_registers(CPUState *cpu_state)
{
    X86CPU *x86cpu = X86_CPU(cpu_state);
    CPUX86State *env = &x86cpu->env;

    wreg(cpu_state->hvf->fd, HV_X86_RAX, env->regs[R_EAX]);
    wreg(cpu_state->hvf->fd, HV_X86_RBX, env->regs[R_EBX]);
    wreg(cpu_state->hvf->fd, HV_X86_RCX, env->regs[R_ECX]);
    wreg(cpu_state->hvf->fd, HV_X86_RDX, env->regs[R_EDX]);
    wreg(cpu_state->hvf->fd, HV_X86_RBP, env->regs[R_EBP]);
    wreg(cpu_state->hvf->fd, HV_X86_RSP, env->regs[R_ESP]);
    wreg(cpu_state->hvf->fd, HV_X86_RSI, env->regs[R_ESI]);
    wreg(cpu_state->hvf->fd, HV_X86_RDI, env->regs[R_EDI]);
    wreg(cpu_state->hvf->fd, HV_X86_R8, env->regs[8]);
    wreg(cpu_state->hvf->fd, HV_X86_R9, env->regs[9]);
    wreg(cpu_state->hvf->fd, HV_X86_R10, env->regs[10]);
    wreg(cpu_state->hvf->fd, HV_X86_R11, env->regs[11]);
    wreg(cpu_state->hvf->fd, HV_X86_R12, env->regs[12]);
    wreg(cpu_state->hvf->fd, HV_X86_R13, env->regs[13]);
    wreg(cpu_state->hvf->fd, HV_X86_R14, env->regs[14]);
    wreg(cpu_state->hvf->fd, HV_X86_R15, env->regs[15]);
    wreg(cpu_state->hvf->fd, HV_X86_RFLAGS, env->eflags);
    wreg(cpu_state->hvf->fd, HV_X86_RIP, env->eip);

    wreg(cpu_state->hvf->fd, HV_X86_XCR0, env->xcr0);

    hvf_put_xsave(cpu_state);

    hvf_put_segments(cpu_state);

    hvf_put_msrs(cpu_state);

    wreg(cpu_state->hvf->fd, HV_X86_DR0, env->dr[0]);
    wreg(cpu_state->hvf->fd, HV_X86_DR1, env->dr[1]);
    wreg(cpu_state->hvf->fd, HV_X86_DR2, env->dr[2]);
    wreg(cpu_state->hvf->fd, HV_X86_DR3, env->dr[3]);
    wreg(cpu_state->hvf->fd, HV_X86_DR4, env->dr[4]);
    wreg(cpu_state->hvf->fd, HV_X86_DR5, env->dr[5]);
    wreg(cpu_state->hvf->fd, HV_X86_DR6, env->dr[6]);
    wreg(cpu_state->hvf->fd, HV_X86_DR7, env->dr[7]);

    return 0;
}

/* Refresh the QEMU-side CPU state from the HVF vCPU. */
int hvf_get_registers(CPUState *cpu_state)
{
    X86CPU *x86cpu = X86_CPU(cpu_state);
    CPUX86State *env = &x86cpu->env;

    env->regs[R_EAX] = rreg(cpu_state->hvf->fd, HV_X86_RAX);
    env->regs[R_EBX] = rreg(cpu_state->hvf->fd, HV_X86_RBX);
    env->regs[R_ECX] = rreg(cpu_state->hvf->fd, HV_X86_RCX);
    env->regs[R_EDX] = rreg(cpu_state->hvf->fd, HV_X86_RDX);
    env->regs[R_EBP] = rreg(cpu_state->hvf->fd, HV_X86_RBP);
    env->regs[R_ESP] = rreg(cpu_state->hvf->fd, HV_X86_RSP);
    env->regs[R_ESI] = rreg(cpu_state->hvf->fd, HV_X86_RSI);
    env->regs[R_EDI] = rreg(cpu_state->hvf->fd, HV_X86_RDI);
    env->regs[8] = rreg(cpu_state->hvf->fd, HV_X86_R8);
    env->regs[9] = rreg(cpu_state->hvf->fd, HV_X86_R9);
    env->regs[10] = rreg(cpu_state->hvf->fd, HV_X86_R10);
    env->regs[11] = rreg(cpu_state->hvf->fd, HV_X86_R11);
    env->regs[12] = rreg(cpu_state->hvf->fd, HV_X86_R12);
    env->regs[13] = rreg(cpu_state->hvf->fd, HV_X86_R13);
    env->regs[14] = rreg(cpu_state->hvf->fd, HV_X86_R14);
    env->regs[15] = rreg(cpu_state->hvf->fd, HV_X86_R15);

    env->eflags = rreg(cpu_state->hvf->fd, HV_X86_RFLAGS);
    env->eip = rreg(cpu_state->hvf->fd, HV_X86_RIP);

    hvf_get_xsave(cpu_state);
    env->xcr0 = rreg(cpu_state->hvf->fd, HV_X86_XCR0);

    hvf_get_segments(cpu_state);
    hvf_get_msrs(cpu_state);

    env->dr[0] = rreg(cpu_state->hvf->fd, HV_X86_DR0);
    env->dr[1] = rreg(cpu_state->hvf->fd, HV_X86_DR1);
    env->dr[2] = rreg(cpu_state->hvf->fd, HV_X86_DR2);
    env->dr[3] = rreg(cpu_state->hvf->fd, HV_X86_DR3);
    env->dr[4] = rreg(cpu_state->hvf->fd, HV_X86_DR4);
    env->dr[5] = rreg(cpu_state->hvf->fd, HV_X86_DR5);
    env->dr[6] = rreg(cpu_state->hvf->fd, HV_X86_DR6);
    env->dr[7] = rreg(cpu_state->hvf->fd, HV_X86_DR7);

    x86_update_hflags(env);
    return 0;
}

/* Request a VM exit as soon as the guest can accept an external interrupt. */
static void vmx_set_int_window_exiting(CPUState *cpu)
{
    uint64_t val;
    val = rvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS);
    wvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS, val |
          VMCS_PRI_PROC_BASED_CTLS_INT_WINDOW_EXITING);
}

void vmx_clear_int_window_exiting(CPUState *cpu)
{
    uint64_t val;
    val = rvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS);
    wvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS, val &
          ~VMCS_PRI_PROC_BASED_CTLS_INT_WINDOW_EXITING);
}

/* Queue a pending interrupt, exception or NMI into the VM-entry
 * interruption-information field.  Returns true if an INIT or TPR
 * access request still has to be handled before entering the guest. */
bool hvf_inject_interrupts(CPUState *cpu_state)
{
    X86CPU *x86cpu = X86_CPU(cpu_state);
    CPUX86State *env = &x86cpu->env;

    uint8_t vector;
    uint64_t intr_type;
    bool have_event = true;
    if (env->interrupt_injected != -1) {
        vector = env->interrupt_injected;
        if (env->ins_len) {
            intr_type = VMCS_INTR_T_SWINTR;
        } else {
            intr_type = VMCS_INTR_T_HWINTR;
        }
    } else if (env->exception_nr != -1) {
        vector = env->exception_nr;
        if (vector == EXCP03_INT3 || vector == EXCP04_INTO) {
            intr_type = VMCS_INTR_T_SWEXCEPTION;
        } else {
            intr_type = VMCS_INTR_T_HWEXCEPTION;
        }
    } else if (env->nmi_injected) {
        vector = EXCP02_NMI;
        intr_type = VMCS_INTR_T_NMI;
    } else {
        have_event = false;
    }

    uint64_t info = 0;
    if (have_event) {
        info = vector | intr_type | VMCS_INTR_VALID;
        uint64_t reason = rvmcs(cpu_state->hvf->fd, VMCS_EXIT_REASON);
        if (env->nmi_injected && reason != EXIT_REASON_TASK_SWITCH) {
            vmx_clear_nmi_blocking(cpu_state);
        }

        if (!(env->hflags2 & HF2_NMI_MASK) || intr_type != VMCS_INTR_T_NMI) {
            info &= ~(1 << 12); /* clear undefined bit */
            if (intr_type == VMCS_INTR_T_SWINTR ||
                intr_type == VMCS_INTR_T_SWEXCEPTION) {
                wvmcs(cpu_state->hvf->fd, VMCS_ENTRY_INST_LENGTH, env->ins_len);
            }

            if (env->has_error_code) {
                wvmcs(cpu_state->hvf->fd, VMCS_ENTRY_EXCEPTION_ERROR,
                      env->error_code);
                /* Indicate that VMCS_ENTRY_EXCEPTION_ERROR is valid */
                info |= VMCS_INTR_DEL_ERRCODE;
            }
            /*printf("reinject %lx err %d\n", info, err);*/
            wvmcs(cpu_state->hvf->fd, VMCS_ENTRY_INTR_INFO, info);
        }
    }

    if (cpu_state->interrupt_request & CPU_INTERRUPT_NMI) {
        if (!(env->hflags2 & HF2_NMI_MASK) && !(info & VMCS_INTR_VALID)) {
            cpu_state->interrupt_request &= ~CPU_INTERRUPT_NMI;
            info = VMCS_INTR_VALID | VMCS_INTR_T_NMI | EXCP02_NMI;
            wvmcs(cpu_state->hvf->fd, VMCS_ENTRY_INTR_INFO, info);
        } else {
            vmx_set_nmi_window_exiting(cpu_state);
        }
    }

    if (!(env->hflags & HF_INHIBIT_IRQ_MASK) &&
        (cpu_state->interrupt_request & CPU_INTERRUPT_HARD) &&
        (env->eflags & IF_MASK) && !(info & VMCS_INTR_VALID)) {
        int line = cpu_get_pic_interrupt(&x86cpu->env);
        cpu_state->interrupt_request &= ~CPU_INTERRUPT_HARD;
        if (line >= 0) {
            wvmcs(cpu_state->hvf->fd, VMCS_ENTRY_INTR_INFO, line |
                  VMCS_INTR_VALID | VMCS_INTR_T_HWINTR);
        }
    }
    if (cpu_state->interrupt_request & CPU_INTERRUPT_HARD) {
        vmx_set_int_window_exiting(cpu_state);
    }
    return (cpu_state->interrupt_request
            & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR));
}

/* Handle pending INIT, SIPI, POLL and TPR-access requests before the vCPU
 * re-enters the guest; returns the vCPU's halted state. */
int hvf_process_events(CPUState *cpu_state)
{
    X86CPU *cpu = X86_CPU(cpu_state);
    CPUX86State *env = &cpu->env;

    if (!cpu_state->vcpu_dirty) {
        /* light weight sync for CPU_INTERRUPT_HARD and IF_MASK */
        env->eflags = rreg(cpu_state->hvf->fd, HV_X86_RFLAGS);
    }

    if (cpu_state->interrupt_request & CPU_INTERRUPT_INIT) {
        cpu_synchronize_state(cpu_state);
        do_cpu_init(cpu);
    }

    if (cpu_state->interrupt_request & CPU_INTERRUPT_POLL) {
        cpu_state->interrupt_request &= ~CPU_INTERRUPT_POLL;
        apic_poll_irq(cpu->apic_state);
    }
    if (((cpu_state->interrupt_request & CPU_INTERRUPT_HARD) &&
         (env->eflags & IF_MASK)) ||
        (cpu_state->interrupt_request & CPU_INTERRUPT_NMI)) {
        cpu_state->halted = 0;
    }
    if (cpu_state->interrupt_request & CPU_INTERRUPT_SIPI) {
        cpu_synchronize_state(cpu_state);
        do_cpu_sipi(cpu);
    }
    if (cpu_state->interrupt_request & CPU_INTERRUPT_TPR) {
        cpu_state->interrupt_request &= ~CPU_INTERRUPT_TPR;
        cpu_synchronize_state(cpu_state);
        apic_handle_tpr_access_report(cpu->apic_state, env->eip,
                                      env->tpr_access_type);
    }
    return cpu_state->halted;
}