Lines matching +full:cs +full:-1 (QEMU, target/i386/hvf/x86hvf.c)

/* Copyright (c) 2003-2008 Fabrice Bellard */
void hvf_set_segment(CPUState *cs, struct vmx_segment *vmx_seg,
                     SegmentCache *qseg, bool is_tr)
{
    vmx_seg->sel = qseg->selector;
    vmx_seg->base = qseg->base;
    vmx_seg->limit = qseg->limit;

    if (!qseg->selector && !x86_is_real(cs) && !is_tr) {
        /* A null selector in protected mode marks the segment unusable;
         * bit 16 of the VMX access-rights field is the "unusable" bit.
         * TR is exempt: it must stay usable even with a null selector. */
        vmx_seg->ar = 1 << 16;
        return;
    }

    vmx_seg->ar = (qseg->flags >> DESC_TYPE_SHIFT) & 0xf;
    vmx_seg->ar |= ((qseg->flags >> DESC_G_SHIFT) & 1) << 15;
    vmx_seg->ar |= ((qseg->flags >> DESC_B_SHIFT) & 1) << 14;
    vmx_seg->ar |= ((qseg->flags >> DESC_L_SHIFT) & 1) << 13;
    vmx_seg->ar |= ((qseg->flags >> DESC_AVL_SHIFT) & 1) << 12;
    vmx_seg->ar |= ((qseg->flags >> DESC_P_SHIFT) & 1) << 7;
    vmx_seg->ar |= ((qseg->flags >> DESC_DPL_SHIFT) & 3) << 5;
    vmx_seg->ar |= ((qseg->flags >> DESC_S_SHIFT) & 1) << 4;
}
void hvf_get_segment(SegmentCache *qseg, struct vmx_segment *vmx_seg)
{
    qseg->limit = vmx_seg->limit;
    qseg->base = vmx_seg->base;
    qseg->selector = vmx_seg->sel;
    qseg->flags = ((vmx_seg->ar & 0xf) << DESC_TYPE_SHIFT) |
                  (((vmx_seg->ar >> 4) & 1) << DESC_S_SHIFT) |
                  (((vmx_seg->ar >> 5) & 3) << DESC_DPL_SHIFT) |
                  (((vmx_seg->ar >> 7) & 1) << DESC_P_SHIFT) |
                  (((vmx_seg->ar >> 12) & 1) << DESC_AVL_SHIFT) |
                  (((vmx_seg->ar >> 13) & 1) << DESC_L_SHIFT) |
                  (((vmx_seg->ar >> 14) & 1) << DESC_B_SHIFT) |
                  (((vmx_seg->ar >> 15) & 1) << DESC_G_SHIFT);
}
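
The two helpers above round-trip QEMU's descriptor-flags layout through the VMX access-rights (AR) encoding. A minimal, self-contained sketch of the forward mapping, with the DESC_*_SHIFT values spelled out as in QEMU's target/i386/cpu.h (they mirror the high dword of an x86 segment descriptor); the 0xa09b result for a flat 64-bit code segment is computed here for illustration, not taken from the QEMU sources:

/* Hedged sketch: verify the AR encoding for a flat 64-bit code segment. */
#include <assert.h>
#include <stdint.h>

enum {
    DESC_TYPE_SHIFT = 8,  DESC_S_SHIFT = 12,   DESC_DPL_SHIFT = 13,
    DESC_P_SHIFT = 15,    DESC_AVL_SHIFT = 20, DESC_L_SHIFT = 21,
    DESC_B_SHIFT = 22,    DESC_G_SHIFT = 23,
};

static uint32_t flags_to_vmx_ar(uint32_t flags)
{
    uint32_t ar = (flags >> DESC_TYPE_SHIFT) & 0xf;
    ar |= ((flags >> DESC_G_SHIFT) & 1) << 15;
    ar |= ((flags >> DESC_B_SHIFT) & 1) << 14;
    ar |= ((flags >> DESC_L_SHIFT) & 1) << 13;
    ar |= ((flags >> DESC_AVL_SHIFT) & 1) << 12;
    ar |= ((flags >> DESC_P_SHIFT) & 1) << 7;
    ar |= ((flags >> DESC_DPL_SHIFT) & 3) << 5;
    ar |= ((flags >> DESC_S_SHIFT) & 1) << 4;
    return ar;
}

int main(void)
{
    /* type = 0xb (execute/read, accessed), S = 1, DPL = 0, P = 1, L = 1, G = 1 */
    uint32_t flags = (0xb << DESC_TYPE_SHIFT) | (1 << DESC_S_SHIFT) |
                     (1 << DESC_P_SHIFT) | (1 << DESC_L_SHIFT) |
                     (1 << DESC_G_SHIFT);
    assert(flags_to_vmx_ar(flags) == 0xa09b);
    return 0;
}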
void hvf_put_xsave(CPUState *cs)
{
    void *xsave = X86_CPU(cs)->env.xsave_buf;
    uint32_t xsave_len = X86_CPU(cs)->env.xsave_buf_len;

    x86_cpu_xsave_all_areas(X86_CPU(cs), xsave, xsave_len);

    if (hv_vcpu_write_fpstate(cs->accel->fd, xsave, xsave_len)) {
        abort();
    }
}
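
hv_vcpu_write_fpstate(), like the other hv_vcpu_* calls in this file, returns an hv_return_t where HV_SUCCESS is zero, so the bare if around it aborts on any failure. A hedged sketch of a checked wrapper (the helper name check_hv is illustrative; QEMU itself carries a similar assert_hvf_ok() helper):

#include <stdio.h>
#include <stdlib.h>
#include <Hypervisor/hv.h>

/* Illustrative: fail fast with the raw error code on any HVF failure. */
static void check_hv(hv_return_t ret, const char *what)
{
    if (ret != HV_SUCCESS) {
        fprintf(stderr, "HVF %s failed: 0x%x\n", what, (unsigned)ret);
        abort();
    }
}

/* usage: check_hv(hv_vcpu_write_fpstate(fd, xsave, xsave_len), "write_fpstate"); */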
static void hvf_put_segments(CPUState *cs)
{
    CPUX86State *env = &X86_CPU(cs)->env;
    struct vmx_segment seg;

    wvmcs(cs->accel->fd, VMCS_GUEST_IDTR_LIMIT, env->idt.limit);
    wvmcs(cs->accel->fd, VMCS_GUEST_IDTR_BASE, env->idt.base);

    wvmcs(cs->accel->fd, VMCS_GUEST_GDTR_LIMIT, env->gdt.limit);
    wvmcs(cs->accel->fd, VMCS_GUEST_GDTR_BASE, env->gdt.base);

    /* CR2 is not a VMCS guest-state field, hence the disabled line: */
    /* wvmcs(cs->accel->fd, VMCS_GUEST_CR2, env->cr[2]); */
    wvmcs(cs->accel->fd, VMCS_GUEST_CR3, env->cr[3]);
    vmx_update_tpr(cs);
    wvmcs(cs->accel->fd, VMCS_GUEST_IA32_EFER, env->efer);

    macvm_set_cr4(cs->accel->fd, env->cr[4]);
    macvm_set_cr0(cs->accel->fd, env->cr[0]);

    hvf_set_segment(cs, &seg, &env->segs[R_CS], false);
    vmx_write_segment_descriptor(cs, &seg, R_CS);

    hvf_set_segment(cs, &seg, &env->segs[R_DS], false);
    vmx_write_segment_descriptor(cs, &seg, R_DS);

    hvf_set_segment(cs, &seg, &env->segs[R_ES], false);
    vmx_write_segment_descriptor(cs, &seg, R_ES);

    hvf_set_segment(cs, &seg, &env->segs[R_SS], false);
    vmx_write_segment_descriptor(cs, &seg, R_SS);

    hvf_set_segment(cs, &seg, &env->segs[R_FS], false);
    vmx_write_segment_descriptor(cs, &seg, R_FS);

    hvf_set_segment(cs, &seg, &env->segs[R_GS], false);
    vmx_write_segment_descriptor(cs, &seg, R_GS);

    /* TR is the one segment that must stay usable with a null selector */
    hvf_set_segment(cs, &seg, &env->tr, true);
    vmx_write_segment_descriptor(cs, &seg, R_TR);

    hvf_set_segment(cs, &seg, &env->ldt, false);
    vmx_write_segment_descriptor(cs, &seg, R_LDTR);
}
void hvf_put_msrs(CPUState *cs)
{
    CPUX86State *env = &X86_CPU(cs)->env;

    hv_vcpu_write_msr(cs->accel->fd, MSR_IA32_SYSENTER_CS,
                      env->sysenter_cs);
    hv_vcpu_write_msr(cs->accel->fd, MSR_IA32_SYSENTER_ESP,
                      env->sysenter_esp);
    hv_vcpu_write_msr(cs->accel->fd, MSR_IA32_SYSENTER_EIP,
                      env->sysenter_eip);

    hv_vcpu_write_msr(cs->accel->fd, MSR_STAR, env->star);

#ifdef TARGET_X86_64
    hv_vcpu_write_msr(cs->accel->fd, MSR_CSTAR, env->cstar);
    hv_vcpu_write_msr(cs->accel->fd, MSR_KERNELGSBASE, env->kernelgsbase);
    hv_vcpu_write_msr(cs->accel->fd, MSR_FMASK, env->fmask);
    hv_vcpu_write_msr(cs->accel->fd, MSR_LSTAR, env->lstar);
#endif

    hv_vcpu_write_msr(cs->accel->fd, MSR_GSBASE, env->segs[R_GS].base);
    hv_vcpu_write_msr(cs->accel->fd, MSR_FSBASE, env->segs[R_FS].base);
}
void hvf_get_xsave(CPUState *cs)
{
    void *xsave = X86_CPU(cs)->env.xsave_buf;
    uint32_t xsave_len = X86_CPU(cs)->env.xsave_buf_len;

    if (hv_vcpu_read_fpstate(cs->accel->fd, xsave, xsave_len)) {
        abort();
    }

    x86_cpu_xrstor_all_areas(X86_CPU(cs), xsave, xsave_len);
}
static void hvf_get_segments(CPUState *cs)
{
    CPUX86State *env = &X86_CPU(cs)->env;
    struct vmx_segment seg;

    env->interrupt_injected = -1;

    vmx_read_segment_descriptor(cs, &seg, R_CS);
    hvf_get_segment(&env->segs[R_CS], &seg);

    vmx_read_segment_descriptor(cs, &seg, R_DS);
    hvf_get_segment(&env->segs[R_DS], &seg);

    vmx_read_segment_descriptor(cs, &seg, R_ES);
    hvf_get_segment(&env->segs[R_ES], &seg);

    vmx_read_segment_descriptor(cs, &seg, R_FS);
    hvf_get_segment(&env->segs[R_FS], &seg);

    vmx_read_segment_descriptor(cs, &seg, R_GS);
    hvf_get_segment(&env->segs[R_GS], &seg);

    vmx_read_segment_descriptor(cs, &seg, R_SS);
    hvf_get_segment(&env->segs[R_SS], &seg);

    vmx_read_segment_descriptor(cs, &seg, R_TR);
    hvf_get_segment(&env->tr, &seg);

    vmx_read_segment_descriptor(cs, &seg, R_LDTR);
    hvf_get_segment(&env->ldt, &seg);

    env->idt.limit = rvmcs(cs->accel->fd, VMCS_GUEST_IDTR_LIMIT);
    env->idt.base = rvmcs(cs->accel->fd, VMCS_GUEST_IDTR_BASE);
    env->gdt.limit = rvmcs(cs->accel->fd, VMCS_GUEST_GDTR_LIMIT);
    env->gdt.base = rvmcs(cs->accel->fd, VMCS_GUEST_GDTR_BASE);

    env->cr[0] = rvmcs(cs->accel->fd, VMCS_GUEST_CR0);
    env->cr[2] = 0; /* CR2 is not in the VMCS; see hvf_put_segments() */
    env->cr[3] = rvmcs(cs->accel->fd, VMCS_GUEST_CR3);
    env->cr[4] = rvmcs(cs->accel->fd, VMCS_GUEST_CR4);

    env->efer = rvmcs(cs->accel->fd, VMCS_GUEST_IA32_EFER);
}
void hvf_get_msrs(CPUState *cs)
{
    CPUX86State *env = &X86_CPU(cs)->env;
    uint64_t tmp;

    hv_vcpu_read_msr(cs->accel->fd, MSR_IA32_SYSENTER_CS, &tmp);
    env->sysenter_cs = tmp;

    hv_vcpu_read_msr(cs->accel->fd, MSR_IA32_SYSENTER_ESP, &tmp);
    env->sysenter_esp = tmp;

    hv_vcpu_read_msr(cs->accel->fd, MSR_IA32_SYSENTER_EIP, &tmp);
    env->sysenter_eip = tmp;

    hv_vcpu_read_msr(cs->accel->fd, MSR_STAR, &env->star);

#ifdef TARGET_X86_64
    hv_vcpu_read_msr(cs->accel->fd, MSR_CSTAR, &env->cstar);
    hv_vcpu_read_msr(cs->accel->fd, MSR_KERNELGSBASE, &env->kernelgsbase);
    hv_vcpu_read_msr(cs->accel->fd, MSR_FMASK, &env->fmask);
    hv_vcpu_read_msr(cs->accel->fd, MSR_LSTAR, &env->lstar);
#endif

    hv_vcpu_read_msr(cs->accel->fd, MSR_IA32_APICBASE, &tmp); /* read, unused here */

    env->tsc = rdtscp() + rvmcs(cs->accel->fd, VMCS_TSC_OFFSET);
}
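
The final line reconstructs the guest's TSC from the host side. Under VMX with TSC offsetting enabled, what the guest reads via RDTSC is the host TSC plus the signed offset stored in the VMCS, so (restated with the names used above; a hedged summary, not additional QEMU code):

/*   guest_tsc = host_tsc (rdtscp()) + (int64_t)rvmcs(fd, VMCS_TSC_OFFSET)   */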
int hvf_put_registers(CPUState *cs)
{
    X86CPU *x86cpu = X86_CPU(cs);
    CPUX86State *env = &x86cpu->env;

    wreg(cs->accel->fd, HV_X86_RAX, env->regs[R_EAX]);
    wreg(cs->accel->fd, HV_X86_RBX, env->regs[R_EBX]);
    wreg(cs->accel->fd, HV_X86_RCX, env->regs[R_ECX]);
    wreg(cs->accel->fd, HV_X86_RDX, env->regs[R_EDX]);
    wreg(cs->accel->fd, HV_X86_RBP, env->regs[R_EBP]);
    wreg(cs->accel->fd, HV_X86_RSP, env->regs[R_ESP]);
    wreg(cs->accel->fd, HV_X86_RSI, env->regs[R_ESI]);
    wreg(cs->accel->fd, HV_X86_RDI, env->regs[R_EDI]);
    wreg(cs->accel->fd, HV_X86_R8, env->regs[8]);
    wreg(cs->accel->fd, HV_X86_R9, env->regs[9]);
    wreg(cs->accel->fd, HV_X86_R10, env->regs[10]);
    wreg(cs->accel->fd, HV_X86_R11, env->regs[11]);
    wreg(cs->accel->fd, HV_X86_R12, env->regs[12]);
    wreg(cs->accel->fd, HV_X86_R13, env->regs[13]);
    wreg(cs->accel->fd, HV_X86_R14, env->regs[14]);
    wreg(cs->accel->fd, HV_X86_R15, env->regs[15]);
    wreg(cs->accel->fd, HV_X86_RFLAGS, env->eflags);
    wreg(cs->accel->fd, HV_X86_RIP, env->eip);

    wreg(cs->accel->fd, HV_X86_XCR0, env->xcr0);

    hvf_put_xsave(cs);

    hvf_put_segments(cs);

    hvf_put_msrs(cs);

    wreg(cs->accel->fd, HV_X86_DR0, env->dr[0]);
    wreg(cs->accel->fd, HV_X86_DR1, env->dr[1]);
    wreg(cs->accel->fd, HV_X86_DR2, env->dr[2]);
    wreg(cs->accel->fd, HV_X86_DR3, env->dr[3]);
    wreg(cs->accel->fd, HV_X86_DR4, env->dr[4]);
    wreg(cs->accel->fd, HV_X86_DR5, env->dr[5]);
    wreg(cs->accel->fd, HV_X86_DR6, env->dr[6]);
    wreg(cs->accel->fd, HV_X86_DR7, env->dr[7]);

    return 0;
}
int hvf_get_registers(CPUState *cs)
{
    X86CPU *x86cpu = X86_CPU(cs);
    CPUX86State *env = &x86cpu->env;

    env->regs[R_EAX] = rreg(cs->accel->fd, HV_X86_RAX);
    env->regs[R_EBX] = rreg(cs->accel->fd, HV_X86_RBX);
    env->regs[R_ECX] = rreg(cs->accel->fd, HV_X86_RCX);
    env->regs[R_EDX] = rreg(cs->accel->fd, HV_X86_RDX);
    env->regs[R_EBP] = rreg(cs->accel->fd, HV_X86_RBP);
    env->regs[R_ESP] = rreg(cs->accel->fd, HV_X86_RSP);
    env->regs[R_ESI] = rreg(cs->accel->fd, HV_X86_RSI);
    env->regs[R_EDI] = rreg(cs->accel->fd, HV_X86_RDI);
    env->regs[8] = rreg(cs->accel->fd, HV_X86_R8);
    env->regs[9] = rreg(cs->accel->fd, HV_X86_R9);
    env->regs[10] = rreg(cs->accel->fd, HV_X86_R10);
    env->regs[11] = rreg(cs->accel->fd, HV_X86_R11);
    env->regs[12] = rreg(cs->accel->fd, HV_X86_R12);
    env->regs[13] = rreg(cs->accel->fd, HV_X86_R13);
    env->regs[14] = rreg(cs->accel->fd, HV_X86_R14);
    env->regs[15] = rreg(cs->accel->fd, HV_X86_R15);

    env->eflags = rreg(cs->accel->fd, HV_X86_RFLAGS);
    env->eip = rreg(cs->accel->fd, HV_X86_RIP);

    hvf_get_xsave(cs);
    env->xcr0 = rreg(cs->accel->fd, HV_X86_XCR0);

    hvf_get_segments(cs);
    hvf_get_msrs(cs);

    env->dr[0] = rreg(cs->accel->fd, HV_X86_DR0);
    env->dr[1] = rreg(cs->accel->fd, HV_X86_DR1);
    env->dr[2] = rreg(cs->accel->fd, HV_X86_DR2);
    env->dr[3] = rreg(cs->accel->fd, HV_X86_DR3);
    env->dr[4] = rreg(cs->accel->fd, HV_X86_DR4);
    env->dr[5] = rreg(cs->accel->fd, HV_X86_DR5);
    env->dr[6] = rreg(cs->accel->fd, HV_X86_DR6);
    env->dr[7] = rreg(cs->accel->fd, HV_X86_DR7);

    x86_update_hflags(env);
    return 0;
}
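
hvf_get_registers() and hvf_put_registers() are the two halves of QEMU's lazy register synchronization: state is pulled from the vCPU only when something needs to inspect it, marked dirty, and pushed back before the next VM entry. A hedged sketch of the calling pattern (the wrapper names are illustrative; only the two hvf_*_registers functions and cs->accel->dirty come from the code above):

/* Sketch of the lazy-sync pattern around these two functions. */
static void sync_from_vcpu(CPUState *cs)   /* e.g. before inspecting state */
{
    if (!cs->accel->dirty) {
        hvf_get_registers(cs);   /* vCPU -> CPUX86State */
        cs->accel->dirty = true; /* QEMU's copy is now authoritative */
    }
}

static void sync_to_vcpu(CPUState *cs)     /* e.g. right before VM entry */
{
    if (cs->accel->dirty) {
        hvf_put_registers(cs);   /* CPUX86State -> vCPU */
        cs->accel->dirty = false;
    }
}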
static void vmx_set_int_window_exiting(CPUState *cs)
{
    uint64_t val;
    val = rvmcs(cs->accel->fd, VMCS_PRI_PROC_BASED_CTLS);
    wvmcs(cs->accel->fd, VMCS_PRI_PROC_BASED_CTLS, val |
          VMCS_PRI_PROC_BASED_CTLS_INT_WINDOW_EXITING);
}
void vmx_clear_int_window_exiting(CPUState *cs)
{
    uint64_t val;
    val = rvmcs(cs->accel->fd, VMCS_PRI_PROC_BASED_CTLS);
    wvmcs(cs->accel->fd, VMCS_PRI_PROC_BASED_CTLS, val &
          ~VMCS_PRI_PROC_BASED_CTLS_INT_WINDOW_EXITING);
}
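
Both helpers toggle the interrupt-window exiting bit in the primary processor-based execution controls: while it is set, the CPU forces a VM exit as soon as the guest becomes interruptible (RFLAGS.IF set and no interrupt shadow). Typical use, as in hvf_inject_interrupts() below:

/* Pending IRQ that cannot be delivered now? Ask for an exit the
 * moment the guest can take it, then retry injection on that exit. */
if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
    vmx_set_int_window_exiting(cs);
}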
bool hvf_inject_interrupts(CPUState *cs)
{
    X86CPU *x86cpu = X86_CPU(cs);
    CPUX86State *env = &x86cpu->env;

    uint8_t vector;
    uint64_t intr_type;
    bool have_event = true;

    if (env->interrupt_injected != -1) {
        vector = env->interrupt_injected;
        /* software interrupts (INT n) carry an instruction length */
        intr_type = env->ins_len ? VMCS_INTR_T_SWINTR : VMCS_INTR_T_HWINTR;
    } else if (env->exception_nr != -1) {
        vector = env->exception_nr;
        intr_type = (vector == EXCP03_INT3 || vector == EXCP04_INTO) ?
                    VMCS_INTR_T_SWEXCEPTION : VMCS_INTR_T_HWEXCEPTION;
    } else if (env->nmi_injected) {
        vector = EXCP02_NMI;
        intr_type = VMCS_INTR_T_NMI;
    } else {
        have_event = false;
    }

    uint64_t info = 0;
    if (have_event) {
        info = vector | intr_type | VMCS_INTR_VALID;
        uint64_t reason = rvmcs(cs->accel->fd, VMCS_EXIT_REASON);
        if (env->nmi_injected && reason != EXIT_REASON_TASK_SWITCH) {
            vmx_clear_nmi_blocking(cs);
        }
        if (!(env->hflags2 & HF2_NMI_MASK) || intr_type != VMCS_INTR_T_NMI) {
            info &= ~(1 << 12); /* clear undefined bit */
            if (intr_type == VMCS_INTR_T_SWINTR ||
                intr_type == VMCS_INTR_T_SWEXCEPTION) {
                wvmcs(cs->accel->fd, VMCS_ENTRY_INST_LENGTH, env->ins_len);
            }
            if (env->has_error_code) {
                wvmcs(cs->accel->fd, VMCS_ENTRY_EXCEPTION_ERROR,
                      env->error_code);
                info |= VMCS_INTR_DEL_ERRCODE;
            }
            wvmcs(cs->accel->fd, VMCS_ENTRY_INTR_INFO, info);
        }
    }

    if (cs->interrupt_request & CPU_INTERRUPT_NMI) {
        if (!(env->hflags2 & HF2_NMI_MASK) && !(info & VMCS_INTR_VALID)) {
            cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
            info = VMCS_INTR_VALID | VMCS_INTR_T_NMI | EXCP02_NMI;
            wvmcs(cs->accel->fd, VMCS_ENTRY_INTR_INFO, info);
        } else {
            vmx_set_nmi_window_exiting(cs);
        }
    }

    if (!(env->hflags & HF_INHIBIT_IRQ_MASK) &&
        (cs->interrupt_request & CPU_INTERRUPT_HARD) &&
        (env->eflags & IF_MASK) && !(info & VMCS_INTR_VALID)) {
        int line = cpu_get_pic_interrupt(env);
        cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
        if (line >= 0) {
            wvmcs(cs->accel->fd, VMCS_ENTRY_INTR_INFO, line |
                  VMCS_INTR_VALID | VMCS_INTR_T_HWINTR);
        }
    }
    if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
        vmx_set_int_window_exiting(cs);
    }
    return (cs->interrupt_request
            & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR));
}
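
The info word written to VMCS_ENTRY_INTR_INFO follows the Intel SDM layout for the VM-entry interruption-information field; a hedged summary (the macro names are illustrative, QEMU's actual VMCS_INTR_* constants live in its vmcs.h):

/* VM-entry interruption information (Intel SDM vol. 3):
 *   bits  7:0  vector
 *   bits 10:8  type (0 = external interrupt, 2 = NMI, 3 = HW exception,
 *              4 = SW interrupt, 6 = SW exception)
 *   bit  11    deliver error code on the guest stack
 *   bit  12    reserved, must be 0 on entry (the "undefined bit"
 *              cleared above)
 *   bit  31    valid
 */
#define INTR_INFO_VECTOR(v)   ((uint32_t)(v) & 0xff)
#define INTR_INFO_TYPE(t)     (((uint32_t)(t) & 0x7) << 8)
#define INTR_INFO_DEL_ERRCODE (1u << 11)
#define INTR_INFO_VALID       (1u << 31)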
int hvf_process_events(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    if (!cs->accel->dirty) {
        /* light-weight sync: only EFLAGS is needed for the checks below */
        env->eflags = rreg(cs->accel->fd, HV_X86_RFLAGS);
    }

    if (cs->interrupt_request & CPU_INTERRUPT_INIT) {
        cpu_synchronize_state(cs);
        do_cpu_init(cpu);
    }

    if (cs->interrupt_request & CPU_INTERRUPT_POLL) {
        cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
        apic_poll_irq(cpu->apic_state);
    }
    if (((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
         (env->eflags & IF_MASK)) ||
        (cs->interrupt_request & CPU_INTERRUPT_NMI)) {
        cs->halted = 0;
    }
    if (cs->interrupt_request & CPU_INTERRUPT_SIPI) {
        cpu_synchronize_state(cs);
        do_cpu_sipi(cpu);
    }
    if (cs->interrupt_request & CPU_INTERRUPT_TPR) {
        cs->interrupt_request &= ~CPU_INTERRUPT_TPR;
        cpu_synchronize_state(cs);
        apic_handle_tpr_access_report(cpu->apic_state, env->eip,
                                      env->tpr_access_type);
    }
    return cs->halted;
}
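
hvf_process_events() runs with the vCPU stopped and reports whether it is halted. In QEMU's run loop it is consulted before each entry, roughly in the order below (a hedged, simplified sketch; QEMU's real entry point, hvf_vcpu_exec(), adds exit decoding and error handling that is omitted here):

/* Simplified per-iteration flow of an HVF vCPU (illustrative). */
for (;;) {
    if (hvf_process_events(cs)) {
        break;                      /* vCPU halted: wait for an event */
    }
    if (cs->accel->dirty) {
        hvf_put_registers(cs);      /* push QEMU state into the vCPU */
        cs->accel->dirty = false;
    }
    hvf_inject_interrupts(cs);      /* queue pending NMI/IRQ, arm windows */
    hv_vcpu_run(cs->accel->fd);     /* enter the guest until the next exit */
    /* ... decode rvmcs(fd, VMCS_EXIT_REASON) and handle the exit ... */
}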