 * See the COPYING file in the top-level directory.

#include "exec/address-spaces.h"
#include "qemu/main-loop.h"
#include "qemu/error-report.h"
#include "qapi/qapi-types-common.h"
#include "qapi/qapi-visit-common.h"
#include "whpx-internal.h"
#include "whpx-accel-ops.h"
/*
 * (Fragments of the comment on single-stepping and guest-level debugging:)
 *
 * ... that will continue single-stepping.
 * ... stepping. INT1 events generated by it would be intercepted by us, ...
 * 1. Stepping through flags-modifying instructions may cause gdb to ...
 * ... with the guest-level debuggers.
 * ... defined in the "Combined Volume Set of Intel 64 and IA-32 ...
 * ... virtual 8086 mode, and differences between 64-bit and 32-bit modes.
 * 3. In order to properly support guest-level debugging in parallel with
 *    the QEMU-level debugging, we would need to be able to pass some INT1 ...
 */
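/*
 * The comment fragments above describe single-stepping in terms of INT1/#DB
 * traps. As a minimal illustration only -- an assumption, not the code this
 * listing excerpts -- arming one step over the WHP API amounts to setting the
 * trap flag (TF, bit 8) in the vCPU's RFLAGS; arm_trap_flag() below is a
 * made-up helper name.
 */
#include <windows.h>
#include <WinHvPlatform.h>

static HRESULT arm_trap_flag(WHV_PARTITION_HANDLE partition, UINT32 vp_index)
{
    WHV_REGISTER_NAME name = WHvX64RegisterRflags;
    WHV_REGISTER_VALUE value;
    HRESULT hr;

    /* Read the current RFLAGS, set TF, and write it back. */
    hr = WHvGetVirtualProcessorRegisters(partition, vp_index, &name, 1, &value);
    if (FAILED(hr)) {
        return hr;
    }
    value.Reg64 |= 0x100;   /* RFLAGS.TF: trap after the next instruction */
    return WHvSetVirtualProcessorRegisters(partition, vp_index, &name, 1, &value);
}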
/* whpx_seg_q2h() */
    unsigned flags = qs->flags;
    hs.Base = qs->base;
    hs.Limit = qs->limit;
    hs.Selector = qs->selector;

/* whpx_seg_h2q() */
    qs.base = hs->Base;
    qs.limit = hs->Limit;
    qs.selector = hs->Selector;
    qs.flags = ((uint32_t)hs->Attributes) << DESC_TYPE_SHIFT;

/* whpx_set_xcrs() */
    xcr0.Reg64 = cpu_env(cpu)->xcr0;
        whpx->partition, cpu->cpu_index, &xcr0_name, 1, &xcr0);

/* whpx_set_tsc() */
    hr = whp_dispatch.WHvSuspendPartitionTime(whpx->partition);
    tsc_val.Reg64 = cpu_env(cpu)->tsc;
        whpx->partition, cpu->cpu_index, &tsc_reg, 1, &tsc_val);
        return -1;
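/*
 * Sketch (an assumption, not the excerpted code): whpx_set_tsc() pairs the
 * TSC write with partition-time control so that the reference TSC stays
 * consistent across vCPUs. set_vcpu_tsc() below is a made-up helper; it
 * assumes the same <WinHvPlatform.h> declarations as the sketch above.
 */
static HRESULT set_vcpu_tsc(WHV_PARTITION_HANDLE partition, UINT32 vp_index,
                            UINT64 tsc)
{
    WHV_REGISTER_NAME name = WHvX64RegisterTsc;
    WHV_REGISTER_VALUE value;
    HRESULT hr;

    /* Stop partition time; ignore failure on platforms without support. */
    (void)WHvSuspendPartitionTime(partition);

    value.Reg64 = tsc;
    hr = WHvSetVirtualProcessorRegisters(partition, vp_index, &name, 1, &value);

    (void)WHvResumePartitionTime(partition);
    return hr;
}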
/* whpx_set_registers() */
    /* ... and IA-32 Architectures Software Developer's Manual. */
    AccelCPUState *vcpu = cpu->accel;
    CPUX86State *env = &x86_cpu->env;
    v86 = (env->eflags & VM_MASK);
    r86 = !(env->cr[0] & CR0_PE_MASK);
    vcpu->tpr = whpx_apic_tpr_to_cr8(cpu_get_apic_tpr(x86_cpu->apic_state));
    vcpu->apic_base = cpu_get_apic_base(x86_cpu->apic_state);
    vcxt.values[idx].Reg64 = (uint64_t)env->regs[idx];
    vcxt.values[idx++].Reg64 = env->eip;
    vcxt.values[idx++].Reg64 = env->eflags;
    vcxt.values[idx].Segment = whpx_seg_q2h(&env->segs[i], v86, r86);
    vcxt.values[idx++].Segment = whpx_seg_q2h(&env->ldt, 0, 0);
    vcxt.values[idx++].Segment = whpx_seg_q2h(&env->tr, 0, 0);
    vcxt.values[idx].Table.Base = env->idt.base;
    vcxt.values[idx].Table.Limit = env->idt.limit;
    vcxt.values[idx].Table.Base = env->gdt.base;
    vcxt.values[idx].Table.Limit = env->gdt.limit;
    vcxt.values[idx++].Reg64 = env->cr[0];
    vcxt.values[idx++].Reg64 = env->cr[2];
    vcxt.values[idx++].Reg64 = env->cr[3];
    vcxt.values[idx++].Reg64 = env->cr[4];
    vcxt.values[idx++].Reg64 = vcpu->tpr;
    /* 8 Debug Registers - Skipped */
    for (i = 0; i < sizeof(env->xmm_regs) / sizeof(ZMMReg); i += 1, idx += 1) {
        vcxt.values[idx].Reg128.Low64 = env->xmm_regs[i].ZMM_Q(0);
        vcxt.values[idx].Reg128.High64 = env->xmm_regs[i].ZMM_Q(1);
    vcxt.values[idx].Fp.AsUINT128.Low64 = env->fpregs[i].mmx.MMX_Q(0);
        env->fpregs[i].mmx.MMX_Q(1);
    vcxt.values[idx].FpControlStatus.FpControl = env->fpuc;
        (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    vcxt.values[idx].FpControlStatus.FpTag |= (!env->fptags[i]) << i;
    vcxt.values[idx].FpControlStatus.LastFpOp = env->fpop;
    vcxt.values[idx].FpControlStatus.LastFpRip = env->fpip;
    vcxt.values[idx].XmmControlStatus.XmmStatusControl = env->mxcsr;
    vcxt.values[idx++].Reg64 = env->efer;
    vcxt.values[idx++].Reg64 = env->kernelgsbase;
    vcxt.values[idx++].Reg64 = vcpu->apic_base;
    /* WHvX64RegisterPat - Skipped */
    vcxt.values[idx++].Reg64 = env->sysenter_cs;
    vcxt.values[idx++].Reg64 = env->sysenter_eip;
    vcxt.values[idx++].Reg64 = env->sysenter_esp;
    vcxt.values[idx++].Reg64 = env->star;
    vcxt.values[idx++].Reg64 = env->lstar;
    vcxt.values[idx++].Reg64 = env->cstar;
    vcxt.values[idx++].Reg64 = env->fmask;
    /* Interrupt / Event Registers - Skipped */
        whpx->partition, cpu->cpu_index,
/* whpx_get_tsc() */
        whpx->partition, cpu->cpu_index, &tsc_reg, 1, &tsc_val);
        return -1;
    cpu_env(cpu)->tsc = tsc_val.Reg64;

/* whpx_get_xcrs() */
        whpx->partition, cpu->cpu_index, &xcr0_name, 1, &xcr0);
    cpu_env(cpu)->xcr0 = xcr0.Reg64;

/* whpx_get_registers() */
    AccelCPUState *vcpu = cpu->accel;
    CPUX86State *env = &x86_cpu->env;
    if (!env->tsc_valid) {
        env->tsc_valid = !runstate_is_running();
        whpx->partition, cpu->cpu_index,
    whpx_apic_get(x86_cpu->apic_state);
    vcpu->tpr = whpx_apic_tpr_to_cr8(
        cpu_get_apic_tpr(x86_cpu->apic_state));
    env->regs[idx] = vcxt.values[idx].Reg64;
    env->eip = vcxt.values[idx++].Reg64;
    env->eflags = vcxt.values[idx++].Reg64;
    env->segs[i] = whpx_seg_h2q(&vcxt.values[idx].Segment);
    env->ldt = whpx_seg_h2q(&vcxt.values[idx++].Segment);
    env->tr = whpx_seg_h2q(&vcxt.values[idx++].Segment);
    env->idt.base = vcxt.values[idx].Table.Base;
    env->idt.limit = vcxt.values[idx].Table.Limit;
    env->gdt.base = vcxt.values[idx].Table.Base;
    env->gdt.limit = vcxt.values[idx].Table.Limit;
    env->cr[0] = vcxt.values[idx++].Reg64;
    env->cr[2] = vcxt.values[idx++].Reg64;
    env->cr[3] = vcxt.values[idx++].Reg64;
    env->cr[4] = vcxt.values[idx++].Reg64;
    if (tpr != vcpu->tpr) {
        vcpu->tpr = tpr;
        cpu_set_apic_tpr(x86_cpu->apic_state, whpx_cr8_to_apic_tpr(tpr));
    /* 8 Debug Registers - Skipped */
    for (i = 0; i < sizeof(env->xmm_regs) / sizeof(ZMMReg); i += 1, idx += 1) {
        env->xmm_regs[i].ZMM_Q(0) = vcxt.values[idx].Reg128.Low64;
        env->xmm_regs[i].ZMM_Q(1) = vcxt.values[idx].Reg128.High64;
    env->fpregs[i].mmx.MMX_Q(0) = vcxt.values[idx].Fp.AsUINT128.Low64;
    /* env->fpregs[i].mmx.MMX_Q(1) =
    env->fpuc = vcxt.values[idx].FpControlStatus.FpControl;
    env->fpstt = (vcxt.values[idx].FpControlStatus.FpStatus >> 11) & 0x7;
    env->fpus = vcxt.values[idx].FpControlStatus.FpStatus & ~0x3800;
    env->fptags[i] = !((vcxt.values[idx].FpControlStatus.FpTag >> i) & 1);
    env->fpop = vcxt.values[idx].FpControlStatus.LastFpOp;
    env->fpip = vcxt.values[idx].FpControlStatus.LastFpRip;
    env->mxcsr = vcxt.values[idx].XmmControlStatus.XmmStatusControl;
    env->efer = vcxt.values[idx++].Reg64;
    env->kernelgsbase = vcxt.values[idx++].Reg64;
    if (apic_base != vcpu->apic_base) {
        vcpu->apic_base = apic_base;
        cpu_set_apic_base(x86_cpu->apic_state, vcpu->apic_base);
    /* WHvX64RegisterPat - Skipped */
    env->sysenter_cs = vcxt.values[idx++].Reg64;
    env->sysenter_eip = vcxt.values[idx++].Reg64;
    env->sysenter_esp = vcxt.values[idx++].Reg64;
    env->star = vcxt.values[idx++].Reg64;
    env->lstar = vcxt.values[idx++].Reg64;
    env->cstar = vcxt.values[idx++].Reg64;
    env->fmask = vcxt.values[idx++].Reg64;
    /* Interrupt / Event Registers - Skipped */
    whpx_apic_get(x86_cpu->apic_state);
/* whpx_emu_ioport_callback() */
    address_space_rw(&address_space_io, IoAccess->Port, attrs,
                     &IoAccess->Data, IoAccess->AccessSize,
                     IoAccess->Direction);

/* whpx_emu_mmio_callback() */
    cpu_physical_memory_rw(ma->GpaAddress, ma->Data, ma->AccessSize,
                           ma->Direction);

/* whpx_emu_getreg_callback() */
        whpx->partition, cpu->cpu_index,

/* whpx_emu_setreg_callback() */
        whpx->partition, cpu->cpu_index,
    cpu->accel->dirty = false;

/* whpx_emu_translate_callback() */
    hr = whp_dispatch.WHvTranslateGva(whpx->partition, cpu->cpu_index,
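/*
 * Sketch (an assumption about the wiring, not text from this listing): the
 * five callbacks excerpted above are the kind of table handed to
 * WHvEmulatorCreateEmulator(), whose handle later appears as vcpu->emulator.
 * Field names come from <WinHvEmulation.h>; whether QEMU fills the table
 * exactly like this is not shown in the excerpt.
 */
static const WHV_EMULATOR_CALLBACKS emu_callbacks_sketch = {
    .Size = sizeof(WHV_EMULATOR_CALLBACKS),
    .WHvEmulatorIoPortCallback = whpx_emu_ioport_callback,
    .WHvEmulatorMemoryCallback = whpx_emu_mmio_callback,
    .WHvEmulatorGetVirtualProcessorRegisters = whpx_emu_getreg_callback,
    .WHvEmulatorSetVirtualProcessorRegisters = whpx_emu_setreg_callback,
    .WHvEmulatorTranslateGvaPage = whpx_emu_translate_callback,
};

/* Creation would then look roughly like:
 *     WHvEmulatorCreateEmulator(&emu_callbacks_sketch, &vcpu->emulator);
 */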
/* whpx_handle_mmio() */
    AccelCPUState *vcpu = cpu->accel;
        vcpu->emulator, cpu,
        &vcpu->exit_ctx.VpContext, ctx,
        return -1;
        return -1;

/* whpx_handle_portio() */
    AccelCPUState *vcpu = cpu->accel;
        vcpu->emulator, cpu,
        &vcpu->exit_ctx.VpContext, ctx,
        return -1;
        return -1;

    /* ... namely breakpoint/single-step events. */

/* whpx_set_exception_exit_bitmap() */
    if (exceptions == whpx->exception_exit_bitmap) {
        whpx->partition,
    whpx->exception_exit_bitmap = exceptions;
/* whpx_vcpu_configure_single_stepping() */
        whpx->partition,
        cpu->cpu_index,
        whpx->partition,
        cpu->cpu_index,
    /* Suspend delivery of hardware interrupts during single-stepping. */
        whpx->partition,
        cpu->cpu_index,
        whpx->partition,
        cpu->cpu_index,
        whpx->partition,
        cpu->cpu_index,

/* whpx_lookup_breakpoint_by_addr() */
    if (whpx->breakpoints.breakpoints) {
        for (i = 0; i < whpx->breakpoints.breakpoints->used; i++) {
            if (address == whpx->breakpoints.breakpoints->data[i].address) {
                return &whpx->breakpoints.breakpoints->data[i];

/*
 * ... debugging user-mode applications. Since the WHPX API does not offer ...
 *
 * The function below rebuilds a list of low-level breakpoints (one per
 * ... high-level breakpoints (set via cpu_breakpoint_insert()).
 * ... high-level breakpoints (a.k.a. CPU breakpoints) used to compute the
 * low-level ones, so that it won't be re-invoked until these breakpoints ...
 */
/* whpx_translate_cpu_breakpoints() */
    breakpoints->original_addresses =
        g_renew(vaddr, breakpoints->original_addresses, cpu_breakpoint_count);
    breakpoints->original_address_count = cpu_breakpoint_count;
        (breakpoints->breakpoints ? breakpoints->breakpoints->used : 0);
    new_breakpoints->allocated = max_breakpoints;
    new_breakpoints->used = 0;
    if (breakpoints->breakpoints) {
        for (i = 0; i < breakpoints->breakpoints->used; i++) {
            if (breakpoints->breakpoints->data[i].state != WHPX_BP_CLEARED) {
                new_breakpoints->data[new_breakpoints->used++] =
                    breakpoints->breakpoints->data[i];
    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        breakpoints->original_addresses[cpu_bp_index++] = bp->pc;
        for (i = 0; i < new_breakpoints->used; i++) {
            /*
             * ... real-world scenarios, since it only needs to run once after
             * ... high-level breakpoint objects in a tree or hash map.
             */
            if (new_breakpoints->data[i].address == bp->pc) {
                if (new_breakpoints->data[i].state == WHPX_BP_CLEAR_PENDING) {
                    new_breakpoints->data[i].state = WHPX_BP_SET;
                } else if (new_breakpoints->data[i].state == WHPX_BP_SET) {
                    new_breakpoints->data[i].state = WHPX_BP_SET_PENDING;
        if (!found && new_breakpoints->used < new_breakpoints->allocated) {
            new_breakpoints->data[new_breakpoints->used].address = bp->pc;
            new_breakpoints->data[new_breakpoints->used].state =
            new_breakpoints->used++;
    g_free(breakpoints->breakpoints);
    breakpoints->breakpoints = new_breakpoints;

/* whpx_apply_breakpoints() */
    for (i = 0; i < breakpoints->used; i++) {
        WhpxBreakpointState state = breakpoints->data[i].state;
            breakpoints->data[i].address,
            &breakpoints->data[i].original_instruction,
            breakpoints->data[i].address,
            breakpoints->data[i].address,
            &breakpoints->data[i].original_instruction,
        breakpoints->data[i].state = state;
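/*
 * Sketch (an assumption, not the excerpted code): the original_instruction
 * bookkeeping above is the classic software-breakpoint save-and-patch
 * sequence -- remember the byte at the target address, then overwrite it
 * with the one-byte INT3 opcode (0xCC) so that hitting it produces a
 * breakpoint exit. set_soft_breakpoint() is a made-up helper; it assumes
 * QEMU's cpu_memory_rw_debug() and vaddr from the usual QEMU headers.
 */
static int set_soft_breakpoint(CPUState *cpu, vaddr address, uint8_t *saved)
{
    uint8_t int3 = 0xCC;

    /* Remember the original byte so the breakpoint can be removed later. */
    if (cpu_memory_rw_debug(cpu, address, saved, 1, false)) {
        return -1;
    }
    /* Patch in INT3. */
    if (cpu_memory_rw_debug(cpu, address, &int3, 1, true)) {
        return -1;
    }
    return 0;
}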
/* whpx_first_vcpu_starting() */
    if (!QTAILQ_EMPTY(&cpu->breakpoints) ||
        (whpx->breakpoints.breakpoints &&
         whpx->breakpoints.breakpoints->used)) {
    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (i >= whpx->breakpoints.original_address_count ||
            bp->pc != whpx->breakpoints.original_addresses[i]) {
    if (i != whpx->breakpoints.original_address_count) {
    whpx_translate_cpu_breakpoints(&whpx->breakpoints, cpu, i);
    whpx_apply_breakpoints(whpx->breakpoints.breakpoints, cpu, true);
    if (whpx->step_pending ||
        (whpx->breakpoints.breakpoints &&
         whpx->breakpoints.breakpoints->used)) {
        /* ... We are either attempting to single-step one or more CPUs, or ... */

/* whpx_vcpu_get_pc() */
    if (cpu->accel->dirty) {
        return cpu_env(cpu)->eip;
    AccelCPUState *vcpu = cpu->accel;
    return vcpu->exit_ctx.VpContext.Rip;
    /* ... WHvSetVirtualProcessorRegisters() and must be re-queried from ... */
        whpx->partition,
        cpu->cpu_index,

/* whpx_handle_halt() */
    if (!((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
          (cpu_env(cpu)->eflags & IF_MASK)) &&
        !(cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
        cpu->exception_index = EXCP_HLT;
        cpu->halted = true;
/* whpx_vcpu_pre_run() */
    AccelCPUState *vcpu = cpu->accel;
    CPUX86State *env = &x86_cpu->env;
    if (!vcpu->interruption_pending &&
        cpu->interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) {
        if (cpu->interrupt_request & CPU_INTERRUPT_NMI) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
            vcpu->interruptable = false;
        if (cpu->interrupt_request & CPU_INTERRUPT_SMI) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
    if (cpu->interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
        if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) &&
            !(env->hflags & HF_SMM_MASK)) {
            cpu->exit_request = 1;
        if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
            cpu->exit_request = 1;
    if (!vcpu->interruption_pending &&
        vcpu->interruptable && (env->eflags & IF_MASK)) {
        if (cpu->interrupt_request & CPU_INTERRUPT_HARD) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
    } else if (vcpu->ready_for_pic_interrupt &&
               (cpu->interrupt_request & CPU_INTERRUPT_HARD)) {
        cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
    tpr = whpx_apic_tpr_to_cr8(cpu_get_apic_tpr(x86_cpu->apic_state));
    if (tpr != vcpu->tpr) {
        vcpu->tpr = tpr;
        cpu->exit_request = 1;
    if (!vcpu->window_registered &&
        cpu->interrupt_request & CPU_INTERRUPT_HARD) {
        vcpu->window_registered = 1;
    vcpu->ready_for_pic_interrupt = false;
        whpx->partition, cpu->cpu_index,

/* whpx_vcpu_post_run() */
    AccelCPUState *vcpu = cpu->accel;
    CPUX86State *env = &x86_cpu->env;
    env->eflags = vcpu->exit_ctx.VpContext.Rflags;
    uint64_t tpr = vcpu->exit_ctx.VpContext.Cr8;
    if (vcpu->tpr != tpr) {
        vcpu->tpr = tpr;
        cpu_set_apic_tpr(x86_cpu->apic_state, whpx_cr8_to_apic_tpr(vcpu->tpr));
    vcpu->interruption_pending =
        vcpu->exit_ctx.VpContext.ExecutionState.InterruptionPending;
    vcpu->interruptable =
        !vcpu->exit_ctx.VpContext.ExecutionState.InterruptShadow;

/* whpx_vcpu_process_async_events() */
    CPUX86State *env = &x86_cpu->env;
    AccelCPUState *vcpu = cpu->accel;
    if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) &&
        !(env->hflags & HF_SMM_MASK)) {
        vcpu->interruptable = true;
    if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
        apic_poll_irq(x86_cpu->apic_state);
    if (((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
         (env->eflags & IF_MASK)) ||
        (cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
        cpu->halted = false;
    if (cpu->interrupt_request & CPU_INTERRUPT_SIPI) {
    if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
        cpu->interrupt_request &= ~CPU_INTERRUPT_TPR;
        apic_handle_tpr_access_report(x86_cpu->apic_state, env->eip,
                                      env->tpr_access_type);
/* whpx_vcpu_run() */
    AccelCPUState *vcpu = cpu->accel;
    if (whpx->running_cpus++ == 0) {
    if (whpx->breakpoints.breakpoints &&
        whpx->breakpoints.breakpoints->used > 0)
    if (stepped_over_bp && stepped_over_bp->state != WHPX_BP_SET) {
    if (cpu->halted && !whpx_apic_in_platform()) {
        cpu->exception_index = EXCP_HLT;
        qatomic_set(&cpu->exit_request, false);
    g_assert(!cpu->running);
    cpu->running = true;
        stepped_over_bp->address,
        &stepped_over_bp->original_instruction,
    if (cpu->accel->dirty) {
        cpu->accel->dirty = false;
    if (qatomic_read(&cpu->exit_request)) {
    if (exclusive_step_mode != WHPX_STEP_NONE || cpu->singlestep_enabled) {
        whpx->partition, cpu->cpu_index,
        &vcpu->exit_ctx, sizeof(vcpu->exit_ctx));
        ret = -1;
    if (exclusive_step_mode != WHPX_STEP_NONE || cpu->singlestep_enabled) {
        &vcpu->exit_ctx.VpContext.Rflags);
    switch (vcpu->exit_ctx.ExitReason) {
        ret = whpx_handle_mmio(cpu, &vcpu->exit_ctx.MemoryAccess);
        ret = whpx_handle_portio(cpu, &vcpu->exit_ctx.IoPortAccess);
        vcpu->ready_for_pic_interrupt = 1;
        vcpu->window_registered = 0;
        ioapic_eoi_broadcast(vcpu->exit_ctx.ApicEoi.InterruptVector);
        uint64_t icr = vcpu->exit_ctx.ApicInitSipi.ApicIcr;
        /* no shorthand. Bits 56-63 contain the destination. */
        hr = whp_dispatch.WHvRequestInterrupt(whpx->partition,
        if (i == cpu->cpu_index && !include_self) {
        hr = whp_dispatch.WHvRequestInterrupt(whpx->partition,
        /* ... Delay it until we are done stepping ... */
        cpu->exception_index = EXCP_INTERRUPT;
        vcpu->exit_ctx.VpContext.Rip +
        vcpu->exit_ctx.VpContext.InstructionLength;
        reg_count = vcpu->exit_ctx.MsrAccess.AccessInfo.IsWrite ?
            whpx->partition,
            cpu->cpu_index,
        CPUX86State *env = &x86_cpu->env;
        rip = vcpu->exit_ctx.VpContext.Rip +
              vcpu->exit_ctx.VpContext.InstructionLength;
        cpuid_fn = vcpu->exit_ctx.CpuidAccess.Rax;
        rax = env->tsc_khz;
        rbx = env->apic_bus_freq / 1000; /* Hz to KHz */
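        /*
         * Note (an inference, not a comment from this listing): TSC kHz in
         * RAX and APIC bus kHz in RBX is the layout of the VMware-compatible
         * timing leaf, CPUID 0x40000010, which lets a guest read both
         * frequencies without calibrating them itself.
         */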
        whpx->partition, cpu->cpu_index,
    if ((vcpu->exit_ctx.VpException.ExceptionType ==
        (vcpu->exit_ctx.VpException.InstructionByteCount >= 1) &&
        (vcpu->exit_ctx.VpException.InstructionBytes[0] ==
        cpu->exception_index = EXCP_DEBUG;
    } else if ((vcpu->exit_ctx.VpException.ExceptionType ==
               !cpu->singlestep_enabled) {
        /* ... gdb does not expect us to do single-stepping. */
        cpu->exception_index = EXCP_INTERRUPT;
        cpu->exception_index = EXCP_DEBUG;
        vcpu->exit_ctx.ExitReason);
        stepped_over_bp->address,
    cpu->running = false;
    if (--whpx->running_cpus == 0) {
    qatomic_set(&cpu->exit_request, false);
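/*
 * Bare-bones sketch (an assumption, not QEMU's actual loop) of the control
 * flow the whpx_vcpu_run() fragments above revolve around: run the vCPU
 * until it exits, then dispatch on the exit reason. run_vcpu_once() is a
 * made-up name; it assumes the same <WinHvPlatform.h> declarations as the
 * earlier sketches.
 */
static HRESULT run_vcpu_once(WHV_PARTITION_HANDLE partition, UINT32 vp_index)
{
    WHV_RUN_VP_EXIT_CONTEXT exit_ctx;
    HRESULT hr;

    hr = WHvRunVirtualProcessor(partition, vp_index, &exit_ctx, sizeof(exit_ctx));
    if (FAILED(hr)) {
        return hr;
    }

    switch (exit_ctx.ExitReason) {
    case WHvRunVpExitReasonMemoryAccess:        /* MMIO: hand to the emulator */
    case WHvRunVpExitReasonX64IoPortAccess:     /* PIO: hand to the emulator */
    case WHvRunVpExitReasonX64InterruptWindow:  /* guest can now take an IRQ */
    case WHvRunVpExitReasonX64Halt:             /* guest executed HLT */
    case WHvRunVpExitReasonCanceled:            /* WHvCancelRunVirtualProcessor */
    default:
        break;
    }
    return hr;
}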
/* do_whpx_cpu_synchronize_state() */
    if (!cpu->accel->dirty) {
        cpu->accel->dirty = true;

/* do_whpx_cpu_synchronize_post_reset() */
    cpu->accel->dirty = false;

/* do_whpx_cpu_synchronize_post_init() */
    cpu->accel->dirty = false;

/* do_whpx_cpu_synchronize_pre_loadvm() */
    cpu->accel->dirty = true;

/* whpx_cpu_synchronize_state() */
    if (!cpu->accel->dirty) {

/* whpx_cpu_update_state() */
    env->tsc_valid = false;
/* whpx_init_vcpu() */
    CPUX86State *env = &x86_cpu->env;
        "State blocked due to non-migratable CPUID feature support,"
        ret = -EINVAL;
        &vcpu->emulator);
        ret = -EINVAL;
        whpx->partition, cpu->cpu_index, 0);
        whp_dispatch.WHvEmulatorDestroyEmulator(vcpu->emulator);
        ret = -EINVAL;
    /*
     * ... provided by Hyper-V if the former is not present. In the latter case, we
     * query it from Hyper-V and record in env->tsc_khz, so that vcpu's TSC ...
     */
    if (!env->tsc_khz) {
        env->tsc_khz = freq / 1000; /* Hz to KHz */
    env->apic_bus_freq = HYPERV_APIC_BUS_FREQUENCY;
    env->apic_bus_freq = freq;
    if (x86_cpu->vmware_cpuid_freq && env->tsc_khz) {
        whpx->partition,
        ret = -EINVAL;
    vcpu->interruptable = true;
    vcpu->dirty = true;
    cpu->accel = vcpu;
    max_vcpu_index = max(max_vcpu_index, cpu->cpu_index);

/* whpx_vcpu_exec() */
    if (cpu->exception_index >= EXCP_INTERRUPT) {
        ret = cpu->exception_index;
        cpu->exception_index = -1;

/* whpx_destroy_vcpu() */
    AccelCPUState *vcpu = cpu->accel;
    whp_dispatch.WHvDeleteVirtualProcessor(whpx->partition, cpu->cpu_index);
    whp_dispatch.WHvEmulatorDestroyEmulator(vcpu->emulator);
    g_free(cpu->accel);

/* whpx_vcpu_kick() */
        whpx->partition, cpu->cpu_index, 0);

/* whpx_update_mapping() */
    hr = whp_dispatch.WHvMapGpaRange(whpx->partition,
    hr = whp_dispatch.WHvUnmapGpaRange(whpx->partition,
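/*
 * Sketch (an assumption, not the excerpted code): the WHvMapGpaRange()/
 * WHvUnmapGpaRange() calls above map a host-virtual range into guest-physical
 * space with the requested permissions. map_guest_ram() is a made-up helper
 * showing the typical flag handling for read-only (ROM) regions.
 */
static HRESULT map_guest_ram(WHV_PARTITION_HANDLE partition, void *host_va,
                             UINT64 guest_pa, UINT64 size, int readonly)
{
    WHV_MAP_GPA_RANGE_FLAGS flags = WHvMapGpaRangeFlagRead |
                                    WHvMapGpaRangeFlagExecute;

    if (!readonly) {
        flags |= WHvMapGpaRangeFlagWrite;
    }
    return WHvMapGpaRange(partition, host_va, guest_pa, size, flags);
}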
/* whpx_process_section() */
    MemoryRegion *mr = section->mr;
    hwaddr start_pa = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    delta = qemu_real_host_page_size() - (start_pa & ~qemu_real_host_page_mask());
    size -= delta;
        + section->offset_within_region + delta;
        memory_region_is_rom(mr), mr->name);

/* whpx_region_add() */
    memory_region_ref(section->mr);

/* whpx_region_del() */
    memory_region_unref(section->mr);

/* whpx_log_sync() */
    MemoryRegion *mr = section->mr;
    memory_region_set_dirty(mr, 0, int128_get64(section->size));

/* whpx_set_kernel_irqchip() */
    whpx->kernel_irqchip_allowed = true;
    whpx->kernel_irqchip_required = true;
    whpx->kernel_irqchip_allowed = false;
    whpx->kernel_irqchip_required = false;
        "Try without kernel-irqchip or with kernel-irqchip=on|off");
/* whpx_accel_init() */
    ret = -ENOSYS;
    whpx->mem_quota = ms->ram_size;
    ret = -ENOSPC;
    ret = -EINVAL;
    hr = whp_dispatch.WHvCreatePartition(&whpx->partition);
    ret = -EINVAL;
        whpx->partition,
    prop.ProcessorCount = ms->smp.cpus;
        whpx->partition,
    ret = -EINVAL;
    if (whpx->kernel_irqchip_required && (!features.LocalApicEmulation ||
        "Try without kernel-irqchip or with kernel-irqchip=off");
    ret = -EINVAL;
    if (whpx->kernel_irqchip_allowed && features.LocalApicEmulation &&
        whpx->partition,
        if (whpx->kernel_irqchip_required) {
            ret = -EINVAL;
        whpx->apic_in_platform = true;
        whpx->partition,
    ret = -EINVAL;
        whpx->partition,
    ret = -EINVAL;
    whpx->exception_exit_bitmap = -1;
    ret = -EINVAL;
    hr = whp_dispatch.WHvSetupPartition(whpx->partition);
    ret = -EINVAL;
    if (NULL != whpx->partition) {
        whp_dispatch.WHvDeletePartition(whpx->partition);
        whpx->partition = NULL;
/* whpx_accel_class_init() */
static void whpx_accel_class_init(ObjectClass *oc, void *data)
    AccelClass *ac = ACCEL_CLASS(oc);
    ac->name = "WHPX";
    ac->init_machine = whpx_accel_init;
    ac->allowed = &whpx_allowed;
    object_class_property_add(oc, "kernel-irqchip", "on|off|split",
    object_class_property_set_description(oc, "kernel-irqchip",
        "Configure WHPX in-kernel irqchip");
/* whpx_accel_instance_init() */
    /* Turn on kernel-irqchip, by default */
    whpx->kernel_irqchip_allowed = true;
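/*
 * Sketch (an assumption, not text from this listing): a class_init /
 * instance_init pair like the two above is normally hooked up through QOM
 * type registration; the exact TypeInfo in the real source may differ.
 */
static const TypeInfo whpx_accel_type_sketch = {
    .name = ACCEL_CLASS_NAME("whpx"),
    .parent = TYPE_ACCEL,
    .class_init = whpx_accel_class_init,
    .instance_init = whpx_accel_instance_init,
};

static void whpx_type_init_sketch(void)
{
    type_register_static(&whpx_accel_type_sketch);
}

type_init(whpx_type_init_sketch);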