Lines Matching +full:write +full:- +full:assist

/*
 * Copyright (c) 2018-2019 Maxime Villard, All rights reserved.
 * See the COPYING file in the top-level directory.
 */

#include "system/address-spaces.h"
#include "accel/accel-ops.h"
#include "qemu/main-loop.h"
#include "qemu/error-report.h"
#include "accel/accel-cpu-target.h"
#include "host-cpu.h"
#include "nvmm-accel-ops.h"

/* Window-exiting for INTs/NMIs. */

/* -------------------------------------------------------------------------- */

/* nvmm_set_segment() */
    uint32_t attrib = qseg->flags;

    nseg->selector = qseg->selector;
    nseg->limit = qseg->limit;
    nseg->base = qseg->base;
    nseg->attrib.type = __SHIFTOUT(attrib, DESC_TYPE_MASK);
    nseg->attrib.s = __SHIFTOUT(attrib, DESC_S_MASK);
    nseg->attrib.dpl = __SHIFTOUT(attrib, DESC_DPL_MASK);
    nseg->attrib.p = __SHIFTOUT(attrib, DESC_P_MASK);
    nseg->attrib.avl = __SHIFTOUT(attrib, DESC_AVL_MASK);
    nseg->attrib.l = __SHIFTOUT(attrib, DESC_L_MASK);
    nseg->attrib.def = __SHIFTOUT(attrib, DESC_B_MASK);
    nseg->attrib.g = __SHIFTOUT(attrib, DESC_G_MASK);

/* nvmm_set_registers() */
    AccelCPUState *qcpu = cpu->accel;
    struct nvmm_vcpu *vcpu = &qcpu->vcpu;
    struct nvmm_x64_state *state = vcpu->state;

    /* GPRs. */
    state->gprs[NVMM_X64_GPR_RAX] = env->regs[R_EAX];
    state->gprs[NVMM_X64_GPR_RCX] = env->regs[R_ECX];
    state->gprs[NVMM_X64_GPR_RDX] = env->regs[R_EDX];
    state->gprs[NVMM_X64_GPR_RBX] = env->regs[R_EBX];
    state->gprs[NVMM_X64_GPR_RSP] = env->regs[R_ESP];
    state->gprs[NVMM_X64_GPR_RBP] = env->regs[R_EBP];
    state->gprs[NVMM_X64_GPR_RSI] = env->regs[R_ESI];
    state->gprs[NVMM_X64_GPR_RDI] = env->regs[R_EDI];
    state->gprs[NVMM_X64_GPR_R8] = env->regs[R_R8];
    state->gprs[NVMM_X64_GPR_R9] = env->regs[R_R9];
    state->gprs[NVMM_X64_GPR_R10] = env->regs[R_R10];
    state->gprs[NVMM_X64_GPR_R11] = env->regs[R_R11];
    state->gprs[NVMM_X64_GPR_R12] = env->regs[R_R12];
    state->gprs[NVMM_X64_GPR_R13] = env->regs[R_R13];
    state->gprs[NVMM_X64_GPR_R14] = env->regs[R_R14];
    state->gprs[NVMM_X64_GPR_R15] = env->regs[R_R15];

    /* RIP and RFLAGS. */
    state->gprs[NVMM_X64_GPR_RIP] = env->eip;
    state->gprs[NVMM_X64_GPR_RFLAGS] = env->eflags;

    /* Segments. */
    nvmm_set_segment(&state->segs[NVMM_X64_SEG_CS], &env->segs[R_CS]);
    nvmm_set_segment(&state->segs[NVMM_X64_SEG_DS], &env->segs[R_DS]);
    nvmm_set_segment(&state->segs[NVMM_X64_SEG_ES], &env->segs[R_ES]);
    nvmm_set_segment(&state->segs[NVMM_X64_SEG_FS], &env->segs[R_FS]);
    nvmm_set_segment(&state->segs[NVMM_X64_SEG_GS], &env->segs[R_GS]);
    nvmm_set_segment(&state->segs[NVMM_X64_SEG_SS], &env->segs[R_SS]);

    /* Special segments. */
    nvmm_set_segment(&state->segs[NVMM_X64_SEG_GDT], &env->gdt);
    nvmm_set_segment(&state->segs[NVMM_X64_SEG_LDT], &env->ldt);
    nvmm_set_segment(&state->segs[NVMM_X64_SEG_TR], &env->tr);
    nvmm_set_segment(&state->segs[NVMM_X64_SEG_IDT], &env->idt);

    /* Control registers. */
    state->crs[NVMM_X64_CR_CR0] = env->cr[0];
    state->crs[NVMM_X64_CR_CR2] = env->cr[2];
    state->crs[NVMM_X64_CR_CR3] = env->cr[3];
    state->crs[NVMM_X64_CR_CR4] = env->cr[4];
    state->crs[NVMM_X64_CR_CR8] = qcpu->tpr;
    state->crs[NVMM_X64_CR_XCR0] = env->xcr0;

    /* Debug registers. */
    state->drs[NVMM_X64_DR_DR0] = env->dr[0];
    state->drs[NVMM_X64_DR_DR1] = env->dr[1];
    state->drs[NVMM_X64_DR_DR2] = env->dr[2];
    state->drs[NVMM_X64_DR_DR3] = env->dr[3];
    state->drs[NVMM_X64_DR_DR6] = env->dr[6];
    state->drs[NVMM_X64_DR_DR7] = env->dr[7];

    /* FPU. */
    state->fpu.fx_cw = env->fpuc;
    state->fpu.fx_sw = (env->fpus & ~0x3800) | ((env->fpstt & 0x7) << 11);
    state->fpu.fx_tw = 0;
    for (i = 0; i < 8; i++) {
        state->fpu.fx_tw |= (!env->fptags[i]) << i;
    }
    state->fpu.fx_opcode = env->fpop;
    state->fpu.fx_ip.fa_64 = env->fpip;
    state->fpu.fx_dp.fa_64 = env->fpdp;
    state->fpu.fx_mxcsr = env->mxcsr;
    state->fpu.fx_mxcsr_mask = 0x0000FFFF;
    assert(sizeof(state->fpu.fx_87_ac) == sizeof(env->fpregs));
    memcpy(state->fpu.fx_87_ac, env->fpregs, sizeof(env->fpregs));
    for (i = 0; i < CPU_NB_REGS; i++) {
        memcpy(&state->fpu.fx_xmm[i].xmm_bytes[0],
            &env->xmm_regs[i].ZMM_Q(0), 8);
        memcpy(&state->fpu.fx_xmm[i].xmm_bytes[8],
            &env->xmm_regs[i].ZMM_Q(1), 8);
    }

    /* MSRs. */
    state->msrs[NVMM_X64_MSR_EFER] = env->efer;
    state->msrs[NVMM_X64_MSR_STAR] = env->star;
#ifdef TARGET_X86_64
    state->msrs[NVMM_X64_MSR_LSTAR] = env->lstar;
    state->msrs[NVMM_X64_MSR_CSTAR] = env->cstar;
    state->msrs[NVMM_X64_MSR_SFMASK] = env->fmask;
    state->msrs[NVMM_X64_MSR_KERNELGSBASE] = env->kernelgsbase;
#endif
    state->msrs[NVMM_X64_MSR_SYSENTER_CS] = env->sysenter_cs;
    state->msrs[NVMM_X64_MSR_SYSENTER_ESP] = env->sysenter_esp;
    state->msrs[NVMM_X64_MSR_SYSENTER_EIP] = env->sysenter_eip;
    state->msrs[NVMM_X64_MSR_PAT] = env->pat;
    state->msrs[NVMM_X64_MSR_TSC] = env->tsc;

    /* Push everything to the kernel ('bitmap' selects the state classes). */
    ret = nvmm_vcpu_setstate(mach, vcpu, bitmap);
    if (ret == -1) {
        error_report("NVMM: Failed to set virtual processor context,"
            " error=%d", errno);
    }

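The x87 repacking above is the subtle part: QEMU keeps TOP (env->fpstt) and per-register empty flags (env->fptags[]) separate, while FXSAVE wants TOP folded into FSW bits 11-13 and an abridged tag word where 1 means valid. A minimal standalone sketch with hypothetical values (field names mirror the QEMU ones, but this is not QEMU code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint16_t fpus = 0x0080;                 /* status word, TOP bits zeroed */
    unsigned fpstt = 5;                     /* TOP of the x87 stack */
    uint8_t fptags[8] = {0, 0, 1, 1, 1, 1, 1, 1}; /* 1 = register empty */

    /* QEMU -> FXSAVE direction, as in nvmm_set_registers(). */
    uint16_t fx_sw = (fpus & ~0x3800) | ((fpstt & 0x7) << 11);
    uint8_t fx_tw = 0;
    for (int i = 0; i < 8; i++) {
        fx_tw |= (!fptags[i]) << i;         /* abridged tag: 1 = valid */
    }
    printf("fx_sw=0x%04x fx_tw=0x%02x\n", fx_sw, fx_tw); /* 0x2880 0x03 */

    /* FXSAVE -> QEMU direction, as in nvmm_get_registers(). */
    unsigned top = (fx_sw >> 11) & 0x7;     /* recovers 5 */
    printf("top=%u st0_empty=%d\n", top, !((fx_tw >> 0) & 1)); /* 5 0 */
    return 0;
}
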
/* nvmm_get_segment() */
    qseg->selector = nseg->selector;
    qseg->limit = nseg->limit;
    qseg->base = nseg->base;

    qseg->flags =
        __SHIFTIN((uint32_t)nseg->attrib.type, DESC_TYPE_MASK) |
        __SHIFTIN((uint32_t)nseg->attrib.s, DESC_S_MASK) |
        __SHIFTIN((uint32_t)nseg->attrib.dpl, DESC_DPL_MASK) |
        __SHIFTIN((uint32_t)nseg->attrib.p, DESC_P_MASK) |
        __SHIFTIN((uint32_t)nseg->attrib.avl, DESC_AVL_MASK) |
        __SHIFTIN((uint32_t)nseg->attrib.l, DESC_L_MASK) |
        __SHIFTIN((uint32_t)nseg->attrib.def, DESC_B_MASK) |
        __SHIFTIN((uint32_t)nseg->attrib.g, DESC_G_MASK);

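The __SHIFTOUT()/__SHIFTIN() pair used by the two segment converters are NetBSD bit-field helpers, not QEMU ones: they extract or insert a field based solely on a mask, with no separate shift constant. A self-contained sketch of the idea, using locally defined equivalents so it compiles anywhere (the real definitions live in NetBSD headers; DESC_DPL_MASK here matches QEMU's 0x6000):

#include <assert.h>
#include <stdint.h>

/* Lowest set bit of a mask, e.g. 0x6000 -> 0x2000. */
#define LOWEST_SET_BIT(m) ((((m) - 1) & (m)) ^ (m))
/* Extract a field: mask it, then shift it down by the mask's alignment. */
#define SHIFTOUT(x, m)    (((x) & (m)) / LOWEST_SET_BIT(m))
/* Insert a field: shift it up into the mask's position. */
#define SHIFTIN(x, m)     ((x) * LOWEST_SET_BIT(m))

#define DESC_DPL_MASK 0x00006000  /* DPL lives in bits 13-14 */

int main(void)
{
    uint32_t flags = 0x4000;                         /* hypothetical attrib */
    uint32_t dpl = SHIFTOUT(flags, DESC_DPL_MASK);   /* == 2 */
    assert(dpl == 2);
    assert(SHIFTIN(dpl, DESC_DPL_MASK) == (flags & DESC_DPL_MASK));
    return 0;
}
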
/* nvmm_get_registers() */
    AccelCPUState *qcpu = cpu->accel;
    struct nvmm_vcpu *vcpu = &qcpu->vcpu;
    struct nvmm_x64_state *state = vcpu->state;

    /* Pull everything from the kernel ('bitmap' selects the state classes). */
    ret = nvmm_vcpu_getstate(mach, vcpu, bitmap);
    if (ret == -1) {
        error_report("NVMM: Failed to get virtual processor context,"
            " error=%d", errno);
    }

    /* GPRs. */
    env->regs[R_EAX] = state->gprs[NVMM_X64_GPR_RAX];
    env->regs[R_ECX] = state->gprs[NVMM_X64_GPR_RCX];
    env->regs[R_EDX] = state->gprs[NVMM_X64_GPR_RDX];
    env->regs[R_EBX] = state->gprs[NVMM_X64_GPR_RBX];
    env->regs[R_ESP] = state->gprs[NVMM_X64_GPR_RSP];
    env->regs[R_EBP] = state->gprs[NVMM_X64_GPR_RBP];
    env->regs[R_ESI] = state->gprs[NVMM_X64_GPR_RSI];
    env->regs[R_EDI] = state->gprs[NVMM_X64_GPR_RDI];
    env->regs[R_R8] = state->gprs[NVMM_X64_GPR_R8];
    env->regs[R_R9] = state->gprs[NVMM_X64_GPR_R9];
    env->regs[R_R10] = state->gprs[NVMM_X64_GPR_R10];
    env->regs[R_R11] = state->gprs[NVMM_X64_GPR_R11];
    env->regs[R_R12] = state->gprs[NVMM_X64_GPR_R12];
    env->regs[R_R13] = state->gprs[NVMM_X64_GPR_R13];
    env->regs[R_R14] = state->gprs[NVMM_X64_GPR_R14];
    env->regs[R_R15] = state->gprs[NVMM_X64_GPR_R15];

    /* RIP and RFLAGS. */
    env->eip = state->gprs[NVMM_X64_GPR_RIP];
    env->eflags = state->gprs[NVMM_X64_GPR_RFLAGS];

    /* Segments. */
    nvmm_get_segment(&env->segs[R_ES], &state->segs[NVMM_X64_SEG_ES]);
    nvmm_get_segment(&env->segs[R_CS], &state->segs[NVMM_X64_SEG_CS]);
    nvmm_get_segment(&env->segs[R_SS], &state->segs[NVMM_X64_SEG_SS]);
    nvmm_get_segment(&env->segs[R_DS], &state->segs[NVMM_X64_SEG_DS]);
    nvmm_get_segment(&env->segs[R_FS], &state->segs[NVMM_X64_SEG_FS]);
    nvmm_get_segment(&env->segs[R_GS], &state->segs[NVMM_X64_SEG_GS]);

    /* Special segments. */
    nvmm_get_segment(&env->gdt, &state->segs[NVMM_X64_SEG_GDT]);
    nvmm_get_segment(&env->ldt, &state->segs[NVMM_X64_SEG_LDT]);
    nvmm_get_segment(&env->tr, &state->segs[NVMM_X64_SEG_TR]);
    nvmm_get_segment(&env->idt, &state->segs[NVMM_X64_SEG_IDT]);

    /* Control registers. */
    env->cr[0] = state->crs[NVMM_X64_CR_CR0];
    env->cr[2] = state->crs[NVMM_X64_CR_CR2];
    env->cr[3] = state->crs[NVMM_X64_CR_CR3];
    env->cr[4] = state->crs[NVMM_X64_CR_CR4];
    tpr = state->crs[NVMM_X64_CR_CR8];
    if (tpr != qcpu->tpr) {
        qcpu->tpr = tpr;
        cpu_set_apic_tpr(x86_cpu->apic_state, tpr);
    }
    env->xcr0 = state->crs[NVMM_X64_CR_XCR0];

    /* Debug registers. */
    env->dr[0] = state->drs[NVMM_X64_DR_DR0];
    env->dr[1] = state->drs[NVMM_X64_DR_DR1];
    env->dr[2] = state->drs[NVMM_X64_DR_DR2];
    env->dr[3] = state->drs[NVMM_X64_DR_DR3];
    env->dr[6] = state->drs[NVMM_X64_DR_DR6];
    env->dr[7] = state->drs[NVMM_X64_DR_DR7];

    /* FPU. */
    env->fpuc = state->fpu.fx_cw;
    env->fpstt = (state->fpu.fx_sw >> 11) & 0x7;
    env->fpus = state->fpu.fx_sw & ~0x3800;
    for (i = 0; i < 8; i++) {
        env->fptags[i] = !((state->fpu.fx_tw >> i) & 1);
    }
    env->fpop = state->fpu.fx_opcode;
    env->fpip = state->fpu.fx_ip.fa_64;
    env->fpdp = state->fpu.fx_dp.fa_64;
    env->mxcsr = state->fpu.fx_mxcsr;
    assert(sizeof(state->fpu.fx_87_ac) == sizeof(env->fpregs));
    memcpy(env->fpregs, state->fpu.fx_87_ac, sizeof(env->fpregs));
    for (i = 0; i < CPU_NB_REGS; i++) {
        memcpy(&env->xmm_regs[i].ZMM_Q(0),
            &state->fpu.fx_xmm[i].xmm_bytes[0], 8);
        memcpy(&env->xmm_regs[i].ZMM_Q(1),
            &state->fpu.fx_xmm[i].xmm_bytes[8], 8);
    }

    /* MSRs. */
    env->efer = state->msrs[NVMM_X64_MSR_EFER];
    env->star = state->msrs[NVMM_X64_MSR_STAR];
#ifdef TARGET_X86_64
    env->lstar = state->msrs[NVMM_X64_MSR_LSTAR];
    env->cstar = state->msrs[NVMM_X64_MSR_CSTAR];
    env->fmask = state->msrs[NVMM_X64_MSR_SFMASK];
    env->kernelgsbase = state->msrs[NVMM_X64_MSR_KERNELGSBASE];
#endif
    env->sysenter_cs = state->msrs[NVMM_X64_MSR_SYSENTER_CS];
    env->sysenter_esp = state->msrs[NVMM_X64_MSR_SYSENTER_ESP];
    env->sysenter_eip = state->msrs[NVMM_X64_MSR_SYSENTER_EIP];
    env->pat = state->msrs[NVMM_X64_MSR_PAT];
    env->tsc = state->msrs[NVMM_X64_MSR_TSC];

/* nvmm_can_take_int() */
    AccelCPUState *qcpu = cpu->accel;
    struct nvmm_vcpu *vcpu = &qcpu->vcpu;

    if (qcpu->int_window_exit) {
        return false;
    }

    if (qcpu->int_shadow || !(cpu_env(cpu)->eflags & IF_MASK)) {
        struct nvmm_x64_state *state = vcpu->state;

        /* Exit on interrupt window (NVMM_X64_STATE_INTR get/set elided). */
        state->intr.int_window_exiting = 1;
        return false;
    }

    return true;

/* nvmm_can_take_nmi() */
    AccelCPUState *qcpu = cpu->accel;

    /*
     * Contrary to INTs, NMIs always schedule an exit when they are
     * completed. Therefore, if window-exiting is enabled, it means
     * NMIs are blocked.
     */
    if (qcpu->nmi_window_exit) {
        return false;
    }

    return true;

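The window-exiting pattern is worth isolating: when the guest cannot take an interrupt right now, QEMU leaves it pending in cpu->interrupt_request and asks NVMM to exit the moment injection becomes possible. A sketch of that request using the libnvmm state accessors seen above (a valid mach/vcpu pair is assumed; error handling elided):

#include <nvmm.h>

static void request_int_window(struct nvmm_machine *mach,
    struct nvmm_vcpu *vcpu)
{
    struct nvmm_x64_state *state = vcpu->state;

    /* Fetch only the interruptibility state, flip the bit, write back. */
    nvmm_vcpu_getstate(mach, vcpu, NVMM_X64_STATE_INTR);
    state->intr.int_window_exiting = 1;
    nvmm_vcpu_setstate(mach, vcpu, NVMM_X64_STATE_INTR);
}
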
/* nvmm_vcpu_pre_run() */
    AccelCPUState *qcpu = cpu->accel;
    struct nvmm_vcpu *vcpu = &qcpu->vcpu;
    struct nvmm_x64_state *state = vcpu->state;
    struct nvmm_vcpu_event *event = vcpu->event;

    /* Sync the TPR from the APIC if it changed. */
    tpr = cpu_get_apic_tpr(x86_cpu->apic_state);
    if (tpr != qcpu->tpr) {
        qcpu->tpr = tpr;
        sync_tpr = true;
    }

    /* Force an exit to process INIT requests or pending TPR accesses. */
    if (cpu->interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
        cpu->exit_request = 1;
    }

    if (!has_event && (cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
        /* Gated by nvmm_can_take_nmi() in the full function. */
        cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
        event->type = NVMM_VCPU_EVENT_INTR;
        event->vector = 2;    /* NMI */
        has_event = true;
    }

    if (!has_event && (cpu->interrupt_request & CPU_INTERRUPT_HARD)) {
        /* Gated by nvmm_can_take_int() in the full function. */
        cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
        event->type = NVMM_VCPU_EVENT_INTR;
        event->vector = cpu_get_pic_interrupt(env);
        has_event = true;
    }

    /* SMIs are not supported; discard them. */
    if (cpu->interrupt_request & CPU_INTERRUPT_SMI) {
        cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
    }

    if (sync_tpr) {
        ret = nvmm_vcpu_getstate(mach, vcpu, NVMM_X64_STATE_CRS);
        if (ret == -1) {
            /* ... error_report() elided ... */
        }
        state->crs[NVMM_X64_CR_CR8] = qcpu->tpr;
        ret = nvmm_vcpu_setstate(mach, vcpu, NVMM_X64_STATE_CRS);
        if (ret == -1) {
            /* ... error_report() elided ... */
        }
    }

    if (has_event) {
        ret = nvmm_vcpu_inject(mach, vcpu);
        if (ret == -1) {
            /* ... error_report() elided ... */
        }
    }

/* nvmm_vcpu_post_run() */
    AccelCPUState *qcpu = cpu->accel;
    CPUX86State *env = &x86_cpu->env;
    uint64_t tpr;

    env->eflags = exit->exitstate.rflags;
    qcpu->int_shadow = exit->exitstate.int_shadow;
    qcpu->int_window_exit = exit->exitstate.int_window_exiting;
    qcpu->nmi_window_exit = exit->exitstate.nmi_window_exiting;

    tpr = exit->exitstate.cr8;
    if (qcpu->tpr != tpr) {
        qcpu->tpr = tpr;
        cpu_set_apic_tpr(x86_cpu->apic_state, qcpu->tpr);
    }

/* -------------------------------------------------------------------------- */

/* nvmm_io_callback() */
    ret = address_space_rw(&address_space_io, io->port, attrs, io->data,
        io->size, !io->in);
    if (ret != MEMTX_OK) {
        error_report("NVMM: I/O Error [%s, port=%u, size=%zu]",
            (io->in ? "in" : "out"), io->port, io->size);
    }

    /* Needed, otherwise infinite loop. */
    current_cpu->vcpu_dirty = false;

/* nvmm_mem_callback() */
    cpu_physical_memory_rw(mem->gpa, mem->data, mem->size, mem->write);

    /* Needed, otherwise infinite loop. */
    current_cpu->vcpu_dirty = false;

/* -------------------------------------------------------------------------- */

/* nvmm_handle_mem() */
    ret = nvmm_assist_mem(mach, vcpu);
    if (ret == -1) {
        error_report("NVMM: Mem Assist Failed [gpa=%p]",
            (void *)vcpu->exit->u.mem.gpa);
    }

/* nvmm_handle_io() */
    ret = nvmm_assist_io(mach, vcpu);
    if (ret == -1) {
        error_report("NVMM: I/O Assist Failed [port=%d]",
            (int)vcpu->exit->u.io.port);
    }

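For context, the two callbacks above reach libnvmm through a struct nvmm_assist_callbacks registered per VCPU with NVMM_VCPU_CONF_CALLBACKS (visible in nvmm_init_vcpu() further down); nvmm_assist_mem()/nvmm_assist_io() then invoke them while emulating the faulting instruction. In the spirit of the file, the glue looks like:

static struct nvmm_assist_callbacks nvmm_callbacks = {
    .io = nvmm_io_callback,
    .mem = nvmm_mem_callback
};
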
/* nvmm_handle_rdmsr() */
    AccelCPUState *qcpu = cpu->accel;
    struct nvmm_vcpu *vcpu = &qcpu->vcpu;
    struct nvmm_x64_state *state = vcpu->state;

    switch (exit->u.rdmsr.msr) {
    case MSR_IA32_APICBASE:
        val = cpu_get_apic_base(x86_cpu->apic_state);
        break;
    default:
        val = 0;
        error_report("NVMM: Unexpected RDMSR 0x%x, ignored",
            exit->u.rdmsr.msr);
        break;
    }

    ret = nvmm_vcpu_getstate(mach, vcpu, NVMM_X64_STATE_GPRS);
    if (ret == -1) {
        return -1;
    }
    /* Return the value in EDX:EAX and advance RIP past the instruction. */
    state->gprs[NVMM_X64_GPR_RAX] = (val & 0xFFFFFFFF);
    state->gprs[NVMM_X64_GPR_RDX] = (val >> 32);
    state->gprs[NVMM_X64_GPR_RIP] = exit->u.rdmsr.npc;
    ret = nvmm_vcpu_setstate(mach, vcpu, NVMM_X64_STATE_GPRS);
    if (ret == -1) {
        return -1;
    }

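The RDMSR return convention in isolation: a 64-bit MSR value is split across EDX:EAX, which is exactly what the two gprs[] stores above reproduce, while npc ("next PC", provided by NVMM) moves RIP past the trapped instruction. A tiny worked example with a made-up value:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint64_t val = 0x00000001234567ABULL;   /* hypothetical MSR value */
    uint64_t rax = val & 0xFFFFFFFF;        /* low half  -> 0x234567AB */
    uint64_t rdx = val >> 32;               /* high half -> 0x00000001 */
    assert(((rdx << 32) | rax) == val);     /* lossless round-trip */
    return 0;
}
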
/* nvmm_handle_wrmsr() */
    AccelCPUState *qcpu = cpu->accel;
    struct nvmm_vcpu *vcpu = &qcpu->vcpu;
    struct nvmm_x64_state *state = vcpu->state;

    val = exit->u.wrmsr.val;
    switch (exit->u.wrmsr.msr) {
    case MSR_IA32_APICBASE:
        cpu_set_apic_base(x86_cpu->apic_state, val);
        break;
    default:
        error_report("NVMM: Unexpected WRMSR 0x%x [val=0x%lx], ignored",
            exit->u.wrmsr.msr, val);
        break;
    }

    ret = nvmm_vcpu_getstate(mach, vcpu, NVMM_X64_STATE_GPRS);
    if (ret == -1) {
        return -1;
    }
    /* Advance RIP past the WRMSR instruction. */
    state->gprs[NVMM_X64_GPR_RIP] = exit->u.wrmsr.npc;
    ret = nvmm_vcpu_setstate(mach, vcpu, NVMM_X64_STATE_GPRS);
    if (ret == -1) {
        return -1;
    }

/* nvmm_handle_halted() */
    if (!((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
          (cpu_env(cpu)->eflags & IF_MASK)) &&
        !(cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
        /* No interrupt can wake the guest: actually halt. */
        cpu->exception_index = EXCP_HLT;
        cpu->halted = true;
    }

/* nvmm_inject_ud() */
    struct nvmm_vcpu_event *event = vcpu->event;

    event->type = NVMM_VCPU_EVENT_EXCP;
    event->vector = 6;    /* #UD, invalid opcode */
    event->u.excp.error = 0;

/* nvmm_vcpu_loop() */
    AccelCPUState *qcpu = cpu->accel;
    struct nvmm_vcpu *vcpu = &qcpu->vcpu;
    CPUX86State *env = &x86_cpu->env;
    struct nvmm_vcpu_exit *exit = vcpu->exit;

    /* Asynchronous events, handled outside of the inner VCPU loop. */
    if (cpu->interrupt_request & CPU_INTERRUPT_INIT) {
        /* ... do_cpu_init() path ... */
    }
    if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
        apic_poll_irq(x86_cpu->apic_state);
    }
    if (((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
         (env->eflags & IF_MASK)) ||
        (cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
        cpu->halted = false;
    }
    if (cpu->interrupt_request & CPU_INTERRUPT_SIPI) {
        /* ... do_cpu_sipi() path ... */
    }
    if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
        cpu->interrupt_request &= ~CPU_INTERRUPT_TPR;
        apic_handle_tpr_access_report(x86_cpu->apic_state, env->eip,
            env->tpr_access_type);
    }

    if (cpu->halted) {
        cpu->exception_index = EXCP_HLT;
        qatomic_set(&cpu->exit_request, false);
        return 0;
    }

    /* Inner VCPU loop. */
    do {
        if (cpu->vcpu_dirty) {
            nvmm_set_registers(cpu);
            cpu->vcpu_dirty = false;
        }
        if (qcpu->stop) {
            cpu->exception_index = EXCP_INTERRUPT;
            qcpu->stop = false;
            ret = 1;
            break;
        }
        if (qatomic_read(&cpu->exit_request)) {
            nvmm_vcpu_stop(vcpu);    /* ask the kernel to exit ASAP */
        }
        ret = nvmm_vcpu_run(mach, vcpu);
        if (ret == -1) {
            break;
        }
        switch (exit->reason) {
        case NVMM_VCPU_EXIT_STOPPED:
            /*
             * The kernel cleared the immediate exit flag; cpu->exit_request
             * must be cleared after it, which happens below.
             */
            qcpu->stop = true;
            break;
        case NVMM_VCPU_EXIT_SHUTDOWN:
            /* Guest shutdown: leave the loop (system reset request elided). */
            cpu->exception_index = EXCP_INTERRUPT;
            ret = 1;
            break;
        /* ... memory, I/O, MSR and halt exits are dispatched to the
           handlers above; those arms are elided in this match listing ... */
        default:
            error_report("NVMM: Unexpected VM exit code 0x%lx [hw=0x%lx]",
                exit->reason, exit->u.inv.hwcode);
            ret = -1;
            break;
        }
    } while (ret == 0);

    qatomic_set(&cpu->exit_request, false);

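Stripped of the QEMU bookkeeping, the inner loop reduces to a classic run/dispatch skeleton. A sketch against the libnvmm API (exit-reason constants from nvmm.h; the assist calls stand in for the QEMU handlers above):

#include <nvmm.h>

static int run_once(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu)
{
    if (nvmm_vcpu_run(mach, vcpu) == -1) {
        return -1;                           /* run failure is fatal */
    }
    switch (vcpu->exit->reason) {
    case NVMM_VCPU_EXIT_NONE:
        return 0;                            /* nothing to do, keep running */
    case NVMM_VCPU_EXIT_MEMORY:
        return nvmm_assist_mem(mach, vcpu);  /* MMIO emulation */
    case NVMM_VCPU_EXIT_IO:
        return nvmm_assist_io(mach, vcpu);   /* port I/O emulation */
    default:
        return 1;                            /* bounce to the outer loop */
    }
}
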
/* -------------------------------------------------------------------------- */

/* do_nvmm_cpu_synchronize_state(): state pulled from the kernel, so the
   QEMU-side copy is now authoritative and must be pushed before the next run. */
    cpu->vcpu_dirty = true;

/* do_nvmm_cpu_synchronize_post_reset(): state pushed back to the kernel. */
    cpu->vcpu_dirty = false;

/* do_nvmm_cpu_synchronize_post_init(): state pushed back to the kernel. */
    cpu->vcpu_dirty = false;

/* do_nvmm_cpu_synchronize_pre_loadvm(): force a push before the next run. */
    cpu->vcpu_dirty = true;

/* nvmm_cpu_synchronize_state() */
    if (!cpu->vcpu_dirty) {
        run_on_cpu(cpu, do_nvmm_cpu_synchronize_state, RUN_ON_CPU_NULL);
    }

/* -------------------------------------------------------------------------- */

/* nvmm_ipi_signal() */
    AccelCPUState *qcpu = current_cpu->accel;
    struct nvmm_vcpu *vcpu = &qcpu->vcpu;

    qcpu->stop = true;

/* nvmm_init_vcpu() */
    /* Migration is unsupported: install a blocker, bail out on failure. */
    if (migrate_add_blocker(&nvmm_migration_blocker, &local_error) < 0) {
        error_report_err(local_error);
        return -EINVAL;
    }

    ret = nvmm_vcpu_create(mach, cpu->cpu_index, &qcpu->vcpu);
    if (ret == -1) {
        err = errno;
        return -err;
    }

    ret = nvmm_vcpu_configure(mach, &qcpu->vcpu, NVMM_VCPU_CONF_CPUID,
        &cpuid);
    if (ret == -1) {
        err = errno;
        return -err;
    }

    ret = nvmm_vcpu_configure(mach, &qcpu->vcpu, NVMM_VCPU_CONF_CALLBACKS,
        &nvmm_callbacks);
    if (ret == -1) {
        err = errno;
        return -err;
    }

    ret = nvmm_vcpu_configure(mach, &qcpu->vcpu, NVMM_VCPU_CONF_TPR, &tpr);
    if (ret == -1) {
        err = errno;
        return -err;
    }

    qcpu->vcpu_dirty = true;
    cpu->accel = qcpu;

/* nvmm_vcpu_exec() */
    if (cpu->exception_index >= EXCP_INTERRUPT) {
        ret = cpu->exception_index;
        cpu->exception_index = -1;
        break;    /* leave the outer loop and report the reason */
    }

/* nvmm_destroy_vcpu() */
    AccelCPUState *qcpu = cpu->accel;

    nvmm_vcpu_destroy(mach, &qcpu->vcpu);
    g_free(cpu->accel);

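Putting the create/destroy calls in order: the libnvmm lifecycle that this file spreads across nvmm_accel_init(), nvmm_init_vcpu() and nvmm_destroy_vcpu() is, in the small, the following sketch (error reporting collapsed; guest memory setup and the run loop omitted):

#include <nvmm.h>

int lifecycle(void)
{
    struct nvmm_machine mach;
    struct nvmm_vcpu vcpu;

    if (nvmm_init() == -1 ||                    /* open the NVMM device */
        nvmm_machine_create(&mach) == -1 ||
        nvmm_vcpu_create(&mach, 0, &vcpu) == -1) {
        return -1;
    }
    /* ... nvmm_hva_map()/nvmm_gpa_map() the guest RAM, set state, run ... */
    nvmm_vcpu_destroy(&mach, &vcpu);
    nvmm_machine_destroy(&mach);
    return 0;
}
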
/* -------------------------------------------------------------------------- */

/* nvmm_update_mapping(): error check after nvmm_gpa_map()/nvmm_gpa_unmap(). */
    if (ret == -1) {
        /* ... error_report() elided ... */
    }

/* nvmm_process_section() */
    MemoryRegion *mr = section->mr;
    hwaddr start_pa = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);

    /* Adjust start_pa and size so that they are page-aligned. */
    delta = qemu_real_host_page_size() - (start_pa & ~qemu_real_host_page_mask());
    delta &= ~qemu_real_host_page_mask();
    if (delta > size) {
        return;
    }
    start_pa += delta;
    size -= delta;
    size &= qemu_real_host_page_mask();

    hva = (uintptr_t)memory_region_get_ram_ptr(mr) +
        section->offset_within_region + delta;

    nvmm_update_mapping(start_pa, size, hva, add,
        memory_region_is_rom(mr), mr->name);

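The alignment arithmetic is easier to see with numbers. Assuming 4 KiB host pages, a region starting at 0x10234 with size 0x3000 gets trimmed to the page-aligned subrange [0x11000, 0x13000); the values below are hypothetical:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    const uint64_t page_size = 0x1000;
    const uint64_t page_mask = ~(page_size - 1);   /* ...fffff000 */
    uint64_t start_pa = 0x10234, size = 0x3000, delta;

    delta = page_size - (start_pa & ~page_mask);   /* 0xdcc to next page */
    delta &= ~page_mask;                           /* 0 if already aligned */
    assert(delta <= size);
    start_pa += delta;                             /* -> 0x11000 */
    size = (size - delta) & page_mask;             /* 0x2234 -> 0x2000 */
    assert(start_pa == 0x11000 && size == 0x2000);
    return 0;
}
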
/* nvmm_region_add() */
    memory_region_ref(section->mr);

/* nvmm_region_del() */
    memory_region_unref(section->mr);

/* nvmm_log_sync() */
    MemoryRegion *mr = section->mr;

    memory_region_set_dirty(mr, 0, int128_get64(section->size));

/* nvmm_ram_block_added(): error check after nvmm_hva_map(). */
    if (ret == -1) {
        /* ... error_report() elided ... */
    }

/* -------------------------------------------------------------------------- */

/* nvmm_accel_init() */
    ret = nvmm_init();
    if (ret == -1) {
        err = errno;
        return -err;
    }
    ret = nvmm_capability(&qemu_mach.cap);
    if (ret == -1) {
        err = errno;
        return -err;
    }
    /* ABI checks against the nvmm_capability() results. */
    if (qemu_mach.cap.version != NVMM_KERN_VERSION) {
        return -EPROGMISMATCH;
    }
    if (qemu_mach.cap.state_size != sizeof(struct nvmm_x64_state)) {
        return -EPROGMISMATCH;
    }
    ret = nvmm_machine_create(&qemu_mach.mach);
    if (ret == -1) {
        err = errno;
        return -err;
    }

/* nvmm_accel_class_init() */
    ac->name = "NVMM";
    ac->init_machine = nvmm_accel_init;
    ac->allowed = &nvmm_allowed;

/* nvmm_cpu_accel_class_init() */
    acc->cpu_instance_init = nvmm_cpu_instance_init;