/*
 * Copyright (c) 2018-2019 Maxime Villard, All rights reserved.
 *
 * NetBSD Virtual Machine Monitor (NVMM) accelerator for QEMU.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "system/address-spaces.h"
#include "system/ioport.h"
#include "qemu/accel.h"
#include "system/nvmm.h"
#include "system/cpus.h"
#include "system/runstate.h"
#include "qemu/main-loop.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "qemu/queue.h"
#include "accel/accel-cpu-target.h"
#include "host-cpu.h"
#include "migration/blocker.h"
#include "strings.h"

#include "nvmm-accel-ops.h"

#include <nvmm.h>

/* Per-vCPU accelerator state, hung off CPUState::accel. */
struct AccelCPUState {
    struct nvmm_vcpu vcpu;      /* kernel-side vCPU handle and comm page */
    uint8_t tpr;                /* cached copy of the guest CR8/TPR */
    bool stop;                  /* request to leave the inner run loop */

    /* Window-exiting for INTs/NMIs. */
    bool int_window_exit;
    bool nmi_window_exit;

    /* The guest is in an interrupt shadow (POP SS, etc). */
    bool int_shadow;
};

/* Global accelerator state: the single NVMM machine and its capabilities. */
struct qemu_machine {
    struct nvmm_capability cap;
    struct nvmm_machine mach;
};

/* -------------------------------------------------------------------------- */

bool nvmm_allowed;
static struct qemu_machine qemu_mach;

static struct nvmm_machine *
get_nvmm_mach(void)
{
    return &qemu_mach.mach;
}

/* -------------------------------------------------------------------------- */

/*
 * Translate a QEMU segment descriptor cache into the NVMM segment
 * representation (packed flags -> discrete attribute bitfields).
 */
static void
nvmm_set_segment(struct nvmm_x64_state_seg *nseg, const SegmentCache *qseg)
{
    uint32_t attrib = qseg->flags;

    nseg->selector = qseg->selector;
    nseg->limit = qseg->limit;
    nseg->base = qseg->base;
    nseg->attrib.type = __SHIFTOUT(attrib, DESC_TYPE_MASK);
    nseg->attrib.s = __SHIFTOUT(attrib, DESC_S_MASK);
    nseg->attrib.dpl = __SHIFTOUT(attrib, DESC_DPL_MASK);
    nseg->attrib.p = __SHIFTOUT(attrib, DESC_P_MASK);
    nseg->attrib.avl = __SHIFTOUT(attrib, DESC_AVL_MASK);
    nseg->attrib.l = __SHIFTOUT(attrib, DESC_L_MASK);
    nseg->attrib.def = __SHIFTOUT(attrib, DESC_B_MASK);
    nseg->attrib.g = __SHIFTOUT(attrib, DESC_G_MASK);
}

/*
 * Push the QEMU-side CPU state (GPRs, segments, CRs, DRs, FPU, MSRs)
 * down into the kernel vCPU via nvmm_vcpu_setstate().
 */
static void
nvmm_set_registers(CPUState *cpu)
{
    CPUX86State *env = cpu_env(cpu);
    struct nvmm_machine *mach = get_nvmm_mach();
    AccelCPUState *qcpu = cpu->accel;
    struct nvmm_vcpu *vcpu = &qcpu->vcpu;
    struct nvmm_x64_state *state = vcpu->state;
    uint64_t bitmap;
    size_t i;
    int ret;

    /* Only safe when the vCPU is not concurrently running. */
    assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));

    /* GPRs. */
    state->gprs[NVMM_X64_GPR_RAX] = env->regs[R_EAX];
    state->gprs[NVMM_X64_GPR_RCX] = env->regs[R_ECX];
    state->gprs[NVMM_X64_GPR_RDX] = env->regs[R_EDX];
    state->gprs[NVMM_X64_GPR_RBX] = env->regs[R_EBX];
    state->gprs[NVMM_X64_GPR_RSP] = env->regs[R_ESP];
    state->gprs[NVMM_X64_GPR_RBP] = env->regs[R_EBP];
    state->gprs[NVMM_X64_GPR_RSI] = env->regs[R_ESI];
    state->gprs[NVMM_X64_GPR_RDI] = env->regs[R_EDI];
#ifdef TARGET_X86_64
    state->gprs[NVMM_X64_GPR_R8]  = env->regs[R_R8];
    state->gprs[NVMM_X64_GPR_R9]  = env->regs[R_R9];
    state->gprs[NVMM_X64_GPR_R10] = env->regs[R_R10];
    state->gprs[NVMM_X64_GPR_R11] = env->regs[R_R11];
    state->gprs[NVMM_X64_GPR_R12] = env->regs[R_R12];
    state->gprs[NVMM_X64_GPR_R13] = env->regs[R_R13];
    state->gprs[NVMM_X64_GPR_R14] = env->regs[R_R14];
    state->gprs[NVMM_X64_GPR_R15] = env->regs[R_R15];
#endif

    /* RIP and RFLAGS. */
    state->gprs[NVMM_X64_GPR_RIP] = env->eip;
    state->gprs[NVMM_X64_GPR_RFLAGS] = env->eflags;

    /* Segments. */
    nvmm_set_segment(&state->segs[NVMM_X64_SEG_CS], &env->segs[R_CS]);
    nvmm_set_segment(&state->segs[NVMM_X64_SEG_DS], &env->segs[R_DS]);
    nvmm_set_segment(&state->segs[NVMM_X64_SEG_ES], &env->segs[R_ES]);
    nvmm_set_segment(&state->segs[NVMM_X64_SEG_FS], &env->segs[R_FS]);
    nvmm_set_segment(&state->segs[NVMM_X64_SEG_GS], &env->segs[R_GS]);
    nvmm_set_segment(&state->segs[NVMM_X64_SEG_SS], &env->segs[R_SS]);

    /* Special segments. */
    nvmm_set_segment(&state->segs[NVMM_X64_SEG_GDT], &env->gdt);
    nvmm_set_segment(&state->segs[NVMM_X64_SEG_LDT], &env->ldt);
    nvmm_set_segment(&state->segs[NVMM_X64_SEG_TR], &env->tr);
    nvmm_set_segment(&state->segs[NVMM_X64_SEG_IDT], &env->idt);

    /* Control registers.  CR8 comes from the locally cached TPR. */
    state->crs[NVMM_X64_CR_CR0] = env->cr[0];
    state->crs[NVMM_X64_CR_CR2] = env->cr[2];
    state->crs[NVMM_X64_CR_CR3] = env->cr[3];
    state->crs[NVMM_X64_CR_CR4] = env->cr[4];
    state->crs[NVMM_X64_CR_CR8] = qcpu->tpr;
    state->crs[NVMM_X64_CR_XCR0] = env->xcr0;

    /* Debug registers. */
    state->drs[NVMM_X64_DR_DR0] = env->dr[0];
    state->drs[NVMM_X64_DR_DR1] = env->dr[1];
    state->drs[NVMM_X64_DR_DR2] = env->dr[2];
    state->drs[NVMM_X64_DR_DR3] = env->dr[3];
    state->drs[NVMM_X64_DR_DR6] = env->dr[6];
    state->drs[NVMM_X64_DR_DR7] = env->dr[7];

    /*
     * FPU.  QEMU keeps the x87 top-of-stack (fpstt) separate from the
     * status word; fold it back into FSW bits 11-13.  QEMU's fptags are
     * inverted ("empty") relative to the FXSAVE abridged tag byte.
     */
    state->fpu.fx_cw = env->fpuc;
    state->fpu.fx_sw = (env->fpus & ~0x3800) | ((env->fpstt & 0x7) << 11);
    state->fpu.fx_tw = 0;
    for (i = 0; i < 8; i++) {
        state->fpu.fx_tw |= (!env->fptags[i]) << i;
    }
    state->fpu.fx_opcode = env->fpop;
    state->fpu.fx_ip.fa_64 = env->fpip;
    state->fpu.fx_dp.fa_64 = env->fpdp;
    state->fpu.fx_mxcsr = env->mxcsr;
    state->fpu.fx_mxcsr_mask = 0x0000FFFF;
    assert(sizeof(state->fpu.fx_87_ac) == sizeof(env->fpregs));
    memcpy(state->fpu.fx_87_ac, env->fpregs, sizeof(env->fpregs));
    for (i = 0; i < CPU_NB_REGS; i++) {
        memcpy(&state->fpu.fx_xmm[i].xmm_bytes[0],
            &env->xmm_regs[i].ZMM_Q(0), 8);
        memcpy(&state->fpu.fx_xmm[i].xmm_bytes[8],
            &env->xmm_regs[i].ZMM_Q(1), 8);
    }

    /* MSRs. */
    state->msrs[NVMM_X64_MSR_EFER] = env->efer;
    state->msrs[NVMM_X64_MSR_STAR] = env->star;
#ifdef TARGET_X86_64
    state->msrs[NVMM_X64_MSR_LSTAR] = env->lstar;
    state->msrs[NVMM_X64_MSR_CSTAR] = env->cstar;
    state->msrs[NVMM_X64_MSR_SFMASK] = env->fmask;
    state->msrs[NVMM_X64_MSR_KERNELGSBASE] = env->kernelgsbase;
#endif
    state->msrs[NVMM_X64_MSR_SYSENTER_CS]  = env->sysenter_cs;
    state->msrs[NVMM_X64_MSR_SYSENTER_ESP] = env->sysenter_esp;
    state->msrs[NVMM_X64_MSR_SYSENTER_EIP] = env->sysenter_eip;
    state->msrs[NVMM_X64_MSR_PAT] = env->pat;
    state->msrs[NVMM_X64_MSR_TSC] = env->tsc;

    bitmap =
        NVMM_X64_STATE_SEGS |
        NVMM_X64_STATE_GPRS |
        NVMM_X64_STATE_CRS  |
        NVMM_X64_STATE_DRS  |
        NVMM_X64_STATE_MSRS |
        NVMM_X64_STATE_FPU;

    ret = nvmm_vcpu_setstate(mach, vcpu, bitmap);
    if (ret == -1) {
        error_report("NVMM: Failed to set virtual processor context,"
            " error=%d", errno);
    }
}

/*
 * Translate an NVMM segment back into the QEMU descriptor cache
 * (inverse of nvmm_set_segment()).
 */
static void
nvmm_get_segment(SegmentCache *qseg, const struct nvmm_x64_state_seg *nseg)
{
    qseg->selector = nseg->selector;
    qseg->limit = nseg->limit;
    qseg->base = nseg->base;

    qseg->flags =
        __SHIFTIN((uint32_t)nseg->attrib.type, DESC_TYPE_MASK) |
        __SHIFTIN((uint32_t)nseg->attrib.s, DESC_S_MASK) |
        __SHIFTIN((uint32_t)nseg->attrib.dpl, DESC_DPL_MASK) |
        __SHIFTIN((uint32_t)nseg->attrib.p, DESC_P_MASK) |
        __SHIFTIN((uint32_t)nseg->attrib.avl, DESC_AVL_MASK) |
        __SHIFTIN((uint32_t)nseg->attrib.l, DESC_L_MASK) |
        __SHIFTIN((uint32_t)nseg->attrib.def, DESC_B_MASK) |
        __SHIFTIN((uint32_t)nseg->attrib.g, DESC_G_MASK);
}

/*
 * Pull the kernel vCPU state back into the QEMU-side CPU structures
 * (inverse of nvmm_set_registers()), and propagate a TPR change to the
 * APIC model.
 */
static void
nvmm_get_registers(CPUState *cpu)
{
    CPUX86State *env = cpu_env(cpu);
    struct nvmm_machine *mach = get_nvmm_mach();
    AccelCPUState *qcpu = cpu->accel;
    struct nvmm_vcpu *vcpu = &qcpu->vcpu;
    X86CPU *x86_cpu = X86_CPU(cpu);
    struct nvmm_x64_state *state = vcpu->state;
    uint64_t bitmap, tpr;
    size_t i;
    int ret;

    assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));

    bitmap =
        NVMM_X64_STATE_SEGS |
        NVMM_X64_STATE_GPRS |
        NVMM_X64_STATE_CRS  |
        NVMM_X64_STATE_DRS  |
        NVMM_X64_STATE_MSRS |
        NVMM_X64_STATE_FPU;

    ret = nvmm_vcpu_getstate(mach, vcpu, bitmap);
    if (ret == -1) {
        error_report("NVMM: Failed to get virtual processor context,"
            " error=%d", errno);
    }

    /* GPRs. */
    env->regs[R_EAX] = state->gprs[NVMM_X64_GPR_RAX];
    env->regs[R_ECX] = state->gprs[NVMM_X64_GPR_RCX];
    env->regs[R_EDX] = state->gprs[NVMM_X64_GPR_RDX];
    env->regs[R_EBX] = state->gprs[NVMM_X64_GPR_RBX];
    env->regs[R_ESP] = state->gprs[NVMM_X64_GPR_RSP];
    env->regs[R_EBP] = state->gprs[NVMM_X64_GPR_RBP];
    env->regs[R_ESI] = state->gprs[NVMM_X64_GPR_RSI];
    env->regs[R_EDI] = state->gprs[NVMM_X64_GPR_RDI];
#ifdef TARGET_X86_64
    env->regs[R_R8]  = state->gprs[NVMM_X64_GPR_R8];
    env->regs[R_R9]  = state->gprs[NVMM_X64_GPR_R9];
    env->regs[R_R10] = state->gprs[NVMM_X64_GPR_R10];
    env->regs[R_R11] = state->gprs[NVMM_X64_GPR_R11];
    env->regs[R_R12] = state->gprs[NVMM_X64_GPR_R12];
    env->regs[R_R13] = state->gprs[NVMM_X64_GPR_R13];
    env->regs[R_R14] = state->gprs[NVMM_X64_GPR_R14];
    env->regs[R_R15] = state->gprs[NVMM_X64_GPR_R15];
#endif

    /* RIP and RFLAGS. */
    env->eip = state->gprs[NVMM_X64_GPR_RIP];
    env->eflags = state->gprs[NVMM_X64_GPR_RFLAGS];

    /* Segments. */
    nvmm_get_segment(&env->segs[R_ES], &state->segs[NVMM_X64_SEG_ES]);
    nvmm_get_segment(&env->segs[R_CS], &state->segs[NVMM_X64_SEG_CS]);
    nvmm_get_segment(&env->segs[R_SS], &state->segs[NVMM_X64_SEG_SS]);
    nvmm_get_segment(&env->segs[R_DS], &state->segs[NVMM_X64_SEG_DS]);
    nvmm_get_segment(&env->segs[R_FS], &state->segs[NVMM_X64_SEG_FS]);
    nvmm_get_segment(&env->segs[R_GS], &state->segs[NVMM_X64_SEG_GS]);

    /* Special segments. */
    nvmm_get_segment(&env->gdt, &state->segs[NVMM_X64_SEG_GDT]);
    nvmm_get_segment(&env->ldt, &state->segs[NVMM_X64_SEG_LDT]);
    nvmm_get_segment(&env->tr, &state->segs[NVMM_X64_SEG_TR]);
    nvmm_get_segment(&env->idt, &state->segs[NVMM_X64_SEG_IDT]);

    /* Control registers.  A CR8 change is forwarded to the APIC. */
    env->cr[0] = state->crs[NVMM_X64_CR_CR0];
    env->cr[2] = state->crs[NVMM_X64_CR_CR2];
    env->cr[3] = state->crs[NVMM_X64_CR_CR3];
    env->cr[4] = state->crs[NVMM_X64_CR_CR4];
    tpr = state->crs[NVMM_X64_CR_CR8];
    if (tpr != qcpu->tpr) {
        qcpu->tpr = tpr;
        cpu_set_apic_tpr(x86_cpu->apic_state, tpr);
    }
    env->xcr0 = state->crs[NVMM_X64_CR_XCR0];

    /* Debug registers. */
    env->dr[0] = state->drs[NVMM_X64_DR_DR0];
    env->dr[1] = state->drs[NVMM_X64_DR_DR1];
    env->dr[2] = state->drs[NVMM_X64_DR_DR2];
    env->dr[3] = state->drs[NVMM_X64_DR_DR3];
    env->dr[6] = state->drs[NVMM_X64_DR_DR6];
    env->dr[7] = state->drs[NVMM_X64_DR_DR7];

    /* FPU.  Split FSW back into status word + top-of-stack. */
    env->fpuc = state->fpu.fx_cw;
    env->fpstt = (state->fpu.fx_sw >> 11) & 0x7;
    env->fpus = state->fpu.fx_sw & ~0x3800;
    for (i = 0; i < 8; i++) {
        env->fptags[i] = !((state->fpu.fx_tw >> i) & 1);
    }
    env->fpop = state->fpu.fx_opcode;
    env->fpip = state->fpu.fx_ip.fa_64;
    env->fpdp = state->fpu.fx_dp.fa_64;
    env->mxcsr = state->fpu.fx_mxcsr;
    assert(sizeof(state->fpu.fx_87_ac) == sizeof(env->fpregs));
    memcpy(env->fpregs, state->fpu.fx_87_ac, sizeof(env->fpregs));
    for (i = 0; i < CPU_NB_REGS; i++) {
        memcpy(&env->xmm_regs[i].ZMM_Q(0),
            &state->fpu.fx_xmm[i].xmm_bytes[0], 8);
        memcpy(&env->xmm_regs[i].ZMM_Q(1),
            &state->fpu.fx_xmm[i].xmm_bytes[8], 8);
    }

    /* MSRs. */
    env->efer = state->msrs[NVMM_X64_MSR_EFER];
    env->star = state->msrs[NVMM_X64_MSR_STAR];
#ifdef TARGET_X86_64
    env->lstar = state->msrs[NVMM_X64_MSR_LSTAR];
    env->cstar = state->msrs[NVMM_X64_MSR_CSTAR];
    env->fmask = state->msrs[NVMM_X64_MSR_SFMASK];
    env->kernelgsbase = state->msrs[NVMM_X64_MSR_KERNELGSBASE];
#endif
    env->sysenter_cs  = state->msrs[NVMM_X64_MSR_SYSENTER_CS];
    env->sysenter_esp = state->msrs[NVMM_X64_MSR_SYSENTER_ESP];
    env->sysenter_eip = state->msrs[NVMM_X64_MSR_SYSENTER_EIP];
    env->pat = state->msrs[NVMM_X64_MSR_PAT];
    env->tsc = state->msrs[NVMM_X64_MSR_TSC];

    /* Recompute hflags from the freshly loaded state. */
    x86_update_hflags(env);
}

/*
 * Whether an external interrupt can be injected right now.  If not,
 * arm interrupt-window exiting so we get a chance later.
 */
static bool
nvmm_can_take_int(CPUState *cpu)
{
    AccelCPUState *qcpu = cpu->accel;
    struct nvmm_vcpu *vcpu = &qcpu->vcpu;
    struct nvmm_machine *mach = get_nvmm_mach();

    if (qcpu->int_window_exit) {
        return false;
    }

    if (qcpu->int_shadow || !(cpu_env(cpu)->eflags & IF_MASK)) {
        struct nvmm_x64_state *state = vcpu->state;

        /* Exit on interrupt window. */
        nvmm_vcpu_getstate(mach, vcpu, NVMM_X64_STATE_INTR);
        state->intr.int_window_exiting = 1;
        nvmm_vcpu_setstate(mach, vcpu, NVMM_X64_STATE_INTR);

        return false;
    }

    return true;
}

/* Whether an NMI can be injected right now. */
static bool
nvmm_can_take_nmi(CPUState *cpu)
{
    AccelCPUState *qcpu = cpu->accel;

    /*
     * Contrary to INTs, NMIs always schedule an exit when they are
     * completed. Therefore, if window-exiting is enabled, it means
     * NMIs are blocked.
     */
    if (qcpu->nmi_window_exit) {
        return false;
    }

    return true;
}

/*
 * Called before the VCPU is run. We inject events generated by the I/O
 * thread, and synchronize the guest TPR.
 */
static void
nvmm_vcpu_pre_run(CPUState *cpu)
{
    CPUX86State *env = cpu_env(cpu);
    struct nvmm_machine *mach = get_nvmm_mach();
    AccelCPUState *qcpu = cpu->accel;
    struct nvmm_vcpu *vcpu = &qcpu->vcpu;
    X86CPU *x86_cpu = X86_CPU(cpu);
    struct nvmm_x64_state *state = vcpu->state;
    struct nvmm_vcpu_event *event = vcpu->event;
    bool has_event = false;
    bool sync_tpr = false;
    uint8_t tpr;
    int ret;

    bql_lock();

    /* Pick up a TPR change made by the APIC model. */
    tpr = cpu_get_apic_tpr(x86_cpu->apic_state);
    if (tpr != qcpu->tpr) {
        qcpu->tpr = tpr;
        sync_tpr = true;
    }

    /*
     * Force the VCPU out of its inner loop to process any INIT requests
     * or commit pending TPR access.
     */
    if (cpu->interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
        cpu->exit_request = 1;
    }

    /* NMIs take priority over maskable interrupts; inject at most one event. */
    if (!has_event && (cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
        if (nvmm_can_take_nmi(cpu)) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
            event->type = NVMM_VCPU_EVENT_INTR;
            event->vector = 2;
            has_event = true;
        }
    }

    if (!has_event && (cpu->interrupt_request & CPU_INTERRUPT_HARD)) {
        if (nvmm_can_take_int(cpu)) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
            event->type = NVMM_VCPU_EVENT_INTR;
            event->vector = cpu_get_pic_interrupt(env);
            has_event = true;
        }
    }

    /* Don't want SMIs. */
    if (cpu->interrupt_request & CPU_INTERRUPT_SMI) {
        cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
    }

    if (sync_tpr) {
        /* Read-modify-write the CR set so only CR8 changes. */
        ret = nvmm_vcpu_getstate(mach, vcpu, NVMM_X64_STATE_CRS);
        if (ret == -1) {
            error_report("NVMM: Failed to get CPU state,"
                " error=%d", errno);
        }

        state->crs[NVMM_X64_CR_CR8] = qcpu->tpr;

        ret = nvmm_vcpu_setstate(mach, vcpu, NVMM_X64_STATE_CRS);
        if (ret == -1) {
            error_report("NVMM: Failed to set CPU state,"
                " error=%d", errno);
        }
    }

    if (has_event) {
        ret = nvmm_vcpu_inject(mach, vcpu);
        if (ret == -1) {
            error_report("NVMM: Failed to inject event,"
                " error=%d", errno);
        }
    }

    bql_unlock();
}

/*
 * Called after the VCPU ran. We synchronize the host view of the TPR and
 * RFLAGS.
 */
static void
nvmm_vcpu_post_run(CPUState *cpu, struct nvmm_vcpu_exit *exit)
{
    AccelCPUState *qcpu = cpu->accel;
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;
    uint64_t tpr;

    env->eflags = exit->exitstate.rflags;
    qcpu->int_shadow = exit->exitstate.int_shadow;
    qcpu->int_window_exit = exit->exitstate.int_window_exiting;
    qcpu->nmi_window_exit = exit->exitstate.nmi_window_exiting;

    tpr = exit->exitstate.cr8;
    if (qcpu->tpr != tpr) {
        qcpu->tpr = tpr;
        bql_lock();
        cpu_set_apic_tpr(x86_cpu->apic_state, qcpu->tpr);
        bql_unlock();
    }
}

/* -------------------------------------------------------------------------- */

/* libnvmm I/O-assist callback: perform the port access on the QEMU side. */
static void
nvmm_io_callback(struct nvmm_io *io)
{
    MemTxAttrs attrs = { 0 };
    int ret;

    ret = address_space_rw(&address_space_io, io->port, attrs, io->data,
        io->size, !io->in);
    if (ret != MEMTX_OK) {
        error_report("NVMM: I/O Transaction Failed "
            "[%s, port=%u, size=%zu]", (io->in ? "in" : "out"),
            io->port, io->size);
    }

    /* Needed, otherwise infinite loop. */
    current_cpu->vcpu_dirty = false;
}

/* libnvmm MMIO-assist callback: perform the guest-physical access. */
static void
nvmm_mem_callback(struct nvmm_mem *mem)
{
    cpu_physical_memory_rw(mem->gpa, mem->data, mem->size, mem->write);

    /* Needed, otherwise infinite loop. */
    current_cpu->vcpu_dirty = false;
}

static struct nvmm_assist_callbacks nvmm_callbacks = {
    .io = nvmm_io_callback,
    .mem = nvmm_mem_callback
};

/* -------------------------------------------------------------------------- */

/* Let libnvmm emulate the faulting MMIO access via nvmm_mem_callback(). */
static int
nvmm_handle_mem(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu)
{
    int ret;

    ret = nvmm_assist_mem(mach, vcpu);
    if (ret == -1) {
        error_report("NVMM: Mem Assist Failed [gpa=%p]",
            (void *)vcpu->exit->u.mem.gpa);
    }

    return ret;
}

/* Let libnvmm emulate the exiting port I/O via nvmm_io_callback(). */
static int
nvmm_handle_io(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu)
{
    int ret;

    ret = nvmm_assist_io(mach, vcpu);
    if (ret == -1) {
        error_report("NVMM: I/O Assist Failed [port=%d]",
            (int)vcpu->exit->u.io.port);
    }

    return ret;
}

/*
 * Emulate a RDMSR exit: supply the value for the handful of MSRs we
 * model, zero (with a warning) for the rest, and advance RIP.
 */
static int
nvmm_handle_rdmsr(struct nvmm_machine *mach, CPUState *cpu,
    struct nvmm_vcpu_exit *exit)
{
    AccelCPUState *qcpu = cpu->accel;
    struct nvmm_vcpu *vcpu = &qcpu->vcpu;
    X86CPU *x86_cpu = X86_CPU(cpu);
    struct nvmm_x64_state *state = vcpu->state;
    uint64_t val;
    int ret;

    switch (exit->u.rdmsr.msr) {
    case MSR_IA32_APICBASE:
        val = cpu_get_apic_base(x86_cpu->apic_state);
        break;
    case MSR_MTRRcap:
    case MSR_MTRRdefType:
    case MSR_MCG_CAP:
    case MSR_MCG_STATUS:
        val = 0;
        break;
    default: /* More MSRs to add? */
        val = 0;
        error_report("NVMM: Unexpected RDMSR 0x%x, ignored",
            exit->u.rdmsr.msr);
        break;
    }

    ret = nvmm_vcpu_getstate(mach, vcpu, NVMM_X64_STATE_GPRS);
    if (ret == -1) {
        return -1;
    }

    /* RDMSR returns the value in EDX:EAX; npc is the next instruction. */
    state->gprs[NVMM_X64_GPR_RAX] = (val & 0xFFFFFFFF);
    state->gprs[NVMM_X64_GPR_RDX] = (val >> 32);
    state->gprs[NVMM_X64_GPR_RIP] = exit->u.rdmsr.npc;

    ret = nvmm_vcpu_setstate(mach, vcpu, NVMM_X64_STATE_GPRS);
    if (ret == -1) {
        return -1;
    }

    return 0;
}

/*
 * Emulate a WRMSR exit: apply the write for the MSRs we model, warn and
 * discard for the rest, and advance RIP.
 */
static int
nvmm_handle_wrmsr(struct nvmm_machine *mach, CPUState *cpu,
    struct nvmm_vcpu_exit *exit)
{
    AccelCPUState *qcpu = cpu->accel;
    struct nvmm_vcpu *vcpu = &qcpu->vcpu;
    X86CPU *x86_cpu = X86_CPU(cpu);
    struct nvmm_x64_state *state = vcpu->state;
    uint64_t val;
    int ret;

    val = exit->u.wrmsr.val;

    switch (exit->u.wrmsr.msr) {
    case MSR_IA32_APICBASE:
        cpu_set_apic_base(x86_cpu->apic_state, val);
        break;
    case MSR_MTRRdefType:
    case MSR_MCG_STATUS:
        break;
    default: /* More MSRs to add? */
        error_report("NVMM: Unexpected WRMSR 0x%x [val=0x%lx], ignored",
            exit->u.wrmsr.msr, val);
        break;
    }

    ret = nvmm_vcpu_getstate(mach, vcpu, NVMM_X64_STATE_GPRS);
    if (ret == -1) {
        return -1;
    }

    state->gprs[NVMM_X64_GPR_RIP] = exit->u.wrmsr.npc;

    ret = nvmm_vcpu_setstate(mach, vcpu, NVMM_X64_STATE_GPRS);
    if (ret == -1) {
        return -1;
    }

    return 0;
}

/*
 * HLT exit: halt the vCPU unless an interrupt or NMI is already pending
 * (in which case we fall through and keep running).
 */
static int
nvmm_handle_halted(struct nvmm_machine *mach, CPUState *cpu,
    struct nvmm_vcpu_exit *exit)
{
    int ret = 0;

    bql_lock();

    if (!((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
          (cpu_env(cpu)->eflags & IF_MASK)) &&
        !(cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
        cpu->exception_index = EXCP_HLT;
        cpu->halted = true;
        ret = 1;
    }

    bql_unlock();

    return ret;
}

/* Inject #UD (vector 6), used for MONITOR/MWAIT which we don't expose. */
static int
nvmm_inject_ud(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu)
{
    struct nvmm_vcpu_event *event = vcpu->event;

    event->type = NVMM_VCPU_EVENT_EXCP;
    event->vector = 6;
    event->u.excp.error = 0;

    return nvmm_vcpu_inject(mach, vcpu);
}

/*
 * One iteration of the outer vCPU loop: handle asynchronous events, then
 * run the inner NVMM loop until something requires returning to the
 * main loop.  Returns non-zero on fatal error.  Called with the BQL held.
 */
static int
nvmm_vcpu_loop(CPUState *cpu)
{
    struct nvmm_machine *mach = get_nvmm_mach();
    AccelCPUState *qcpu = cpu->accel;
    struct nvmm_vcpu *vcpu = &qcpu->vcpu;
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;
    struct nvmm_vcpu_exit *exit = vcpu->exit;
    int ret;

    /*
     * Some asynchronous events must be handled outside of the inner
     * VCPU loop. They are handled here.
     */
    if (cpu->interrupt_request & CPU_INTERRUPT_INIT) {
        nvmm_cpu_synchronize_state(cpu);
        do_cpu_init(x86_cpu);
        /* set int/nmi windows back to the reset state */
    }
    if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
        apic_poll_irq(x86_cpu->apic_state);
    }
    if (((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
         (env->eflags & IF_MASK)) ||
        (cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
        cpu->halted = false;
    }
    if (cpu->interrupt_request & CPU_INTERRUPT_SIPI) {
        nvmm_cpu_synchronize_state(cpu);
        do_cpu_sipi(x86_cpu);
    }
    if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
        cpu->interrupt_request &= ~CPU_INTERRUPT_TPR;
        nvmm_cpu_synchronize_state(cpu);
        apic_handle_tpr_access_report(x86_cpu->apic_state, env->eip,
            env->tpr_access_type);
    }

    if (cpu->halted) {
        cpu->exception_index = EXCP_HLT;
        qatomic_set(&cpu->exit_request, false);
        return 0;
    }

    /* The guest runs without the BQL. */
    bql_unlock();
    cpu_exec_start(cpu);

    /*
     * Inner VCPU loop.
     */
    do {
        if (cpu->vcpu_dirty) {
            nvmm_set_registers(cpu);
            cpu->vcpu_dirty = false;
        }

        if (qcpu->stop) {
            cpu->exception_index = EXCP_INTERRUPT;
            qcpu->stop = false;
            ret = 1;
            break;
        }

        nvmm_vcpu_pre_run(cpu);

        if (qatomic_read(&cpu->exit_request)) {
#if NVMM_USER_VERSION >= 2
            nvmm_vcpu_stop(vcpu);
#else
            qemu_cpu_kick_self();
#endif
        }

        /* Read exit_request before the kernel reads the immediate exit flag */
        smp_rmb();
        ret = nvmm_vcpu_run(mach, vcpu);
        if (ret == -1) {
            error_report("NVMM: Failed to exec a virtual processor,"
                " error=%d", errno);
            break;
        }

        nvmm_vcpu_post_run(cpu, exit);

        switch (exit->reason) {
        case NVMM_VCPU_EXIT_NONE:
            break;
#if NVMM_USER_VERSION >= 2
        case NVMM_VCPU_EXIT_STOPPED:
            /*
             * The kernel cleared the immediate exit flag; cpu->exit_request
             * must be cleared after
             */
            smp_wmb();
            qcpu->stop = true;
            break;
#endif
        case NVMM_VCPU_EXIT_MEMORY:
            ret = nvmm_handle_mem(mach, vcpu);
            break;
        case NVMM_VCPU_EXIT_IO:
            ret = nvmm_handle_io(mach, vcpu);
            break;
        case NVMM_VCPU_EXIT_INT_READY:
        case NVMM_VCPU_EXIT_NMI_READY:
        case NVMM_VCPU_EXIT_TPR_CHANGED:
            break;
        case NVMM_VCPU_EXIT_HALTED:
            ret = nvmm_handle_halted(mach, cpu, exit);
            break;
        case NVMM_VCPU_EXIT_SHUTDOWN:
            qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
            cpu->exception_index = EXCP_INTERRUPT;
            ret = 1;
            break;
        case NVMM_VCPU_EXIT_RDMSR:
            ret = nvmm_handle_rdmsr(mach, cpu, exit);
            break;
        case NVMM_VCPU_EXIT_WRMSR:
            ret = nvmm_handle_wrmsr(mach, cpu, exit);
            break;
        case NVMM_VCPU_EXIT_MONITOR:
        case NVMM_VCPU_EXIT_MWAIT:
            ret = nvmm_inject_ud(mach, vcpu);
            break;
        default:
            error_report("NVMM: Unexpected VM exit code 0x%lx [hw=0x%lx]",
                exit->reason, exit->u.inv.hwcode);
            nvmm_get_registers(cpu);
            bql_lock();
            qemu_system_guest_panicked(cpu_get_crash_info(cpu));
            bql_unlock();
            ret = -1;
            break;
        }
    } while (ret == 0);

    cpu_exec_end(cpu);
    bql_lock();

    qatomic_set(&cpu->exit_request, false);

    return ret < 0;
}

/* -------------------------------------------------------------------------- */

static void
do_nvmm_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
{
    nvmm_get_registers(cpu);
    cpu->vcpu_dirty = true;
}

static void
do_nvmm_cpu_synchronize_post_reset(CPUState *cpu, run_on_cpu_data arg)
{
    nvmm_set_registers(cpu);
    cpu->vcpu_dirty = false;
}

static void
do_nvmm_cpu_synchronize_post_init(CPUState *cpu, run_on_cpu_data arg)
{
    nvmm_set_registers(cpu);
    cpu->vcpu_dirty = false;
}

static void
do_nvmm_cpu_synchronize_pre_loadvm(CPUState *cpu, run_on_cpu_data arg)
{
    cpu->vcpu_dirty = true;
}

/* Fetch the kernel state into QEMU, unless it is already up to date. */
void nvmm_cpu_synchronize_state(CPUState *cpu)
{
    if (!cpu->vcpu_dirty) {
        run_on_cpu(cpu, do_nvmm_cpu_synchronize_state, RUN_ON_CPU_NULL);
    }
}

void nvmm_cpu_synchronize_post_reset(CPUState *cpu)
{
    run_on_cpu(cpu, do_nvmm_cpu_synchronize_post_reset, RUN_ON_CPU_NULL);
}

void nvmm_cpu_synchronize_post_init(CPUState *cpu)
{
    run_on_cpu(cpu, do_nvmm_cpu_synchronize_post_init, RUN_ON_CPU_NULL);
}

void nvmm_cpu_synchronize_pre_loadvm(CPUState *cpu)
{
    run_on_cpu(cpu, do_nvmm_cpu_synchronize_pre_loadvm, RUN_ON_CPU_NULL);
}

/* -------------------------------------------------------------------------- */

static Error *nvmm_migration_blocker;

/*
 * The nvmm_vcpu_stop() mechanism breaks races between entering the VMM
 * and another thread signaling the vCPU thread to exit.
883 */ 884 885 static void 886 nvmm_ipi_signal(int sigcpu) 887 { 888 if (current_cpu) { 889 AccelCPUState *qcpu = current_cpu->accel; 890 #if NVMM_USER_VERSION >= 2 891 struct nvmm_vcpu *vcpu = &qcpu->vcpu; 892 nvmm_vcpu_stop(vcpu); 893 #else 894 qcpu->stop = true; 895 #endif 896 } 897 } 898 899 static void 900 nvmm_init_cpu_signals(void) 901 { 902 struct sigaction sigact; 903 sigset_t set; 904 905 /* Install the IPI handler. */ 906 memset(&sigact, 0, sizeof(sigact)); 907 sigact.sa_handler = nvmm_ipi_signal; 908 sigaction(SIG_IPI, &sigact, NULL); 909 910 /* Allow IPIs on the current thread. */ 911 sigprocmask(SIG_BLOCK, NULL, &set); 912 sigdelset(&set, SIG_IPI); 913 pthread_sigmask(SIG_SETMASK, &set, NULL); 914 } 915 916 int 917 nvmm_init_vcpu(CPUState *cpu) 918 { 919 struct nvmm_machine *mach = get_nvmm_mach(); 920 struct nvmm_vcpu_conf_cpuid cpuid; 921 struct nvmm_vcpu_conf_tpr tpr; 922 Error *local_error = NULL; 923 AccelCPUState *qcpu; 924 int ret, err; 925 926 nvmm_init_cpu_signals(); 927 928 if (nvmm_migration_blocker == NULL) { 929 error_setg(&nvmm_migration_blocker, 930 "NVMM: Migration not supported"); 931 932 if (migrate_add_blocker(&nvmm_migration_blocker, &local_error) < 0) { 933 error_report_err(local_error); 934 return -EINVAL; 935 } 936 } 937 938 qcpu = g_new0(AccelCPUState, 1); 939 940 ret = nvmm_vcpu_create(mach, cpu->cpu_index, &qcpu->vcpu); 941 if (ret == -1) { 942 err = errno; 943 error_report("NVMM: Failed to create a virtual processor," 944 " error=%d", err); 945 g_free(qcpu); 946 return -err; 947 } 948 949 memset(&cpuid, 0, sizeof(cpuid)); 950 cpuid.mask = 1; 951 cpuid.leaf = 0x00000001; 952 cpuid.u.mask.set.edx = CPUID_MCE | CPUID_MCA | CPUID_MTRR; 953 ret = nvmm_vcpu_configure(mach, &qcpu->vcpu, NVMM_VCPU_CONF_CPUID, 954 &cpuid); 955 if (ret == -1) { 956 err = errno; 957 error_report("NVMM: Failed to configure a virtual processor," 958 " error=%d", err); 959 g_free(qcpu); 960 return -err; 961 } 962 963 ret = nvmm_vcpu_configure(mach, 
&qcpu->vcpu, NVMM_VCPU_CONF_CALLBACKS, 964 &nvmm_callbacks); 965 if (ret == -1) { 966 err = errno; 967 error_report("NVMM: Failed to configure a virtual processor," 968 " error=%d", err); 969 g_free(qcpu); 970 return -err; 971 } 972 973 if (qemu_mach.cap.arch.vcpu_conf_support & NVMM_CAP_ARCH_VCPU_CONF_TPR) { 974 memset(&tpr, 0, sizeof(tpr)); 975 tpr.exit_changed = 1; 976 ret = nvmm_vcpu_configure(mach, &qcpu->vcpu, NVMM_VCPU_CONF_TPR, &tpr); 977 if (ret == -1) { 978 err = errno; 979 error_report("NVMM: Failed to configure a virtual processor," 980 " error=%d", err); 981 g_free(qcpu); 982 return -err; 983 } 984 } 985 986 qcpu->vcpu_dirty = true; 987 cpu->accel = qcpu; 988 989 return 0; 990 } 991 992 int 993 nvmm_vcpu_exec(CPUState *cpu) 994 { 995 int ret, fatal; 996 997 while (1) { 998 if (cpu->exception_index >= EXCP_INTERRUPT) { 999 ret = cpu->exception_index; 1000 cpu->exception_index = -1; 1001 break; 1002 } 1003 1004 fatal = nvmm_vcpu_loop(cpu); 1005 1006 if (fatal) { 1007 error_report("NVMM: Failed to execute a VCPU."); 1008 abort(); 1009 } 1010 } 1011 1012 return ret; 1013 } 1014 1015 void 1016 nvmm_destroy_vcpu(CPUState *cpu) 1017 { 1018 struct nvmm_machine *mach = get_nvmm_mach(); 1019 AccelCPUState *qcpu = cpu->accel; 1020 1021 nvmm_vcpu_destroy(mach, &qcpu->vcpu); 1022 g_free(cpu->accel); 1023 } 1024 1025 /* -------------------------------------------------------------------------- */ 1026 1027 static void 1028 nvmm_update_mapping(hwaddr start_pa, ram_addr_t size, uintptr_t hva, 1029 bool add, bool rom, const char *name) 1030 { 1031 struct nvmm_machine *mach = get_nvmm_mach(); 1032 int ret, prot; 1033 1034 if (add) { 1035 prot = PROT_READ | PROT_EXEC; 1036 if (!rom) { 1037 prot |= PROT_WRITE; 1038 } 1039 ret = nvmm_gpa_map(mach, hva, start_pa, size, prot); 1040 } else { 1041 ret = nvmm_gpa_unmap(mach, hva, start_pa, size); 1042 } 1043 1044 if (ret == -1) { 1045 error_report("NVMM: Failed to %s GPA range '%s' PA:%p, " 1046 "Size:%p bytes, HostVA:%p, 
error=%d", 1047 (add ? "map" : "unmap"), name, (void *)(uintptr_t)start_pa, 1048 (void *)size, (void *)hva, errno); 1049 } 1050 } 1051 1052 static void 1053 nvmm_process_section(MemoryRegionSection *section, int add) 1054 { 1055 MemoryRegion *mr = section->mr; 1056 hwaddr start_pa = section->offset_within_address_space; 1057 ram_addr_t size = int128_get64(section->size); 1058 unsigned int delta; 1059 uintptr_t hva; 1060 1061 if (!memory_region_is_ram(mr)) { 1062 return; 1063 } 1064 1065 /* Adjust start_pa and size so that they are page-aligned. */ 1066 delta = qemu_real_host_page_size() - (start_pa & ~qemu_real_host_page_mask()); 1067 delta &= ~qemu_real_host_page_mask(); 1068 if (delta > size) { 1069 return; 1070 } 1071 start_pa += delta; 1072 size -= delta; 1073 size &= qemu_real_host_page_mask(); 1074 if (!size || (start_pa & ~qemu_real_host_page_mask())) { 1075 return; 1076 } 1077 1078 hva = (uintptr_t)memory_region_get_ram_ptr(mr) + 1079 section->offset_within_region + delta; 1080 1081 nvmm_update_mapping(start_pa, size, hva, add, 1082 memory_region_is_rom(mr), mr->name); 1083 } 1084 1085 static void 1086 nvmm_region_add(MemoryListener *listener, MemoryRegionSection *section) 1087 { 1088 memory_region_ref(section->mr); 1089 nvmm_process_section(section, 1); 1090 } 1091 1092 static void 1093 nvmm_region_del(MemoryListener *listener, MemoryRegionSection *section) 1094 { 1095 nvmm_process_section(section, 0); 1096 memory_region_unref(section->mr); 1097 } 1098 1099 static void 1100 nvmm_transaction_begin(MemoryListener *listener) 1101 { 1102 /* nothing */ 1103 } 1104 1105 static void 1106 nvmm_transaction_commit(MemoryListener *listener) 1107 { 1108 /* nothing */ 1109 } 1110 1111 static void 1112 nvmm_log_sync(MemoryListener *listener, MemoryRegionSection *section) 1113 { 1114 MemoryRegion *mr = section->mr; 1115 1116 if (!memory_region_is_ram(mr)) { 1117 return; 1118 } 1119 1120 memory_region_set_dirty(mr, 0, int128_get64(section->size)); 1121 } 1122 1123 static 
MemoryListener nvmm_memory_listener = { 1124 .name = "nvmm", 1125 .begin = nvmm_transaction_begin, 1126 .commit = nvmm_transaction_commit, 1127 .region_add = nvmm_region_add, 1128 .region_del = nvmm_region_del, 1129 .log_sync = nvmm_log_sync, 1130 .priority = MEMORY_LISTENER_PRIORITY_ACCEL, 1131 }; 1132 1133 static void 1134 nvmm_ram_block_added(RAMBlockNotifier *n, void *host, size_t size, 1135 size_t max_size) 1136 { 1137 struct nvmm_machine *mach = get_nvmm_mach(); 1138 uintptr_t hva = (uintptr_t)host; 1139 int ret; 1140 1141 ret = nvmm_hva_map(mach, hva, max_size); 1142 1143 if (ret == -1) { 1144 error_report("NVMM: Failed to map HVA, HostVA:%p " 1145 "Size:%p bytes, error=%d", 1146 (void *)hva, (void *)size, errno); 1147 } 1148 } 1149 1150 static struct RAMBlockNotifier nvmm_ram_notifier = { 1151 .ram_block_added = nvmm_ram_block_added 1152 }; 1153 1154 /* -------------------------------------------------------------------------- */ 1155 1156 static int 1157 nvmm_accel_init(AccelState *as, MachineState *ms) 1158 { 1159 int ret, err; 1160 1161 ret = nvmm_init(); 1162 if (ret == -1) { 1163 err = errno; 1164 error_report("NVMM: Initialization failed, error=%d", errno); 1165 return -err; 1166 } 1167 1168 ret = nvmm_capability(&qemu_mach.cap); 1169 if (ret == -1) { 1170 err = errno; 1171 error_report("NVMM: Unable to fetch capability, error=%d", errno); 1172 return -err; 1173 } 1174 if (qemu_mach.cap.version < NVMM_KERN_VERSION) { 1175 error_report("NVMM: Unsupported version %u", qemu_mach.cap.version); 1176 return -EPROGMISMATCH; 1177 } 1178 if (qemu_mach.cap.state_size != sizeof(struct nvmm_x64_state)) { 1179 error_report("NVMM: Wrong state size %u", qemu_mach.cap.state_size); 1180 return -EPROGMISMATCH; 1181 } 1182 1183 ret = nvmm_machine_create(&qemu_mach.mach); 1184 if (ret == -1) { 1185 err = errno; 1186 error_report("NVMM: Machine creation failed, error=%d", errno); 1187 return -err; 1188 } 1189 1190 memory_listener_register(&nvmm_memory_listener, 
&address_space_memory); 1191 ram_block_notifier_add(&nvmm_ram_notifier); 1192 1193 printf("NetBSD Virtual Machine Monitor accelerator is operational\n"); 1194 return 0; 1195 } 1196 1197 static void 1198 nvmm_accel_class_init(ObjectClass *oc, const void *data) 1199 { 1200 AccelClass *ac = ACCEL_CLASS(oc); 1201 ac->name = "NVMM"; 1202 ac->init_machine = nvmm_accel_init; 1203 ac->allowed = &nvmm_allowed; 1204 } 1205 1206 static const TypeInfo nvmm_accel_type = { 1207 .name = ACCEL_CLASS_NAME("nvmm"), 1208 .parent = TYPE_ACCEL, 1209 .class_init = nvmm_accel_class_init, 1210 }; 1211 1212 static void nvmm_cpu_instance_init(CPUState *cs) 1213 { 1214 X86CPU *cpu = X86_CPU(cs); 1215 1216 host_cpu_instance_init(cpu); 1217 } 1218 1219 static void nvmm_cpu_accel_class_init(ObjectClass *oc, const void *data) 1220 { 1221 AccelCPUClass *acc = ACCEL_CPU_CLASS(oc); 1222 1223 acc->cpu_instance_init = nvmm_cpu_instance_init; 1224 } 1225 1226 static const TypeInfo nvmm_cpu_accel_type = { 1227 .name = ACCEL_CPU_NAME("nvmm"), 1228 1229 .parent = TYPE_ACCEL_CPU, 1230 .class_init = nvmm_cpu_accel_class_init, 1231 .abstract = true, 1232 }; 1233 1234 static void 1235 nvmm_type_init(void) 1236 { 1237 type_register_static(&nvmm_accel_type); 1238 type_register_static(&nvmm_cpu_accel_type); 1239 } 1240 1241 type_init(nvmm_type_init); 1242