/*
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (C) 2016 Veertu Inc,
 * Copyright (C) 2017 Google Inc,
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"

#include "qemu-common.h"
#include "x86hvf.h"
#include "vmx.h"
#include "vmcs.h"
#include "cpu.h"
#include "x86_descr.h"
#include "x86_decode.h"

#include "hw/i386/apic_internal.h"

#include <Hypervisor/hv.h>
#include <Hypervisor/hv_vmx.h>

/* Translate a QEMU SegmentCache into the VMX segment-descriptor layout. */
void hvf_set_segment(struct CPUState *cpu, struct vmx_segment *vmx_seg,
                     SegmentCache *qseg, bool is_tr)
{
    vmx_seg->sel = qseg->selector;
    vmx_seg->base = qseg->base;
    vmx_seg->limit = qseg->limit;

    if (!qseg->selector && !x86_is_real(cpu) && !is_tr) {
        /* the TR register is usable after processor reset despite
         * having a null selector */
        vmx_seg->ar = 1 << 16; /* access-rights bit 16: segment unusable */
        return;
    }
    vmx_seg->ar = (qseg->flags >> DESC_TYPE_SHIFT) & 0xf;
    vmx_seg->ar |= ((qseg->flags >> DESC_G_SHIFT) & 1) << 15;
    vmx_seg->ar |= ((qseg->flags >> DESC_B_SHIFT) & 1) << 14;
    vmx_seg->ar |= ((qseg->flags >> DESC_L_SHIFT) & 1) << 13;
    vmx_seg->ar |= ((qseg->flags >> DESC_AVL_SHIFT) & 1) << 12;
    vmx_seg->ar |= ((qseg->flags >> DESC_P_SHIFT) & 1) << 7;
    vmx_seg->ar |= ((qseg->flags >> DESC_DPL_SHIFT) & 3) << 5;
    vmx_seg->ar |= ((qseg->flags >> DESC_S_SHIFT) & 1) << 4;
}

/* Translate a VMX segment descriptor back into a QEMU SegmentCache. */
void hvf_get_segment(SegmentCache *qseg, struct vmx_segment *vmx_seg)
{
    qseg->limit = vmx_seg->limit;
    qseg->base = vmx_seg->base;
    qseg->selector = vmx_seg->sel;
    qseg->flags = ((vmx_seg->ar & 0xf) << DESC_TYPE_SHIFT) |
                  (((vmx_seg->ar >> 4) & 1) << DESC_S_SHIFT) |
                  (((vmx_seg->ar >> 5) & 3) << DESC_DPL_SHIFT) |
                  (((vmx_seg->ar >> 7) & 1) << DESC_P_SHIFT) |
                  (((vmx_seg->ar >> 12) & 1) << DESC_AVL_SHIFT) |
                  (((vmx_seg->ar >> 13) & 1) << DESC_L_SHIFT) |
                  (((vmx_seg->ar >> 14) & 1) << DESC_B_SHIFT) |
                  (((vmx_seg->ar >> 15) & 1) << DESC_G_SHIFT);
}

void hvf_put_xsave(CPUState *cpu_state)
{
    struct X86XSaveArea *xsave;

    xsave = X86_CPU(cpu_state)->env.xsave_buf;

    x86_cpu_xsave_all_areas(X86_CPU(cpu_state), xsave);

    if (hv_vcpu_write_fpstate(cpu_state->hvf_fd, (void *)xsave, 4096)) {
        abort();
    }
}
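/* Load QEMU's descriptor-table, control-register and segment state into the
 * VMCS guest-state area, then flush the cached writes to the vCPU. */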
void hvf_put_segments(CPUState *cpu_state)
{
    CPUX86State *env = &X86_CPU(cpu_state)->env;
    struct vmx_segment seg;

    wvmcs(cpu_state->hvf_fd, VMCS_GUEST_IDTR_LIMIT, env->idt.limit);
    wvmcs(cpu_state->hvf_fd, VMCS_GUEST_IDTR_BASE, env->idt.base);

    wvmcs(cpu_state->hvf_fd, VMCS_GUEST_GDTR_LIMIT, env->gdt.limit);
    wvmcs(cpu_state->hvf_fd, VMCS_GUEST_GDTR_BASE, env->gdt.base);

    /* wvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR2, env->cr[2]); */
    wvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR3, env->cr[3]);
    vmx_update_tpr(cpu_state);
    wvmcs(cpu_state->hvf_fd, VMCS_GUEST_IA32_EFER, env->efer);

    macvm_set_cr4(cpu_state->hvf_fd, env->cr[4]);
    macvm_set_cr0(cpu_state->hvf_fd, env->cr[0]);

    hvf_set_segment(cpu_state, &seg, &env->segs[R_CS], false);
    vmx_write_segment_descriptor(cpu_state, &seg, R_CS);

    hvf_set_segment(cpu_state, &seg, &env->segs[R_DS], false);
    vmx_write_segment_descriptor(cpu_state, &seg, R_DS);

    hvf_set_segment(cpu_state, &seg, &env->segs[R_ES], false);
    vmx_write_segment_descriptor(cpu_state, &seg, R_ES);

    hvf_set_segment(cpu_state, &seg, &env->segs[R_SS], false);
    vmx_write_segment_descriptor(cpu_state, &seg, R_SS);

    hvf_set_segment(cpu_state, &seg, &env->segs[R_FS], false);
    vmx_write_segment_descriptor(cpu_state, &seg, R_FS);

    hvf_set_segment(cpu_state, &seg, &env->segs[R_GS], false);
    vmx_write_segment_descriptor(cpu_state, &seg, R_GS);

    hvf_set_segment(cpu_state, &seg, &env->tr, true);
    vmx_write_segment_descriptor(cpu_state, &seg, R_TR);

    hvf_set_segment(cpu_state, &seg, &env->ldt, false);
    vmx_write_segment_descriptor(cpu_state, &seg, R_LDTR);

    hv_vcpu_flush(cpu_state->hvf_fd);
}

void hvf_put_msrs(CPUState *cpu_state)
{
    CPUX86State *env = &X86_CPU(cpu_state)->env;

    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_CS,
                      env->sysenter_cs);
    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_ESP,
                      env->sysenter_esp);
    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_EIP,
                      env->sysenter_eip);

    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_STAR, env->star);

#ifdef TARGET_X86_64
    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_CSTAR, env->cstar);
    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_KERNELGSBASE, env->kernelgsbase);
    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_FMASK, env->fmask);
    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_LSTAR, env->lstar);
#endif

    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_GSBASE, env->segs[R_GS].base);
    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_FSBASE, env->segs[R_FS].base);

    /* if (!osx_is_sierra())
         wvmcs(cpu_state->hvf_fd, VMCS_TSC_OFFSET, env->tsc - rdtscp());*/
    hv_vm_sync_tsc(env->tsc);
}

void hvf_get_xsave(CPUState *cpu_state)
{
    struct X86XSaveArea *xsave;

    xsave = X86_CPU(cpu_state)->env.xsave_buf;

    if (hv_vcpu_read_fpstate(cpu_state->hvf_fd, (void *)xsave, 4096)) {
        abort();
    }

    x86_cpu_xrstor_all_areas(X86_CPU(cpu_state), xsave);
}
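/* Read the guest's segment, descriptor-table and control-register state back
 * out of the VMCS. CR2 has no VMCS guest-state field, so it is simply
 * cleared here. */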
void hvf_get_segments(CPUState *cpu_state)
{
    CPUX86State *env = &X86_CPU(cpu_state)->env;

    struct vmx_segment seg;

    env->interrupt_injected = -1;

    vmx_read_segment_descriptor(cpu_state, &seg, R_CS);
    hvf_get_segment(&env->segs[R_CS], &seg);

    vmx_read_segment_descriptor(cpu_state, &seg, R_DS);
    hvf_get_segment(&env->segs[R_DS], &seg);

    vmx_read_segment_descriptor(cpu_state, &seg, R_ES);
    hvf_get_segment(&env->segs[R_ES], &seg);

    vmx_read_segment_descriptor(cpu_state, &seg, R_FS);
    hvf_get_segment(&env->segs[R_FS], &seg);

    vmx_read_segment_descriptor(cpu_state, &seg, R_GS);
    hvf_get_segment(&env->segs[R_GS], &seg);

    vmx_read_segment_descriptor(cpu_state, &seg, R_SS);
    hvf_get_segment(&env->segs[R_SS], &seg);

    vmx_read_segment_descriptor(cpu_state, &seg, R_TR);
    hvf_get_segment(&env->tr, &seg);

    vmx_read_segment_descriptor(cpu_state, &seg, R_LDTR);
    hvf_get_segment(&env->ldt, &seg);

    env->idt.limit = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_IDTR_LIMIT);
    env->idt.base = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_IDTR_BASE);
    env->gdt.limit = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_GDTR_LIMIT);
    env->gdt.base = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_GDTR_BASE);

    env->cr[0] = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR0);
    env->cr[2] = 0;
    env->cr[3] = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR3);
    env->cr[4] = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR4);

    env->efer = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_IA32_EFER);
}

void hvf_get_msrs(CPUState *cpu_state)
{
    CPUX86State *env = &X86_CPU(cpu_state)->env;
    uint64_t tmp;

    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_CS, &tmp);
    env->sysenter_cs = tmp;

    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_ESP, &tmp);
    env->sysenter_esp = tmp;

    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_EIP, &tmp);
    env->sysenter_eip = tmp;

    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_STAR, &env->star);

#ifdef TARGET_X86_64
    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_CSTAR, &env->cstar);
    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_KERNELGSBASE, &env->kernelgsbase);
    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_FMASK, &env->fmask);
    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_LSTAR, &env->lstar);
#endif

    /* the value read here is currently unused */
    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_IA32_APICBASE, &tmp);

    env->tsc = rdtscp() + rvmcs(cpu_state->hvf_fd, VMCS_TSC_OFFSET);
}

/* Push QEMU's full register state to the vCPU. */
int hvf_put_registers(CPUState *cpu_state)
{
    X86CPU *x86cpu = X86_CPU(cpu_state);
    CPUX86State *env = &x86cpu->env;

    wreg(cpu_state->hvf_fd, HV_X86_RAX, env->regs[R_EAX]);
    wreg(cpu_state->hvf_fd, HV_X86_RBX, env->regs[R_EBX]);
    wreg(cpu_state->hvf_fd, HV_X86_RCX, env->regs[R_ECX]);
    wreg(cpu_state->hvf_fd, HV_X86_RDX, env->regs[R_EDX]);
    wreg(cpu_state->hvf_fd, HV_X86_RBP, env->regs[R_EBP]);
    wreg(cpu_state->hvf_fd, HV_X86_RSP, env->regs[R_ESP]);
    wreg(cpu_state->hvf_fd, HV_X86_RSI, env->regs[R_ESI]);
    wreg(cpu_state->hvf_fd, HV_X86_RDI, env->regs[R_EDI]);
    wreg(cpu_state->hvf_fd, HV_X86_R8, env->regs[8]);
    wreg(cpu_state->hvf_fd, HV_X86_R9, env->regs[9]);
    wreg(cpu_state->hvf_fd, HV_X86_R10, env->regs[10]);
    wreg(cpu_state->hvf_fd, HV_X86_R11, env->regs[11]);
    wreg(cpu_state->hvf_fd, HV_X86_R12, env->regs[12]);
    wreg(cpu_state->hvf_fd, HV_X86_R13, env->regs[13]);
    wreg(cpu_state->hvf_fd, HV_X86_R14, env->regs[14]);
    wreg(cpu_state->hvf_fd, HV_X86_R15, env->regs[15]);
    wreg(cpu_state->hvf_fd, HV_X86_RFLAGS, env->eflags);
    wreg(cpu_state->hvf_fd, HV_X86_RIP, env->eip);

    wreg(cpu_state->hvf_fd, HV_X86_XCR0, env->xcr0);

    hvf_put_xsave(cpu_state);

    hvf_put_segments(cpu_state);

    hvf_put_msrs(cpu_state);

    wreg(cpu_state->hvf_fd, HV_X86_DR0, env->dr[0]);
    wreg(cpu_state->hvf_fd, HV_X86_DR1, env->dr[1]);
    wreg(cpu_state->hvf_fd, HV_X86_DR2, env->dr[2]);
    wreg(cpu_state->hvf_fd, HV_X86_DR3, env->dr[3]);
    wreg(cpu_state->hvf_fd, HV_X86_DR4, env->dr[4]);
    wreg(cpu_state->hvf_fd, HV_X86_DR5, env->dr[5]);
    wreg(cpu_state->hvf_fd, HV_X86_DR6, env->dr[6]);
    wreg(cpu_state->hvf_fd, HV_X86_DR7, env->dr[7]);

    return 0;
}
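/* Pull the full vCPU state (GPRs, flags, XSAVE area, segments, MSRs and
 * debug registers) from the hypervisor into CPUX86State, then recompute
 * the cached hflags. */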
int hvf_get_registers(CPUState *cpu_state)
{
    X86CPU *x86cpu = X86_CPU(cpu_state);
    CPUX86State *env = &x86cpu->env;

    env->regs[R_EAX] = rreg(cpu_state->hvf_fd, HV_X86_RAX);
    env->regs[R_EBX] = rreg(cpu_state->hvf_fd, HV_X86_RBX);
    env->regs[R_ECX] = rreg(cpu_state->hvf_fd, HV_X86_RCX);
    env->regs[R_EDX] = rreg(cpu_state->hvf_fd, HV_X86_RDX);
    env->regs[R_EBP] = rreg(cpu_state->hvf_fd, HV_X86_RBP);
    env->regs[R_ESP] = rreg(cpu_state->hvf_fd, HV_X86_RSP);
    env->regs[R_ESI] = rreg(cpu_state->hvf_fd, HV_X86_RSI);
    env->regs[R_EDI] = rreg(cpu_state->hvf_fd, HV_X86_RDI);
    env->regs[8] = rreg(cpu_state->hvf_fd, HV_X86_R8);
    env->regs[9] = rreg(cpu_state->hvf_fd, HV_X86_R9);
    env->regs[10] = rreg(cpu_state->hvf_fd, HV_X86_R10);
    env->regs[11] = rreg(cpu_state->hvf_fd, HV_X86_R11);
    env->regs[12] = rreg(cpu_state->hvf_fd, HV_X86_R12);
    env->regs[13] = rreg(cpu_state->hvf_fd, HV_X86_R13);
    env->regs[14] = rreg(cpu_state->hvf_fd, HV_X86_R14);
    env->regs[15] = rreg(cpu_state->hvf_fd, HV_X86_R15);

    env->eflags = rreg(cpu_state->hvf_fd, HV_X86_RFLAGS);
    env->eip = rreg(cpu_state->hvf_fd, HV_X86_RIP);

    hvf_get_xsave(cpu_state);
    env->xcr0 = rreg(cpu_state->hvf_fd, HV_X86_XCR0);

    hvf_get_segments(cpu_state);
    hvf_get_msrs(cpu_state);

    env->dr[0] = rreg(cpu_state->hvf_fd, HV_X86_DR0);
    env->dr[1] = rreg(cpu_state->hvf_fd, HV_X86_DR1);
    env->dr[2] = rreg(cpu_state->hvf_fd, HV_X86_DR2);
    env->dr[3] = rreg(cpu_state->hvf_fd, HV_X86_DR3);
    env->dr[4] = rreg(cpu_state->hvf_fd, HV_X86_DR4);
    env->dr[5] = rreg(cpu_state->hvf_fd, HV_X86_DR5);
    env->dr[6] = rreg(cpu_state->hvf_fd, HV_X86_DR6);
    env->dr[7] = rreg(cpu_state->hvf_fd, HV_X86_DR7);

    x86_update_hflags(env);
    return 0;
}

static void vmx_set_int_window_exiting(CPUState *cpu)
{
    uint64_t val;
    val = rvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS);
    wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS, val |
          VMCS_PRI_PROC_BASED_CTLS_INT_WINDOW_EXITING);
}

void vmx_clear_int_window_exiting(CPUState *cpu)
{
    uint64_t val;
    val = rvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS);
    wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS, val &
          ~VMCS_PRI_PROC_BASED_CTLS_INT_WINDOW_EXITING);
}

#define NMI_VEC 2

/* Queue any pending interrupt, exception or NMI for injection on the next
 * VM entry. Returns whether an INIT or TPR access still needs handling. */
bool hvf_inject_interrupts(CPUState *cpu_state)
{
    X86CPU *x86cpu = X86_CPU(cpu_state);
    CPUX86State *env = &x86cpu->env;

    uint8_t vector;
    uint64_t intr_type;
    bool have_event = true;
    if (env->interrupt_injected != -1) {
        vector = env->interrupt_injected;
        intr_type = VMCS_INTR_T_SWINTR;
    } else if (env->exception_injected != -1) {
        vector = env->exception_injected;
        if (vector == EXCP03_INT3 || vector == EXCP04_INTO) {
            intr_type = VMCS_INTR_T_SWEXCEPTION;
        } else {
            intr_type = VMCS_INTR_T_HWEXCEPTION;
        }
    } else if (env->nmi_injected) {
        vector = NMI_VEC;
        intr_type = VMCS_INTR_T_NMI;
    } else {
        have_event = false;
    }

    uint64_t info = 0;
    if (have_event) {
        info = vector | intr_type | VMCS_INTR_VALID;
        uint64_t reason = rvmcs(cpu_state->hvf_fd, VMCS_EXIT_REASON);
        if (env->nmi_injected && reason != EXIT_REASON_TASK_SWITCH) {
            vmx_clear_nmi_blocking(cpu_state);
        }

        if (!(env->hflags2 & HF2_NMI_MASK) || intr_type != VMCS_INTR_T_NMI) {
            info &= ~(1 << 12); /* clear undefined bit */
            if (intr_type == VMCS_INTR_T_SWINTR ||
                intr_type == VMCS_INTR_T_SWEXCEPTION) {
                wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_INST_LENGTH, env->ins_len);
            }

            if (env->has_error_code) {
                wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_EXCEPTION_ERROR,
                      env->error_code);
            }
            /*printf("reinject %lx err %d\n", info, err);*/
            wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_INTR_INFO, info);
        }
    }

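    /* A requested NMI is injected directly when NMIs are not blocked and no
     * other event is already queued; otherwise arm NMI-window exiting so it
     * can be delivered later. */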
    if (cpu_state->interrupt_request & CPU_INTERRUPT_NMI) {
        if (!(env->hflags2 & HF2_NMI_MASK) && !(info & VMCS_INTR_VALID)) {
            cpu_state->interrupt_request &= ~CPU_INTERRUPT_NMI;
            info = VMCS_INTR_VALID | VMCS_INTR_T_NMI | NMI_VEC;
            wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_INTR_INFO, info);
        } else {
            vmx_set_nmi_window_exiting(cpu_state);
        }
    }

    if (!(env->hflags & HF_INHIBIT_IRQ_MASK) &&
        (cpu_state->interrupt_request & CPU_INTERRUPT_HARD) &&
        (EFLAGS(env) & IF_MASK) && !(info & VMCS_INTR_VALID)) {
        int line = cpu_get_pic_interrupt(&x86cpu->env);
        cpu_state->interrupt_request &= ~CPU_INTERRUPT_HARD;
        if (line >= 0) {
            wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_INTR_INFO, line |
                  VMCS_INTR_VALID | VMCS_INTR_T_HWINTR);
        }
    }
    if (cpu_state->interrupt_request & CPU_INTERRUPT_HARD) {
        vmx_set_int_window_exiting(cpu_state);
    }
    return (cpu_state->interrupt_request
            & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR));
}

int hvf_process_events(CPUState *cpu_state)
{
    X86CPU *cpu = X86_CPU(cpu_state);
    CPUX86State *env = &cpu->env;

    EFLAGS(env) = rreg(cpu_state->hvf_fd, HV_X86_RFLAGS);

    if (cpu_state->interrupt_request & CPU_INTERRUPT_INIT) {
        hvf_cpu_synchronize_state(cpu_state);
        do_cpu_init(cpu);
    }

    if (cpu_state->interrupt_request & CPU_INTERRUPT_POLL) {
        cpu_state->interrupt_request &= ~CPU_INTERRUPT_POLL;
        apic_poll_irq(cpu->apic_state);
    }
    if (((cpu_state->interrupt_request & CPU_INTERRUPT_HARD) &&
         (EFLAGS(env) & IF_MASK)) ||
        (cpu_state->interrupt_request & CPU_INTERRUPT_NMI)) {
        cpu_state->halted = 0;
    }
    if (cpu_state->interrupt_request & CPU_INTERRUPT_SIPI) {
        hvf_cpu_synchronize_state(cpu_state);
        do_cpu_sipi(cpu);
    }
    if (cpu_state->interrupt_request & CPU_INTERRUPT_TPR) {
        cpu_state->interrupt_request &= ~CPU_INTERRUPT_TPR;
        hvf_cpu_synchronize_state(cpu_state);
        apic_handle_tpr_access_report(cpu->apic_state, env->eip,
                                      env->tpr_access_type);
    }
    return cpu_state->halted;
}