/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "trace.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
#include "qemu/timer.h"
#include "exec/address-spaces.h"
#include "qemu/rcu.h"
#include "exec/tb-hash.h"
#include "exec/log.h"
#include "qemu/main-loop.h"
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
#include "hw/i386/apic.h"
#endif
#include "sysemu/cpus.h"
#include "sysemu/replay.h"

/* -icount align implementation. */

typedef struct SyncClocks {
    int64_t diff_clk;
    int64_t last_cpu_icount;
    int64_t realtime_clock;
} SyncClocks;

#if !defined(CONFIG_USER_ONLY)
/* Allow the guest to have a max 3ms advance.
 * The difference between the 2 clocks could therefore
 * oscillate around 0.
 */
#define VM_CLOCK_ADVANCE 3000000
#define THRESHOLD_REDUCE 1.5
#define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100

static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    sc->diff_clk += cpu_icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            sc->diff_clk = rem_delay.tv_sec * 1000000000LL + rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}

static void print_delay(const SyncClocks *sc)
{
    static float threshold_delay;
    static int64_t last_realtime_clock;
    static int nb_prints;

    if (icount_align_option &&
        sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
        nb_prints < MAX_NB_PRINTS) {
        if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
            (-sc->diff_clk / (float)1000000000LL <
             (threshold_delay - THRESHOLD_REDUCE))) {
            threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
            printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
                   threshold_delay - 1,
                   threshold_delay);
            nb_prints++;
            last_realtime_clock = sc->realtime_clock;
        }
    }
}

static void init_delay_params(SyncClocks *sc,
                              const CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
    sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock;
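    /* Snapshot the instruction counter so align_clocks() can later convert
     * the instructions executed since this point into nanoseconds, and fold
     * the initial clock skew into the max_delay/max_advance statistics. */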
    sc->last_cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    if (sc->diff_clk < max_delay) {
        max_delay = sc->diff_clk;
    }
    if (sc->diff_clk > max_advance) {
        max_advance = sc->diff_clk;
    }

    /* Print every 2s max if the guest is late. We limit the number
       of printed messages to MAX_NB_PRINTS (currently 100). */
    print_delay(sc);
}
#else
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}

static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
#endif /* CONFIG_USER_ONLY */

/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, TranslationBlock *itb)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t ret;
    TranslationBlock *last_tb;
    int tb_exit;
    uint8_t *tb_ptr = itb->tc_ptr;

    qemu_log_mask_and_addr(CPU_LOG_EXEC, itb->pc,
                           "Trace %p [%d: " TARGET_FMT_lx "] %s\n",
                           itb->tc_ptr, cpu->cpu_index, itb->pc,
                           lookup_symbol(itb->pc));

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)
        && qemu_log_in_addr_range(itb->pc)) {
        qemu_log_lock();
#if defined(TARGET_I386)
        log_cpu_state(cpu, CPU_DUMP_CCOP);
#else
        log_cpu_state(cpu, 0);
#endif
        qemu_log_unlock();
    }
#endif /* DEBUG_DISAS */

    cpu->can_do_io = !use_icount;
    ret = tcg_qemu_tb_exec(env, tb_ptr);
    cpu->can_do_io = 1;
    last_tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
    tb_exit = ret & TB_EXIT_MASK;
    trace_exec_tb_exit(last_tb, tb_exit);

    if (tb_exit > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        qemu_log_mask_and_addr(CPU_LOG_EXEC, last_tb->pc,
                               "Stopped execution of TB chain before %p ["
                               TARGET_FMT_lx "] %s\n",
                               last_tb->tc_ptr, last_tb->pc,
                               lookup_symbol(last_tb->pc));
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, last_tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, last_tb->pc);
        }
    }
    return ret;
}

#ifndef CONFIG_USER_ONLY
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUState *cpu, int max_cycles,
                             TranslationBlock *orig_tb, bool ignore_icount)
{
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK) {
        max_cycles = CF_COUNT_MASK;
    }

    tb_lock();
    tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles | CF_NOCACHE
                         | (ignore_icount ? CF_IGNORE_ICOUNT : 0));
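    /* This TB is executed exactly once: it is invalidated and freed again
     * right after cpu_tb_exec() below, so the translation is never reused. */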
    tb->orig_tb = orig_tb;
    tb_unlock();

    /* execute the generated code */
    trace_exec_tb_nocache(tb, tb->pc);
    cpu_tb_exec(cpu, tb);

    tb_lock();
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
    tb_unlock();
}
#endif

static void cpu_exec_step(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUArchState *env = (CPUArchState *)cpu->env_ptr;
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint32_t flags;

    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    if (sigsetjmp(cpu->jmp_env, 0) == 0) {
        mmap_lock();
        tb_lock();
        tb = tb_gen_code(cpu, pc, cs_base, flags,
                         1 | CF_NOCACHE | CF_IGNORE_ICOUNT);
        tb->orig_tb = NULL;
        tb_unlock();
        mmap_unlock();

        cc->cpu_exec_enter(cpu);
        /* execute the generated code */
        trace_exec_tb_nocache(tb, pc);
        cpu_tb_exec(cpu, tb);
        cc->cpu_exec_exit(cpu);

        tb_lock();
        tb_phys_invalidate(tb, -1);
        tb_free(tb);
        tb_unlock();
    } else {
        /* We may have exited due to another problem here, so we need
         * to reset any tb_locks we may have taken but didn't release.
         * The mmap_lock is dropped by tb_gen_code if it runs out of
         * memory.
         */
#ifndef CONFIG_SOFTMMU
        tcg_debug_assert(!have_mmap_lock());
#endif
        tb_lock_reset();
    }
}

void cpu_exec_step_atomic(CPUState *cpu)
{
    start_exclusive();

    /* Since we got here, we know that parallel_cpus must be true.  */
    parallel_cpus = false;
    cpu_exec_step(cpu);
    parallel_cpus = true;

    end_exclusive();
}

struct tb_desc {
    target_ulong pc;
    target_ulong cs_base;
    CPUArchState *env;
    tb_page_addr_t phys_page1;
    uint32_t flags;
};

static bool tb_cmp(const void *p, const void *d)
{
    const TranslationBlock *tb = p;
    const struct tb_desc *desc = d;

    if (tb->pc == desc->pc &&
        tb->page_addr[0] == desc->phys_page1 &&
        tb->cs_base == desc->cs_base &&
        tb->flags == desc->flags &&
        !atomic_read(&tb->invalid)) {
        /* check next page if needed */
        if (tb->page_addr[1] == -1) {
            return true;
        } else {
            tb_page_addr_t phys_page2;
            target_ulong virt_page2;

            virt_page2 = (desc->pc & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
            phys_page2 = get_page_addr_code(desc->env, virt_page2);
            if (tb->page_addr[1] == phys_page2) {
                return true;
            }
        }
    }
    return false;
}

TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
                                   target_ulong cs_base, uint32_t flags)
{
    tb_page_addr_t phys_pc;
    struct tb_desc desc;
    uint32_t h;

    desc.env = (CPUArchState *)cpu->env_ptr;
    desc.cs_base = cs_base;
    desc.flags = flags;
    desc.pc = pc;
    phys_pc = get_page_addr_code(desc.env, pc);
    desc.phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_hash_func(phys_pc, pc, flags);
    return qht_lookup(&tcg_ctx.tb_ctx.htable, tb_cmp, &desc, h);
}

static inline TranslationBlock *tb_find(CPUState *cpu,
                                        TranslationBlock *last_tb,
                                        int tb_exit)
{
    CPUArchState *env = (CPUArchState *)cpu->env_ptr;
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint32_t flags;
    bool have_tb_lock = false;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
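    /* Lookup order: the per-CPU tb_jmp_cache is tried first as a lock-free
     * fast path, then the global QHT hash table, and only if both miss is a
     * new TB translated under mmap_lock/tb_lock. */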
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = atomic_rcu_read(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)]);
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_htable_lookup(cpu, pc, cs_base, flags);
        if (!tb) {

            /* mmap_lock is needed by tb_gen_code, and mmap_lock must be
             * taken outside tb_lock. As system emulation is currently
             * single threaded the locks are NOPs.
             */
            mmap_lock();
            tb_lock();
            have_tb_lock = true;

            /* There's a chance that our desired tb has been translated while
             * taking the locks so we check again inside the lock.
             */
            tb = tb_htable_lookup(cpu, pc, cs_base, flags);
            if (!tb) {
                /* if no translated code available, then translate it now */
                tb = tb_gen_code(cpu, pc, cs_base, flags, 0);
            }

            mmap_unlock();
        }

        /* We add the TB in the virtual pc hash table for the fast lookup */
        atomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb);
    }
#ifndef CONFIG_USER_ONLY
    /* We don't take care of direct jumps when address mapping changes in
     * system emulation. So it's not safe to make a direct jump to a TB
     * spanning two pages because the mapping for the second page can change.
     */
    if (tb->page_addr[1] != -1) {
        last_tb = NULL;
    }
#endif
    /* See if we can patch the calling TB. */
    if (last_tb && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
        if (!have_tb_lock) {
            tb_lock();
            have_tb_lock = true;
        }
        if (!tb->invalid) {
            tb_add_jump(last_tb, tb_exit, tb);
        }
    }
    if (have_tb_lock) {
        tb_unlock();
    }
    return tb;
}

static inline bool cpu_handle_halt(CPUState *cpu)
{
    if (cpu->halted) {
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
        if ((cpu->interrupt_request & CPU_INTERRUPT_POLL)
            && replay_interrupt()) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            qemu_mutex_lock_iothread();
            apic_poll_irq(x86_cpu->apic_state);
            cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
            qemu_mutex_unlock_iothread();
        }
#endif
        if (!cpu_has_work(cpu)) {
            return true;
        }

        cpu->halted = 0;
    }

    return false;
}

static inline void cpu_handle_debug_exception(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }

    cc->debug_excp_handler(cpu);
}

static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
{
    if (cpu->exception_index >= 0) {
        if (cpu->exception_index >= EXCP_INTERRUPT) {
            /* exit request from the cpu execution loop */
            *ret = cpu->exception_index;
            if (*ret == EXCP_DEBUG) {
                cpu_handle_debug_exception(cpu);
            }
            cpu->exception_index = -1;
            return true;
        } else {
#if defined(CONFIG_USER_ONLY)
            /* if user mode only, we simulate a fake exception
               which will be handled outside the cpu execution
               loop */
#if defined(TARGET_I386)
            CPUClass *cc = CPU_GET_CLASS(cpu);
            cc->do_interrupt(cpu);
#endif
            *ret = cpu->exception_index;
            cpu->exception_index = -1;
            return true;
#else
            if (replay_exception()) {
                CPUClass *cc = CPU_GET_CLASS(cpu);
                qemu_mutex_lock_iothread();
                cc->do_interrupt(cpu);
                qemu_mutex_unlock_iothread();
                cpu->exception_index = -1;
            } else if (!replay_has_interrupt()) {
                /* give a chance to iothread in replay mode */
                *ret = EXCP_INTERRUPT;
                return true;
            }
#endif
        }
#ifndef CONFIG_USER_ONLY
    } else if (replay_has_exception()
               && cpu->icount_decr.u16.low + cpu->icount_extra == 0) {
        /* try to cause an exception pending in the log */
        cpu_exec_nocache(cpu, 1, tb_find(cpu, NULL, 0), true);
        *ret = -1;
        return true;
#endif
    }

    return false;
}

static inline bool cpu_handle_interrupt(CPUState *cpu,
                                        TranslationBlock **last_tb)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (unlikely(atomic_read(&cpu->interrupt_request))) {
        int interrupt_request;
        qemu_mutex_lock_iothread();
        interrupt_request = cpu->interrupt_request;
        if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
            /* Mask out external interrupts for this step. */
            interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
        }
        if (interrupt_request & CPU_INTERRUPT_DEBUG) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
            cpu->exception_index = EXCP_DEBUG;
            qemu_mutex_unlock_iothread();
            return true;
        }
        if (replay_mode == REPLAY_MODE_PLAY && !replay_has_interrupt()) {
            /* Do nothing */
        } else if (interrupt_request & CPU_INTERRUPT_HALT) {
            replay_interrupt();
            cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
            cpu->halted = 1;
            cpu->exception_index = EXCP_HLT;
            qemu_mutex_unlock_iothread();
            return true;
        }
#if defined(TARGET_I386)
        else if (interrupt_request & CPU_INTERRUPT_INIT) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            CPUArchState *env = &x86_cpu->env;
            replay_interrupt();
            cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0);
            do_cpu_init(x86_cpu);
            cpu->exception_index = EXCP_HALTED;
            qemu_mutex_unlock_iothread();
            return true;
        }
#else
        else if (interrupt_request & CPU_INTERRUPT_RESET) {
            replay_interrupt();
            cpu_reset(cpu);
            qemu_mutex_unlock_iothread();
            return true;
        }
#endif
        /* The target hook has 3 exit conditions:
           False when the interrupt isn't processed,
           True when it is, and we should restart on a new TB,
           and it may also exit via longjmp through cpu_loop_exit. */
        else {
            if (cc->cpu_exec_interrupt(cpu, interrupt_request)) {
                replay_interrupt();
                *last_tb = NULL;
            }
            /* The target hook may have updated the 'cpu->interrupt_request';
             * reload the 'interrupt_request' value */
            interrupt_request = cpu->interrupt_request;
        }
        if (interrupt_request & CPU_INTERRUPT_EXITTB) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
            /* ensure that no TB jump will be modified as
               the program flow was changed */
            *last_tb = NULL;
        }

        /* If we exit via cpu_loop_exit/longjmp it is reset in cpu_exec */
        qemu_mutex_unlock_iothread();
    }

    /* Finally, check if we need to exit to the main loop. */
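    /* The icount part of the check below fires once icount_decr.u16.low and
     * icount_extra have both drained to zero, i.e. the instruction budget
     * for this execution slice is exhausted. */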
    if (unlikely(atomic_read(&cpu->exit_request)
        || (use_icount && cpu->icount_decr.u16.low + cpu->icount_extra == 0))) {
        atomic_set(&cpu->exit_request, 0);
        cpu->exception_index = EXCP_INTERRUPT;
        return true;
    }

    return false;
}

static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
                                    TranslationBlock **last_tb, int *tb_exit)
{
    uintptr_t ret;
    int32_t insns_left;

    trace_exec_tb(tb, tb->pc);
    ret = cpu_tb_exec(cpu, tb);
    tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
    *tb_exit = ret & TB_EXIT_MASK;
    if (*tb_exit != TB_EXIT_REQUESTED) {
        *last_tb = tb;
        return;
    }

    *last_tb = NULL;
    insns_left = atomic_read(&cpu->icount_decr.u32);
    atomic_set(&cpu->icount_decr.u16.high, 0);
    if (insns_left < 0) {
        /* Something asked us to stop executing chained TBs; just
         * continue round the main loop. Whatever requested the exit
         * will also have set something else (eg exit_request or
         * interrupt_request) which we will handle next time around
         * the loop.  But we need to ensure the zeroing of icount_decr
         * comes before the next read of cpu->exit_request
         * or cpu->interrupt_request.
         */
        smp_mb();
        return;
    }

    /* Instruction counter expired.  */
    assert(use_icount);
#ifndef CONFIG_USER_ONLY
    /* Ensure global icount has gone forward */
    cpu_update_icount(cpu);
    /* Refill decrementer and continue execution.  */
    insns_left = MIN(0xffff, cpu->icount_budget);
    cpu->icount_decr.u16.low = insns_left;
    cpu->icount_extra = cpu->icount_budget - insns_left;
    if (!cpu->icount_extra) {
        /* Execute any remaining instructions, then let the main loop
         * handle the next event.
         */
        if (insns_left > 0) {
            cpu_exec_nocache(cpu, insns_left, tb, false);
        }
    }
#endif
}

/* main execution loop */

int cpu_exec(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    int ret;
    SyncClocks sc = { 0 };

    /* replay_interrupt may need current_cpu */
    current_cpu = cpu;

    if (cpu_handle_halt(cpu)) {
        return EXCP_HALTED;
    }

    rcu_read_lock();

    cc->cpu_exec_enter(cpu);

    /* Calculate difference between guest clock and host clock.
     * This delay includes the delay of the last cycle, so
     * what we have to do is sleep until it is 0. As for the
     * advance/delay we gain here, we try to fix it next time.
     */
    init_delay_params(&sc, cpu);

    /* prepare setjmp context for exception handling */
    if (sigsetjmp(cpu->jmp_env, 0) != 0) {
#if defined(__clang__) || !QEMU_GNUC_PREREQ(4, 6)
        /* Some compilers wrongly smash all local variables after
         * siglongjmp. There were bug reports for gcc 4.5.0 and clang.
         * Reload essential local variables here for those compilers.
         * Newer versions of gcc would complain about this code (-Wclobbered). */
        cpu = current_cpu;
        cc = CPU_GET_CLASS(cpu);
#else /* buggy compiler */
        /* Assert that the compiler does not smash local variables. */
        g_assert(cpu == current_cpu);
        g_assert(cc == CPU_GET_CLASS(cpu));
#endif /* buggy compiler */
        cpu->can_do_io = 1;
        tb_lock_reset();
        if (qemu_mutex_iothread_locked()) {
            qemu_mutex_unlock_iothread();
        }
    }

    /* if an exception is pending, we execute it here */
    while (!cpu_handle_exception(cpu, &ret)) {
        TranslationBlock *last_tb = NULL;
        int tb_exit = 0;

        while (!cpu_handle_interrupt(cpu, &last_tb)) {
            TranslationBlock *tb = tb_find(cpu, last_tb, tb_exit);
            cpu_loop_exec_tb(cpu, tb, &last_tb, &tb_exit);
            /* Try to align the host and virtual clocks
               if the guest is in advance */
            align_clocks(&sc, cpu);
        }
    }

    cc->cpu_exec_exit(cpu);
    rcu_read_unlock();

    return ret;
}