/*
 * emulator main execution loop
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "cpu.h"
#include "trace.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
#include "qemu/timer.h"
#include "qemu/rcu.h"
#include "exec/tb-hash.h"
#include "exec/tb-lookup.h"
#include "exec/log.h"
#include "qemu/main-loop.h"
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
#include "hw/i386/apic.h"
#endif
#include "sysemu/cpus.h"
#include "sysemu/replay.h"

/* -icount align implementation. */

typedef struct SyncClocks {
    int64_t diff_clk;
    int64_t last_cpu_icount;
    int64_t realtime_clock;
} SyncClocks;

#if !defined(CONFIG_USER_ONLY)
/* Allow the guest to have a max 3ms advance.
 * The difference between the 2 clocks could therefore
 * oscillate around 0.
 */
#define VM_CLOCK_ADVANCE 3000000
#define THRESHOLD_REDUCE 1.5
#define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100

static void align_clocks(SyncClocks *sc, CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    cpu_icount = cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low;
    sc->diff_clk += cpu_icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            sc->diff_clk = rem_delay.tv_sec * 1000000000LL + rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}
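
/* Warn, at a limited rate, when the guest has fallen behind the host
 * under -icount align: at most one message every MAX_DELAY_PRINT_RATE
 * nanoseconds and at most MAX_NB_PRINTS messages in total.
 */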
static void print_delay(const SyncClocks *sc)
{
    static float threshold_delay;
    static int64_t last_realtime_clock;
    static int nb_prints;

    if (icount_align_option &&
        sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
        nb_prints < MAX_NB_PRINTS) {
        if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
            (-sc->diff_clk / (float)1000000000LL <
             (threshold_delay - THRESHOLD_REDUCE))) {
            threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
            printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
                   threshold_delay - 1,
                   threshold_delay);
            nb_prints++;
            last_realtime_clock = sc->realtime_clock;
        }
    }
}

static void init_delay_params(SyncClocks *sc, CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
    sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock;
    sc->last_cpu_icount
        = cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low;
    if (sc->diff_clk < max_delay) {
        max_delay = sc->diff_clk;
    }
    if (sc->diff_clk > max_advance) {
        max_advance = sc->diff_clk;
    }

    /* Print every 2s max if the guest is late. We limit the number
       of printed messages to MAX_NB_PRINTS (currently 100). */
    print_delay(sc);
}
#else
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}

static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
#endif /* CONFIG_USER_ONLY */

/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, TranslationBlock *itb)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t ret;
    TranslationBlock *last_tb;
    int tb_exit;
    uint8_t *tb_ptr = itb->tc.ptr;

    qemu_log_mask_and_addr(CPU_LOG_EXEC, itb->pc,
                           "Trace %d: %p ["
                           TARGET_FMT_lx "/" TARGET_FMT_lx "/%#x] %s\n",
                           cpu->cpu_index, itb->tc.ptr,
                           itb->cs_base, itb->pc, itb->flags,
                           lookup_symbol(itb->pc));

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)
        && qemu_log_in_addr_range(itb->pc)) {
        FILE *logfile = qemu_log_lock();
        int flags = 0;
        if (qemu_loglevel_mask(CPU_LOG_TB_FPU)) {
            flags |= CPU_DUMP_FPU;
        }
#if defined(TARGET_I386)
        flags |= CPU_DUMP_CCOP;
#endif
        log_cpu_state(cpu, flags);
        qemu_log_unlock(logfile);
    }
#endif /* DEBUG_DISAS */

    ret = tcg_qemu_tb_exec(env, tb_ptr);
    cpu->can_do_io = 1;
    last_tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
    tb_exit = ret & TB_EXIT_MASK;
    trace_exec_tb_exit(last_tb, tb_exit);

    if (tb_exit > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        qemu_log_mask_and_addr(CPU_LOG_EXEC, last_tb->pc,
                               "Stopped execution of TB chain before %p ["
                               TARGET_FMT_lx "] %s\n",
                               last_tb->tc.ptr, last_tb->pc,
                               lookup_symbol(last_tb->pc));
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, last_tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, last_tb->pc);
        }
    }
    return ret;
}

#ifndef CONFIG_USER_ONLY
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUState *cpu, int max_cycles,
                             TranslationBlock *orig_tb, bool ignore_icount)
{
    TranslationBlock *tb;
    uint32_t cflags = curr_cflags() | CF_NOCACHE;

    if (ignore_icount) {
        cflags &= ~CF_USE_ICOUNT;
    }

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    cflags |= MIN(max_cycles, CF_COUNT_MASK);

    mmap_lock();
    tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base,
                     orig_tb->flags, cflags);
    tb->orig_tb = orig_tb;
    mmap_unlock();

    /* execute the generated code */
    trace_exec_tb_nocache(tb, tb->pc);
    cpu_tb_exec(cpu, tb);

    mmap_lock();
    tb_phys_invalidate(tb, -1);
    mmap_unlock();
    tcg_tb_remove(tb);
}
#endif
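
/*
 * Execute one guest instruction inside the exclusive region, with all
 * other vCPUs stopped.  This is the serial slow path that TCG falls
 * back to for atomic operations it cannot emulate with host atomics.
 */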
void cpu_exec_step_atomic(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint32_t flags;
    uint32_t cflags = 1;
    uint32_t cf_mask = cflags & CF_HASH_MASK;

    if (sigsetjmp(cpu->jmp_env, 0) == 0) {
        start_exclusive();

        tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask);
        if (tb == NULL) {
            mmap_lock();
            tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
            mmap_unlock();
        }

        /* Since we got here, we know that parallel_cpus must be true. */
        parallel_cpus = false;
        cc->cpu_exec_enter(cpu);
        /* execute the generated code */
        trace_exec_tb(tb, pc);
        cpu_tb_exec(cpu, tb);
        cc->cpu_exec_exit(cpu);
    } else {
        /*
         * The mmap_lock is dropped by tb_gen_code if it runs out of
         * memory.
         */
#ifndef CONFIG_SOFTMMU
        tcg_debug_assert(!have_mmap_lock());
#endif
        if (qemu_mutex_iothread_locked()) {
            qemu_mutex_unlock_iothread();
        }
        assert_no_pages_locked();
        qemu_plugin_disable_mem_helpers(cpu);
    }

    /*
     * As we start the exclusive region before codegen we must still
     * be in the region if we longjump out of either the codegen or
     * the execution.
     */
    g_assert(cpu_in_exclusive_context(cpu));
    parallel_cpus = true;
    end_exclusive();
}

struct tb_desc {
    target_ulong pc;
    target_ulong cs_base;
    CPUArchState *env;
    tb_page_addr_t phys_page1;
    uint32_t flags;
    uint32_t cf_mask;
    uint32_t trace_vcpu_dstate;
};

static bool tb_lookup_cmp(const void *p, const void *d)
{
    const TranslationBlock *tb = p;
    const struct tb_desc *desc = d;

    if (tb->pc == desc->pc &&
        tb->page_addr[0] == desc->phys_page1 &&
        tb->cs_base == desc->cs_base &&
        tb->flags == desc->flags &&
        tb->trace_vcpu_dstate == desc->trace_vcpu_dstate &&
        (tb_cflags(tb) & (CF_HASH_MASK | CF_INVALID)) == desc->cf_mask) {
        /* check next page if needed */
        if (tb->page_addr[1] == -1) {
            return true;
        } else {
            tb_page_addr_t phys_page2;
            target_ulong virt_page2;

            virt_page2 = (desc->pc & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
            phys_page2 = get_page_addr_code(desc->env, virt_page2);
            if (tb->page_addr[1] == phys_page2) {
                return true;
            }
        }
    }
    return false;
}

TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
                                   target_ulong cs_base, uint32_t flags,
                                   uint32_t cf_mask)
{
    tb_page_addr_t phys_pc;
    struct tb_desc desc;
    uint32_t h;

    desc.env = (CPUArchState *)cpu->env_ptr;
    desc.cs_base = cs_base;
    desc.flags = flags;
    desc.cf_mask = cf_mask;
    desc.trace_vcpu_dstate = *cpu->trace_dstate;
    desc.pc = pc;
    phys_pc = get_page_addr_code(desc.env, pc);
    if (phys_pc == -1) {
        return NULL;
    }
    desc.phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_hash_func(phys_pc, pc, flags, cf_mask, *cpu->trace_dstate);
    return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
}
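
/* Make jump slot @n of @tb branch to @addr, either by patching the
 * generated host code in place (direct jump) or by storing the new
 * destination for an indirect jump.
 */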
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr)
{
    if (TCG_TARGET_HAS_direct_jump) {
        uintptr_t offset = tb->jmp_target_arg[n];
        uintptr_t tc_ptr = (uintptr_t)tb->tc.ptr;
        tb_target_set_jmp_target(tc_ptr, tc_ptr + offset, addr);
    } else {
        tb->jmp_target_arg[n] = addr;
    }
}

static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    uintptr_t old;

    assert(n < ARRAY_SIZE(tb->jmp_list_next));
    qemu_spin_lock(&tb_next->jmp_lock);

    /* make sure the destination TB is valid */
    if (tb_next->cflags & CF_INVALID) {
        goto out_unlock_next;
    }
    /* Atomically claim the jump destination slot only if it was NULL */
    old = atomic_cmpxchg(&tb->jmp_dest[n], (uintptr_t)NULL, (uintptr_t)tb_next);
    if (old) {
        goto out_unlock_next;
    }

    /* patch the native jump address */
    tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc.ptr);

    /* add in TB jmp list */
    tb->jmp_list_next[n] = tb_next->jmp_list_head;
    tb_next->jmp_list_head = (uintptr_t)tb | n;

    qemu_spin_unlock(&tb_next->jmp_lock);

    qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc,
                           "Linking TBs %p [" TARGET_FMT_lx
                           "] index %d -> %p [" TARGET_FMT_lx "]\n",
                           tb->tc.ptr, tb->pc, n,
                           tb_next->tc.ptr, tb_next->pc);
    return;

 out_unlock_next:
    qemu_spin_unlock(&tb_next->jmp_lock);
    return;
}

static inline TranslationBlock *tb_find(CPUState *cpu,
                                        TranslationBlock *last_tb,
                                        int tb_exit, uint32_t cf_mask)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint32_t flags;

    tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask);
    if (tb == NULL) {
        mmap_lock();
        tb = tb_gen_code(cpu, pc, cs_base, flags, cf_mask);
        mmap_unlock();
        /* We add the TB in the virtual pc hash table for the fast lookup */
        atomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb);
    }
#ifndef CONFIG_USER_ONLY
    /* We don't take care of direct jumps when address mapping changes in
     * system emulation. So it's not safe to make a direct jump to a TB
     * spanning two pages because the mapping for the second page can change.
     */
    if (tb->page_addr[1] != -1) {
        last_tb = NULL;
    }
#endif
    /* See if we can patch the calling TB. */
    if (last_tb) {
        tb_add_jump(last_tb, tb_exit, tb);
    }
    return tb;
}
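
/* Return true if the vCPU should remain halted (no work pending);
 * otherwise clear cpu->halted so that execution can resume.
 */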
static inline bool cpu_handle_halt(CPUState *cpu)
{
    if (cpu->halted) {
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
        if ((cpu->interrupt_request & CPU_INTERRUPT_POLL)
            && replay_interrupt()) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            qemu_mutex_lock_iothread();
            apic_poll_irq(x86_cpu->apic_state);
            cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
            qemu_mutex_unlock_iothread();
        }
#endif
        if (!cpu_has_work(cpu)) {
            return true;
        }

        cpu->halted = 0;
    }

    return false;
}

static inline void cpu_handle_debug_exception(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }

    cc->debug_excp_handler(cpu);
}

static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
{
    if (cpu->exception_index < 0) {
#ifndef CONFIG_USER_ONLY
        if (replay_has_exception()
            && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0) {
            /* try to cause an exception pending in the log */
            cpu_exec_nocache(cpu, 1, tb_find(cpu, NULL, 0, curr_cflags()), true);
        }
#endif
        if (cpu->exception_index < 0) {
            return false;
        }
    }

    if (cpu->exception_index >= EXCP_INTERRUPT) {
        /* exit request from the cpu execution loop */
        *ret = cpu->exception_index;
        if (*ret == EXCP_DEBUG) {
            cpu_handle_debug_exception(cpu);
        }
        cpu->exception_index = -1;
        return true;
    } else {
#if defined(CONFIG_USER_ONLY)
        /* if user mode only, we simulate a fake exception
           which will be handled outside the cpu execution
           loop */
#if defined(TARGET_I386)
        CPUClass *cc = CPU_GET_CLASS(cpu);
        cc->do_interrupt(cpu);
#endif
        *ret = cpu->exception_index;
        cpu->exception_index = -1;
        return true;
#else
        if (replay_exception()) {
            CPUClass *cc = CPU_GET_CLASS(cpu);
            qemu_mutex_lock_iothread();
            cc->do_interrupt(cpu);
            qemu_mutex_unlock_iothread();
            cpu->exception_index = -1;

            if (unlikely(cpu->singlestep_enabled)) {
                /*
                 * After processing the exception, ensure an EXCP_DEBUG is
                 * raised when single-stepping so that GDB doesn't miss the
                 * next instruction.
                 */
                *ret = EXCP_DEBUG;
                cpu_handle_debug_exception(cpu);
                return true;
            }
        } else if (!replay_has_interrupt()) {
            /* give a chance to iothread in replay mode */
            *ret = EXCP_INTERRUPT;
            return true;
        }
#endif
    }

    return false;
}
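
/*
 * Process pending interrupt and exit requests.  Returns true when the
 * inner execution loop must stop and go back round (an exception index
 * may have been set); *last_tb is cleared whenever chaining to the
 * previously executed TB would no longer be valid.
 */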
static inline bool cpu_handle_interrupt(CPUState *cpu,
                                        TranslationBlock **last_tb)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    /* Clear the interrupt flag now since we're processing
     * cpu->interrupt_request and cpu->exit_request.
     * Ensure zeroing happens before reading cpu->exit_request or
     * cpu->interrupt_request (see also smp_wmb in cpu_exit())
     */
    atomic_mb_set(&cpu_neg(cpu)->icount_decr.u16.high, 0);

    if (unlikely(atomic_read(&cpu->interrupt_request))) {
        int interrupt_request;
        qemu_mutex_lock_iothread();
        interrupt_request = cpu->interrupt_request;
        if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
            /* Mask out external interrupts for this step. */
            interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
        }
        if (interrupt_request & CPU_INTERRUPT_DEBUG) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
            cpu->exception_index = EXCP_DEBUG;
            qemu_mutex_unlock_iothread();
            return true;
        }
        if (replay_mode == REPLAY_MODE_PLAY && !replay_has_interrupt()) {
            /* Do nothing */
        } else if (interrupt_request & CPU_INTERRUPT_HALT) {
            replay_interrupt();
            cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
            cpu->halted = 1;
            cpu->exception_index = EXCP_HLT;
            qemu_mutex_unlock_iothread();
            return true;
        }
#if defined(TARGET_I386)
        else if (interrupt_request & CPU_INTERRUPT_INIT) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            CPUArchState *env = &x86_cpu->env;
            replay_interrupt();
            cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0);
            do_cpu_init(x86_cpu);
            cpu->exception_index = EXCP_HALTED;
            qemu_mutex_unlock_iothread();
            return true;
        }
#else
        else if (interrupt_request & CPU_INTERRUPT_RESET) {
            replay_interrupt();
            cpu_reset(cpu);
            qemu_mutex_unlock_iothread();
            return true;
        }
#endif
        /* The target hook has three exit conditions:
           false when the interrupt isn't processed,
           true when it is, and we should restart on a new TB,
           or it doesn't return at all and instead longjmps out
           via cpu_loop_exit.  */
        else {
            if (cc->cpu_exec_interrupt(cpu, interrupt_request)) {
                replay_interrupt();
                /*
                 * After processing the interrupt, ensure an EXCP_DEBUG is
                 * raised when single-stepping so that GDB doesn't miss the
                 * next instruction.
                 */
                cpu->exception_index =
                    (cpu->singlestep_enabled ? EXCP_DEBUG : -1);
                *last_tb = NULL;
            }
            /* The target hook may have updated the 'cpu->interrupt_request';
             * reload the 'interrupt_request' value */
            interrupt_request = cpu->interrupt_request;
        }
        if (interrupt_request & CPU_INTERRUPT_EXITTB) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
            /* ensure that no TB jump will be modified as
               the program flow was changed */
            *last_tb = NULL;
        }

        /* If we exit via cpu_loop_exit/longjmp it is reset in cpu_exec */
        qemu_mutex_unlock_iothread();
    }

    /* Finally, check if we need to exit to the main loop. */
    if (unlikely(atomic_read(&cpu->exit_request))
        || (use_icount
            && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0)) {
        atomic_set(&cpu->exit_request, 0);
        if (cpu->exception_index == -1) {
            cpu->exception_index = EXCP_INTERRUPT;
        }
        return true;
    }

    return false;
}
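
/*
 * Execute @tb and act on the result: record it in *last_tb as the
 * candidate for chaining, or, if the icount decrementer expired,
 * refill it and run any remaining instructions without caching.
 */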
static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
                                    TranslationBlock **last_tb, int *tb_exit)
{
    uintptr_t ret;
    int32_t insns_left;

    trace_exec_tb(tb, tb->pc);
    ret = cpu_tb_exec(cpu, tb);
    tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
    *tb_exit = ret & TB_EXIT_MASK;
    if (*tb_exit != TB_EXIT_REQUESTED) {
        *last_tb = tb;
        return;
    }

    *last_tb = NULL;
    insns_left = atomic_read(&cpu_neg(cpu)->icount_decr.u32);
    if (insns_left < 0) {
        /* Something asked us to stop executing chained TBs; just
         * continue round the main loop. Whatever requested the exit
         * will also have set something else (eg exit_request or
         * interrupt_request) which will be handled by
         * cpu_handle_interrupt. cpu_handle_interrupt will also
         * clear cpu->icount_decr.u16.high.
         */
        return;
    }

    /* Instruction counter expired.  */
    assert(use_icount);
#ifndef CONFIG_USER_ONLY
    /* Ensure global icount has gone forward */
    cpu_update_icount(cpu);
    /* Refill decrementer and continue execution. */
    insns_left = MIN(0xffff, cpu->icount_budget);
    cpu_neg(cpu)->icount_decr.u16.low = insns_left;
    cpu->icount_extra = cpu->icount_budget - insns_left;
    if (!cpu->icount_extra) {
        /* Execute any remaining instructions, then let the main loop
         * handle the next event.
         */
        if (insns_left > 0) {
            cpu_exec_nocache(cpu, insns_left, tb, false);
        }
    }
#endif
}

/* main execution loop */

int cpu_exec(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    int ret;
    SyncClocks sc = { 0 };

    /* replay_interrupt may need current_cpu */
    current_cpu = cpu;

    if (cpu_handle_halt(cpu)) {
        return EXCP_HALTED;
    }

    rcu_read_lock();

    cc->cpu_exec_enter(cpu);

    /* Calculate difference between guest clock and host clock.
     * This delay includes the delay of the last cycle, so
     * what we have to do is sleep until it is 0. As for the
     * advance/delay we gain here, we try to fix it next time.
     */
    init_delay_params(&sc, cpu);

    /* prepare setjmp context for exception handling */
    if (sigsetjmp(cpu->jmp_env, 0) != 0) {
#if defined(__clang__) || !QEMU_GNUC_PREREQ(4, 6)
        /* Some compilers wrongly smash all local variables after
         * siglongjmp. There were bug reports for gcc 4.5.0 and clang.
         * Reload essential local variables here for those compilers.
         * Newer versions of gcc would complain about this code (-Wclobbered). */
        cpu = current_cpu;
        cc = CPU_GET_CLASS(cpu);
#else /* buggy compiler */
        /* Assert that the compiler does not smash local variables. */
        g_assert(cpu == current_cpu);
        g_assert(cc == CPU_GET_CLASS(cpu));
#endif /* buggy compiler */
#ifndef CONFIG_SOFTMMU
        tcg_debug_assert(!have_mmap_lock());
#endif
        if (qemu_mutex_iothread_locked()) {
            qemu_mutex_unlock_iothread();
        }
        qemu_plugin_disable_mem_helpers(cpu);

        assert_no_pages_locked();
    }

    /* if an exception is pending, we execute it here */
    while (!cpu_handle_exception(cpu, &ret)) {
        TranslationBlock *last_tb = NULL;
        int tb_exit = 0;

        while (!cpu_handle_interrupt(cpu, &last_tb)) {
            uint32_t cflags = cpu->cflags_next_tb;
            TranslationBlock *tb;

            /* When requested, use an exact setting for cflags for the next
               execution. This is used for icount, precise smc, and stop-
               after-access watchpoints. Since this request should never
               have CF_INVALID set, -1 is a convenient invalid value that
               does not require tcg headers for cpu_common_reset. */
            if (cflags == -1) {
                cflags = curr_cflags();
            } else {
                cpu->cflags_next_tb = -1;
            }

            tb = tb_find(cpu, last_tb, tb_exit, cflags);
            cpu_loop_exec_tb(cpu, tb, &last_tb, &tb_exit);
            /* Try to align the host and virtual clocks
               if the guest is in advance */
            align_clocks(&sc, cpu);
        }
    }

    cc->cpu_exec_exit(cpu);
    rcu_read_unlock();

    return ret;
}