/*
 * emulator main execution loop
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/qemu-print.h"
#include "qapi/error.h"
#include "qapi/type-helpers.h"
#include "hw/core/tcg-cpu-ops.h"
#include "trace.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#include "qemu/atomic.h"
#include "qemu/rcu.h"
#include "exec/log.h"
#include "qemu/main-loop.h"
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
#include "hw/i386/apic.h"
#endif
#include "sysemu/cpus.h"
#include "exec/cpu-all.h"
#include "sysemu/cpu-timers.h"
#include "exec/replay-core.h"
#include "sysemu/tcg.h"
#include "exec/helper-proto-common.h"
#include "tb-jmp-cache.h"
#include "tb-hash.h"
#include "tb-context.h"
#include "internal-common.h"
#include "internal-target.h"

/* -icount align implementation. */

typedef struct SyncClocks {
    int64_t diff_clk;
    int64_t last_cpu_icount;
    int64_t realtime_clock;
} SyncClocks;

#if !defined(CONFIG_USER_ONLY)
/* Allow the guest to have a max 3ms advance.
 * The difference between the 2 clocks could therefore
 * oscillate around 0.
 */
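/*
 * All of the constants below are in nanoseconds: VM_CLOCK_ADVANCE is
 * the 3 ms advance described above, and MAX_DELAY_PRINT_RATE limits
 * print_delay() to one warning every 2 s.
 */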
#define VM_CLOCK_ADVANCE 3000000
#define THRESHOLD_REDUCE 1.5
#define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100

int64_t max_delay;
int64_t max_advance;

static void align_clocks(SyncClocks *sc, CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    cpu_icount = cpu->icount_extra + cpu->neg.icount_decr.u16.low;
    sc->diff_clk += icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            sc->diff_clk = rem_delay.tv_sec * 1000000000LL + rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}

static void print_delay(const SyncClocks *sc)
{
    static float threshold_delay;
    static int64_t last_realtime_clock;
    static int nb_prints;

    if (icount_align_option &&
        sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
        nb_prints < MAX_NB_PRINTS) {
        if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
            (-sc->diff_clk / (float)1000000000LL <
             (threshold_delay - THRESHOLD_REDUCE))) {
            threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
            qemu_printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
                        threshold_delay - 1,
                        threshold_delay);
            nb_prints++;
            last_realtime_clock = sc->realtime_clock;
        }
    }
}

static void init_delay_params(SyncClocks *sc, CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
    sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock;
    sc->last_cpu_icount
        = cpu->icount_extra + cpu->neg.icount_decr.u16.low;
    if (sc->diff_clk < max_delay) {
        max_delay = sc->diff_clk;
    }
    if (sc->diff_clk > max_advance) {
        max_advance = sc->diff_clk;
    }

    /* Print every 2s max if the guest is late.  We limit the number
       of printed messages to MAX_NB_PRINTS (currently 100). */
    print_delay(sc);
}
#else
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}

static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
#endif /* !CONFIG_USER_ONLY */

uint32_t curr_cflags(CPUState *cpu)
{
    uint32_t cflags = cpu->tcg_cflags;

    /*
     * Record gdb single-step.  We should be exiting the TB by raising
     * EXCP_DEBUG, but to simplify other tests, disable chaining too.
     *
     * For singlestep and -d nochain, suppress goto_tb so that
     * we can log -d cpu,exec after every TB.
     */
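    /*
     * The low CF_COUNT_MASK bits of cflags carry the maximum number of
     * instructions to translate into the TB, so OR-ing in 1 below
     * requests a single-instruction TB.
     */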
    if (unlikely(cpu->singlestep_enabled)) {
        cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | CF_SINGLE_STEP | 1;
    } else if (qatomic_read(&one_insn_per_tb)) {
        cflags |= CF_NO_GOTO_TB | 1;
    } else if (qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
        cflags |= CF_NO_GOTO_TB;
    }

    return cflags;
}

struct tb_desc {
    vaddr pc;
    uint64_t cs_base;
    CPUArchState *env;
    tb_page_addr_t page_addr0;
    uint32_t flags;
    uint32_t cflags;
};

static bool tb_lookup_cmp(const void *p, const void *d)
{
    const TranslationBlock *tb = p;
    const struct tb_desc *desc = d;

    if ((tb_cflags(tb) & CF_PCREL || tb->pc == desc->pc) &&
        tb_page_addr0(tb) == desc->page_addr0 &&
        tb->cs_base == desc->cs_base &&
        tb->flags == desc->flags &&
        tb_cflags(tb) == desc->cflags) {
        /* check next page if needed */
        tb_page_addr_t tb_phys_page1 = tb_page_addr1(tb);
        if (tb_phys_page1 == -1) {
            return true;
        } else {
            tb_page_addr_t phys_page1;
            vaddr virt_page1;

            /*
             * We know that the first page matched, and an otherwise valid TB
             * encountered an incomplete instruction at the end of that page,
             * therefore we know that generating a new TB from the current PC
             * must also require reading from the next page -- even if the
             * second pages do not match, and therefore the resulting insn
             * is different for the new TB.  Therefore any exception raised
             * here by the faulting lookup is not premature.
             */
            virt_page1 = TARGET_PAGE_ALIGN(desc->pc);
            phys_page1 = get_page_addr_code(desc->env, virt_page1);
            if (tb_phys_page1 == phys_page1) {
                return true;
            }
        }
    }
    return false;
}

static TranslationBlock *tb_htable_lookup(CPUState *cpu, vaddr pc,
                                          uint64_t cs_base, uint32_t flags,
                                          uint32_t cflags)
{
    tb_page_addr_t phys_pc;
    struct tb_desc desc;
    uint32_t h;

    desc.env = cpu_env(cpu);
    desc.cs_base = cs_base;
    desc.flags = flags;
    desc.cflags = cflags;
    desc.pc = pc;
    phys_pc = get_page_addr_code(desc.env, pc);
    if (phys_pc == -1) {
        return NULL;
    }
    desc.page_addr0 = phys_pc;
    h = tb_hash_func(phys_pc, (cflags & CF_PCREL ? 0 : pc),
                     flags, cs_base, cflags);
    return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
}

/* Might cause an exception, so have a longjmp destination ready */
static inline TranslationBlock *tb_lookup(CPUState *cpu, vaddr pc,
                                          uint64_t cs_base, uint32_t flags,
                                          uint32_t cflags)
{
    TranslationBlock *tb;
    CPUJumpCache *jc;
    uint32_t hash;

    /* we should never be trying to look up an INVALID tb */
    tcg_debug_assert(!(cflags & CF_INVALID));

    hash = tb_jmp_cache_hash_func(pc);
    jc = cpu->tb_jmp_cache;

    tb = qatomic_read(&jc->array[hash].tb);
    if (likely(tb &&
               jc->array[hash].pc == pc &&
               tb->cs_base == cs_base &&
               tb->flags == flags &&
               tb_cflags(tb) == cflags)) {
        goto hit;
    }

    tb = tb_htable_lookup(cpu, pc, cs_base, flags, cflags);
    if (tb == NULL) {
        return NULL;
    }

    jc->array[hash].pc = pc;
    qatomic_set(&jc->array[hash].tb, tb);

hit:
    /*
     * As long as tb is not NULL, the contents are consistent.  Therefore,
     * the virtual PC has to match for non-CF_PCREL translations.
     */
    assert((tb_cflags(tb) & CF_PCREL) || tb->pc == pc);
    return tb;
}
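
/*
 * Note on CF_PCREL: for position-independent translations the virtual
 * PC is not part of the TB's identity.  tb_hash_func() above is given
 * 0 in place of pc, and tb_lookup_cmp() skips the tb->pc test, so such
 * TBs match on the physical page, flags and cs_base alone.
 */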

static void log_cpu_exec(vaddr pc, CPUState *cpu,
                         const TranslationBlock *tb)
{
    if (qemu_log_in_addr_range(pc)) {
        qemu_log_mask(CPU_LOG_EXEC,
                      "Trace %d: %p [%08" PRIx64
                      "/%016" VADDR_PRIx "/%08x/%08x] %s\n",
                      cpu->cpu_index, tb->tc.ptr, tb->cs_base, pc,
                      tb->flags, tb->cflags, lookup_symbol(pc));

        if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
            FILE *logfile = qemu_log_trylock();
            if (logfile) {
                int flags = 0;

                if (qemu_loglevel_mask(CPU_LOG_TB_FPU)) {
                    flags |= CPU_DUMP_FPU;
                }
#if defined(TARGET_I386)
                flags |= CPU_DUMP_CCOP;
#endif
                if (qemu_loglevel_mask(CPU_LOG_TB_VPU)) {
                    flags |= CPU_DUMP_VPU;
                }
                cpu_dump_state(cpu, logfile, flags);
                qemu_log_unlock(logfile);
            }
        }
    }
}

static bool check_for_breakpoints_slow(CPUState *cpu, vaddr pc,
                                       uint32_t *cflags)
{
    CPUBreakpoint *bp;
    bool match_page = false;

    /*
     * Singlestep overrides breakpoints.
     * This requirement is visible in the record-replay tests, where
     * we would fail to make forward progress in reverse-continue.
     *
     * TODO: gdb singlestep should only override gdb breakpoints,
     * so that one could (gdb) singlestep into the guest kernel's
     * architectural breakpoint handler.
     */
    if (cpu->singlestep_enabled) {
        return false;
    }

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        /*
         * If we have an exact pc match, trigger the breakpoint.
         * Otherwise, note matches within the page.
         */
        if (pc == bp->pc) {
            bool match_bp = false;

            if (bp->flags & BP_GDB) {
                match_bp = true;
            } else if (bp->flags & BP_CPU) {
#ifdef CONFIG_USER_ONLY
                g_assert_not_reached();
#else
                CPUClass *cc = CPU_GET_CLASS(cpu);
                assert(cc->tcg_ops->debug_check_breakpoint);
                match_bp = cc->tcg_ops->debug_check_breakpoint(cpu);
#endif
            }

            if (match_bp) {
                cpu->exception_index = EXCP_DEBUG;
                return true;
            }
        } else if (((pc ^ bp->pc) & TARGET_PAGE_MASK) == 0) {
            match_page = true;
        }
    }

    /*
     * Within the same page as a breakpoint, single-step,
     * returning to helper_lookup_tb_ptr after each insn looking
     * for the actual breakpoint.
     *
     * TODO: Perhaps better to record all of the TBs associated
     * with a given virtual page that contains a breakpoint, and
     * then invalidate them when a new overlapping breakpoint is
     * set on the page.  Non-overlapping TBs would not be
     * invalidated, nor would any TB need to be invalidated as
     * breakpoints are removed.
     */
    if (match_page) {
        *cflags = (*cflags & ~CF_COUNT_MASK) | CF_NO_GOTO_TB | 1;
    }
    return false;
}

static inline bool check_for_breakpoints(CPUState *cpu, vaddr pc,
                                         uint32_t *cflags)
{
    return unlikely(!QTAILQ_EMPTY(&cpu->breakpoints)) &&
        check_for_breakpoints_slow(cpu, pc, cflags);
}
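
/*
 * The inline wrapper above keeps the common case cheap: when no
 * breakpoints are set at all, the per-lookup cost is a single
 * QTAILQ_EMPTY() test, and the list walk is confined to the slow path.
 */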

/**
 * helper_lookup_tb_ptr: quick check for next tb
 * @env: current cpu state
 *
 * Look for an existing TB matching the current cpu state.
 * If found, return the code pointer.  If not found, return
 * the tcg epilogue so that we return into cpu_tb_exec.
 */
const void *HELPER(lookup_tb_ptr)(CPUArchState *env)
{
    CPUState *cpu = env_cpu(env);
    TranslationBlock *tb;
    vaddr pc;
    uint64_t cs_base;
    uint32_t flags, cflags;

    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);

    cflags = curr_cflags(cpu);
    if (check_for_breakpoints(cpu, pc, &cflags)) {
        cpu_loop_exit(cpu);
    }

    tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
    if (tb == NULL) {
        return tcg_code_gen_epilogue;
    }

    if (qemu_loglevel_mask(CPU_LOG_TB_CPU | CPU_LOG_EXEC)) {
        log_cpu_exec(pc, cpu, tb);
    }

    return tb->tc.ptr;
}

/* Execute a TB, and fix up the CPU state afterwards if necessary */
/*
 * Disable CFI checks.
 * TCG creates binary blobs at runtime, with the transformed code.
 * A TB is a blob of binary code, created at runtime and called with an
 * indirect function call.  Since such a function did not exist at compile
 * time, the CFI runtime has no way to verify its signature and would fail.
 * TCG is not considered a security-sensitive part of QEMU, so this does not
 * affect the impact of CFI in environments with high security requirements.
 */
static inline TranslationBlock * QEMU_DISABLE_CFI
cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
{
    CPUArchState *env = cpu_env(cpu);
    uintptr_t ret;
    TranslationBlock *last_tb;
    const void *tb_ptr = itb->tc.ptr;

    if (qemu_loglevel_mask(CPU_LOG_TB_CPU | CPU_LOG_EXEC)) {
        log_cpu_exec(log_pc(cpu, itb), cpu, itb);
    }

    qemu_thread_jit_execute();
    ret = tcg_qemu_tb_exec(env, tb_ptr);
    cpu->neg.can_do_io = true;
    qemu_plugin_disable_mem_helpers(cpu);
    /*
     * TODO: Delay swapping back to the read-write region of the TB
     * until we actually need to modify the TB.  The read-only copy,
     * coming from the rx region, shares the same host TLB entry as
     * the code that executed the exit_tb opcode that arrived here.
     * If we insist on touching both the RX and the RW pages, we
     * double the host TLB pressure.
     */
    last_tb = tcg_splitwx_to_rw((void *)(ret & ~TB_EXIT_MASK));
    *tb_exit = ret & TB_EXIT_MASK;

    trace_exec_tb_exit(last_tb, *tb_exit);

    if (*tb_exit > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);

        if (cc->tcg_ops->synchronize_from_tb) {
            cc->tcg_ops->synchronize_from_tb(cpu, last_tb);
        } else {
            tcg_debug_assert(!(tb_cflags(last_tb) & CF_PCREL));
            assert(cc->set_pc);
            cc->set_pc(cpu, last_tb->pc);
        }
        if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
            vaddr pc = log_pc(cpu, last_tb);
            if (qemu_log_in_addr_range(pc)) {
                qemu_log("Stopped execution of TB chain before %p [%016"
                         VADDR_PRIx "] %s\n",
                         last_tb->tc.ptr, pc, lookup_symbol(pc));
            }
        }
    }

    /*
     * If gdb single-step, and we haven't raised another exception,
     * raise a debug exception.  Single-step with another exception
     * is handled in cpu_handle_exception.
     */
    if (unlikely(cpu->singlestep_enabled) && cpu->exception_index == -1) {
        cpu->exception_index = EXCP_DEBUG;
        cpu_loop_exit(cpu);
    }

    return last_tb;
}

static void cpu_exec_enter(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (cc->tcg_ops->cpu_exec_enter) {
        cc->tcg_ops->cpu_exec_enter(cpu);
    }
}

static void cpu_exec_exit(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (cc->tcg_ops->cpu_exec_exit) {
        cc->tcg_ops->cpu_exec_exit(cpu);
    }
}

static void cpu_exec_longjmp_cleanup(CPUState *cpu)
{
    /* Non-buggy compilers preserve this; assert the correct value. */
    g_assert(cpu == current_cpu);

#ifdef CONFIG_USER_ONLY
    clear_helper_retaddr();
    if (have_mmap_lock()) {
        mmap_unlock();
    }
#else
    /*
     * For softmmu, a tlb_fill fault during translation will land here,
     * and we need to release any page locks held.  In system mode we
     * have one tcg_ctx per thread, so we know it was this cpu doing
     * the translation.
     *
     * Alternative 1: Install a cleanup to be called via an exception
     * handling safe longjmp.  It seems plausible that all our hosts
     * support such a thing.  We'd have to properly register unwind info
     * for the JIT for EH, rather than just for GDB.
     *
     * Alternative 2: Set and restore cpu->jmp_env in tb_gen_code to
     * capture the cpu_loop_exit longjmp, perform the cleanup, and
     * jump again to arrive here.
     */
    if (tcg_ctx->gen_tb) {
        tb_unlock_pages(tcg_ctx->gen_tb);
        tcg_ctx->gen_tb = NULL;
    }
#endif
    if (bql_locked()) {
        bql_unlock();
    }
    assert_no_pages_locked();
}

void cpu_exec_step_atomic(CPUState *cpu)
{
    CPUArchState *env = cpu_env(cpu);
    TranslationBlock *tb;
    vaddr pc;
    uint64_t cs_base;
    uint32_t flags, cflags;
    int tb_exit;

    if (sigsetjmp(cpu->jmp_env, 0) == 0) {
        start_exclusive();
        g_assert(cpu == current_cpu);
        g_assert(!cpu->running);
        cpu->running = true;

        cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);

        cflags = curr_cflags(cpu);
        /* Execute in a serial context. */
        cflags &= ~CF_PARALLEL;
        /* After 1 insn, return and release the exclusive lock. */
        cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | 1;
        /*
         * No need to check_for_breakpoints here.
         * We only arrive in cpu_exec_step_atomic after beginning execution
         * of an insn that includes an atomic operation we can't handle.
         * Any breakpoint for this insn will have been recognized earlier.
         */

        tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
        if (tb == NULL) {
            mmap_lock();
            tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
            mmap_unlock();
        }

        cpu_exec_enter(cpu);
        /* execute the generated code */
        trace_exec_tb(tb, pc);
        cpu_tb_exec(cpu, tb, &tb_exit);
        cpu_exec_exit(cpu);
    } else {
        cpu_exec_longjmp_cleanup(cpu);
    }

    /*
     * As we start the exclusive region before codegen we must still
     * be in the region if we longjump out of either the codegen or
     * the execution.
     */
    g_assert(cpu_in_exclusive_context(cpu));
    cpu->running = false;
    end_exclusive();
}
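
/*
 * With split-w^x the generated code buffer is mapped twice: an
 * executable (rx) view and a writable (rw) view separated by the
 * constant tcg_splitwx_diff.  Jumps execute from the rx alias, while
 * the patching below writes through the rw alias.
 */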

void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr)
{
    /*
     * Get the rx view of the structure, from which we find the
     * executable code address, and tb_target_set_jmp_target can
     * produce a pc-relative displacement to jmp_target_addr[n].
     */
    const TranslationBlock *c_tb = tcg_splitwx_to_rx(tb);
    uintptr_t offset = tb->jmp_insn_offset[n];
    uintptr_t jmp_rx = (uintptr_t)tb->tc.ptr + offset;
    uintptr_t jmp_rw = jmp_rx - tcg_splitwx_diff;

    tb->jmp_target_addr[n] = addr;
    tb_target_set_jmp_target(c_tb, n, jmp_rx, jmp_rw);
}

static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    uintptr_t old;

    qemu_thread_jit_write();
    assert(n < ARRAY_SIZE(tb->jmp_list_next));
    qemu_spin_lock(&tb_next->jmp_lock);

    /* make sure the destination TB is valid */
    if (tb_next->cflags & CF_INVALID) {
        goto out_unlock_next;
    }
    /* Atomically claim the jump destination slot only if it was NULL */
    old = qatomic_cmpxchg(&tb->jmp_dest[n], (uintptr_t)NULL,
                          (uintptr_t)tb_next);
    if (old) {
        goto out_unlock_next;
    }

    /* patch the native jump address */
    tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc.ptr);

    /* add in TB jmp list */
    tb->jmp_list_next[n] = tb_next->jmp_list_head;
    tb_next->jmp_list_head = (uintptr_t)tb | n;

    qemu_spin_unlock(&tb_next->jmp_lock);

    qemu_log_mask(CPU_LOG_EXEC, "Linking TBs %p index %d -> %p\n",
                  tb->tc.ptr, n, tb_next->tc.ptr);
    return;

 out_unlock_next:
    qemu_spin_unlock(&tb_next->jmp_lock);
    return;
}

static inline bool cpu_handle_halt(CPUState *cpu)
{
#ifndef CONFIG_USER_ONLY
    if (cpu->halted) {
#if defined(TARGET_I386)
        if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            bql_lock();
            apic_poll_irq(x86_cpu->apic_state);
            cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
            bql_unlock();
        }
#endif /* TARGET_I386 */
        if (!cpu_has_work(cpu)) {
            return true;
        }

        cpu->halted = 0;
    }
#endif /* !CONFIG_USER_ONLY */

    return false;
}

static inline void cpu_handle_debug_exception(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }

    if (cc->tcg_ops->debug_excp_handler) {
        cc->tcg_ops->debug_excp_handler(cpu);
    }
}
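
/*
 * cpu_handle_exception() returns true, with *ret filled in, when the
 * outer execution loop must exit back to cpu_exec(); returning false
 * means execution can continue with the inner TB loop.
 */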

static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
{
    if (cpu->exception_index < 0) {
#ifndef CONFIG_USER_ONLY
        if (replay_has_exception()
            && cpu->neg.icount_decr.u16.low + cpu->icount_extra == 0) {
            /* Execute just one insn to trigger exception pending in the log */
            cpu->cflags_next_tb = (curr_cflags(cpu) & ~CF_USE_ICOUNT)
                                  | CF_NOIRQ | 1;
        }
#endif
        return false;
    }
    if (cpu->exception_index >= EXCP_INTERRUPT) {
        /* exit request from the cpu execution loop */
        *ret = cpu->exception_index;
        if (*ret == EXCP_DEBUG) {
            cpu_handle_debug_exception(cpu);
        }
        cpu->exception_index = -1;
        return true;
    } else {
#if defined(CONFIG_USER_ONLY)
        /* if user mode only, we simulate a fake exception
           which will be handled outside the cpu execution
           loop */
#if defined(TARGET_I386)
        CPUClass *cc = CPU_GET_CLASS(cpu);
        cc->tcg_ops->fake_user_interrupt(cpu);
#endif /* TARGET_I386 */
        *ret = cpu->exception_index;
        cpu->exception_index = -1;
        return true;
#else
        if (replay_exception()) {
            CPUClass *cc = CPU_GET_CLASS(cpu);
            bql_lock();
            cc->tcg_ops->do_interrupt(cpu);
            bql_unlock();
            cpu->exception_index = -1;

            if (unlikely(cpu->singlestep_enabled)) {
                /*
                 * After processing the exception, ensure an EXCP_DEBUG is
                 * raised when single-stepping so that GDB doesn't miss the
                 * next instruction.
                 */
                *ret = EXCP_DEBUG;
                cpu_handle_debug_exception(cpu);
                return true;
            }
        } else if (!replay_has_interrupt()) {
            /* give a chance to iothread in replay mode */
            *ret = EXCP_INTERRUPT;
            return true;
        }
#endif
    }

    return false;
}

#ifndef CONFIG_USER_ONLY
/*
 * CPU_INTERRUPT_POLL is a virtual event which gets converted into a
 * "real" interrupt event later.  It does not need to be recorded for
 * replay purposes.
 */
static inline bool need_replay_interrupt(int interrupt_request)
{
#if defined(TARGET_I386)
    return !(interrupt_request & CPU_INTERRUPT_POLL);
#else
    return true;
#endif
}
#endif /* !CONFIG_USER_ONLY */

static inline bool icount_exit_request(CPUState *cpu)
{
    if (!icount_enabled()) {
        return false;
    }
    if (cpu->cflags_next_tb != -1 && !(cpu->cflags_next_tb & CF_USE_ICOUNT)) {
        return false;
    }
    return cpu->neg.icount_decr.u16.low + cpu->icount_extra == 0;
}
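
/*
 * Note on the icount arithmetic above and in cpu_loop_exec_tb(): the
 * instruction budget is kept in two parts, the 16-bit decrementer
 * icount_decr.u16.low that generated code counts down, and the
 * overflow in icount_extra.  Their sum is the number of instructions
 * left, so a sum of zero means the budget is exhausted.
 */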

static inline bool cpu_handle_interrupt(CPUState *cpu,
                                        TranslationBlock **last_tb)
{
    /*
     * If we have requested custom cflags with CF_NOIRQ we should
     * skip checking here.  Any pending interrupts will get picked up
     * by the next TB we execute under normal cflags.
     */
    if (cpu->cflags_next_tb != -1 && cpu->cflags_next_tb & CF_NOIRQ) {
        return false;
    }

    /* Clear the interrupt flag now since we're processing
     * cpu->interrupt_request and cpu->exit_request.
     * Ensure zeroing happens before reading cpu->exit_request or
     * cpu->interrupt_request (see also smp_wmb in cpu_exit())
     */
    qatomic_set_mb(&cpu->neg.icount_decr.u16.high, 0);

    if (unlikely(qatomic_read(&cpu->interrupt_request))) {
        int interrupt_request;
        bql_lock();
        interrupt_request = cpu->interrupt_request;
        if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
            /* Mask out external interrupts for this step. */
            interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
        }
        if (interrupt_request & CPU_INTERRUPT_DEBUG) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
            cpu->exception_index = EXCP_DEBUG;
            bql_unlock();
            return true;
        }
#if !defined(CONFIG_USER_ONLY)
        if (replay_mode == REPLAY_MODE_PLAY && !replay_has_interrupt()) {
            /* Do nothing */
        } else if (interrupt_request & CPU_INTERRUPT_HALT) {
            replay_interrupt();
            cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
            cpu->halted = 1;
            cpu->exception_index = EXCP_HLT;
            bql_unlock();
            return true;
        }
#if defined(TARGET_I386)
        else if (interrupt_request & CPU_INTERRUPT_INIT) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            CPUArchState *env = &x86_cpu->env;
            replay_interrupt();
            cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0);
            do_cpu_init(x86_cpu);
            cpu->exception_index = EXCP_HALTED;
            bql_unlock();
            return true;
        }
#else
        else if (interrupt_request & CPU_INTERRUPT_RESET) {
            replay_interrupt();
            cpu_reset(cpu);
            bql_unlock();
            return true;
        }
#endif /* !TARGET_I386 */
        /* The target hook has 3 exit conditions:
           False when the interrupt isn't processed,
           True when it is, and we should restart on a new TB,
           and a longjmp out through cpu_loop_exit.  */
        else {
            CPUClass *cc = CPU_GET_CLASS(cpu);

            if (cc->tcg_ops->cpu_exec_interrupt &&
                cc->tcg_ops->cpu_exec_interrupt(cpu, interrupt_request)) {
                if (need_replay_interrupt(interrupt_request)) {
                    replay_interrupt();
                }
                /*
                 * After processing the interrupt, ensure an EXCP_DEBUG is
                 * raised when single-stepping so that GDB doesn't miss the
                 * next instruction.
                 */
                if (unlikely(cpu->singlestep_enabled)) {
                    cpu->exception_index = EXCP_DEBUG;
                    bql_unlock();
                    return true;
                }
                cpu->exception_index = -1;
                *last_tb = NULL;
            }
            /* The target hook may have updated the 'cpu->interrupt_request';
             * reload the 'interrupt_request' value */
            interrupt_request = cpu->interrupt_request;
        }
#endif /* !CONFIG_USER_ONLY */
        if (interrupt_request & CPU_INTERRUPT_EXITTB) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
            /* ensure that no TB jump will be modified as
               the program flow was changed */
            *last_tb = NULL;
        }

        /* If we exit via cpu_loop_exit/longjmp it is reset in cpu_exec */
        bql_unlock();
    }

    /* Finally, check if we need to exit to the main loop. */
    if (unlikely(qatomic_read(&cpu->exit_request)) || icount_exit_request(cpu)) {
        qatomic_set(&cpu->exit_request, 0);
        if (cpu->exception_index == -1) {
            cpu->exception_index = EXCP_INTERRUPT;
        }
        return true;
    }

    return false;
}
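
/*
 * Note: the high half of icount_decr doubles as the asynchronous exit
 * flag.  cpu_exit() stores -1 there, which drives the u32 value read in
 * cpu_loop_exec_tb() below negative and stops TB chaining; it is reset
 * to 0 at the top of cpu_handle_interrupt() above.
 */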

static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
                                    vaddr pc, TranslationBlock **last_tb,
                                    int *tb_exit)
{
    int32_t insns_left;

    trace_exec_tb(tb, pc);
    tb = cpu_tb_exec(cpu, tb, tb_exit);
    if (*tb_exit != TB_EXIT_REQUESTED) {
        *last_tb = tb;
        return;
    }

    *last_tb = NULL;
    insns_left = qatomic_read(&cpu->neg.icount_decr.u32);
    if (insns_left < 0) {
        /* Something asked us to stop executing chained TBs; just
         * continue round the main loop.  Whatever requested the exit
         * will also have set something else (eg exit_request or
         * interrupt_request) which will be handled by
         * cpu_handle_interrupt.  cpu_handle_interrupt will also
         * clear cpu->icount_decr.u16.high.
         */
        return;
    }

    /* Instruction counter expired.  */
    assert(icount_enabled());
#ifndef CONFIG_USER_ONLY
    /* Ensure global icount has gone forward */
    icount_update(cpu);
    /* Refill decrementer and continue execution. */
    insns_left = MIN(0xffff, cpu->icount_budget);
    cpu->neg.icount_decr.u16.low = insns_left;
    cpu->icount_extra = cpu->icount_budget - insns_left;

    /*
     * If the next tb has more instructions than we have left to
     * execute we need to ensure we find/generate a TB with exactly
     * insns_left instructions in it.
     */
    if (insns_left > 0 && insns_left < tb->icount) {
        assert(insns_left <= CF_COUNT_MASK);
        assert(cpu->icount_extra == 0);
        cpu->cflags_next_tb = (tb->cflags & ~CF_COUNT_MASK) | insns_left;
    }
#endif
}

/* main execution loop */

static int __attribute__((noinline))
cpu_exec_loop(CPUState *cpu, SyncClocks *sc)
{
    int ret;

    /* if an exception is pending, we execute it here */
    while (!cpu_handle_exception(cpu, &ret)) {
        TranslationBlock *last_tb = NULL;
        int tb_exit = 0;

        while (!cpu_handle_interrupt(cpu, &last_tb)) {
            TranslationBlock *tb;
            vaddr pc;
            uint64_t cs_base;
            uint32_t flags, cflags;

            cpu_get_tb_cpu_state(cpu_env(cpu), &pc, &cs_base, &flags);

            /*
             * When requested, use an exact setting for cflags for the next
             * execution.  This is used for icount, precise smc, and stop-
             * after-access watchpoints.  Since this request should never
             * have CF_INVALID set, -1 is a convenient invalid value that
             * does not require tcg headers for cpu_common_reset.
             */
            cflags = cpu->cflags_next_tb;
            if (cflags == -1) {
                cflags = curr_cflags(cpu);
            } else {
                cpu->cflags_next_tb = -1;
            }

            if (check_for_breakpoints(cpu, pc, &cflags)) {
                break;
            }

            tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
            if (tb == NULL) {
                CPUJumpCache *jc;
                uint32_t h;

                mmap_lock();
                tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
                mmap_unlock();

                /*
                 * Add the TB to the virtual-pc hash table
                 * for fast lookup.
                 */
                h = tb_jmp_cache_hash_func(pc);
                jc = cpu->tb_jmp_cache;
                jc->array[h].pc = pc;
                qatomic_set(&jc->array[h].tb, tb);
            }

#ifndef CONFIG_USER_ONLY
            /*
             * We don't take care of direct jumps when address mapping
             * changes in system emulation.  So it's not safe to make a
             * direct jump to a TB spanning two pages because the mapping
             * for the second page can change.
             */
            if (tb_page_addr1(tb) != -1) {
                last_tb = NULL;
            }
#endif
            /* See if we can patch the calling TB. */
            if (last_tb) {
                tb_add_jump(last_tb, tb_exit, tb);
            }

            cpu_loop_exec_tb(cpu, tb, pc, &last_tb, &tb_exit);

            /* Try to align the host and virtual clocks
               if the guest is running ahead. */
            align_clocks(sc, cpu);
        }
    }
    return ret;
}
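
/*
 * The loop body above is deliberately noinline and kept out of
 * cpu_exec_setjmp() below: locals of a function that calls sigsetjmp()
 * have indeterminate values after a longjmp, so keeping the real work
 * in a separate frame avoids "variable might be clobbered" hazards.
 */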

static int cpu_exec_setjmp(CPUState *cpu, SyncClocks *sc)
{
    /* Prepare setjmp context for exception handling. */
    if (unlikely(sigsetjmp(cpu->jmp_env, 0) != 0)) {
        cpu_exec_longjmp_cleanup(cpu);
    }

    return cpu_exec_loop(cpu, sc);
}

int cpu_exec(CPUState *cpu)
{
    int ret;
    SyncClocks sc = { 0 };

    /* replay_interrupt may need current_cpu */
    current_cpu = cpu;

    if (cpu_handle_halt(cpu)) {
        return EXCP_HALTED;
    }

    RCU_READ_LOCK_GUARD();
    cpu_exec_enter(cpu);

    /*
     * Calculate difference between guest clock and host clock.
     * This delay includes the delay of the last cycle, so
     * what we have to do is sleep until it is 0.  As for the
     * advance/delay we gain here, we try to fix it next time.
     */
    init_delay_params(&sc, cpu);

    ret = cpu_exec_setjmp(cpu, &sc);

    cpu_exec_exit(cpu);
    return ret;
}

bool tcg_exec_realizefn(CPUState *cpu, Error **errp)
{
    static bool tcg_target_initialized;
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (!tcg_target_initialized) {
        cc->tcg_ops->initialize();
        tcg_target_initialized = true;
    }

    cpu->tb_jmp_cache = g_new0(CPUJumpCache, 1);
    tlb_init(cpu);
#ifndef CONFIG_USER_ONLY
    tcg_iommu_init_notifier_list(cpu);
#endif /* !CONFIG_USER_ONLY */
    /* qemu_plugin_vcpu_init_hook delayed until cpu_index assigned. */

    return true;
}

/* undo the initializations in reverse order */
void tcg_exec_unrealizefn(CPUState *cpu)
{
#ifndef CONFIG_USER_ONLY
    tcg_iommu_free_notifier_list(cpu);
#endif /* !CONFIG_USER_ONLY */

    tlb_destroy(cpu);
    g_free_rcu(cpu->tb_jmp_cache, rcu);
}