/*
 * Host code generation
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"

#include "trace.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else
#include "system/ram_addr.h"
#endif

#include "cpu-param.h"
#include "exec/cputlb.h"
#include "exec/page-protection.h"
#include "exec/mmap-lock.h"
#include "tb-internal.h"
#include "tlb-bounds.h"
#include "exec/translator.h"
#include "exec/tb-flush.h"
#include "qemu/bitmap.h"
#include "qemu/qemu-print.h"
#include "qemu/main-loop.h"
#include "qemu/cacheinfo.h"
#include "qemu/timer.h"
#include "exec/log.h"
#include "exec/icount.h"
#include "system/tcg.h"
#include "qapi/error.h"
#include "accel/tcg/cpu-ops.h"
#include "tb-jmp-cache.h"
#include "tb-hash.h"
#include "tb-context.h"
#include "tb-internal.h"
#include "internal-common.h"
#include "internal-target.h"
#include "tcg/perf.h"
#include "tcg/insn-start-words.h"

TBContext tb_ctx;

/*
 * Encode VAL as a signed leb128 sequence at P.
 * Return P incremented past the encoded value.
 */
static uint8_t *encode_sleb128(uint8_t *p, int64_t val)
{
    int more, byte;

    do {
        byte = val & 0x7f;
        val >>= 7;
        more = !((val == 0 && (byte & 0x40) == 0)
                 || (val == -1 && (byte & 0x40) != 0));
        if (more) {
            byte |= 0x80;
        }
        *p++ = byte;
    } while (more);

    return p;
}

/*
 * Decode a signed leb128 sequence at *PP; increment *PP past the
 * decoded value.  Return the decoded value.
 */
static int64_t decode_sleb128(const uint8_t **pp)
{
    const uint8_t *p = *pp;
    int64_t val = 0;
    int byte, shift = 0;

    do {
        byte = *p++;
        val |= (int64_t)(byte & 0x7f) << shift;
        shift += 7;
    } while (byte & 0x80);
    if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
        val |= -(int64_t)1 << shift;
    }

    *pp = p;
    return val;
}
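
/*
 * For illustration, a few encodings produced by encode_sleb128() above
 * (and accepted by decode_sleb128()):
 *
 *      0  -> { 0x00 }
 *      1  -> { 0x01 }
 *     -1  -> { 0x7f }
 *    -64  -> { 0x40 }
 *     64  -> { 0xc0, 0x00 }   (bit 0x40 is set, so a second byte is needed
 *                              to distinguish +64 from -64)
 *
 * Small deltas usually fit in one byte, which is why the search data
 * below is stored as per-row deltas rather than absolute values.
 */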

/* Encode the data collected about the instructions while compiling TB.
   Place the data at BLOCK, and return the number of bytes consumed.

   The logical table consists of TARGET_INSN_START_WORDS target_ulong's,
   which come from the target's insn_start data, followed by a uintptr_t
   which comes from the host pc of the end of the code implementing the insn.

   Each line of the table is encoded as sleb128 deltas from the previous
   line.  The seed for the first line is { tb->pc, 0..., tb->tc.ptr }.
   That is, the first column is seeded with the guest pc, the last column
   with the host pc, and the middle columns with zeros.  */

static int encode_search(TranslationBlock *tb, uint8_t *block)
{
    uint8_t *highwater = tcg_ctx->code_gen_highwater;
    uint64_t *insn_data = tcg_ctx->gen_insn_data;
    uint16_t *insn_end_off = tcg_ctx->gen_insn_end_off;
    uint8_t *p = block;
    int i, j, n;

    for (i = 0, n = tb->icount; i < n; ++i) {
        uint64_t prev, curr;

        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            if (i == 0) {
                prev = (!(tb_cflags(tb) & CF_PCREL) && j == 0 ? tb->pc : 0);
            } else {
                prev = insn_data[(i - 1) * TARGET_INSN_START_WORDS + j];
            }
            curr = insn_data[i * TARGET_INSN_START_WORDS + j];
            p = encode_sleb128(p, curr - prev);
        }
        prev = (i == 0 ? 0 : insn_end_off[i - 1]);
        curr = insn_end_off[i];
        p = encode_sleb128(p, curr - prev);

        /* Test for (pending) buffer overflow.  The assumption is that any
           one row beginning below the high water mark cannot overrun
           the buffer completely.  Thus we can test for overflow after
           encoding a row without having to check during encoding.  */
        if (unlikely(p > highwater)) {
            return -1;
        }
    }

    return p - block;
}

static int cpu_unwind_data_from_tb(TranslationBlock *tb, uintptr_t host_pc,
                                   uint64_t *data)
{
    uintptr_t iter_pc = (uintptr_t)tb->tc.ptr;
    const uint8_t *p = tb->tc.ptr + tb->tc.size;
    int i, j, num_insns = tb->icount;

    host_pc -= GETPC_ADJ;

    if (host_pc < iter_pc) {
        return -1;
    }

    memset(data, 0, sizeof(uint64_t) * TARGET_INSN_START_WORDS);
    if (!(tb_cflags(tb) & CF_PCREL)) {
        data[0] = tb->pc;
    }

    /*
     * Reconstruct the stored insn data while looking for the point
     * at which the end of the insn exceeds host_pc.
     */
    for (i = 0; i < num_insns; ++i) {
        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            data[j] += decode_sleb128(&p);
        }
        iter_pc += decode_sleb128(&p);
        if (iter_pc > host_pc) {
            return num_insns - i;
        }
    }
    return -1;
}

/*
 * The cpu state corresponding to 'host_pc' is restored in
 * preparation for exiting the TB.
 */
void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                               uintptr_t host_pc)
{
    uint64_t data[TARGET_INSN_START_WORDS];
    int insns_left = cpu_unwind_data_from_tb(tb, host_pc, data);

    if (insns_left < 0) {
        return;
    }

    if (tb_cflags(tb) & CF_USE_ICOUNT) {
        assert(icount_enabled());
        /*
         * Reset the cycle counter to the start of the block and
         * shift it to the number of actually executed instructions.
         */
        cpu->neg.icount_decr.u16.low += insns_left;
    }

    cpu->cc->tcg_ops->restore_state_to_opc(cpu, tb, data);
}
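
/*
 * As an illustration of the two helpers above: for a TB containing five
 * guest insns that faults while executing the code of its third insn,
 * cpu_unwind_data_from_tb() stops at the first row whose end-of-insn host
 * pc lies beyond host_pc, leaves the third insn's insn_start words in
 * DATA, and returns 3 (the faulting insn plus the two that follow).
 * cpu_restore_state_from_tb() then adds those 3 uncompleted insns back to
 * icount_decr.u16.low (for CF_USE_ICOUNT TBs) and hands DATA to the
 * target's restore_state_to_opc hook, which rewinds the guest state to
 * the start of the third insn.
 */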

bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc)
{
    /*
     * The host_pc has to be in the rx region of the code buffer.
     * If it is not we will not be able to resolve it here.
     * The two cases where host_pc will not be correct are:
     *
     *  - fault during translation (instruction fetch)
     *  - fault from helper (not using GETPC() macro)
     *
     * Either way we need to return early as we can't resolve it here.
     */
    if (in_code_gen_buffer((const void *)(host_pc - tcg_splitwx_diff))) {
        TranslationBlock *tb = tcg_tb_lookup(host_pc);
        if (tb) {
            cpu_restore_state_from_tb(cpu, tb, host_pc);
            return true;
        }
    }
    return false;
}

bool cpu_unwind_state_data(CPUState *cpu, uintptr_t host_pc, uint64_t *data)
{
    if (in_code_gen_buffer((const void *)(host_pc - tcg_splitwx_diff))) {
        TranslationBlock *tb = tcg_tb_lookup(host_pc);
        if (tb) {
            return cpu_unwind_data_from_tb(tb, host_pc, data) >= 0;
        }
    }
    return false;
}

void page_init(void)
{
    page_table_config_init();
}

/*
 * Isolate the portion of code gen which can setjmp/longjmp.
 * Return the size of the generated code, or negative on error.
 */
static int setjmp_gen_code(CPUArchState *env, TranslationBlock *tb,
                           vaddr pc, void *host_pc,
                           int *max_insns, int64_t *ti)
{
    int ret = sigsetjmp(tcg_ctx->jmp_trans, 0);
    if (unlikely(ret != 0)) {
        return ret;
    }

    tcg_func_start(tcg_ctx);

    CPUState *cs = env_cpu(env);
    tcg_ctx->cpu = cs;
    cs->cc->tcg_ops->translate_code(cs, tb, max_insns, pc, host_pc);

    assert(tb->size != 0);
    tcg_ctx->cpu = NULL;
    *max_insns = tb->icount;

    return tcg_gen_code(tcg_ctx, tb, pc);
}

/* Called with mmap_lock held for user mode emulation. */
TranslationBlock *tb_gen_code(CPUState *cpu,
                              vaddr pc, uint64_t cs_base,
                              uint32_t flags, int cflags)
{
    CPUArchState *env = cpu_env(cpu);
    TranslationBlock *tb, *existing_tb;
    tb_page_addr_t phys_pc, phys_p2;
    tcg_insn_unit *gen_code_buf;
    int gen_code_size, search_size, max_insns;
    int64_t ti;
    void *host_pc;

    assert_memory_lock();
    qemu_thread_jit_write();

    phys_pc = get_page_addr_code_hostp(env, pc, &host_pc);

    if (phys_pc == -1) {
        /* Generate a one-shot TB with 1 insn in it */
        cflags = (cflags & ~CF_COUNT_MASK) | 1;
    }

    max_insns = cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = TCG_MAX_INSNS;
    }
    QEMU_BUILD_BUG_ON(CF_COUNT_MASK + 1 != TCG_MAX_INSNS);
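
    /*
     * Note on the encoding above: the CF_COUNT field can hold
     * 1..CF_COUNT_MASK directly, and a value of 0 stands in for the
     * maximum, TCG_MAX_INSNS; the build assert ties the two together.
     */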

 buffer_overflow:
    assert_no_pages_locked();
    tb = tcg_tb_alloc(tcg_ctx);
    if (unlikely(!tb)) {
        /* flush must be done */
        tb_flush(cpu);
        mmap_unlock();
        /* Make the execution loop process the flush as soon as possible. */
        cpu->exception_index = EXCP_INTERRUPT;
        cpu_loop_exit(cpu);
    }

    gen_code_buf = tcg_ctx->code_gen_ptr;
    tb->tc.ptr = tcg_splitwx_to_rx(gen_code_buf);
    if (!(cflags & CF_PCREL)) {
        tb->pc = pc;
    }
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    tb_set_page_addr0(tb, phys_pc);
    tb_set_page_addr1(tb, -1);
    if (phys_pc != -1) {
        tb_lock_page0(phys_pc);
    }

    tcg_ctx->gen_tb = tb;
    tcg_ctx->addr_type = TARGET_LONG_BITS == 32 ? TCG_TYPE_I32 : TCG_TYPE_I64;
#ifdef CONFIG_SOFTMMU
    tcg_ctx->page_bits = TARGET_PAGE_BITS;
    tcg_ctx->page_mask = TARGET_PAGE_MASK;
    tcg_ctx->tlb_dyn_max_bits = CPU_TLB_DYN_MAX_BITS;
#endif
    tcg_ctx->insn_start_words = TARGET_INSN_START_WORDS;
#ifdef TCG_GUEST_DEFAULT_MO
    tcg_ctx->guest_mo = TCG_GUEST_DEFAULT_MO;
#else
    tcg_ctx->guest_mo = TCG_MO_ALL;
#endif

 restart_translate:
    trace_translate_block(tb, pc, tb->tc.ptr);

    gen_code_size = setjmp_gen_code(env, tb, pc, host_pc, &max_insns, &ti);
    if (unlikely(gen_code_size < 0)) {
        switch (gen_code_size) {
        case -1:
            /*
             * Overflow of code_gen_buffer, or the current slice of it.
             *
             * TODO: We don't need to re-do tcg_ops->translate_code, nor
             * should we re-do the tcg optimization currently hidden
             * inside tcg_gen_code.  All that should be required is to
             * flush the TBs, allocate a new TB, re-initialize it per
             * above, and re-do the actual code generation.
             */
            qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT,
                          "Restarting code generation for "
                          "code_gen_buffer overflow\n");
            tb_unlock_pages(tb);
            tcg_ctx->gen_tb = NULL;
            goto buffer_overflow;

        case -2:
            /*
             * The code generated for the TranslationBlock is too large.
             * The maximum size allowed by the unwind info is 64k.
             * There may be stricter constraints from relocations
             * in the tcg backend.
             *
             * Try again with half as many insns as we attempted this time.
             * If a single insn overflows, there's a bug somewhere...
             */
            assert(max_insns > 1);
            max_insns /= 2;
            qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT,
                          "Restarting code generation with "
                          "smaller translation block (max %d insns)\n",
                          max_insns);

            /*
             * The half-sized TB may not cross pages.
             * TODO: Fix all targets that cross pages except with
             * the first insn, at which point this can't be reached.
             */
            phys_p2 = tb_page_addr1(tb);
            if (unlikely(phys_p2 != -1)) {
                tb_unlock_page1(phys_pc, phys_p2);
                tb_set_page_addr1(tb, -1);
            }
            goto restart_translate;

        case -3:
            /*
             * We had a page lock ordering problem.  In order to avoid
             * deadlock we had to drop the lock on page0, which means
             * that everything we translated so far is compromised.
             * Restart with locks held on both pages.
             */
            qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT,
                          "Restarting code generation with re-locked pages");
            goto restart_translate;

        default:
            g_assert_not_reached();
        }
    }
    tcg_ctx->gen_tb = NULL;

    search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
    if (unlikely(search_size < 0)) {
        tb_unlock_pages(tb);
        goto buffer_overflow;
    }
    tb->tc.size = gen_code_size;
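
    /*
     * At this point the slice of code_gen_buffer used by this TB is laid
     * out as [TranslationBlock struct, rounded up to the icache line size]
     * [host code: gen_code_size bytes][search data: search_size bytes];
     * code_gen_ptr is advanced past all of it (aligned to CODE_GEN_ALIGN)
     * further below, and rolled back over the same layout if the TB turns
     * out to be a duplicate.
     */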

    /*
     * For CF_PCREL, attribute all executions of the generated code
     * to its first mapping.
     */
    perf_report_code(pc, tb, tcg_splitwx_to_rx(gen_code_buf));

    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
        qemu_log_in_addr_range(pc)) {
        FILE *logfile = qemu_log_trylock();
        if (logfile) {
            int code_size, data_size;
            const tcg_target_ulong *rx_data_gen_ptr;
            size_t chunk_start;
            int insn = 0;

            if (tcg_ctx->data_gen_ptr) {
                rx_data_gen_ptr = tcg_splitwx_to_rx(tcg_ctx->data_gen_ptr);
                code_size = (const void *)rx_data_gen_ptr - tb->tc.ptr;
                data_size = gen_code_size - code_size;
            } else {
                rx_data_gen_ptr = 0;
                code_size = gen_code_size;
                data_size = 0;
            }

            /* Dump header and the first instruction */
            fprintf(logfile, "OUT: [size=%d]\n", gen_code_size);
            fprintf(logfile,
                    " -- guest addr 0x%016" PRIx64 " + tb prologue\n",
                    tcg_ctx->gen_insn_data[insn * TARGET_INSN_START_WORDS]);
            chunk_start = tcg_ctx->gen_insn_end_off[insn];
            disas(logfile, tb->tc.ptr, chunk_start);

            /*
             * Dump each instruction chunk, wrapping up empty chunks into
             * the next instruction.  The whole array is offset so the
             * first entry is the beginning of the 2nd instruction.
             */
            while (insn < tb->icount) {
                size_t chunk_end = tcg_ctx->gen_insn_end_off[insn];
                if (chunk_end > chunk_start) {
                    fprintf(logfile, " -- guest addr 0x%016" PRIx64 "\n",
                            tcg_ctx->gen_insn_data[insn * TARGET_INSN_START_WORDS]);
                    disas(logfile, tb->tc.ptr + chunk_start,
                          chunk_end - chunk_start);
                    chunk_start = chunk_end;
                }
                insn++;
            }

            if (chunk_start < code_size) {
                fprintf(logfile, " -- tb slow paths + alignment\n");
                disas(logfile, tb->tc.ptr + chunk_start,
                      code_size - chunk_start);
            }

            /* Finally dump any data we may have after the block */
            if (data_size) {
                int i;
                fprintf(logfile, " data: [size=%d]\n", data_size);
                for (i = 0; i < data_size / sizeof(tcg_target_ulong); i++) {
                    if (sizeof(tcg_target_ulong) == 8) {
                        fprintf(logfile,
                                "0x%08" PRIxPTR ": .quad 0x%016" TCG_PRIlx "\n",
                                (uintptr_t)&rx_data_gen_ptr[i], rx_data_gen_ptr[i]);
                    } else if (sizeof(tcg_target_ulong) == 4) {
                        fprintf(logfile,
                                "0x%08" PRIxPTR ": .long 0x%08" TCG_PRIlx "\n",
                                (uintptr_t)&rx_data_gen_ptr[i], rx_data_gen_ptr[i]);
                    } else {
                        qemu_build_not_reached();
                    }
                }
            }
            fprintf(logfile, "\n");
            qemu_log_unlock(logfile);
        }
    }

    qatomic_set(&tcg_ctx->code_gen_ptr, (void *)
        ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
                 CODE_GEN_ALIGN));

    /* init jump list */
    qemu_spin_init(&tb->jmp_lock);
    tb->jmp_list_head = (uintptr_t)NULL;
    tb->jmp_list_next[0] = (uintptr_t)NULL;
    tb->jmp_list_next[1] = (uintptr_t)NULL;
    tb->jmp_dest[0] = (uintptr_t)NULL;
    tb->jmp_dest[1] = (uintptr_t)NULL;

    /* init original jump addresses which have been set during tcg_gen_code() */
    if (tb->jmp_reset_offset[0] != TB_JMP_OFFSET_INVALID) {
        tb_reset_jump(tb, 0);
    }
    if (tb->jmp_reset_offset[1] != TB_JMP_OFFSET_INVALID) {
        tb_reset_jump(tb, 1);
    }

    /*
     * Insert TB into the corresponding region tree before publishing it
     * through QHT.  Otherwise, rewinding that happens within the TB might
     * fail to look the TB up via its host PC.
     */
    tcg_tb_insert(tb);

    /*
     * If the TB is not associated with a physical RAM page then it must be
     * a temporary one-insn TB.
     *
     * Such TBs must be added to region trees in order to make sure that
     * restore_state_to_opc() - which on some architectures is not limited to
     * rewinding, but also affects exception handling! - is called when such a
     * TB causes an exception.
     *
     * At the same time, temporary one-insn TBs must be executed at most once,
     * because subsequent reads from, e.g., I/O memory may return different
     * values.  So return early before attempting to link to other TBs or add
     * to the QHT.
     */
    if (tb_page_addr0(tb) == -1) {
        assert_no_pages_locked();
        return tb;
    }

    /*
     * No explicit memory barrier is required -- tb_link_page() makes the
     * TB visible in a consistent state.
     */
    existing_tb = tb_link_page(tb);
    assert_no_pages_locked();

    /* if the TB already exists, discard what we just translated */
    if (unlikely(existing_tb != tb)) {
        uintptr_t orig_aligned = (uintptr_t)gen_code_buf;

        orig_aligned -= ROUND_UP(sizeof(*tb), qemu_icache_linesize);
        qatomic_set(&tcg_ctx->code_gen_ptr, (void *)orig_aligned);
        tcg_tb_remove(tb);
        return existing_tb;
    }
    return tb;
}

/* user-mode: call with mmap_lock held */
void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr)
{
    TranslationBlock *tb;

    assert_memory_lock();

    tb = tcg_tb_lookup(retaddr);
    if (tb) {
        /* We can use retranslation to find the PC. */
        cpu_restore_state_from_tb(cpu, tb, retaddr);
        tb_phys_invalidate(tb, -1);
    } else {
        /* The exception probably happened in a helper.  The CPU state should
           have been saved before calling it.  Fetch the PC from there. */
        CPUArchState *env = cpu_env(cpu);
        vaddr pc;
        uint64_t cs_base;
        tb_page_addr_t addr;
        uint32_t flags;

        cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
        addr = get_page_addr_code(env, pc);
        if (addr != -1) {
            tb_invalidate_phys_range(addr, addr);
        }
    }
}

#ifndef CONFIG_USER_ONLY
/*
 * In deterministic execution mode, instructions doing device I/Os
 * must be at the end of the TB.
 *
 * Called by softmmu_template.h, with iothread mutex not held.
 */
void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
{
    TranslationBlock *tb;
    CPUClass *cc;
    uint32_t n;

    tb = tcg_tb_lookup(retaddr);
    if (!tb) {
        cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    cpu_restore_state_from_tb(cpu, tb, retaddr);

    /*
     * Some guests must re-execute the branch when re-executing a delay
     * slot instruction.  When this is the case, adjust icount and N
     * to account for the re-execution of the branch.
     */
    n = 1;
    cc = cpu->cc;
    if (cc->tcg_ops->io_recompile_replay_branch &&
        cc->tcg_ops->io_recompile_replay_branch(cpu, tb)) {
        cpu->neg.icount_decr.u16.low++;
        n = 2;
    }

    /*
     * Exit the loop and potentially generate a new TB executing just
     * the I/O insns.  We also limit instrumentation to memory
     * operations only (which execute after completion) so we don't
     * double instrument the instruction.  Also don't let an IRQ sneak
     * in before we execute it.
     */
    cpu->cflags_next_tb = curr_cflags(cpu) | CF_MEMI_ONLY | CF_NOIRQ | n;

    if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
        vaddr pc = cpu->cc->get_pc(cpu);
        if (qemu_log_in_addr_range(pc)) {
            qemu_log("cpu_io_recompile: rewound execution of TB to %016"
                     VADDR_PRIx "\n", pc);
        }
    }

    cpu_loop_exit_noexc(cpu);
}

#endif /* CONFIG_USER_ONLY */

/*
 * Called by generic code at e.g. cpu reset after cpu creation,
 * therefore we must be prepared for the jump cache not to be
 * allocated yet.
 */
void tcg_flush_jmp_cache(CPUState *cpu)
{
    CPUJumpCache *jc = cpu->tb_jmp_cache;

    /* During early initialization, the cache may not yet be allocated. */
    if (unlikely(jc == NULL)) {
        return;
    }

    for (int i = 0; i < TB_JMP_CACHE_SIZE; i++) {
        qatomic_set(&jc->array[i].tb, NULL);
    }
}