/*
 * Host code generation
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"

#include "trace.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else
#include "exec/ram_addr.h"
#endif

#include "exec/cputlb.h"
#include "exec/translate-all.h"
#include "exec/translator.h"
#include "exec/tb-flush.h"
#include "qemu/bitmap.h"
#include "qemu/qemu-print.h"
#include "qemu/main-loop.h"
#include "qemu/cacheinfo.h"
#include "qemu/timer.h"
#include "exec/log.h"
#include "sysemu/cpus.h"
#include "sysemu/cpu-timers.h"
#include "sysemu/tcg.h"
#include "qapi/error.h"
#include "hw/core/tcg-cpu-ops.h"
#include "tb-jmp-cache.h"
#include "tb-hash.h"
#include "tb-context.h"
#include "internal-common.h"
#include "internal-target.h"
#include "perf.h"
#include "tcg/insn-start-words.h"

TBContext tb_ctx;

/*
 * Encode VAL as a signed leb128 sequence at P.
 * Return P incremented past the encoded value.
 */
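/*
 * Worked example (standard sleb128 behaviour, independent of this
 * implementation): -2 encodes as the single byte 0x7e, because bit 6
 * doubles as the sign bit; +64 needs two bytes, 0xc0 0x00, precisely
 * because bit 6 of the first byte would otherwise read as a sign bit.
 */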
static uint8_t *encode_sleb128(uint8_t *p, int64_t val)
{
    int more, byte;

    do {
        byte = val & 0x7f;
        val >>= 7;
        more = !((val == 0 && (byte & 0x40) == 0)
                 || (val == -1 && (byte & 0x40) != 0));
        if (more) {
            byte |= 0x80;
        }
        *p++ = byte;
    } while (more);

    return p;
}

/*
 * Decode a signed leb128 sequence at *PP; increment *PP past the
 * decoded value.  Return the decoded value.
 */
static int64_t decode_sleb128(const uint8_t **pp)
{
    const uint8_t *p = *pp;
    int64_t val = 0;
    int byte, shift = 0;

    do {
        byte = *p++;
        val |= (int64_t)(byte & 0x7f) << shift;
        shift += 7;
    } while (byte & 0x80);
    if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
        val |= -(int64_t)1 << shift;
    }

    *pp = p;
    return val;
}
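/*
 * The sign extension above fires only when the final byte has bit 6
 * set and fewer than TARGET_LONG_BITS bits were consumed; e.g. the
 * single byte 0x7e decodes to val = 0x7e, shift = 7, and is then
 * extended to -2, matching the encoder's example.
 */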

/* Encode the data collected about the instructions while compiling TB.
   Place the data at BLOCK, and return the number of bytes consumed.

   The logical table consists of TARGET_INSN_START_WORDS target_ulong's,
   which come from the target's insn_start data, followed by a uintptr_t
   which comes from the host pc of the end of the code implementing the insn.

   Each line of the table is encoded as sleb128 deltas from the previous
   line.  The seed for the first line is { tb->pc, 0..., tb->tc.ptr }.
   That is, the first column is seeded with the guest pc, the last column
   with the host pc, and the middle columns with zeros.  */
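/* A hypothetical illustration, assuming TARGET_INSN_START_WORDS == 1:
   a TB of two 4-byte insns at guest pc 0x1000, whose host code ends at
   offsets 0x20 and 0x38, has the rows { 0x1000, 0x20 } and
   { 0x1004, 0x38 }, encoded as the deltas { 0, +0x20 } (the first
   column being seeded with tb->pc) and { +4, +0x18 }.  */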

static int encode_search(TranslationBlock *tb, uint8_t *block)
{
    uint8_t *highwater = tcg_ctx->code_gen_highwater;
    uint64_t *insn_data = tcg_ctx->gen_insn_data;
    uint16_t *insn_end_off = tcg_ctx->gen_insn_end_off;
    uint8_t *p = block;
    int i, j, n;

    for (i = 0, n = tb->icount; i < n; ++i) {
        uint64_t prev, curr;

        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            if (i == 0) {
                prev = (!(tb_cflags(tb) & CF_PCREL) && j == 0 ? tb->pc : 0);
            } else {
                prev = insn_data[(i - 1) * TARGET_INSN_START_WORDS + j];
            }
            curr = insn_data[i * TARGET_INSN_START_WORDS + j];
            p = encode_sleb128(p, curr - prev);
        }
        prev = (i == 0 ? 0 : insn_end_off[i - 1]);
        curr = insn_end_off[i];
        p = encode_sleb128(p, curr - prev);

        /* Test for (pending) buffer overflow.  The assumption is that any
           one row beginning below the high water mark cannot overrun
           the buffer completely.  Thus we can test for overflow after
           encoding a row without having to check during encoding.  */
        if (unlikely(p > highwater)) {
            return -1;
        }
    }

    return p - block;
}

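/*
 * Decode the unwind data for TB back to the insn covering HOST_PC.
 * On success, fill DATA with the insn_start words of that insn and
 * return the number of insns from it to the end of the TB (i.e. the
 * count not yet fully executed); return -1 if HOST_PC is not covered.
 */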
static int cpu_unwind_data_from_tb(TranslationBlock *tb, uintptr_t host_pc,
                                   uint64_t *data)
{
    uintptr_t iter_pc = (uintptr_t)tb->tc.ptr;
    const uint8_t *p = tb->tc.ptr + tb->tc.size;
    int i, j, num_insns = tb->icount;

    host_pc -= GETPC_ADJ;

    if (host_pc < iter_pc) {
        return -1;
    }

    memset(data, 0, sizeof(uint64_t) * TARGET_INSN_START_WORDS);
    if (!(tb_cflags(tb) & CF_PCREL)) {
        data[0] = tb->pc;
    }

    /*
     * Reconstruct the stored insn data while looking for the point
     * at which the end of the insn exceeds host_pc.
     */
    for (i = 0; i < num_insns; ++i) {
        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            data[j] += decode_sleb128(&p);
        }
        iter_pc += decode_sleb128(&p);
        if (iter_pc > host_pc) {
            return num_insns - i;
        }
    }
    return -1;
}

/*
 * The cpu state corresponding to 'host_pc' is restored in
 * preparation for exiting the TB.
 */
void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                               uintptr_t host_pc)
{
    uint64_t data[TARGET_INSN_START_WORDS];
    int insns_left = cpu_unwind_data_from_tb(tb, host_pc, data);

    if (insns_left < 0) {
        return;
    }

    if (tb_cflags(tb) & CF_USE_ICOUNT) {
        assert(icount_enabled());
        /*
         * Reset the cycle counter to the start of the block and
         * shift it to the number of actually executed instructions.
         */
        cpu->neg.icount_decr.u16.low += insns_left;
    }

    cpu->cc->tcg_ops->restore_state_to_opc(cpu, tb, data);
}

bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc)
{
    /*
     * The host_pc has to be in the rx region of the code buffer.
     * If it is not, we will not be able to resolve it here.
     * The two cases where host_pc will not be correct are:
     *
     *  - fault during translation (instruction fetch)
     *  - fault from helper (not using GETPC() macro)
     *
     * Either way we need to return early, as we can't resolve it here.
     */
    if (in_code_gen_buffer((const void *)(host_pc - tcg_splitwx_diff))) {
        TranslationBlock *tb = tcg_tb_lookup(host_pc);
        if (tb) {
            cpu_restore_state_from_tb(cpu, tb, host_pc);
            return true;
        }
    }
    return false;
}

bool cpu_unwind_state_data(CPUState *cpu, uintptr_t host_pc, uint64_t *data)
{
    if (in_code_gen_buffer((const void *)(host_pc - tcg_splitwx_diff))) {
        TranslationBlock *tb = tcg_tb_lookup(host_pc);
        if (tb) {
            return cpu_unwind_data_from_tb(tb, host_pc, data) >= 0;
        }
    }
    return false;
}

void page_init(void)
{
    page_size_init();
    page_table_config_init();
}

/*
 * Isolate the portion of code gen which can setjmp/longjmp.
 * Return the size of the generated code, or negative on error.
 */
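/*
 * The negative values returned here (either via siglongjmp out of the
 * translator or directly from tcg_gen_code) are interpreted by the
 * switch in tb_gen_code below: -1 for code_gen_buffer overflow,
 * -2 for a too-large TB, -3 for a page lock ordering problem.
 */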
static int setjmp_gen_code(CPUArchState *env, TranslationBlock *tb,
                           vaddr pc, void *host_pc,
                           int *max_insns, int64_t *ti)
{
    int ret = sigsetjmp(tcg_ctx->jmp_trans, 0);
    if (unlikely(ret != 0)) {
        return ret;
    }

    tcg_func_start(tcg_ctx);

    tcg_ctx->cpu = env_cpu(env);
    gen_intermediate_code(env_cpu(env), tb, max_insns, pc, host_pc);
    assert(tb->size != 0);
    tcg_ctx->cpu = NULL;
    *max_insns = tb->icount;

    return tcg_gen_code(tcg_ctx, tb, pc);
}

/* Called with mmap_lock held for user mode emulation. */
TranslationBlock *tb_gen_code(CPUState *cpu,
                              vaddr pc, uint64_t cs_base,
                              uint32_t flags, int cflags)
{
    CPUArchState *env = cpu_env(cpu);
    TranslationBlock *tb, *existing_tb;
    tb_page_addr_t phys_pc, phys_p2;
    tcg_insn_unit *gen_code_buf;
    int gen_code_size, search_size, max_insns;
    int64_t ti;
    void *host_pc;

    assert_memory_lock();
    qemu_thread_jit_write();

    phys_pc = get_page_addr_code_hostp(env, pc, &host_pc);

    if (phys_pc == -1) {
        /* Generate a one-shot TB with 1 insn in it */
        cflags = (cflags & ~CF_COUNT_MASK) | 1;
    }

    max_insns = cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = TCG_MAX_INSNS;
    }
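    /*
     * A count of zero in CF_COUNT_MASK thus stands for the maximum;
     * the build-time assertion below keeps the mask and TCG_MAX_INSNS
     * in sync.
     */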
    QEMU_BUILD_BUG_ON(CF_COUNT_MASK + 1 != TCG_MAX_INSNS);

 buffer_overflow:
    assert_no_pages_locked();
    tb = tcg_tb_alloc(tcg_ctx);
    if (unlikely(!tb)) {
        /* flush must be done */
        tb_flush(cpu);
        mmap_unlock();
        /* Make the execution loop process the flush as soon as possible. */
        cpu->exception_index = EXCP_INTERRUPT;
        cpu_loop_exit(cpu);
    }

    gen_code_buf = tcg_ctx->code_gen_ptr;
    tb->tc.ptr = tcg_splitwx_to_rx(gen_code_buf);
    tb->pc = pc;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    tb_set_page_addr0(tb, phys_pc);
    tb_set_page_addr1(tb, -1);
    if (phys_pc != -1) {
        tb_lock_page0(phys_pc);
    }

    tcg_ctx->gen_tb = tb;
    tcg_ctx->addr_type = TARGET_LONG_BITS == 32 ? TCG_TYPE_I32 : TCG_TYPE_I64;
#ifdef CONFIG_SOFTMMU
    tcg_ctx->page_bits = TARGET_PAGE_BITS;
    tcg_ctx->page_mask = TARGET_PAGE_MASK;
    tcg_ctx->tlb_dyn_max_bits = CPU_TLB_DYN_MAX_BITS;
#endif
    tcg_ctx->insn_start_words = TARGET_INSN_START_WORDS;
#ifdef TCG_GUEST_DEFAULT_MO
    tcg_ctx->guest_mo = TCG_GUEST_DEFAULT_MO;
#else
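    /* Assume the strongest memory ordering when the target defines none. */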
    tcg_ctx->guest_mo = TCG_MO_ALL;
#endif

 restart_translate:
    trace_translate_block(tb, pc, tb->tc.ptr);

    gen_code_size = setjmp_gen_code(env, tb, pc, host_pc, &max_insns, &ti);
    if (unlikely(gen_code_size < 0)) {
        switch (gen_code_size) {
        case -1:
            /*
             * Overflow of code_gen_buffer, or the current slice of it.
             *
             * TODO: We don't need to re-do gen_intermediate_code, nor
             * should we re-do the tcg optimization currently hidden
             * inside tcg_gen_code.  All that should be required is to
             * flush the TBs, allocate a new TB, re-initialize it per
             * above, and re-do the actual code generation.
             */
            qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT,
                          "Restarting code generation for "
                          "code_gen_buffer overflow\n");
            tb_unlock_pages(tb);
            tcg_ctx->gen_tb = NULL;
            goto buffer_overflow;

        case -2:
            /*
             * The code generated for the TranslationBlock is too large.
             * The maximum size allowed by the unwind info is 64k.
             * There may be stricter constraints from relocations
             * in the tcg backend.
             *
             * Try again with half as many insns as we attempted this time.
             * If a single insn overflows, there's a bug somewhere...
             */
            assert(max_insns > 1);
            max_insns /= 2;
            qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT,
                          "Restarting code generation with "
                          "smaller translation block (max %d insns)\n",
                          max_insns);

            /*
             * The half-sized TB may not cross pages.
             * TODO: Fix all targets that cross pages except with
             * the first insn, at which point this can't be reached.
             */
            phys_p2 = tb_page_addr1(tb);
            if (unlikely(phys_p2 != -1)) {
                tb_unlock_page1(phys_pc, phys_p2);
                tb_set_page_addr1(tb, -1);
            }
            goto restart_translate;

        case -3:
            /*
             * We had a page lock ordering problem.  In order to avoid
             * deadlock we had to drop the lock on page0, which means
             * that everything we translated so far is compromised.
             * Restart with locks held on both pages.
             */
            qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT,
                          "Restarting code generation with re-locked pages\n");
            goto restart_translate;

        default:
            g_assert_not_reached();
        }
    }
    tcg_ctx->gen_tb = NULL;

    search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
    if (unlikely(search_size < 0)) {
        tb_unlock_pages(tb);
        goto buffer_overflow;
    }
    tb->tc.size = gen_code_size;

    /*
     * For CF_PCREL, attribute all executions of the generated code
     * to its first mapping.
     */
    perf_report_code(pc, tb, tcg_splitwx_to_rx(gen_code_buf));

    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
        qemu_log_in_addr_range(pc)) {
        FILE *logfile = qemu_log_trylock();
        if (logfile) {
            int code_size, data_size;
            const tcg_target_ulong *rx_data_gen_ptr;
            size_t chunk_start;
            int insn = 0;

            if (tcg_ctx->data_gen_ptr) {
                rx_data_gen_ptr = tcg_splitwx_to_rx(tcg_ctx->data_gen_ptr);
                code_size = (const void *)rx_data_gen_ptr - tb->tc.ptr;
                data_size = gen_code_size - code_size;
            } else {
                rx_data_gen_ptr = 0;
                code_size = gen_code_size;
                data_size = 0;
            }

            /* Dump header and the first instruction */
            fprintf(logfile, "OUT: [size=%d]\n", gen_code_size);
            fprintf(logfile,
                    "  -- guest addr 0x%016" PRIx64 " + tb prologue\n",
                    tcg_ctx->gen_insn_data[insn * TARGET_INSN_START_WORDS]);
            chunk_start = tcg_ctx->gen_insn_end_off[insn];
            disas(logfile, tb->tc.ptr, chunk_start);

            /*
             * Dump each instruction chunk, wrapping up empty chunks into
             * the next instruction.  The whole array is offset so the
             * first entry is the beginning of the 2nd instruction.
             */
            while (insn < tb->icount) {
                size_t chunk_end = tcg_ctx->gen_insn_end_off[insn];
                if (chunk_end > chunk_start) {
                    fprintf(logfile, "  -- guest addr 0x%016" PRIx64 "\n",
                            tcg_ctx->gen_insn_data[insn * TARGET_INSN_START_WORDS]);
                    disas(logfile, tb->tc.ptr + chunk_start,
                          chunk_end - chunk_start);
                    chunk_start = chunk_end;
                }
                insn++;
            }

            if (chunk_start < code_size) {
                fprintf(logfile, "  -- tb slow paths + alignment\n");
                disas(logfile, tb->tc.ptr + chunk_start,
                      code_size - chunk_start);
            }

            /* Finally dump any data we may have after the block */
            if (data_size) {
                int i;
                fprintf(logfile, "  data: [size=%d]\n", data_size);
                for (i = 0; i < data_size / sizeof(tcg_target_ulong); i++) {
                    if (sizeof(tcg_target_ulong) == 8) {
                        fprintf(logfile,
                                "0x%08" PRIxPTR ":  .quad  0x%016" TCG_PRIlx "\n",
                                (uintptr_t)&rx_data_gen_ptr[i], rx_data_gen_ptr[i]);
                    } else if (sizeof(tcg_target_ulong) == 4) {
                        fprintf(logfile,
                                "0x%08" PRIxPTR ":  .long  0x%08" TCG_PRIlx "\n",
                                (uintptr_t)&rx_data_gen_ptr[i], rx_data_gen_ptr[i]);
                    } else {
                        qemu_build_not_reached();
                    }
                }
            }
            fprintf(logfile, "\n");
            qemu_log_unlock(logfile);
        }
    }

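    /*
     * Advance the bump-allocation pointer past both the generated code
     * and the search data, keeping CODE_GEN_ALIGN alignment for the
     * next TB.
     */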
    qatomic_set(&tcg_ctx->code_gen_ptr, (void *)
        ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
                 CODE_GEN_ALIGN));

    /* init jump list */
    qemu_spin_init(&tb->jmp_lock);
    tb->jmp_list_head = (uintptr_t)NULL;
    tb->jmp_list_next[0] = (uintptr_t)NULL;
    tb->jmp_list_next[1] = (uintptr_t)NULL;
    tb->jmp_dest[0] = (uintptr_t)NULL;
    tb->jmp_dest[1] = (uintptr_t)NULL;

    /* init original jump addresses which have been set during tcg_gen_code() */
    if (tb->jmp_reset_offset[0] != TB_JMP_OFFSET_INVALID) {
        tb_reset_jump(tb, 0);
    }
    if (tb->jmp_reset_offset[1] != TB_JMP_OFFSET_INVALID) {
        tb_reset_jump(tb, 1);
    }

    /*
     * If the TB is not associated with a physical RAM page then it must be
     * a temporary one-insn TB, and we have nothing left to do.  Return early
     * before attempting to link to other TBs or add to the lookup table.
     */
    if (tb_page_addr0(tb) == -1) {
        assert_no_pages_locked();
        return tb;
    }

    /*
     * Insert TB into the corresponding region tree before publishing it
     * through QHT.  Otherwise a rewind happening within this TB might
     * fail to look the TB up via its host PC.
     */
    tcg_tb_insert(tb);

    /*
     * No explicit memory barrier is required -- tb_link_page() makes the
     * TB visible in a consistent state.
     */
    existing_tb = tb_link_page(tb);
    assert_no_pages_locked();

    /* if the TB already exists, discard what we just translated */
    if (unlikely(existing_tb != tb)) {
        uintptr_t orig_aligned = (uintptr_t)gen_code_buf;

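        /*
         * Rewind the bump pointer over the code and over the
         * TranslationBlock itself, which tcg_tb_alloc placed
         * immediately before the code, rounded up to the icache
         * line size.
         */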
        orig_aligned -= ROUND_UP(sizeof(*tb), qemu_icache_linesize);
        qatomic_set(&tcg_ctx->code_gen_ptr, (void *)orig_aligned);
        tcg_tb_remove(tb);
        return existing_tb;
    }
    return tb;
}

/* user-mode: call with mmap_lock held */
void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr)
{
    TranslationBlock *tb;

    assert_memory_lock();

    tb = tcg_tb_lookup(retaddr);
    if (tb) {
        /* We can use retranslation to find the PC.  */
        cpu_restore_state_from_tb(cpu, tb, retaddr);
        tb_phys_invalidate(tb, -1);
    } else {
        /* The exception probably happened in a helper.  The CPU state should
           have been saved before calling it.  Fetch the PC from there.  */
        CPUArchState *env = cpu_env(cpu);
        vaddr pc;
        uint64_t cs_base;
        tb_page_addr_t addr;
        uint32_t flags;

        cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
        addr = get_page_addr_code(env, pc);
        if (addr != -1) {
            tb_invalidate_phys_range(addr, addr);
        }
    }
}

#ifndef CONFIG_USER_ONLY
/*
 * In deterministic execution mode, instructions doing device I/Os
 * must be at the end of the TB.
 *
 * Called by softmmu_template.h, with iothread mutex not held.
 */
void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
{
    TranslationBlock *tb;
    CPUClass *cc;
    uint32_t n;

    tb = tcg_tb_lookup(retaddr);
    if (!tb) {
        cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    cpu_restore_state_from_tb(cpu, tb, retaddr);

    /*
     * Some guests must re-execute the branch when re-executing a delay
     * slot instruction.  When this is the case, adjust icount and N
     * to account for the re-execution of the branch.
     */
    n = 1;
    cc = CPU_GET_CLASS(cpu);
    if (cc->tcg_ops->io_recompile_replay_branch &&
        cc->tcg_ops->io_recompile_replay_branch(cpu, tb)) {
        cpu->neg.icount_decr.u16.low++;
        n = 2;
    }

    /*
     * Exit the loop and potentially generate a new TB executing just
     * the I/O insns.  We also limit instrumentation to memory
     * operations only (which execute after completion) so we don't
     * double instrument the instruction.
     */
    cpu->cflags_next_tb = curr_cflags(cpu) | CF_MEMI_ONLY | n;

    if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
        vaddr pc = log_pc(cpu, tb);
        if (qemu_log_in_addr_range(pc)) {
            qemu_log("cpu_io_recompile: rewound execution of TB to %016"
                     VADDR_PRIx "\n", pc);
        }
    }

    cpu_loop_exit_noexc(cpu);
}

#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *cpu, int mask)
{
    g_assert(qemu_mutex_iothread_locked());
    cpu->interrupt_request |= mask;
    qatomic_set(&cpu->neg.icount_decr.u16.high, -1);
}

#endif /* CONFIG_USER_ONLY */

/*
 * Called by generic code at e.g. cpu reset after cpu creation,
 * therefore we must be prepared to allocate the jump cache.
 */
void tcg_flush_jmp_cache(CPUState *cpu)
{
    CPUJumpCache *jc = cpu->tb_jmp_cache;

    /* During early initialization, the cache may not yet be allocated. */
    if (unlikely(jc == NULL)) {
        return;
    }

    for (int i = 0; i < TB_JMP_CACHE_SIZE; i++) {
        qatomic_set(&jc->array[i].tb, NULL);
    }
}