1244f1441SYang Zhong /*
2244f1441SYang Zhong * Host code generation
3244f1441SYang Zhong *
4244f1441SYang Zhong * Copyright (c) 2003 Fabrice Bellard
5244f1441SYang Zhong *
6244f1441SYang Zhong * This library is free software; you can redistribute it and/or
7244f1441SYang Zhong * modify it under the terms of the GNU Lesser General Public
8244f1441SYang Zhong * License as published by the Free Software Foundation; either
9fb0343d5SThomas Huth * version 2.1 of the License, or (at your option) any later version.
10244f1441SYang Zhong *
11244f1441SYang Zhong * This library is distributed in the hope that it will be useful,
12244f1441SYang Zhong * but WITHOUT ANY WARRANTY; without even the implied warranty of
13244f1441SYang Zhong * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14244f1441SYang Zhong * Lesser General Public License for more details.
15244f1441SYang Zhong *
16244f1441SYang Zhong * You should have received a copy of the GNU Lesser General Public
17244f1441SYang Zhong * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18244f1441SYang Zhong */
1914a48c1dSMarkus Armbruster
20244f1441SYang Zhong #include "qemu/osdep.h"
21244f1441SYang Zhong
22244f1441SYang Zhong #include "trace.h"
23244f1441SYang Zhong #include "disas/disas.h"
24244f1441SYang Zhong #include "exec/exec-all.h"
25dcb32f1dSPhilippe Mathieu-Daudé #include "tcg/tcg.h"
26244f1441SYang Zhong #if defined(CONFIG_USER_ONLY)
27244f1441SYang Zhong #include "qemu.h"
28244f1441SYang Zhong #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
29244f1441SYang Zhong #include <sys/param.h>
30244f1441SYang Zhong #if __FreeBSD_version >= 700104
31244f1441SYang Zhong #define HAVE_KINFO_GETVMMAP
32244f1441SYang Zhong #define sigqueue sigqueue_freebsd /* avoid redefinition */
33244f1441SYang Zhong #include <sys/proc.h>
34244f1441SYang Zhong #include <machine/profile.h>
35244f1441SYang Zhong #define _KERNEL
36244f1441SYang Zhong #include <sys/user.h>
37244f1441SYang Zhong #undef _KERNEL
38244f1441SYang Zhong #undef sigqueue
39244f1441SYang Zhong #include <libutil.h>
40244f1441SYang Zhong #endif
41244f1441SYang Zhong #endif
42244f1441SYang Zhong #else
438bca9a03SPaolo Bonzini #include "exec/ram_addr.h"
44244f1441SYang Zhong #endif
45244f1441SYang Zhong
46244f1441SYang Zhong #include "exec/cputlb.h"
473b9bd3f4SPaolo Bonzini #include "exec/translate-all.h"
48306c8721SRichard Henderson #include "exec/translator.h"
49548c9609SAlex Bennée #include "exec/tb-flush.h"
50244f1441SYang Zhong #include "qemu/bitmap.h"
513de2faa9SMarkus Armbruster #include "qemu/qemu-print.h"
52244f1441SYang Zhong #include "qemu/main-loop.h"
53ad768e6fSPeter Maydell #include "qemu/cacheinfo.h"
54533206f0SRichard W.M. Jones #include "qemu/timer.h"
55244f1441SYang Zhong #include "exec/log.h"
56244f1441SYang Zhong #include "sysemu/cpus.h"
57740b1759SClaudio Fontana #include "sysemu/cpu-timers.h"
5814a48c1dSMarkus Armbruster #include "sysemu/tcg.h"
596bc14423SRichard Henderson #include "qapi/error.h"
60d9bcb58aSRichard Henderson #include "hw/core/tcg-cpu-ops.h"
61a976a99aSRichard Henderson #include "tb-jmp-cache.h"
62e5ceadffSPhilippe Mathieu-Daudé #include "tb-hash.h"
63e5ceadffSPhilippe Mathieu-Daudé #include "tb-context.h"
645934660fSPhilippe Mathieu-Daudé #include "internal-common.h"
654c268d6dSPhilippe Mathieu-Daudé #include "internal-target.h"
66327b75a4SIlya Leoshkevich #include "tcg/perf.h"
67747bd69dSRichard Henderson #include "tcg/insn-start-words.h"
68244f1441SYang Zhong
6944ded3d0SEmilio G. Cota TBContext tb_ctx;
70244f1441SYang Zhong
/*
 * Encode VAL as a signed leb128 sequence at P.
 * Return P incremented past the encoded value.
 */
static uint8_t *encode_sleb128(uint8_t *p, int64_t val)
{
    int more, byte;

    do {
        byte = val & 0x7f;
        val >>= 7;
        /*
         * We are done when the remaining value is pure sign extension
         * of bit 6 of the emitted byte: all-zero with bit 6 clear, or
         * all-one with bit 6 set.
         */
        more = !((val == 0 && (byte & 0x40) == 0)
                 || (val == -1 && (byte & 0x40) != 0));
        if (more) {
            byte |= 0x80;   /* continuation bit */
        }
        *p++ = byte;
    } while (more);

    return p;
}
92244f1441SYang Zhong
93c9ad8d27SRichard Henderson /*
94c9ad8d27SRichard Henderson * Decode a signed leb128 sequence at *PP; increment *PP past the
95c9ad8d27SRichard Henderson * decoded value. Return the decoded value.
96c9ad8d27SRichard Henderson */
decode_sleb128(const uint8_t ** pp)97c9ad8d27SRichard Henderson static int64_t decode_sleb128(const uint8_t **pp)
98244f1441SYang Zhong {
99db0c51a3SRichard Henderson const uint8_t *p = *pp;
100c9ad8d27SRichard Henderson int64_t val = 0;
101244f1441SYang Zhong int byte, shift = 0;
102244f1441SYang Zhong
103244f1441SYang Zhong do {
104244f1441SYang Zhong byte = *p++;
105c9ad8d27SRichard Henderson val |= (int64_t)(byte & 0x7f) << shift;
106244f1441SYang Zhong shift += 7;
107244f1441SYang Zhong } while (byte & 0x80);
108244f1441SYang Zhong if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
109c9ad8d27SRichard Henderson val |= -(int64_t)1 << shift;
110244f1441SYang Zhong }
111244f1441SYang Zhong
112244f1441SYang Zhong *pp = p;
113244f1441SYang Zhong return val;
114244f1441SYang Zhong }
115244f1441SYang Zhong
116244f1441SYang Zhong /* Encode the data collected about the instructions while compiling TB.
117244f1441SYang Zhong Place the data at BLOCK, and return the number of bytes consumed.
118244f1441SYang Zhong
11955bbc861SEmilio G. Cota The logical table consists of TARGET_INSN_START_WORDS target_ulong's,
120244f1441SYang Zhong which come from the target's insn_start data, followed by a uintptr_t
121244f1441SYang Zhong which comes from the host pc of the end of the code implementing the insn.
122244f1441SYang Zhong
123244f1441SYang Zhong Each line of the table is encoded as sleb128 deltas from the previous
124e7e168f4SEmilio G. Cota line. The seed for the first line is { tb->pc, 0..., tb->tc.ptr }.
125244f1441SYang Zhong That is, the first column is seeded with the guest pc, the last column
126244f1441SYang Zhong with the host pc, and the middle columns with zeros. */
127244f1441SYang Zhong
encode_search(TranslationBlock * tb,uint8_t * block)128244f1441SYang Zhong static int encode_search(TranslationBlock *tb, uint8_t *block)
129244f1441SYang Zhong {
130b1311c4aSEmilio G. Cota uint8_t *highwater = tcg_ctx->code_gen_highwater;
131747bd69dSRichard Henderson uint64_t *insn_data = tcg_ctx->gen_insn_data;
132747bd69dSRichard Henderson uint16_t *insn_end_off = tcg_ctx->gen_insn_end_off;
133244f1441SYang Zhong uint8_t *p = block;
134244f1441SYang Zhong int i, j, n;
135244f1441SYang Zhong
136244f1441SYang Zhong for (i = 0, n = tb->icount; i < n; ++i) {
137747bd69dSRichard Henderson uint64_t prev, curr;
138244f1441SYang Zhong
139244f1441SYang Zhong for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
140244f1441SYang Zhong if (i == 0) {
141279513c7SAnton Johansson prev = (!(tb_cflags(tb) & CF_PCREL) && j == 0 ? tb->pc : 0);
142244f1441SYang Zhong } else {
143747bd69dSRichard Henderson prev = insn_data[(i - 1) * TARGET_INSN_START_WORDS + j];
144244f1441SYang Zhong }
145747bd69dSRichard Henderson curr = insn_data[i * TARGET_INSN_START_WORDS + j];
146747bd69dSRichard Henderson p = encode_sleb128(p, curr - prev);
147244f1441SYang Zhong }
148747bd69dSRichard Henderson prev = (i == 0 ? 0 : insn_end_off[i - 1]);
149747bd69dSRichard Henderson curr = insn_end_off[i];
150747bd69dSRichard Henderson p = encode_sleb128(p, curr - prev);
151244f1441SYang Zhong
152244f1441SYang Zhong /* Test for (pending) buffer overflow. The assumption is that any
153244f1441SYang Zhong one row beginning below the high water mark cannot overrun
154244f1441SYang Zhong the buffer completely. Thus we can test for overflow after
155244f1441SYang Zhong encoding a row without having to check during encoding. */
156244f1441SYang Zhong if (unlikely(p > highwater)) {
157244f1441SYang Zhong return -1;
158244f1441SYang Zhong }
159244f1441SYang Zhong }
160244f1441SYang Zhong
161244f1441SYang Zhong return p - block;
162244f1441SYang Zhong }
163244f1441SYang Zhong
/*
 * Decode the unwind table stored after the TB's code to recover the
 * insn_start words for the instruction containing HOST_PC.  On success,
 * fill DATA (TARGET_INSN_START_WORDS entries) and return the number of
 * instructions not yet executed in the TB; return -1 if HOST_PC does
 * not fall within the TB's generated code.
 */
static int cpu_unwind_data_from_tb(TranslationBlock *tb, uintptr_t host_pc,
                                   uint64_t *data)
{
    uintptr_t iter_pc = (uintptr_t)tb->tc.ptr;
    const uint8_t *p = tb->tc.ptr + tb->tc.size;
    int i, j, num_insns = tb->icount;

    /* Adjust for the call-return address pointing past the insn. */
    host_pc -= GETPC_ADJ;

    if (host_pc < iter_pc) {
        return -1;
    }

    memset(data, 0, sizeof(uint64_t) * TARGET_INSN_START_WORDS);
    if (!(tb_cflags(tb) & CF_PCREL)) {
        data[0] = tb->pc;
    }

    /*
     * Reconstruct the stored insn data while looking for the point
     * at which the end of the insn exceeds host_pc.
     */
    for (i = 0; i < num_insns; ++i) {
        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            data[j] += decode_sleb128(&p);
        }
        iter_pc += decode_sleb128(&p);
        if (iter_pc > host_pc) {
            return num_insns - i;
        }
    }
    return -1;
}
197244f1441SYang Zhong
1986392bd6bSRichard Henderson /*
199cfa29dd5SRichard Henderson * The cpu state corresponding to 'host_pc' is restored in
200cfa29dd5SRichard Henderson * preparation for exiting the TB.
2016392bd6bSRichard Henderson */
cpu_restore_state_from_tb(CPUState * cpu,TranslationBlock * tb,uintptr_t host_pc)2026392bd6bSRichard Henderson void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
203cfa29dd5SRichard Henderson uintptr_t host_pc)
2046392bd6bSRichard Henderson {
2056392bd6bSRichard Henderson uint64_t data[TARGET_INSN_START_WORDS];
2066392bd6bSRichard Henderson int insns_left = cpu_unwind_data_from_tb(tb, host_pc, data);
2076392bd6bSRichard Henderson
2086392bd6bSRichard Henderson if (insns_left < 0) {
2096392bd6bSRichard Henderson return;
2106392bd6bSRichard Henderson }
2116392bd6bSRichard Henderson
212cfa29dd5SRichard Henderson if (tb_cflags(tb) & CF_USE_ICOUNT) {
213740b1759SClaudio Fontana assert(icount_enabled());
2146392bd6bSRichard Henderson /*
2156392bd6bSRichard Henderson * Reset the cycle counter to the start of the block and
2166392bd6bSRichard Henderson * shift if to the number of actually executed instructions.
2176392bd6bSRichard Henderson */
218a953b5faSRichard Henderson cpu->neg.icount_decr.u16.low += insns_left;
219244f1441SYang Zhong }
220d2925689SRichard Henderson
22104f10575SRichard Henderson cpu->cc->tcg_ops->restore_state_to_opc(cpu, tb, data);
222244f1441SYang Zhong }
223244f1441SYang Zhong
/*
 * Restore CPU state from the TB containing HOST_PC, if any.
 * Return true if state was restored, false otherwise.
 */
bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc)
{
    /*
     * The host_pc has to be in the rx region of the code buffer.
     * If it is not we will not be able to resolve it here.
     * The two cases where host_pc will not be correct are:
     *
     *  - fault during translation (instruction fetch)
     *  - fault from helper (not using GETPC() macro)
     *
     * Either way we need return early as we can't resolve it here.
     */
    if (in_code_gen_buffer((const void *)(host_pc - tcg_splitwx_diff))) {
        TranslationBlock *tb = tcg_tb_lookup(host_pc);
        if (tb) {
            cpu_restore_state_from_tb(cpu, tb, host_pc);
            return true;
        }
    }
    return false;
}
245244f1441SYang Zhong
/*
 * Fill DATA with the unwind data for the insn at HOST_PC without
 * modifying CPU state.  Return true on success, false if HOST_PC
 * cannot be resolved to a TB.
 */
bool cpu_unwind_state_data(CPUState *cpu, uintptr_t host_pc, uint64_t *data)
{
    if (in_code_gen_buffer((const void *)(host_pc - tcg_splitwx_diff))) {
        TranslationBlock *tb = tcg_tb_lookup(host_pc);
        if (tb) {
            return cpu_unwind_data_from_tb(tb, host_pc, data) >= 0;
        }
    }
    return false;
}
2566392bd6bSRichard Henderson
/* One-time initialization of the guest page tracking tables. */
void page_init(void)
{
    page_table_config_init();
}
261244f1441SYang Zhong
262344b63b3SRichard Henderson /*
263344b63b3SRichard Henderson * Isolate the portion of code gen which can setjmp/longjmp.
264344b63b3SRichard Henderson * Return the size of the generated code, or negative on error.
265344b63b3SRichard Henderson */
setjmp_gen_code(CPUArchState * env,TranslationBlock * tb,vaddr pc,void * host_pc,int * max_insns,int64_t * ti)266344b63b3SRichard Henderson static int setjmp_gen_code(CPUArchState *env, TranslationBlock *tb,
267256d11f9SAnton Johansson vaddr pc, void *host_pc,
268344b63b3SRichard Henderson int *max_insns, int64_t *ti)
269344b63b3SRichard Henderson {
270344b63b3SRichard Henderson int ret = sigsetjmp(tcg_ctx->jmp_trans, 0);
271344b63b3SRichard Henderson if (unlikely(ret != 0)) {
272344b63b3SRichard Henderson return ret;
273344b63b3SRichard Henderson }
274344b63b3SRichard Henderson
275344b63b3SRichard Henderson tcg_func_start(tcg_ctx);
276344b63b3SRichard Henderson
277344b63b3SRichard Henderson tcg_ctx->cpu = env_cpu(env);
278597f9b2dSRichard Henderson gen_intermediate_code(env_cpu(env), tb, max_insns, pc, host_pc);
279344b63b3SRichard Henderson assert(tb->size != 0);
280344b63b3SRichard Henderson tcg_ctx->cpu = NULL;
281344b63b3SRichard Henderson *max_insns = tb->icount;
282344b63b3SRichard Henderson
283344b63b3SRichard Henderson return tcg_gen_code(tcg_ctx, tb, pc);
284344b63b3SRichard Henderson }
285344b63b3SRichard Henderson
286244f1441SYang Zhong /* Called with mmap_lock held for user mode emulation. */
tb_gen_code(CPUState * cpu,vaddr pc,uint64_t cs_base,uint32_t flags,int cflags)287244f1441SYang Zhong TranslationBlock *tb_gen_code(CPUState *cpu,
288256d11f9SAnton Johansson vaddr pc, uint64_t cs_base,
289244f1441SYang Zhong uint32_t flags, int cflags)
290244f1441SYang Zhong {
291b77af26eSRichard Henderson CPUArchState *env = cpu_env(cpu);
29295590e24SEmilio G. Cota TranslationBlock *tb, *existing_tb;
293deba7870SRichard Henderson tb_page_addr_t phys_pc, phys_p2;
294244f1441SYang Zhong tcg_insn_unit *gen_code_buf;
2958b86d6d2SRichard Henderson int gen_code_size, search_size, max_insns;
296344b63b3SRichard Henderson int64_t ti;
297306c8721SRichard Henderson void *host_pc;
298fe9b676fSRichard Henderson
299244f1441SYang Zhong assert_memory_lock();
300653b87ebSRoman Bolshakov qemu_thread_jit_write();
301244f1441SYang Zhong
302306c8721SRichard Henderson phys_pc = get_page_addr_code_hostp(env, pc, &host_pc);
303244f1441SYang Zhong
3049739e376SPeter Maydell if (phys_pc == -1) {
305873d64acSAlex Bennée /* Generate a one-shot TB with 1 insn in it */
306cf9b5790SRichard Henderson cflags = (cflags & ~CF_COUNT_MASK) | 1;
3079739e376SPeter Maydell }
3089739e376SPeter Maydell
3098b86d6d2SRichard Henderson max_insns = cflags & CF_COUNT_MASK;
3108b86d6d2SRichard Henderson if (max_insns == 0) {
3118b86d6d2SRichard Henderson max_insns = TCG_MAX_INSNS;
3128b86d6d2SRichard Henderson }
31378ff82bbSRichard Henderson QEMU_BUILD_BUG_ON(CF_COUNT_MASK + 1 != TCG_MAX_INSNS);
31478ff82bbSRichard Henderson
315e8feb96fSEmilio G. Cota buffer_overflow:
316deba7870SRichard Henderson assert_no_pages_locked();
317fe9b676fSRichard Henderson tb = tcg_tb_alloc(tcg_ctx);
318244f1441SYang Zhong if (unlikely(!tb)) {
319244f1441SYang Zhong /* flush must be done */
320244f1441SYang Zhong tb_flush(cpu);
321244f1441SYang Zhong mmap_unlock();
322244f1441SYang Zhong /* Make the execution loop process the flush as soon as possible. */
323244f1441SYang Zhong cpu->exception_index = EXCP_INTERRUPT;
324244f1441SYang Zhong cpu_loop_exit(cpu);
325244f1441SYang Zhong }
326244f1441SYang Zhong
327b1311c4aSEmilio G. Cota gen_code_buf = tcg_ctx->code_gen_ptr;
328db0c51a3SRichard Henderson tb->tc.ptr = tcg_splitwx_to_rx(gen_code_buf);
3294be79026SAnton Johansson if (!(cflags & CF_PCREL)) {
330db7a99cdSPeter Maydell tb->pc = pc;
3314be79026SAnton Johansson }
332244f1441SYang Zhong tb->cs_base = cs_base;
333244f1441SYang Zhong tb->flags = flags;
334244f1441SYang Zhong tb->cflags = cflags;
33528905cfbSRichard Henderson tb_set_page_addr0(tb, phys_pc);
33628905cfbSRichard Henderson tb_set_page_addr1(tb, -1);
337deba7870SRichard Henderson if (phys_pc != -1) {
338deba7870SRichard Henderson tb_lock_page0(phys_pc);
339deba7870SRichard Henderson }
340deba7870SRichard Henderson
341b7e4afbdSRichard Henderson tcg_ctx->gen_tb = tb;
342ff0c61bfSRichard Henderson tcg_ctx->addr_type = TARGET_LONG_BITS == 32 ? TCG_TYPE_I32 : TCG_TYPE_I64;
343aece72b7SRichard Henderson #ifdef CONFIG_SOFTMMU
344aece72b7SRichard Henderson tcg_ctx->page_bits = TARGET_PAGE_BITS;
345aece72b7SRichard Henderson tcg_ctx->page_mask = TARGET_PAGE_MASK;
346a66efde1SRichard Henderson tcg_ctx->tlb_dyn_max_bits = CPU_TLB_DYN_MAX_BITS;
347aece72b7SRichard Henderson #endif
348747bd69dSRichard Henderson tcg_ctx->insn_start_words = TARGET_INSN_START_WORDS;
34928ea568aSRichard Henderson #ifdef TCG_GUEST_DEFAULT_MO
35028ea568aSRichard Henderson tcg_ctx->guest_mo = TCG_GUEST_DEFAULT_MO;
35128ea568aSRichard Henderson #else
35228ea568aSRichard Henderson tcg_ctx->guest_mo = TCG_MO_ALL;
35328ea568aSRichard Henderson #endif
3544baf3978SRichard Henderson
355deba7870SRichard Henderson restart_translate:
356fbf59aadSRichard Henderson trace_translate_block(tb, pc, tb->tc.ptr);
357244f1441SYang Zhong
358344b63b3SRichard Henderson gen_code_size = setjmp_gen_code(env, tb, pc, host_pc, &max_insns, &ti);
359244f1441SYang Zhong if (unlikely(gen_code_size < 0)) {
3606e6c4efeSRichard Henderson switch (gen_code_size) {
3616e6c4efeSRichard Henderson case -1:
3626e6c4efeSRichard Henderson /*
3636e6c4efeSRichard Henderson * Overflow of code_gen_buffer, or the current slice of it.
3646e6c4efeSRichard Henderson *
3656e6c4efeSRichard Henderson * TODO: We don't need to re-do gen_intermediate_code, nor
3666e6c4efeSRichard Henderson * should we re-do the tcg optimization currently hidden
3676e6c4efeSRichard Henderson * inside tcg_gen_code. All that should be required is to
3686e6c4efeSRichard Henderson * flush the TBs, allocate a new TB, re-initialize it per
3696e6c4efeSRichard Henderson * above, and re-do the actual code generation.
3706e6c4efeSRichard Henderson */
371ae30e866SRichard Henderson qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT,
372ae30e866SRichard Henderson "Restarting code generation for "
373ae30e866SRichard Henderson "code_gen_buffer overflow\n");
374deba7870SRichard Henderson tb_unlock_pages(tb);
375ad17868eSRichard Henderson tcg_ctx->gen_tb = NULL;
376244f1441SYang Zhong goto buffer_overflow;
3776e6c4efeSRichard Henderson
3786e6c4efeSRichard Henderson case -2:
3796e6c4efeSRichard Henderson /*
3806e6c4efeSRichard Henderson * The code generated for the TranslationBlock is too large.
3816e6c4efeSRichard Henderson * The maximum size allowed by the unwind info is 64k.
3826e6c4efeSRichard Henderson * There may be stricter constraints from relocations
3836e6c4efeSRichard Henderson * in the tcg backend.
3846e6c4efeSRichard Henderson *
3856e6c4efeSRichard Henderson * Try again with half as many insns as we attempted this time.
3866e6c4efeSRichard Henderson * If a single insn overflows, there's a bug somewhere...
3876e6c4efeSRichard Henderson */
3886e6c4efeSRichard Henderson assert(max_insns > 1);
3896e6c4efeSRichard Henderson max_insns /= 2;
390ae30e866SRichard Henderson qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT,
391ae30e866SRichard Henderson "Restarting code generation with "
392ae30e866SRichard Henderson "smaller translation block (max %d insns)\n",
393ae30e866SRichard Henderson max_insns);
394deba7870SRichard Henderson
395deba7870SRichard Henderson /*
396deba7870SRichard Henderson * The half-sized TB may not cross pages.
397deba7870SRichard Henderson * TODO: Fix all targets that cross pages except with
398deba7870SRichard Henderson * the first insn, at which point this can't be reached.
399deba7870SRichard Henderson */
400deba7870SRichard Henderson phys_p2 = tb_page_addr1(tb);
401deba7870SRichard Henderson if (unlikely(phys_p2 != -1)) {
402deba7870SRichard Henderson tb_unlock_page1(phys_pc, phys_p2);
403deba7870SRichard Henderson tb_set_page_addr1(tb, -1);
404deba7870SRichard Henderson }
405deba7870SRichard Henderson goto restart_translate;
406deba7870SRichard Henderson
407deba7870SRichard Henderson case -3:
408deba7870SRichard Henderson /*
409deba7870SRichard Henderson * We had a page lock ordering problem. In order to avoid
410deba7870SRichard Henderson * deadlock we had to drop the lock on page0, which means
411deba7870SRichard Henderson * that everything we translated so far is compromised.
412deba7870SRichard Henderson * Restart with locks held on both pages.
413deba7870SRichard Henderson */
414deba7870SRichard Henderson qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT,
415deba7870SRichard Henderson "Restarting code generation with re-locked pages");
416deba7870SRichard Henderson goto restart_translate;
4176e6c4efeSRichard Henderson
4186e6c4efeSRichard Henderson default:
4196e6c4efeSRichard Henderson g_assert_not_reached();
4206e6c4efeSRichard Henderson }
421244f1441SYang Zhong }
422deba7870SRichard Henderson tcg_ctx->gen_tb = NULL;
423deba7870SRichard Henderson
424244f1441SYang Zhong search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
425244f1441SYang Zhong if (unlikely(search_size < 0)) {
426deba7870SRichard Henderson tb_unlock_pages(tb);
427244f1441SYang Zhong goto buffer_overflow;
428244f1441SYang Zhong }
4292ac01d6dSEmilio G. Cota tb->tc.size = gen_code_size;
430244f1441SYang Zhong
4315584e2dbSIlya Leoshkevich /*
4324be79026SAnton Johansson * For CF_PCREL, attribute all executions of the generated code
4334be79026SAnton Johansson * to its first mapping.
4345584e2dbSIlya Leoshkevich */
4355584e2dbSIlya Leoshkevich perf_report_code(pc, tb, tcg_splitwx_to_rx(gen_code_buf));
4365584e2dbSIlya Leoshkevich
437244f1441SYang Zhong if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
438fbf59aadSRichard Henderson qemu_log_in_addr_range(pc)) {
439c60f599bSRichard Henderson FILE *logfile = qemu_log_trylock();
44078b54858SRichard Henderson if (logfile) {
441db0c51a3SRichard Henderson int code_size, data_size;
442db0c51a3SRichard Henderson const tcg_target_ulong *rx_data_gen_ptr;
4434c389f6eSRichard Henderson size_t chunk_start;
4445f0df033SAlex Bennée int insn = 0;
4454c389f6eSRichard Henderson
446b1311c4aSEmilio G. Cota if (tcg_ctx->data_gen_ptr) {
447db0c51a3SRichard Henderson rx_data_gen_ptr = tcg_splitwx_to_rx(tcg_ctx->data_gen_ptr);
448db0c51a3SRichard Henderson code_size = (const void *)rx_data_gen_ptr - tb->tc.ptr;
4495f0df033SAlex Bennée data_size = gen_code_size - code_size;
4505f0df033SAlex Bennée } else {
451db0c51a3SRichard Henderson rx_data_gen_ptr = 0;
4525f0df033SAlex Bennée code_size = gen_code_size;
453db0c51a3SRichard Henderson data_size = 0;
4545f0df033SAlex Bennée }
45557a26946SRichard Henderson
4565f0df033SAlex Bennée /* Dump header and the first instruction */
45778b54858SRichard Henderson fprintf(logfile, "OUT: [size=%d]\n", gen_code_size);
45878b54858SRichard Henderson fprintf(logfile,
459c9ad8d27SRichard Henderson " -- guest addr 0x%016" PRIx64 " + tb prologue\n",
460747bd69dSRichard Henderson tcg_ctx->gen_insn_data[insn * TARGET_INSN_START_WORDS]);
4615f0df033SAlex Bennée chunk_start = tcg_ctx->gen_insn_end_off[insn];
46278b54858SRichard Henderson disas(logfile, tb->tc.ptr, chunk_start);
46357a26946SRichard Henderson
4645f0df033SAlex Bennée /*
4655f0df033SAlex Bennée * Dump each instruction chunk, wrapping up empty chunks into
4665f0df033SAlex Bennée * the next instruction. The whole array is offset so the
4675f0df033SAlex Bennée * first entry is the beginning of the 2nd instruction.
4685f0df033SAlex Bennée */
4694c389f6eSRichard Henderson while (insn < tb->icount) {
4705f0df033SAlex Bennée size_t chunk_end = tcg_ctx->gen_insn_end_off[insn];
4715f0df033SAlex Bennée if (chunk_end > chunk_start) {
472c9ad8d27SRichard Henderson fprintf(logfile, " -- guest addr 0x%016" PRIx64 "\n",
473747bd69dSRichard Henderson tcg_ctx->gen_insn_data[insn * TARGET_INSN_START_WORDS]);
47478b54858SRichard Henderson disas(logfile, tb->tc.ptr + chunk_start,
47578b54858SRichard Henderson chunk_end - chunk_start);
4765f0df033SAlex Bennée chunk_start = chunk_end;
4775f0df033SAlex Bennée }
4785f0df033SAlex Bennée insn++;
4795f0df033SAlex Bennée }
4805f0df033SAlex Bennée
4814c389f6eSRichard Henderson if (chunk_start < code_size) {
48278b54858SRichard Henderson fprintf(logfile, " -- tb slow paths + alignment\n");
48378b54858SRichard Henderson disas(logfile, tb->tc.ptr + chunk_start,
48478b54858SRichard Henderson code_size - chunk_start);
4854c389f6eSRichard Henderson }
4864c389f6eSRichard Henderson
4875f0df033SAlex Bennée /* Finally dump any data we may have after the block */
4885f0df033SAlex Bennée if (data_size) {
4895f0df033SAlex Bennée int i;
49078b54858SRichard Henderson fprintf(logfile, " data: [size=%d]\n", data_size);
491db0c51a3SRichard Henderson for (i = 0; i < data_size / sizeof(tcg_target_ulong); i++) {
4926c6a4a76SPhilippe Mathieu-Daudé if (sizeof(tcg_target_ulong) == 8) {
49378b54858SRichard Henderson fprintf(logfile,
49478b54858SRichard Henderson "0x%08" PRIxPTR ": .quad 0x%016" TCG_PRIlx "\n",
495db0c51a3SRichard Henderson (uintptr_t)&rx_data_gen_ptr[i], rx_data_gen_ptr[i]);
4966c6a4a76SPhilippe Mathieu-Daudé } else if (sizeof(tcg_target_ulong) == 4) {
49778b54858SRichard Henderson fprintf(logfile,
49878b54858SRichard Henderson "0x%08" PRIxPTR ": .long 0x%08" TCG_PRIlx "\n",
4996c6a4a76SPhilippe Mathieu-Daudé (uintptr_t)&rx_data_gen_ptr[i], rx_data_gen_ptr[i]);
5006c6a4a76SPhilippe Mathieu-Daudé } else {
5016c6a4a76SPhilippe Mathieu-Daudé qemu_build_not_reached();
5026c6a4a76SPhilippe Mathieu-Daudé }
50357a26946SRichard Henderson }
50457a26946SRichard Henderson }
50578b54858SRichard Henderson fprintf(logfile, "\n");
506fc59d2d8SRobert Foley qemu_log_unlock(logfile);
507244f1441SYang Zhong }
50878b54858SRichard Henderson }
509244f1441SYang Zhong
510d73415a3SStefan Hajnoczi qatomic_set(&tcg_ctx->code_gen_ptr, (void *)
511244f1441SYang Zhong ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
512e8feb96fSEmilio G. Cota CODE_GEN_ALIGN));
513244f1441SYang Zhong
514244f1441SYang Zhong /* init jump list */
515194125e3SEmilio G. Cota qemu_spin_init(&tb->jmp_lock);
516194125e3SEmilio G. Cota tb->jmp_list_head = (uintptr_t)NULL;
517244f1441SYang Zhong tb->jmp_list_next[0] = (uintptr_t)NULL;
518244f1441SYang Zhong tb->jmp_list_next[1] = (uintptr_t)NULL;
519194125e3SEmilio G. Cota tb->jmp_dest[0] = (uintptr_t)NULL;
520194125e3SEmilio G. Cota tb->jmp_dest[1] = (uintptr_t)NULL;
521244f1441SYang Zhong
522696c7066SStefan Weil /* init original jump addresses which have been set during tcg_gen_code() */
5233a50f424SRichard Henderson if (tb->jmp_reset_offset[0] != TB_JMP_OFFSET_INVALID) {
524244f1441SYang Zhong tb_reset_jump(tb, 0);
525244f1441SYang Zhong }
5263a50f424SRichard Henderson if (tb->jmp_reset_offset[1] != TB_JMP_OFFSET_INVALID) {
527244f1441SYang Zhong tb_reset_jump(tb, 1);
528244f1441SYang Zhong }
529244f1441SYang Zhong
530873d64acSAlex Bennée /*
53150627f1bSRichard Henderson * If the TB is not associated with a physical RAM page then it must be
53250627f1bSRichard Henderson * a temporary one-insn TB, and we have nothing left to do. Return early
53350627f1bSRichard Henderson * before attempting to link to other TBs or add to the lookup table.
534873d64acSAlex Bennée */
53528905cfbSRichard Henderson if (tb_page_addr0(tb) == -1) {
536deba7870SRichard Henderson assert_no_pages_locked();
537873d64acSAlex Bennée return tb;
538873d64acSAlex Bennée }
539873d64acSAlex Bennée
540f4cba756SLiren Wei /*
541f4cba756SLiren Wei * Insert TB into the corresponding region tree before publishing it
542f4cba756SLiren Wei * through QHT. Otherwise rewinding happened in the TB might fail to
543f4cba756SLiren Wei * lookup itself using host PC.
544f4cba756SLiren Wei */
545f4cba756SLiren Wei tcg_tb_insert(tb);
546f4cba756SLiren Wei
5470ac20318SEmilio G. Cota /*
5480ac20318SEmilio G. Cota * No explicit memory barrier is required -- tb_link_page() makes the
5490ac20318SEmilio G. Cota * TB visible in a consistent state.
550244f1441SYang Zhong */
551deba7870SRichard Henderson existing_tb = tb_link_page(tb);
552deba7870SRichard Henderson assert_no_pages_locked();
553deba7870SRichard Henderson
55495590e24SEmilio G. Cota /* if the TB already exists, discard what we just translated */
55595590e24SEmilio G. Cota if (unlikely(existing_tb != tb)) {
55695590e24SEmilio G. Cota uintptr_t orig_aligned = (uintptr_t)gen_code_buf;
55795590e24SEmilio G. Cota
55895590e24SEmilio G. Cota orig_aligned -= ROUND_UP(sizeof(*tb), qemu_icache_linesize);
559d73415a3SStefan Hajnoczi qatomic_set(&tcg_ctx->code_gen_ptr, (void *)orig_aligned);
560f4cba756SLiren Wei tcg_tb_remove(tb);
56195590e24SEmilio G. Cota return existing_tb;
56295590e24SEmilio G. Cota }
563244f1441SYang Zhong return tb;
564244f1441SYang Zhong }
565244f1441SYang Zhong
5660ac20318SEmilio G. Cota /* user-mode: call with mmap_lock held */
tb_check_watchpoint(CPUState * cpu,uintptr_t retaddr)567ae57db63SRichard Henderson void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr)
568244f1441SYang Zhong {
569244f1441SYang Zhong TranslationBlock *tb;
570244f1441SYang Zhong
5710ac20318SEmilio G. Cota assert_memory_lock();
5720ac20318SEmilio G. Cota
573ae57db63SRichard Henderson tb = tcg_tb_lookup(retaddr);
574244f1441SYang Zhong if (tb) {
575244f1441SYang Zhong /* We can use retranslation to find the PC. */
576cfa29dd5SRichard Henderson cpu_restore_state_from_tb(cpu, tb, retaddr);
577244f1441SYang Zhong tb_phys_invalidate(tb, -1);
578244f1441SYang Zhong } else {
579244f1441SYang Zhong /* The exception probably happened in a helper. The CPU state should
580244f1441SYang Zhong have been saved before calling it. Fetch the PC from there. */
581b77af26eSRichard Henderson CPUArchState *env = cpu_env(cpu);
582bb5de525SAnton Johansson vaddr pc;
583bb5de525SAnton Johansson uint64_t cs_base;
584244f1441SYang Zhong tb_page_addr_t addr;
585244f1441SYang Zhong uint32_t flags;
586244f1441SYang Zhong
587244f1441SYang Zhong cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
588244f1441SYang Zhong addr = get_page_addr_code(env, pc);
589c360a0fdSPeter Maydell if (addr != -1) {
590e506ad6aSRichard Henderson tb_invalidate_phys_range(addr, addr);
591244f1441SYang Zhong }
592244f1441SYang Zhong }
593c360a0fdSPeter Maydell }
594244f1441SYang Zhong
595244f1441SYang Zhong #ifndef CONFIG_USER_ONLY
596cfd405eaSAlex Bennée /*
597cfd405eaSAlex Bennée * In deterministic execution mode, instructions doing device I/Os
598244f1441SYang Zhong * must be at the end of the TB.
599244f1441SYang Zhong *
600244f1441SYang Zhong * Called by softmmu_template.h, with iothread mutex not held.
601244f1441SYang Zhong */
cpu_io_recompile(CPUState * cpu,uintptr_t retaddr)602244f1441SYang Zhong void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
603244f1441SYang Zhong {
604244f1441SYang Zhong TranslationBlock *tb;
605d9bcb58aSRichard Henderson CPUClass *cc;
60687f963beSRichard Henderson uint32_t n;
607244f1441SYang Zhong
608be2cdc5eSEmilio G. Cota tb = tcg_tb_lookup(retaddr);
609244f1441SYang Zhong if (!tb) {
610244f1441SYang Zhong cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
611244f1441SYang Zhong (void *)retaddr);
612244f1441SYang Zhong }
613cfa29dd5SRichard Henderson cpu_restore_state_from_tb(cpu, tb, retaddr);
61487f963beSRichard Henderson
615d9bcb58aSRichard Henderson /*
616d9bcb58aSRichard Henderson * Some guests must re-execute the branch when re-executing a delay
617d9bcb58aSRichard Henderson * slot instruction. When this is the case, adjust icount and N
618d9bcb58aSRichard Henderson * to account for the re-execution of the branch.
619d9bcb58aSRichard Henderson */
62087f963beSRichard Henderson n = 1;
621d9bcb58aSRichard Henderson cc = CPU_GET_CLASS(cpu);
622d9bcb58aSRichard Henderson if (cc->tcg_ops->io_recompile_replay_branch &&
623d9bcb58aSRichard Henderson cc->tcg_ops->io_recompile_replay_branch(cpu, tb)) {
624a953b5faSRichard Henderson cpu->neg.icount_decr.u16.low++;
625d9bcb58aSRichard Henderson n = 2;
626d9bcb58aSRichard Henderson }
627244f1441SYang Zhong
628cfd405eaSAlex Bennée /*
629cfd405eaSAlex Bennée * Exit the loop and potentially generate a new TB executing the
630cfd405eaSAlex Bennée * just the I/O insns. We also limit instrumentation to memory
631cfd405eaSAlex Bennée * operations only (which execute after completion) so we don't
632cfd405eaSAlex Bennée * double instrument the instruction.
633cfd405eaSAlex Bennée */
634cf9b5790SRichard Henderson cpu->cflags_next_tb = curr_cflags(cpu) | CF_MEMI_ONLY | n;
6359b990ee5SRichard Henderson
636fbf59aadSRichard Henderson if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
637*dafa0eccSRichard Henderson vaddr pc = cpu->cc->get_pc(cpu);
638fbf59aadSRichard Henderson if (qemu_log_in_addr_range(pc)) {
639e60a7d0dSPeter Maydell qemu_log("cpu_io_recompile: rewound execution of TB to %016"
640256d11f9SAnton Johansson VADDR_PRIx "\n", pc);
641fbf59aadSRichard Henderson }
642fbf59aadSRichard Henderson }
6431d705e8aSPeter Maydell
644244f1441SYang Zhong cpu_loop_exit_noexc(cpu);
645244f1441SYang Zhong }
646244f1441SYang Zhong
647244f1441SYang Zhong #endif /* CONFIG_USER_ONLY */
6482cd53943SThomas Huth
649a976a99aSRichard Henderson /*
650a976a99aSRichard Henderson * Called by generic code at e.g. cpu reset after cpu creation,
651a976a99aSRichard Henderson * therefore we must be prepared to allocate the jump cache.
652a976a99aSRichard Henderson */
tcg_flush_jmp_cache(CPUState * cpu)653a976a99aSRichard Henderson void tcg_flush_jmp_cache(CPUState *cpu)
654a976a99aSRichard Henderson {
655a976a99aSRichard Henderson CPUJumpCache *jc = cpu->tb_jmp_cache;
656a976a99aSRichard Henderson
6574e4fa6c1SRichard Henderson /* During early initialization, the cache may not yet be allocated. */
6584e4fa6c1SRichard Henderson if (unlikely(jc == NULL)) {
6594e4fa6c1SRichard Henderson return;
6604e4fa6c1SRichard Henderson }
6614e4fa6c1SRichard Henderson
662a976a99aSRichard Henderson for (int i = 0; i < TB_JMP_CACHE_SIZE; i++) {
663a976a99aSRichard Henderson qatomic_set(&jc->array[i].tb, NULL);
664a976a99aSRichard Henderson }
665a976a99aSRichard Henderson }
666