/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
 * Definition of TranslationBlock.
 * Copyright (c) 2003 Fabrice Bellard
 */

#ifndef EXEC_TRANSLATION_BLOCK_H
#define EXEC_TRANSLATION_BLOCK_H

#include "qemu/atomic.h"
#include "qemu/thread.h"
#include "exec/cpu-common.h"
#include "exec/vaddr.h"
#ifdef CONFIG_USER_ONLY
#include "qemu/interval-tree.h"
#endif

/*
 * Page tracking code uses ram addresses in system mode, and virtual
 * addresses in userspace mode.  Define tb_page_addr_t to be an
 * appropriate type.
 */
#if defined(CONFIG_USER_ONLY)
typedef vaddr tb_page_addr_t;
#define TB_PAGE_ADDR_FMT "%" VADDR_PRIx
#else
typedef ram_addr_t tb_page_addr_t;
#define TB_PAGE_ADDR_FMT RAM_ADDR_FMT
#endif
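
/*
 * Illustrative note (not from the original header): because the underlying
 * type differs between the two build modes, code that formats a
 * tb_page_addr_t is expected to use TB_PAGE_ADDR_FMT rather than a fixed
 * conversion specifier, e.g.
 *
 *     printf("page at " TB_PAGE_ADDR_FMT "\n", addr);
 */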

/*
 * Translation Cache-related fields of a TB.
 * This struct exists just for convenience; we keep track of TBs in a binary
 * search tree, and the only fields needed to compare TBs in the tree are
 * @ptr and @size.
 * Note: the address of search data can be obtained by adding @size to @ptr.
 */
struct tb_tc {
    const void *ptr;    /* pointer to the translated code */
    size_t size;
};
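
/*
 * Illustrative sketch (not part of the original header): per the note above,
 * the search data generated for a block sits immediately after its translated
 * code, so it can be located from a struct tb_tc alone.  The helper name
 * below is hypothetical.
 */
static inline const void *example_tb_tc_search_data(const struct tb_tc *tc)
{
    /* Search data starts right past the last byte of translated code. */
    return (const char *)tc->ptr + tc->size;
}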

struct TranslationBlock {
    /*
     * Guest PC corresponding to this block.  This must be the true
     * virtual address.  Therefore e.g. x86 stores EIP + CS_BASE, and
     * targets like Arm, MIPS, HP-PA, which reuse low bits for ISA or
     * privilege, must store those bits elsewhere.
     *
     * If CF_PCREL, the opcodes for the TranslationBlock are written
     * such that the TB is associated only with the physical page and
     * may be run in any virtual address context.  In this case, PC
     * must always be taken from ENV in a target-specific manner.
     * Unwind information is taken as offsets from the page, to be
     * deposited into the "current" PC.
     */
    vaddr pc;

    /*
     * Target-specific data associated with the TranslationBlock, e.g.:
     * x86: the original user, the Code Segment virtual base,
     * arm: an extension of tb->flags,
     * s390x: instruction data for EXECUTE,
     * sparc: the next pc of the instruction queue (for delay slots).
     */
    uint64_t cs_base;

    uint32_t flags; /* flags defining in which context the code was generated */
    uint32_t cflags;    /* compile flags */

/* Note that TCG_MAX_INSNS is 512; we validate this match elsewhere. */
#define CF_COUNT_MASK    0x000001ff
#define CF_NO_GOTO_TB    0x00000200 /* Do not chain with goto_tb */
#define CF_NO_GOTO_PTR   0x00000400 /* Do not chain with goto_ptr */
#define CF_SINGLE_STEP   0x00000800 /* gdbstub single-step in effect */
#define CF_MEMI_ONLY     0x00001000 /* Only instrument memory ops */
#define CF_USE_ICOUNT    0x00002000
#define CF_INVALID       0x00004000 /* TB is stale.  Set with @jmp_lock held */
#define CF_PARALLEL      0x00008000 /* Generate code for a parallel context */
#define CF_NOIRQ         0x00010000 /* Generate an uninterruptible TB */
#define CF_PCREL         0x00020000 /* Opcodes in TB are PC-relative */
#define CF_BP_PAGE       0x00040000 /* Breakpoint present in code page */
#define CF_CLUSTER_MASK  0xff000000 /* Top 8 bits are cluster ID */
#define CF_CLUSTER_SHIFT 24
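
/*
 * Illustrative sketch (not from the original header): the instruction count
 * and the cluster ID are carried inside @cflags and extracted with the masks
 * above, e.g.
 *
 *     uint32_t insns   = tb->cflags & CF_COUNT_MASK;
 *     uint32_t cluster = (tb->cflags & CF_CLUSTER_MASK) >> CF_CLUSTER_SHIFT;
 */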

    /*
     * Above fields used for comparing
     */

    /* size of target code for this block (1 <= size <= TARGET_PAGE_SIZE) */
    uint16_t size;
    uint16_t icount;

    struct tb_tc tc;

    /*
     * Track tb_page_addr_t intervals that intersect this TB.
     * For user-only, the virtual addresses are always contiguous,
     * and we use a unified interval tree.  For system, we use a
     * linked list headed in each PageDesc.  Within the list, the lsb
     * of the previous pointer tells the index of page_next[], and the
     * list is protected by the PageDesc lock(s).
     */
#ifdef CONFIG_USER_ONLY
    IntervalTreeNode itree;
#else
    uintptr_t page_next[2];
    tb_page_addr_t page_addr[2];
#endif

    /* jmp_lock placed here to fill a 4-byte hole.  Its documentation is below */
    QemuSpin jmp_lock;

    /* The following data are used to directly call another TB from
     * the code of this one.  This can be done either by emitting direct or
     * indirect native jump instructions.  These jumps are reset so that the TB
     * just continues its execution.  The TB can be linked to another one by
     * setting one of the jump targets (or patching the jump instruction).  Only
     * two such jumps are supported.
     */
#define TB_JMP_OFFSET_INVALID 0xffff /* indicates no jump generated */
    uint16_t jmp_reset_offset[2]; /* offset of original jump target */
    uint16_t jmp_insn_offset[2];  /* offset of direct jump insn */
    uintptr_t jmp_target_addr[2]; /* target address */
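
    /*
     * Illustrative sketch (not from the original header): a jump slot that
     * was never emitted is marked with TB_JMP_OFFSET_INVALID, so callers can
     * test the offset before acting on it, e.g.
     *
     *     if (tb->jmp_reset_offset[n] != TB_JMP_OFFSET_INVALID) {
     *         ... a goto_tb jump exists in slot n of this TB ...
     *     }
     */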

    /*
     * Each TB has a NULL-terminated list (jmp_list_head) of incoming jumps.
     * Each TB can have two outgoing jumps, and therefore can participate
     * in two lists.  The list entries are kept in jmp_list_next[2].  The least
     * significant bit (LSB) of the pointers in these lists is used to encode
     * which of the two list entries is to be used in the pointed TB.
     *
     * List traversals are protected by jmp_lock.  The destination TB of each
     * outgoing jump is kept in jmp_dest[] so that the appropriate jmp_lock
     * can be acquired from any origin TB.
     *
     * jmp_dest[] are tagged pointers as well.  The LSB is set when the TB is
     * being invalidated, so that no further outgoing jumps from it can be set.
     *
     * jmp_lock also protects the CF_INVALID cflag; a jump must not be chained
     * to a destination TB that has CF_INVALID set.
     */
    uintptr_t jmp_list_head;
    uintptr_t jmp_list_next[2];
    uintptr_t jmp_dest[2];
};
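
/*
 * Illustrative sketch (not part of the original header): the jump lists above
 * hold tagged pointers, with the TB address in the upper bits and the list
 * index (or, for jmp_dest[], the "being invalidated" mark) in the LSB.  The
 * helpers below are hypothetical and only show how such a value decomposes.
 */
static inline TranslationBlock *example_jmp_untag(uintptr_t tagged)
{
    /* Clear the LSB tag to recover the TranslationBlock pointer. */
    return (TranslationBlock *)(tagged & ~(uintptr_t)1);
}

static inline unsigned example_jmp_tag(uintptr_t tagged)
{
    /* The LSB selects which of the two list entries / jump slots is meant. */
    return tagged & 1;
}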

/* The alignment given to TranslationBlock during allocation. */
#define CODE_GEN_ALIGN 16

/* Hide the qatomic_read to make code a little easier on the eyes */
static inline uint32_t tb_cflags(const TranslationBlock *tb)
{
    return qatomic_read(&tb->cflags);
}
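
/*
 * Illustrative sketch (not from the original header): cflags should be read
 * through tb_cflags() so the atomic access is not forgotten, e.g.
 *
 *     if (tb_cflags(tb) & CF_PARALLEL) {
 *         ... the block was generated for a parallel context ...
 *     }
 */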

bool tcg_cflags_has(CPUState *cpu, uint32_t flags);
void tcg_cflags_set(CPUState *cpu, uint32_t flags);

#endif /* EXEC_TRANSLATION_BLOCK_H */