1 /* SPDX-License-Identifier: LGPL-2.1-or-later */
2 /*
3  * Definition of TranslationBlock.
4  *  Copyright (c) 2003 Fabrice Bellard
5  */
6 
7 #ifndef EXEC_TRANSLATION_BLOCK_H
8 #define EXEC_TRANSLATION_BLOCK_H
9 
10 #include "qemu/thread.h"
11 #include "exec/cpu-common.h"
12 #ifdef CONFIG_USER_ONLY
13 #include "qemu/interval-tree.h"
14 #endif
15 
16 /*
17  * Page tracking code uses ram addresses in system mode, and virtual
18  * addresses in userspace mode.  Define tb_page_addr_t to be an
19  * appropriate type.
20  */
21 #if defined(CONFIG_USER_ONLY)
22 typedef vaddr tb_page_addr_t;
23 #define TB_PAGE_ADDR_FMT "%" VADDR_PRIx
24 #else
25 typedef ram_addr_t tb_page_addr_t;
26 #define TB_PAGE_ADDR_FMT RAM_ADDR_FMT
27 #endif
28 
29 /*
30  * Translation Cache-related fields of a TB.
31  * This struct exists just for convenience; we keep track of TB's in a binary
32  * search tree, and the only fields needed to compare TB's in the tree are
33  * @ptr and @size.
34  * Note: the address of search data can be obtained by adding @size to @ptr.
35  */
struct tb_tc {
    const void *ptr;    /* pointer to the translated code */
    size_t size;        /* size in bytes of the translated code at @ptr */
};
40 
struct TranslationBlock {
    /*
     * Guest PC corresponding to this block.  This must be the true
     * virtual address.  Therefore e.g. x86 stores EIP + CS_BASE, and
     * targets like Arm, MIPS, HP-PA, which reuse low bits for ISA or
     * privilege, must store those bits elsewhere.
     *
     * If CF_PCREL, the opcodes for the TranslationBlock are written
     * such that the TB is associated only with the physical page and
     * may be run in any virtual address context.  In this case, PC
     * must always be taken from ENV in a target-specific manner.
     * Unwind information is taken as offsets from the page, to be
     * deposited into the "current" PC.
     */
    vaddr pc;

    /*
     * Target-specific data associated with the TranslationBlock, e.g.:
     * x86: the original user, the Code Segment virtual base,
     * arm: an extension of tb->flags,
     * s390x: instruction data for EXECUTE,
     * sparc: the next pc of the instruction queue (for delay slots).
     */
    uint64_t cs_base;

    uint32_t flags; /* flags defining in which context the code was generated */
    uint32_t cflags;    /* compile flags */

/* Note that TCG_MAX_INSNS is 512; we validate this match elsewhere. */
#define CF_COUNT_MASK    0x000001ff
#define CF_NO_GOTO_TB    0x00000200 /* Do not chain with goto_tb */
#define CF_NO_GOTO_PTR   0x00000400 /* Do not chain with goto_ptr */
#define CF_SINGLE_STEP   0x00000800 /* gdbstub single-step in effect */
#define CF_MEMI_ONLY     0x00001000 /* Only instrument memory ops */
#define CF_USE_ICOUNT    0x00002000 /* TB was generated with icount enabled */
#define CF_INVALID       0x00004000 /* TB is stale. Set with @jmp_lock held */
#define CF_PARALLEL      0x00008000 /* Generate code for a parallel context */
#define CF_NOIRQ         0x00010000 /* Generate an uninterruptible TB */
#define CF_PCREL         0x00020000 /* Opcodes in TB are PC-relative */
#define CF_BP_PAGE       0x00040000 /* Breakpoint present in code page */
#define CF_CLUSTER_MASK  0xff000000 /* Top 8 bits are cluster ID */
#define CF_CLUSTER_SHIFT 24

    /*
     * Above fields used for comparing
     */

    /* size of target code for this block (1 <= size <= TARGET_PAGE_SIZE) */
    uint16_t size;
    /* number of guest instructions translated into this block */
    uint16_t icount;

    /* location and size of the generated host code (see struct tb_tc) */
    struct tb_tc tc;

    /*
     * Track tb_page_addr_t intervals that intersect this TB.
     * For user-only, the virtual addresses are always contiguous,
     * and we use a unified interval tree.  For system, we use a
     * linked list headed in each PageDesc.  Within the list, the lsb
     * of the previous pointer tells the index of page_next[], and the
     * list is protected by the PageDesc lock(s).
     */
#ifdef CONFIG_USER_ONLY
    IntervalTreeNode itree;
#else
    uintptr_t page_next[2];
    tb_page_addr_t page_addr[2];
#endif

    /* jmp_lock placed here to fill a 4-byte hole. Its documentation is below */
    QemuSpin jmp_lock;

    /* The following data are used to directly call another TB from
     * the code of this one. This can be done either by emitting direct or
     * indirect native jump instructions. These jumps are reset so that the TB
     * just continues its execution. The TB can be linked to another one by
     * setting one of the jump targets (or patching the jump instruction). Only
     * two of such jumps are supported.
     */
#define TB_JMP_OFFSET_INVALID 0xffff /* indicates no jump generated */
    uint16_t jmp_reset_offset[2]; /* offset of original jump target */
    uint16_t jmp_insn_offset[2];  /* offset of direct jump insn */
    uintptr_t jmp_target_addr[2]; /* target address */

    /*
     * Each TB has a NULL-terminated list (jmp_list_head) of incoming jumps.
     * Each TB can have two outgoing jumps, and therefore can participate
     * in two lists. The list entries are kept in jmp_list_next[2]. The least
     * significant bit (LSB) of the pointers in these lists is used to encode
     * which of the two list entries is to be used in the pointed TB.
     *
     * List traversals are protected by jmp_lock. The destination TB of each
     * outgoing jump is kept in jmp_dest[] so that the appropriate jmp_lock
     * can be acquired from any origin TB.
     *
     * jmp_dest[] are tagged pointers as well. The LSB is set when the TB is
     * being invalidated, so that no further outgoing jumps from it can be set.
     *
     * jmp_lock also protects the CF_INVALID cflag; a jump must not be chained
     * to a destination TB that has CF_INVALID set.
     */
    uintptr_t jmp_list_head;
    uintptr_t jmp_list_next[2];
    uintptr_t jmp_dest[2];
};
145 
146 /* The alignment given to TranslationBlock during allocation. */
147 #define CODE_GEN_ALIGN  16
148 
149 /* Hide the qatomic_read to make code a little easier on the eyes */
tb_cflags(const TranslationBlock * tb)150 static inline uint32_t tb_cflags(const TranslationBlock *tb)
151 {
152     return qatomic_read(&tb->cflags);
153 }
154 
155 #endif /* EXEC_TRANSLATION_BLOCK_H */
156