xref: /openbmc/qemu/include/exec/exec-all.h (revision acb0ef58)
/*
 * internal execution defines for qemu
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef _EXEC_ALL_H_
#define _EXEC_ALL_H_

#include "qemu-common.h"

/* allow inspection of translation results - the slowdown should be
   negligible, so we leave it enabled */
#define DEBUG_DISAS

/* Page tracking code uses ram addresses in system mode, and virtual
   addresses in userspace mode.  Define tb_page_addr_t to be an appropriate
   type.  */
#if defined(CONFIG_USER_ONLY)
typedef abi_ulong tb_page_addr_t;
#else
typedef ram_addr_t tb_page_addr_t;
#endif

/* is_jmp field values */
#define DISAS_NEXT    0 /* next instruction can be analyzed */
#define DISAS_JUMP    1 /* only pc was modified dynamically */
#define DISAS_UPDATE  2 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP 3 /* only pc was modified statically */

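/* Illustrative sketch only (the DisasContext layout and decoder are
 * per-target and not defined in this header): a target front end's
 * translate loop typically keeps decoding while is_jmp stays DISAS_NEXT
 * and closes the TB otherwise, along these lines:
 *
 *     dc->is_jmp = DISAS_NEXT;
 *     while (dc->is_jmp == DISAS_NEXT && num_insns < max_insns) {
 *         disas_insn(env, dc);         // hypothetical per-target decoder
 *         num_insns++;
 *     }
 *     if (dc->is_jmp == DISAS_TB_JUMP) {
 *         gen_goto_tb(dc, 0, dc->pc);  // direct jump: target known statically
 *     }
 */
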
struct TranslationBlock;
typedef struct TranslationBlock TranslationBlock;

/* XXX: make safe guess about sizes */
#define MAX_OP_PER_INSTR 266

#if HOST_LONG_BITS == 32
#define MAX_OPC_PARAM_PER_ARG 2
#else
#define MAX_OPC_PARAM_PER_ARG 1
#endif
#define MAX_OPC_PARAM_IARGS 5
#define MAX_OPC_PARAM_OARGS 1
#define MAX_OPC_PARAM_ARGS (MAX_OPC_PARAM_IARGS + MAX_OPC_PARAM_OARGS)

/* A Call op needs up to 4 + 2N parameters on 32-bit archs,
 * and up to 4 + N parameters on 64-bit archs
 * (N = number of input arguments + output arguments).  */
#define MAX_OPC_PARAM (4 + (MAX_OPC_PARAM_PER_ARG * MAX_OPC_PARAM_ARGS))
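/* Worked example with the values above: N = MAX_OPC_PARAM_ARGS = 5 + 1 = 6,
 * so MAX_OPC_PARAM evaluates to 4 + 2 * 6 = 16 parameters on a 32-bit host
 * (each argument may need two words) and 4 + 1 * 6 = 10 on a 64-bit host.  */
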
#define OPC_BUF_SIZE 640
#define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR)

/* Maximum size a TCG op can expand to.  This is complicated because a
   single op may require several host instructions and register reloads.
   For now take a wild guess at 192 bytes, which should allow at least
   a couple of fixup instructions per argument.  */
#define TCG_MAX_OP_SIZE 192

#define OPPARAM_BUF_SIZE (OPC_BUF_SIZE * MAX_OPC_PARAM)

#include "qemu/log.h"

void gen_intermediate_code(CPUArchState *env, struct TranslationBlock *tb);
void gen_intermediate_code_pc(CPUArchState *env, struct TranslationBlock *tb);
void restore_state_to_opc(CPUArchState *env, struct TranslationBlock *tb,
                          int pc_pos);

void cpu_gen_init(void);
int cpu_gen_code(CPUArchState *env, struct TranslationBlock *tb,
                 int *gen_code_size_ptr);
bool cpu_restore_state(CPUState *cpu, uintptr_t searched_pc);
void page_size_init(void);

void QEMU_NORETURN cpu_resume_from_signal(CPUState *cpu, void *puc);
void QEMU_NORETURN cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base, int flags,
                              int cflags);
void cpu_exec_init(CPUArchState *env);
void QEMU_NORETURN cpu_loop_exit(CPUState *cpu);
int page_unprotect(target_ulong address, uintptr_t pc, void *puc);
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access);
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
                              int is_cpu_write_access);
#if !defined(CONFIG_USER_ONLY)
void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as);
/* cputlb.c */
void tlb_flush_page(CPUState *cpu, target_ulong addr);
void tlb_flush(CPUState *cpu, int flush_global);
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size);
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr);
#else
static inline void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
}

static inline void tlb_flush(CPUState *cpu, int flush_global)
{
}
#endif

#define CODE_GEN_ALIGN           16 /* must be >= the size of an icache line */

#define CODE_GEN_PHYS_HASH_BITS     15
#define CODE_GEN_PHYS_HASH_SIZE     (1 << CODE_GEN_PHYS_HASH_BITS)

/* estimated block size for TB allocation */
/* XXX: use a per code average code fragment size and modulate it
   according to the host CPU */
#if defined(CONFIG_SOFTMMU)
#define CODE_GEN_AVG_BLOCK_SIZE 128
#else
#define CODE_GEN_AVG_BLOCK_SIZE 64
#endif

#if defined(__arm__) || defined(_ARCH_PPC) \
    || defined(__x86_64__) || defined(__i386__) \
    || defined(__sparc__) || defined(__aarch64__) \
    || defined(__s390x__) || defined(__mips__) \
    || defined(CONFIG_TCG_INTERPRETER)
#define USE_DIRECT_JUMP
#endif

struct TranslationBlock {
    target_ulong pc;   /* simulated PC corresponding to this block (EIP + CS base) */
    target_ulong cs_base; /* CS base for this block */
    uint64_t flags; /* flags defining in which context the code was generated */
    uint16_t size;      /* size of target code for this block (1 <=
                           size <= TARGET_PAGE_SIZE) */
    uint16_t cflags;    /* compile flags */
#define CF_COUNT_MASK  0x7fff
#define CF_LAST_IO     0x8000 /* Last insn may be an IO access.  */

    void *tc_ptr;    /* pointer to the translated code */
    /* next matching tb for physical address. */
    struct TranslationBlock *phys_hash_next;
    /* first and second physical page containing code. The lower bit
       of the pointer tells the index in page_next[] */
    struct TranslationBlock *page_next[2];
    tb_page_addr_t page_addr[2];

    /* the following data are used to directly call another TB from
       the code of this one. */
    uint16_t tb_next_offset[2]; /* offset of original jump target */
#ifdef USE_DIRECT_JUMP
    uint16_t tb_jmp_offset[2]; /* offset of jump instruction */
#else
    uintptr_t tb_next[2]; /* address of jump generated code */
#endif
    /* list of TBs jumping to this one. This is a circular list using
       the two least significant bits of the pointers to tell what is
       the next pointer: 0 = jmp_next[0], 1 = jmp_next[1], 2 =
       jmp_first (see the decoding sketch after this struct). */
    struct TranslationBlock *jmp_next[2];
    struct TranslationBlock *jmp_first;
    uint32_t icount;
};
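
/* Illustrative sketch only, in the spirit of the list walk done by
 * translate-all.c when unchaining a TB: the two low bits of each pointer in
 * the jmp_first circular list select which field of the pointed-to TB holds
 * the next link:
 *
 *     TranslationBlock *ptb = dest->jmp_first;
 *     for (;;) {
 *         unsigned n = (uintptr_t)ptb & 3;    // 0/1 = jmp_next[n], 2 = head
 *         TranslationBlock *tb = (TranslationBlock *)((uintptr_t)ptb & ~3);
 *         if (n == 2) {
 *             break;                          // back at the list head
 *         }
 *         ptb = tb->jmp_next[n];
 *     }
 *
 * The icount budget of a TB is likewise packed into cflags and can be read
 * back as (tb->cflags & CF_COUNT_MASK).  */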

#include "exec/spinlock.h"

typedef struct TBContext TBContext;

struct TBContext {

    TranslationBlock *tbs;
    TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
    int nb_tbs;
    /* any access to the tbs or the page table must use this lock */
    spinlock_t tb_lock;

    /* statistics */
    int tb_flush_count;
    int tb_phys_invalidate_count;

    int tb_invalidated_flag;
};

static inline unsigned int tb_jmp_cache_hash_page(target_ulong pc)
{
    target_ulong tmp;
    tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
    return (tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK;
}

static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
{
    target_ulong tmp;
    tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
    return (((tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK)
            | (tmp & TB_JMP_ADDR_MASK));
}
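
/* Worked example, assuming the usual cpu-defs.h values TB_JMP_CACHE_BITS = 12
 * (so TB_JMP_PAGE_BITS = 6, TB_JMP_ADDR_MASK = 0x3f, TB_JMP_PAGE_MASK = 0xfc0)
 * and TARGET_PAGE_BITS = 12: tmp folds page-number bits into the low bits
 * (pc ^ (pc >> 6)), then the 12-bit cache index combines 6 page-derived bits
 * (bits 6..11) with the 6 low address bits (bits 0..5).
 * tb_jmp_cache_hash_page() yields the page part alone, so all slots of one
 * guest page can be cleared when that page is invalidated; a sketch, assuming
 * the per-CPUState tb_jmp_cache array (cf. tb_flush_jmp_cache in cputlb.c):
 *
 *     unsigned int i, h = tb_jmp_cache_hash_page(page_addr);
 *     for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
 *         cpu->tb_jmp_cache[h + i] = NULL;
 *     }
 */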

static inline unsigned int tb_phys_hash_func(tb_page_addr_t pc)
{
    return (pc >> 2) & (CODE_GEN_PHYS_HASH_SIZE - 1);
}
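
/* Illustrative, simplified sketch modelled on tb_find_slow() in cpu-exec.c
 * (the real lookup also compares page_addr): TBs sharing a physical hash
 * bucket are chained through phys_hash_next, so a lookup walks one bucket
 * of tb_phys_hash, here assumed to live in tcg_ctx.tb_ctx:
 *
 *     unsigned h = tb_phys_hash_func(phys_pc);
 *     TranslationBlock *tb;
 *     for (tb = tcg_ctx.tb_ctx.tb_phys_hash[h]; tb; tb = tb->phys_hash_next) {
 *         if (tb->pc == pc && tb->cs_base == cs_base && tb->flags == flags) {
 *             return tb;    // translation already exists
 *         }
 *     }
 *     return tb_gen_code(cpu, pc, cs_base, flags, 0);   // miss: translate
 */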

void tb_free(TranslationBlock *tb);
void tb_flush(CPUArchState *env);
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);

#if defined(USE_DIRECT_JUMP)

#if defined(CONFIG_TCG_INTERPRETER)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    /* patch the branch destination */
    *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
    /* no need to flush icache explicitly */
}
#elif defined(_ARCH_PPC)
void ppc_tb_set_jmp_target(unsigned long jmp_addr, unsigned long addr);
#define tb_set_jmp_target1 ppc_tb_set_jmp_target
#elif defined(__i386__) || defined(__x86_64__)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    /* patch the branch destination */
    stl_le_p((void*)jmp_addr, addr - (jmp_addr + 4));
    /* no need to flush icache explicitly */
}
#elif defined(__s390x__)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    /* patch the branch destination */
    intptr_t disp = addr - (jmp_addr - 2);
    stl_be_p((void*)jmp_addr, disp / 2);
    /* no need to flush icache explicitly */
}
#elif defined(__aarch64__)
void aarch64_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr);
#define tb_set_jmp_target1 aarch64_tb_set_jmp_target
#elif defined(__arm__)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
#if !QEMU_GNUC_PREREQ(4, 1)
    register unsigned long _beg __asm ("a1");
    register unsigned long _end __asm ("a2");
    register unsigned long _flg __asm ("a3");
#endif

    /* we could use a ldr pc, [pc, #-4] kind of branch and avoid the flush */
    *(uint32_t *)jmp_addr =
        (*(uint32_t *)jmp_addr & ~0xffffff)
        | (((addr - (jmp_addr + 8)) >> 2) & 0xffffff);

#if QEMU_GNUC_PREREQ(4, 1)
    __builtin___clear_cache((char *) jmp_addr, (char *) jmp_addr + 4);
#else
    /* flush icache */
    _beg = jmp_addr;
    _end = jmp_addr + 4;
    _flg = 0;
    __asm __volatile__ ("swi 0x9f0002" : : "r" (_beg), "r" (_end), "r" (_flg));
#endif
}
#elif defined(__sparc__) || defined(__mips__)
void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr);
#else
#error tb_set_jmp_target1 is missing
#endif

static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, uintptr_t addr)
{
    uint16_t offset = tb->tb_jmp_offset[n];
    tb_set_jmp_target1((uintptr_t)(tb->tc_ptr + offset), addr);
}

#else

/* set the jump target */
static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, uintptr_t addr)
{
    tb->tb_next[n] = addr;
}

#endif

static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    /* NOTE: this test is only needed for thread safety */
    if (!tb->jmp_next[n]) {
        /* patch the native jump address */
        tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc_ptr);

        /* add in TB jmp circular list */
        tb->jmp_next[n] = tb_next->jmp_first;
        tb_next->jmp_first = (TranslationBlock *)((uintptr_t)(tb) | (n));
    }
}
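
/* Illustrative sketch only, in the spirit of the main loop in cpu_exec()
 * (cpu-exec.c): after a TB returns, the low TB_EXIT_* bits of the return
 * value identify which outgoing jump slot was taken, and the chain is
 * completed so the next execution bypasses the exit to the loop:
 *
 *     next_tb = cpu_tb_exec(cpu, tc_ptr);
 *     ...
 *     if (next_tb != 0 && tb->page_addr[1] == (tb_page_addr_t)-1) {
 *         // chain the returned TB's taken exit (low bits = jump slot)
 *         tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
 *                     next_tb & TB_EXIT_MASK, tb);
 *     }
 */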

/* GETRA is the true target of the return instruction that we'll execute,
   defined here for simplicity of defining the follow-up macros.  */
#if defined(CONFIG_TCG_INTERPRETER)
extern uintptr_t tci_tb_ptr;
# define GETRA() tci_tb_ptr
#else
# define GETRA() \
    ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
#endif

/* The true return address will often point to a host insn that is part of
   the next translated guest insn.  Adjust the address backward to point to
   the middle of the call insn.  Subtracting one would do the job except for
   several compressed mode architectures (arm, mips) which set the low bit
   to indicate the compressed mode; subtracting two works around that.  It
   is also the case that there are no host isas that contain a call insn
   smaller than 4 bytes, so we don't worry about special-casing this.  */
#if defined(CONFIG_TCG_INTERPRETER)
# define GETPC_ADJ   0
#else
# define GETPC_ADJ   2
#endif

#define GETPC()  (GETRA() - GETPC_ADJ)
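
/* Illustrative sketch only (helper and check names are hypothetical): a
 * runtime helper called from generated code passes GETPC() down so that
 * cpu_restore_state() can map the host return address back to a guest PC
 * before raising an exception:
 *
 *     void helper_example_access(CPUArchState *env, target_ulong addr)
 *     {
 *         CPUState *cs = ENV_GET_CPU(env);
 *         if (!access_ok_somehow(env, addr)) {   // hypothetical check
 *             cpu_restore_state(cs, GETPC());
 *             cpu_loop_exit(cs);
 *         }
 *     }
 */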

#if !defined(CONFIG_USER_ONLY)

void phys_mem_set_alloc(void *(*alloc)(size_t));

struct MemoryRegion *iotlb_to_region(AddressSpace *as, hwaddr index);
bool io_mem_read(struct MemoryRegion *mr, hwaddr addr,
                 uint64_t *pvalue, unsigned size);
bool io_mem_write(struct MemoryRegion *mr, hwaddr addr,
                  uint64_t value, unsigned size);

void tlb_fill(CPUState *cpu, target_ulong addr, int is_write, int mmu_idx,
              uintptr_t retaddr);

#endif

#if defined(CONFIG_USER_ONLY)
static inline tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    return addr;
}
#else
/* cputlb.c */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr);
#endif

typedef void (CPUDebugExcpHandler)(CPUArchState *env);

void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler);

/* vl.c */
extern int singlestep;

/* cpu-exec.c */
extern volatile sig_atomic_t exit_request;

/**
 * cpu_can_do_io:
 * @cpu: The CPU for which to check IO.
 *
 * Deterministic execution requires that IO only be performed on the last
 * instruction of a TB so that interrupts take effect immediately.
 *
 * Returns: %true if memory-mapped IO is safe, %false otherwise.
 */
static inline bool cpu_can_do_io(CPUState *cpu)
{
    if (!use_icount) {
        return true;
    }
    /* If not executing code then assume we are ok.  */
    if (cpu->current_tb == NULL) {
        return true;
    }
    return cpu->can_do_io != 0;
}
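
/* Illustrative sketch only, in the spirit of the icount handling in the
 * softmmu IO access paths: when an IO access is attempted mid-TB under
 * icount, the block is retranslated so the access becomes the last insn:
 *
 *     if (!cpu_can_do_io(cpu)) {
 *         cpu_io_recompile(cpu, retaddr);   // does not return
 *     }
 *     // safe to perform the memory-mapped IO access here
 */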

#endif
391