xref: /openbmc/qemu/include/exec/exec-all.h (revision f4427280)
/*
 * internal execution defines for qemu
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef _EXEC_ALL_H_
#define _EXEC_ALL_H_

#include "qemu-common.h"

/* allow seeing translation results - the slowdown should be negligible, so we leave it */
#define DEBUG_DISAS

/* Page tracking code uses ram addresses in system mode, and virtual
   addresses in userspace mode.  Define tb_page_addr_t to be an appropriate
   type.  */
#if defined(CONFIG_USER_ONLY)
typedef abi_ulong tb_page_addr_t;
#else
typedef ram_addr_t tb_page_addr_t;
#endif

/* is_jmp field values */
#define DISAS_NEXT    0 /* next instruction can be analyzed */
#define DISAS_JUMP    1 /* only pc was modified dynamically */
#define DISAS_UPDATE  2 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP 3 /* only pc was modified statically */

struct TranslationBlock;
typedef struct TranslationBlock TranslationBlock;

/* XXX: make a safe guess about sizes */
#define MAX_OP_PER_INSTR 266

#if HOST_LONG_BITS == 32
#define MAX_OPC_PARAM_PER_ARG 2
#else
#define MAX_OPC_PARAM_PER_ARG 1
#endif
#define MAX_OPC_PARAM_IARGS 5
#define MAX_OPC_PARAM_OARGS 1
#define MAX_OPC_PARAM_ARGS (MAX_OPC_PARAM_IARGS + MAX_OPC_PARAM_OARGS)

/* A Call op needs up to 4 + 2N parameters on 32-bit archs,
 * and up to 4 + N parameters on 64-bit archs
 * (N = number of input arguments + output arguments).  */
#define MAX_OPC_PARAM (4 + (MAX_OPC_PARAM_PER_ARG * MAX_OPC_PARAM_ARGS))
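/*
 * Worked example (editorial note, not from the original source): with
 * MAX_OPC_PARAM_IARGS = 5 and MAX_OPC_PARAM_OARGS = 1, MAX_OPC_PARAM_ARGS
 * is 6, so a call op needs at most 4 + 1 * 6 = 10 parameter slots on a
 * 64-bit host and 4 + 2 * 6 = 16 slots on a 32-bit host.
 */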
#define OPC_BUF_SIZE 640
#define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR)

/* Maximum size a TCG op can expand to.  This is complicated because a
   single op may require several host instructions and register reloads.
   For now take a wild guess at 192 bytes, which should allow at least
   a couple of fixup instructions per argument.  */
#define TCG_MAX_OP_SIZE 192

#define OPPARAM_BUF_SIZE (OPC_BUF_SIZE * MAX_OPC_PARAM)

#include "qemu/log.h"

void gen_intermediate_code(CPUArchState *env, struct TranslationBlock *tb);
void gen_intermediate_code_pc(CPUArchState *env, struct TranslationBlock *tb);
void restore_state_to_opc(CPUArchState *env, struct TranslationBlock *tb,
                          int pc_pos);

void cpu_gen_init(void);
int cpu_gen_code(CPUArchState *env, struct TranslationBlock *tb,
                 int *gen_code_size_ptr);
bool cpu_restore_state(CPUState *cpu, uintptr_t searched_pc);
void page_size_init(void);

void QEMU_NORETURN cpu_resume_from_signal(CPUState *cpu, void *puc);
void QEMU_NORETURN cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base, int flags,
                              int cflags);
void cpu_exec_init(CPUState *cpu, Error **errp);
void QEMU_NORETURN cpu_loop_exit(CPUState *cpu);
void QEMU_NORETURN cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc);

#if !defined(CONFIG_USER_ONLY)
bool qemu_in_vcpu_thread(void);
void cpu_reload_memory_map(CPUState *cpu);
void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as);
/* cputlb.c */
/**
 * tlb_flush_page:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of the specified CPU, for all
 * MMU indexes.
 */
void tlb_flush_page(CPUState *cpu, target_ulong addr);
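/*
 * Usage sketch (editorial illustration, not part of the original header):
 * after remapping a guest page, a target could drop the stale entry for
 * every MMU index with a single call, e.g.
 *
 *     tlb_flush_page(cpu, vaddr & TARGET_PAGE_MASK);
 */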
/**
 * tlb_flush:
 * @cpu: CPU whose TLB should be flushed
 * @flush_global: ignored
 *
 * Flush the entire TLB for the specified CPU.
 * The flush_global flag is in theory an indicator of whether the whole
 * TLB should be flushed, or only those entries not marked global.
 * In practice QEMU does not implement any global/not global flag for
 * TLB entries, and the argument is ignored.
 */
void tlb_flush(CPUState *cpu, int flush_global);
/**
 * tlb_flush_page_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 * @...: list of MMU indexes to flush, terminated by a negative value
 *
 * Flush one page from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, ...);
/**
 * tlb_flush_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @...: list of MMU indexes to flush, terminated by a negative value
 *
 * Flush all entries from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx(CPUState *cpu, ...);
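/*
 * Usage sketch (editorial illustration, not part of the original header):
 * the ..._by_mmuidx variants take a variadic list of MMU indexes that
 * must be terminated by a negative sentinel.  Flushing only indexes 0
 * and 1 would look like:
 *
 *     tlb_flush_page_by_mmuidx(cpu, addr, 0, 1, -1);
 *     tlb_flush_by_mmuidx(cpu, 0, 1, -1);
 */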
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size);
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs,
                             int prot, int mmu_idx, target_ulong size);
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr);
void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx,
                 uintptr_t retaddr);
#else
static inline void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
}

static inline void tlb_flush(CPUState *cpu, int flush_global)
{
}

static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
                                            target_ulong addr, ...)
{
}

static inline void tlb_flush_by_mmuidx(CPUState *cpu, ...)
{
}
#endif

#define CODE_GEN_ALIGN           16 /* must be >= the size of an icache line */

#define CODE_GEN_PHYS_HASH_BITS     15
#define CODE_GEN_PHYS_HASH_SIZE     (1 << CODE_GEN_PHYS_HASH_BITS)

/* estimated block size for TB allocation */
/* XXX: use a per-code average code fragment size and modulate it
   according to the host CPU */
#if defined(CONFIG_SOFTMMU)
#define CODE_GEN_AVG_BLOCK_SIZE 128
#else
#define CODE_GEN_AVG_BLOCK_SIZE 64
#endif

#if defined(__arm__) || defined(_ARCH_PPC) \
    || defined(__x86_64__) || defined(__i386__) \
    || defined(__sparc__) || defined(__aarch64__) \
    || defined(__s390x__) || defined(__mips__) \
    || defined(CONFIG_TCG_INTERPRETER)
#define USE_DIRECT_JUMP
#endif

struct TranslationBlock {
    target_ulong pc;   /* simulated PC corresponding to this block (EIP + CS base) */
    target_ulong cs_base; /* CS base for this block */
    uint64_t flags; /* flags defining in which context the code was generated */
    uint16_t size;      /* size of target code for this block (1 <=
                           size <= TARGET_PAGE_SIZE) */
    uint16_t icount;
    uint32_t cflags;    /* compile flags */
#define CF_COUNT_MASK  0x7fff
#define CF_LAST_IO     0x8000 /* Last insn may be an IO access.  */
#define CF_NOCACHE     0x10000 /* To be freed after execution */
#define CF_USE_ICOUNT  0x20000
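/*
 * Illustrative note (editorial addition, not in the original source):
 * the low CF_COUNT_MASK bits of cflags carry the maximum number of guest
 * instructions the block may execute, so a caller requesting an icount
 * block of at most 42 instructions could combine the fields as:
 *
 *     int cflags = 42 | CF_USE_ICOUNT;
 *     assert((cflags & CF_COUNT_MASK) == 42);
 */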

    void *tc_ptr;    /* pointer to the translated code */
    /* next matching tb for physical address. */
    struct TranslationBlock *phys_hash_next;
    /* original tb when cflags has CF_NOCACHE */
    struct TranslationBlock *orig_tb;
    /* first and second physical page containing code. The lower bit
       of the pointer tells the index in page_next[] */
    struct TranslationBlock *page_next[2];
    tb_page_addr_t page_addr[2];

    /* the following data are used to directly call another TB from
       the code of this one. */
    uint16_t tb_next_offset[2]; /* offset of original jump target */
#ifdef USE_DIRECT_JUMP
    uint16_t tb_jmp_offset[2]; /* offset of jump instruction */
#else
    uintptr_t tb_next[2]; /* address of jump generated code */
#endif
    /* list of TBs jumping to this one. This is a circular list using
       the two least significant bits of the pointers to tell what is
       the next pointer: 0 = jmp_next[0], 1 = jmp_next[1], 2 =
       jmp_first */
    struct TranslationBlock *jmp_next[2];
    struct TranslationBlock *jmp_first;
};
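
/*
 * Editorial illustration (not part of the original source): because the
 * jmp_first/jmp_next list stores the slot index in the two low bits of
 * each pointer, code walking the chain of TBs that jump into a given TB
 * would decode an element roughly like this:
 *
 *     uintptr_t e = (uintptr_t)tb->jmp_first;
 *     TranslationBlock *src = (TranslationBlock *)(e & ~3);
 *     int slot = e & 3;   0 or 1: follow src->jmp_next[slot]; 2: end of list
 */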

#include "exec/spinlock.h"

typedef struct TBContext TBContext;

struct TBContext {

    TranslationBlock *tbs;
    TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
    int nb_tbs;
    /* any access to the tbs or the page table must use this lock */
    spinlock_t tb_lock;

    /* statistics */
    int tb_flush_count;
    int tb_phys_invalidate_count;

    int tb_invalidated_flag;
};

void tb_free(TranslationBlock *tb);
void tb_flush(CPUState *cpu);
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);

#if defined(USE_DIRECT_JUMP)

#if defined(CONFIG_TCG_INTERPRETER)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    /* patch the branch destination */
    *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
    /* no need to flush icache explicitly */
}
#elif defined(_ARCH_PPC)
void ppc_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr);
#define tb_set_jmp_target1 ppc_tb_set_jmp_target
#elif defined(__i386__) || defined(__x86_64__)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    /* patch the branch destination */
    stl_le_p((void*)jmp_addr, addr - (jmp_addr + 4));
    /* no need to flush icache explicitly */
}
#elif defined(__s390x__)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    /* patch the branch destination */
    intptr_t disp = addr - (jmp_addr - 2);
    stl_be_p((void*)jmp_addr, disp / 2);
    /* no need to flush icache explicitly */
}
#elif defined(__aarch64__)
void aarch64_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr);
#define tb_set_jmp_target1 aarch64_tb_set_jmp_target
#elif defined(__arm__)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
#if !QEMU_GNUC_PREREQ(4, 1)
    register unsigned long _beg __asm ("a1");
    register unsigned long _end __asm ("a2");
    register unsigned long _flg __asm ("a3");
#endif

    /* we could use a ldr pc, [pc, #-4] kind of branch and avoid the flush */
    *(uint32_t *)jmp_addr =
        (*(uint32_t *)jmp_addr & ~0xffffff)
        | (((addr - (jmp_addr + 8)) >> 2) & 0xffffff);

#if QEMU_GNUC_PREREQ(4, 1)
    __builtin___clear_cache((char *) jmp_addr, (char *) jmp_addr + 4);
#else
    /* flush icache */
    _beg = jmp_addr;
    _end = jmp_addr + 4;
    _flg = 0;
    __asm __volatile__ ("swi 0x9f0002" : : "r" (_beg), "r" (_end), "r" (_flg));
#endif
}
#elif defined(__sparc__) || defined(__mips__)
void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr);
#else
#error tb_set_jmp_target1 is missing
#endif

static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, uintptr_t addr)
{
    uint16_t offset = tb->tb_jmp_offset[n];
    tb_set_jmp_target1((uintptr_t)(tb->tc_ptr + offset), addr);
}

#else

/* set the jump target */
static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, uintptr_t addr)
{
    tb->tb_next[n] = addr;
}

#endif

static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    /* NOTE: this test is only needed for thread safety */
    if (!tb->jmp_next[n]) {
        /* patch the native jump address */
        tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc_ptr);

        /* add in TB jmp circular list */
        tb->jmp_next[n] = tb_next->jmp_first;
        tb_next->jmp_first = (TranslationBlock *)((uintptr_t)(tb) | (n));
    }
}

/* GETRA is the true target of the return instruction that we'll execute,
   defined here for simplicity of defining the follow-up macros.  */
#if defined(CONFIG_TCG_INTERPRETER)
extern uintptr_t tci_tb_ptr;
# define GETRA() tci_tb_ptr
#else
# define GETRA() \
    ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
#endif

/* The true return address will often point to a host insn that is part of
   the next translated guest insn.  Adjust the address backward to point to
   the middle of the call insn.  Subtracting one would do the job except for
   several compressed-mode architectures (arm, mips) which set the low bit
   to indicate the compressed mode; subtracting two works around that.  It
   is also the case that there are no host ISAs that contain a call insn
   smaller than 4 bytes, so we don't worry about special-casing this.  */
#define GETPC_ADJ   2

#define GETPC()  (GETRA() - GETPC_ADJ)
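
/*
 * Usage sketch (editorial addition; the helper name is hypothetical):
 * a slow-path helper called from TCG-generated code captures its return
 * address once, on entry, and hands it to the unwinder so a precise
 * exception can recover the guest state:
 *
 *     void helper_example_fault(CPUArchState *env, target_ulong addr)
 *     {
 *         uintptr_t retaddr = GETPC();
 *         ... detect the fault ...
 *         cpu_loop_exit_restore(ENV_GET_CPU(env), retaddr);
 *     }
 */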

#if !defined(CONFIG_USER_ONLY)

void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align));

struct MemoryRegion *iotlb_to_region(CPUState *cpu,
                                     hwaddr index);

void tlb_fill(CPUState *cpu, target_ulong addr, int is_write, int mmu_idx,
              uintptr_t retaddr);

#endif

#if defined(CONFIG_USER_ONLY)
static inline tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    return addr;
}
#else
/* cputlb.c */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr);
#endif

/* vl.c */
extern int singlestep;

/* cpu-exec.c */
extern volatile sig_atomic_t exit_request;

#if !defined(CONFIG_USER_ONLY)
void migration_bitmap_extend(ram_addr_t old, ram_addr_t new);
#endif
#endif