xref: /openbmc/qemu/include/exec/exec-all.h (revision 77cbb28a)
/*
 * internal execution defines for qemu
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef _EXEC_ALL_H_
#define _EXEC_ALL_H_

#include "qemu-common.h"

/* Allow seeing translation results; the slowdown should be negligible,
   so we leave it enabled. */
#define DEBUG_DISAS

/* Page tracking code uses ram addresses in system mode, and virtual
   addresses in userspace mode.  Define tb_page_addr_t to be an appropriate
   type.  */
#if defined(CONFIG_USER_ONLY)
typedef abi_ulong tb_page_addr_t;
#else
typedef ram_addr_t tb_page_addr_t;
#endif

/* is_jmp field values */
#define DISAS_NEXT    0 /* next instruction can be analyzed */
#define DISAS_JUMP    1 /* only pc was modified dynamically */
#define DISAS_UPDATE  2 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP 3 /* only pc was modified statically */

struct TranslationBlock;
typedef struct TranslationBlock TranslationBlock;

/* XXX: make a safe guess about sizes */
#define MAX_OP_PER_INSTR 208

#if HOST_LONG_BITS == 32
#define MAX_OPC_PARAM_PER_ARG 2
#else
#define MAX_OPC_PARAM_PER_ARG 1
#endif
#define MAX_OPC_PARAM_IARGS 5
#define MAX_OPC_PARAM_OARGS 1
#define MAX_OPC_PARAM_ARGS (MAX_OPC_PARAM_IARGS + MAX_OPC_PARAM_OARGS)

/* A Call op needs up to 4 + 2N parameters on 32-bit archs,
 * and up to 4 + N parameters on 64-bit archs
 * (N = number of input arguments + output arguments).  */
#define MAX_OPC_PARAM (4 + (MAX_OPC_PARAM_PER_ARG * MAX_OPC_PARAM_ARGS))
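
/* Worked out from the definitions above (an added note, not in the original
 * header): N = MAX_OPC_PARAM_IARGS + MAX_OPC_PARAM_OARGS = 6, so
 * MAX_OPC_PARAM is 4 + 2 * 6 = 16 on 32-bit hosts and 4 + 1 * 6 = 10 on
 * 64-bit hosts. */
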
#define OPC_BUF_SIZE 640
#define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR)

/* Maximum size a TCG op can expand to.  This is complicated because a
   single op may require several host instructions and register reloads.
   For now take a wild guess at 192 bytes, which should allow at least
   a couple of fixup instructions per argument.  */
#define TCG_MAX_OP_SIZE 192

#define OPPARAM_BUF_SIZE (OPC_BUF_SIZE * MAX_OPC_PARAM)

#include "qemu/log.h"

void gen_intermediate_code(CPUArchState *env, struct TranslationBlock *tb);
void gen_intermediate_code_pc(CPUArchState *env, struct TranslationBlock *tb);
void restore_state_to_opc(CPUArchState *env, struct TranslationBlock *tb,
                          int pc_pos);

void cpu_gen_init(void);
int cpu_gen_code(CPUArchState *env, struct TranslationBlock *tb,
                 int *gen_code_size_ptr);
bool cpu_restore_state(CPUArchState *env, uintptr_t searched_pc);

void QEMU_NORETURN cpu_resume_from_signal(CPUArchState *env1, void *puc);
void QEMU_NORETURN cpu_io_recompile(CPUArchState *env, uintptr_t retaddr);
TranslationBlock *tb_gen_code(CPUArchState *env,
                              target_ulong pc, target_ulong cs_base, int flags,
                              int cflags);
void cpu_exec_init(CPUArchState *env);
void QEMU_NORETURN cpu_loop_exit(CPUArchState *env1);
int page_unprotect(target_ulong address, uintptr_t pc, void *puc);
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access);
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
                              int is_cpu_write_access);
#if !defined(CONFIG_USER_ONLY)
/* cputlb.c */
void tlb_flush_page(CPUArchState *env, target_ulong addr);
void tlb_flush(CPUArchState *env, int flush_global);
void tlb_set_page(CPUArchState *env, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size);
void tb_invalidate_phys_addr(hwaddr addr);
#else
static inline void tlb_flush_page(CPUArchState *env, target_ulong addr)
{
}

static inline void tlb_flush(CPUArchState *env, int flush_global)
{
}
#endif

#define CODE_GEN_ALIGN           16 /* must be >= the size of an icache line */

#define CODE_GEN_PHYS_HASH_BITS     15
#define CODE_GEN_PHYS_HASH_SIZE     (1 << CODE_GEN_PHYS_HASH_BITS)

/* estimated block size for TB allocation */
/* XXX: use a per-target average code fragment size and modulate it
   according to the host CPU. */
#if defined(CONFIG_SOFTMMU)
#define CODE_GEN_AVG_BLOCK_SIZE 128
#else
#define CODE_GEN_AVG_BLOCK_SIZE 64
#endif

#if defined(__arm__) || defined(_ARCH_PPC) \
    || defined(__x86_64__) || defined(__i386__) \
    || defined(__sparc__) || defined(__aarch64__) \
    || defined(CONFIG_TCG_INTERPRETER)
#define USE_DIRECT_JUMP
#endif

struct TranslationBlock {
    target_ulong pc;   /* simulated PC corresponding to this block (EIP + CS base) */
    target_ulong cs_base; /* CS base for this block */
    uint64_t flags; /* flags defining in which context the code was generated */
    uint16_t size;      /* size of target code for this block (1 <=
                           size <= TARGET_PAGE_SIZE) */
    uint16_t cflags;    /* compile flags */
#define CF_COUNT_MASK  0x7fff
#define CF_LAST_IO     0x8000 /* Last insn may be an IO access.  */

    uint8_t *tc_ptr;    /* pointer to the translated code */
    /* next matching tb for physical address. */
    struct TranslationBlock *phys_hash_next;
    /* first and second physical page containing code. The lower bit
       of the pointer tells the index in page_next[] */
    struct TranslationBlock *page_next[2];
    tb_page_addr_t page_addr[2];

    /* the following data are used to directly call another TB from
       the code of this one. */
    uint16_t tb_next_offset[2]; /* offset of original jump target */
#ifdef USE_DIRECT_JUMP
    uint16_t tb_jmp_offset[2]; /* offset of jump instruction */
#else
    uintptr_t tb_next[2]; /* address of jump generated code */
#endif
    /* list of TBs jumping to this one. This is a circular list using
       the two least significant bits of the pointers to tell what is
       the next pointer: 0 = jmp_next[0], 1 = jmp_next[1], 2 =
       jmp_first */
    struct TranslationBlock *jmp_next[2];
    struct TranslationBlock *jmp_first;
    uint32_t icount;
};
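
/* Minimal sketch (not in the original header) of how the tagged circular
 * list described above is walked: mask off the 2-bit tag to recover the
 * pointer, and stop when the tag is 2, i.e. we are back at jmp_first.
 * Hypothetical helper, for exposition only. */
static inline void tb_walk_incoming_jumps(TranslationBlock *tb)
{
    TranslationBlock *ptb = tb->jmp_first;

    while (((uintptr_t)ptb & 3) != 2) {
        unsigned n = (uintptr_t)ptb & 3;    /* which exit of src jumps here */
        TranslationBlock *src =
            (TranslationBlock *)((uintptr_t)ptb & ~(uintptr_t)3);
        /* ... src reaches tb through its exit n; visit it here ... */
        ptb = src->jmp_next[n];
    }
}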

#include "exec/spinlock.h"

typedef struct TBContext TBContext;

struct TBContext {

    TranslationBlock *tbs;
    TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
    int nb_tbs;
    /* any access to the tbs or the page table must use this lock */
    spinlock_t tb_lock;

    /* statistics */
    int tb_flush_count;
    int tb_phys_invalidate_count;

    int tb_invalidated_flag;
};

static inline unsigned int tb_jmp_cache_hash_page(target_ulong pc)
{
    target_ulong tmp;
    tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
    return (tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK;
}

static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
{
    target_ulong tmp;
    tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
    return (((tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK)
            | (tmp & TB_JMP_ADDR_MASK));
}
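
/* Sketch (not in the original header): the hash above indexes the per-CPU
 * tb_jmp_cache, the first-level cache consulted before the physical hash.
 * Kept under #if 0 because the field's home has moved between QEMU
 * versions; the real lookup in cpu-exec.c also matches cs_base and flags. */
#if 0   /* exposition only, not compiled */
static inline TranslationBlock *tb_jmp_cache_lookup_sketch(CPUArchState *env,
                                                           target_ulong pc)
{
    TranslationBlock *tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    return (tb != NULL && tb->pc == pc) ? tb : NULL;
}
#endif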

static inline unsigned int tb_phys_hash_func(tb_page_addr_t pc)
{
    return (pc >> 2) & (CODE_GEN_PHYS_HASH_SIZE - 1);
}
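
/* Sketch (not in the original header): how tb_phys_hash_func indexes the
 * second-level table in TBContext, chained through phys_hash_next.
 * Simplified from the lookup in cpu-exec.c, which also matches pc,
 * cs_base and flags. */
static inline TranslationBlock *tb_phys_hash_lookup_sketch(TBContext *ctx,
                                                           tb_page_addr_t phys_pc)
{
    TranslationBlock *tb;

    for (tb = ctx->tb_phys_hash[tb_phys_hash_func(phys_pc)];
         tb != NULL; tb = tb->phys_hash_next) {
        if (tb->page_addr[0] == (phys_pc & TARGET_PAGE_MASK)) {
            return tb;   /* candidate on the right physical page */
        }
    }
    return NULL;
}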

void tb_free(TranslationBlock *tb);
void tb_flush(CPUArchState *env);
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);

#if defined(USE_DIRECT_JUMP)

#if defined(CONFIG_TCG_INTERPRETER)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    /* patch the branch destination */
    *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
    /* no need to flush icache explicitly */
}
#elif defined(_ARCH_PPC)
void ppc_tb_set_jmp_target(unsigned long jmp_addr, unsigned long addr);
#define tb_set_jmp_target1 ppc_tb_set_jmp_target
#elif defined(__i386__) || defined(__x86_64__)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    /* patch the branch destination */
    *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
    /* no need to flush icache explicitly */
}
#elif defined(__aarch64__)
void aarch64_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr);
#define tb_set_jmp_target1 aarch64_tb_set_jmp_target
#elif defined(__arm__)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
#if !QEMU_GNUC_PREREQ(4, 1)
    register unsigned long _beg __asm ("a1");
    register unsigned long _end __asm ("a2");
    register unsigned long _flg __asm ("a3");
#endif

    /* we could use a ldr pc, [pc, #-4] kind of branch and avoid the flush */
    *(uint32_t *)jmp_addr =
        (*(uint32_t *)jmp_addr & ~0xffffff)
        | (((addr - (jmp_addr + 8)) >> 2) & 0xffffff);

#if QEMU_GNUC_PREREQ(4, 1)
    __builtin___clear_cache((char *) jmp_addr, (char *) jmp_addr + 4);
#else
    /* flush icache */
    _beg = jmp_addr;
    _end = jmp_addr + 4;
    _flg = 0;
    __asm __volatile__ ("swi 0x9f0002" : : "r" (_beg), "r" (_end), "r" (_flg));
#endif
}
#elif defined(__sparc__)
void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr);
#else
#error tb_set_jmp_target1 is missing
#endif
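
/* Worked example (an added note, not in the original header) for the
 * i386/x86_64 patch above: a direct jump is emitted as jmp rel32, i.e.
 * opcode 0xE9 followed by a 4-byte displacement relative to the end of
 * the instruction.  jmp_addr points at the displacement field, so the
 * instruction ends at jmp_addr + 4 and rel32 = addr - (jmp_addr + 4). */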

static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, uintptr_t addr)
{
    uint16_t offset = tb->tb_jmp_offset[n];
    tb_set_jmp_target1((uintptr_t)(tb->tc_ptr + offset), addr);
}

#else

/* set the jump target */
static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, uintptr_t addr)
{
    tb->tb_next[n] = addr;
}

#endif

static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    /* NOTE: this test is only needed for thread safety */
    if (!tb->jmp_next[n]) {
        /* patch the native jump address */
        tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc_ptr);

        /* add in TB jmp circular list */
        tb->jmp_next[n] = tb_next->jmp_first;
        tb_next->jmp_first = (TranslationBlock *)((uintptr_t)(tb) | (n));
    }
}

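/* Sketch (not in the original header) of how the execution loop in
 * cpu-exec.c of this era uses tb_add_jump: the TB epilogue returns the
 * previous TB's address with the exit slot encoded in the low two bits,
 * and chaining is skipped for TBs that span two guest pages. */
static inline void tb_chain_from_exit_sketch(uintptr_t next_tb,
                                             TranslationBlock *tb)
{
    if (next_tb != 0 && tb->page_addr[1] == (tb_page_addr_t)-1) {
        tb_add_jump((TranslationBlock *)(next_tb & ~(uintptr_t)3),
                    next_tb & 3, tb);
    }
}
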
/* The return address may point to the start of the next instruction.
   Subtracting one gets us the call instruction itself.  */
#if defined(CONFIG_TCG_INTERPRETER)
extern uintptr_t tci_tb_ptr;
# define GETPC() tci_tb_ptr
#elif defined(__s390__) && !defined(__s390x__)
# define GETPC() \
    (((uintptr_t)__builtin_return_address(0) & 0x7fffffffUL) - 1)
#elif defined(__arm__)
/* Thumb return addresses have the low bit set, so we need to subtract two.
   This is still safe in ARM mode because instructions are 4 bytes.  */
# define GETPC() ((uintptr_t)__builtin_return_address(0) - 2)
#else
# define GETPC() ((uintptr_t)__builtin_return_address(0) - 1)
#endif

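/* Sketch (not in the original header): typical use of GETPC() in an MMU
 * helper, capturing the host return address so a fault can resynchronize
 * guest state before raising the exception.  Hypothetical helper, for
 * exposition only. */
static inline void getpc_usage_sketch(CPUArchState *env, bool fault)
{
    uintptr_t retaddr = GETPC();    /* host PC inside the calling TB */

    if (fault) {
        cpu_restore_state(env, retaddr);  /* map host PC back to guest state */
        cpu_loop_exit(env);
    }
}
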
#if defined(CONFIG_QEMU_LDST_OPTIMIZATION) && defined(CONFIG_SOFTMMU)
/* The qemu_ld/st optimization splits code generation into a fast and a slow
   path; an MMU helper called from the slow path therefore needs special
   handling to recover the fast path's pc without any additional argument.
   The solution is a trick that embeds the fast path pc into the slow path.

   Code flow in slow path:
   (1) pre-process
   (2) call MMU helper
   (3) jump to (5)
   (4) fast path information (implementation specific)
   (5) post-process (e.g. stack adjust)
   (6) jump back to the code following the fast path
 */
# if defined(__i386__) || defined(__x86_64__)
#  define GETPC_EXT()  GETPC()
# elif defined(_ARCH_PPC) && !defined(_ARCH_PPC64)
#  define GETRA() ((uintptr_t)__builtin_return_address(0))
#  define GETPC_LDST() ((uintptr_t) ((*(int32_t *)(GETRA() - 4)) - 1))
# elif defined(__arm__)
/* We define two insns between the return address and the branch back to
   straight-line code.  Find and decode that branch insn.  */
#  define GETRA()       ((uintptr_t)__builtin_return_address(0))
#  define GETPC_LDST()  tcg_getpc_ldst(GETRA())
static inline uintptr_t tcg_getpc_ldst(uintptr_t ra)
{
    int32_t b;
    ra += 8;                    /* skip the two insns */
    b = *(int32_t *)ra;         /* load the branch insn */
    b = (b << 8) >> (8 - 2);    /* extract the displacement */
    ra += 8;                    /* branches are relative to pc+8 */
    ra += b;                    /* apply the displacement */
    ra -= 4;                    /* return a pointer into the current opcode,
                                   not the start of the next opcode  */
    return ra;
}
# elif defined(__aarch64__)
#  define GETRA()       ((uintptr_t)__builtin_return_address(0))
#  define GETPC_LDST()  tcg_getpc_ldst(GETRA())
static inline uintptr_t tcg_getpc_ldst(uintptr_t ra)
{
    int32_t b;
    ra += 4;                    /* skip one instruction */
    b = *(int32_t *)ra;         /* load the branch insn */
    b = (b << 6) >> (6 - 2);    /* extract the displacement */
    ra += b;                    /* apply the displacement  */
    ra -= 4;                    /* return a pointer into the current opcode,
                                   not the start of the next opcode  */
    return ra;
}
# else
#  error "CONFIG_QEMU_LDST_OPTIMIZATION needs GETPC_LDST() implementation!"
# endif
bool is_tcg_gen_code(uintptr_t pc_ptr);
# ifndef GETPC_EXT
#  define GETPC_EXT() (is_tcg_gen_code(GETRA()) ? GETPC_LDST() : GETPC())
# endif
#else
# define GETPC_EXT() GETPC()
#endif

#if !defined(CONFIG_USER_ONLY)

struct MemoryRegion *iotlb_to_region(hwaddr index);
bool io_mem_read(struct MemoryRegion *mr, hwaddr addr,
                 uint64_t *pvalue, unsigned size);
bool io_mem_write(struct MemoryRegion *mr, hwaddr addr,
                  uint64_t value, unsigned size);

void tlb_fill(CPUArchState *env1, target_ulong addr, int is_write, int mmu_idx,
              uintptr_t retaddr);
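
/* Sketch (not in the original header) of the usual shape of a target's
 * tlb_fill(): attempt the MMU translation (handle_mmu_fault_sketch is a
 * hypothetical stand-in for the per-target handler, which installs the
 * mapping via tlb_set_page() on success), and on failure resynchronize
 * guest state and raise the exception. */
#if 0   /* exposition only, not compiled */
void tlb_fill(CPUArchState *env, target_ulong addr, int is_write,
              int mmu_idx, uintptr_t retaddr)
{
    int ret = handle_mmu_fault_sketch(env, addr, is_write, mmu_idx);

    if (ret) {
        if (retaddr) {
            cpu_restore_state(env, retaddr);  /* point at the faulting insn */
        }
        cpu_loop_exit(env);                   /* deliver the exception */
    }
}
#endif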

#include "exec/softmmu_defs.h"

#define ACCESS_TYPE (NB_MMU_MODES + 1)
#define MEMSUFFIX _code

#define DATA_SIZE 1
#include "exec/softmmu_header.h"

#define DATA_SIZE 2
#include "exec/softmmu_header.h"

#define DATA_SIZE 4
#include "exec/softmmu_header.h"

#define DATA_SIZE 8
#include "exec/softmmu_header.h"

#undef ACCESS_TYPE
#undef MEMSUFFIX
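
/* Note (not in the original header): the repeated includes above use the
 * "template header" idiom: each inclusion of softmmu_header.h expands to
 * the code-fetch accessors for one access size, parameterized by DATA_SIZE
 * and MEMSUFFIX, after which DATA_SIZE is redefined for the next size.
 * This yields the 1/2/4/8-byte _code loads used by the translators. */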

#endif

#if defined(CONFIG_USER_ONLY)
static inline tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    return addr;
}
#else
/* cputlb.c */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr);
#endif

typedef void (CPUDebugExcpHandler)(CPUArchState *env);

void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler);

/* vl.c */
extern int singlestep;

/* cpu-exec.c */
extern volatile sig_atomic_t exit_request;

/* Deterministic execution requires that IO only be performed on the last
   instruction of a TB so that interrupts take effect immediately.  */
static inline int can_do_io(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    if (!use_icount) {
        return 1;
    }
    /* If not executing code then assume we are ok.  */
    if (cpu->current_tb == NULL) {
        return 1;
    }
    return env->can_do_io != 0;
}
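
/* Sketch (not in the original header): typical guard on the I/O access
 * path when icount is enabled; if the current insn is not the last one of
 * its TB, force a retranslation so that it becomes one.  Illustrative
 * only. */
static inline void io_guard_sketch(CPUArchState *env, uintptr_t retaddr)
{
    if (!can_do_io(env)) {
        cpu_io_recompile(env, retaddr);   /* noreturn: restarts execution */
    }
}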

#endif