xref: /openbmc/qemu/accel/tcg/internal-target.h (revision 259ebed4)
1 /*
2  * Internal execution defines for qemu (target specific)
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  * SPDX-License-Identifier: LGPL-2.1-or-later
7  */
8 
9 #ifndef ACCEL_TCG_INTERNAL_TARGET_H
10 #define ACCEL_TCG_INTERNAL_TARGET_H
11 
12 #include "exec/exec-all.h"
13 #include "exec/translate-all.h"
14 
15 /*
16  * Access to the various translations structures need to be serialised
17  * via locks for consistency.  In user-mode emulation access to the
18  * memory related structures are protected with mmap_lock.
19  * In !user-mode we use per-page locks.
20  */
#ifdef CONFIG_USER_ONLY
#define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
#else
/* System mode serialises via per-page locks (see comment above): nothing to assert. */
#define assert_memory_lock()
#endif
26 
#if defined(CONFIG_SOFTMMU) && defined(CONFIG_DEBUG_TCG)
void assert_no_pages_locked(void);
#else
/* Without softmmu page locks, or without CONFIG_DEBUG_TCG, the check compiles away. */
static inline void assert_no_pages_locked(void) { }
#endif
32 
#ifdef CONFIG_USER_ONLY
/* User-only mode needs no runtime page-table configuration. */
static inline void page_table_config_init(void) { }
#else
void page_table_config_init(void);
#endif
38 
#ifdef CONFIG_USER_ONLY
/*
 * For user-only, page_protect sets the page read-only.
 * Since most execution is already on read-only pages, and we'd need to
 * account for other TBs on the same page, defer undoing any page protection
 * until we receive the write fault.
 */
/* Write-protect the first page covered by a TB. */
static inline void tb_lock_page0(tb_page_addr_t p0)
{
    page_protect(p0);
}

/* Write-protect the second page; @p0 is unused in user mode. */
static inline void tb_lock_page1(tb_page_addr_t p0, tb_page_addr_t p1)
{
    page_protect(p1);
}

/* Unprotection is deferred to the write fault (see comment above), so no-ops. */
static inline void tb_unlock_page1(tb_page_addr_t p0, tb_page_addr_t p1) { }
static inline void tb_unlock_pages(TranslationBlock *tb) { }
#else
/* System mode: real per-page locks, implemented out of line. */
void tb_lock_page0(tb_page_addr_t);
void tb_lock_page1(tb_page_addr_t, tb_page_addr_t);
void tb_unlock_page1(tb_page_addr_t, tb_page_addr_t);
void tb_unlock_pages(TranslationBlock *);
#endif
64 
#ifdef CONFIG_SOFTMMU
/*
 * Invalidate TBs intersecting guest RAM range [ram_addr, ram_addr + size).
 * @retaddr is the host return address of the triggering access.
 * NOTE(review): semantics inferred from the name — confirm at the definition.
 */
void tb_invalidate_phys_range_fast(ram_addr_t ram_addr,
                                   unsigned size,
                                   uintptr_t retaddr);
/* Abandon the current TB from an I/O access at @retaddr; does not return. */
G_NORETURN void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
#endif /* CONFIG_SOFTMMU */
71 
/* Translate guest code at @pc/@cs_base/@flags into a new TB with @cflags. */
TranslationBlock *tb_gen_code(CPUState *cpu, vaddr pc,
                              uint64_t cs_base, uint32_t flags,
                              int cflags);
/* One-time initialisation of the page structures and the TB hash table. */
void page_init(void);
void tb_htable_init(void);
/* Reset direct-jump slot @n of @tb. */
void tb_reset_jump(TranslationBlock *tb, int n);
/* Add @tb to the page-indexed structures; see definition for the return contract. */
TranslationBlock *tb_link_page(TranslationBlock *tb);
/* Invalidate TB(s) at @addr; @pc is the host pc for unwinding — confirm at definition. */
bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc);
/* Restore guest CPU state from @tb given the faulting host pc @host_pc. */
void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                               uintptr_t host_pc);

/* Per-vCPU TCG setup/teardown (realize/unrealize hooks). */
bool tcg_exec_realizefn(CPUState *cpu, Error **errp);
void tcg_exec_unrealizefn(CPUState *cpu);
85 
86 /* Return the current PC from CPU, which may be cached in TB. */
87 static inline vaddr log_pc(CPUState *cpu, const TranslationBlock *tb)
88 {
89     if (tb_cflags(tb) & CF_PCREL) {
90         return cpu->cc->get_pc(cpu);
91     } else {
92         return tb->pc;
93     }
94 }
95 
96 extern bool one_insn_per_tb;
97 
/**
 * tcg_req_mo:
 * @type: TCGBar
 *
 * Filter @type to the barrier that is required for the guest
 * memory ordering vs the host memory ordering.  A non-zero
 * result indicates that some barrier is required.
 *
 * If TCG_GUEST_DEFAULT_MO is not defined, assume that the
 * guest requires strict ordering.
 *
 * This is a macro so that it's constant even without optimization.
 */
#ifdef TCG_GUEST_DEFAULT_MO
/* Keep only barriers the guest needs that the host does not already provide. */
# define tcg_req_mo(type) \
    ((type) & TCG_GUEST_DEFAULT_MO & ~TCG_TARGET_DEFAULT_MO)
#else
/* Strict guest ordering assumed: anything the host lacks is required. */
# define tcg_req_mo(type) ((type) & ~TCG_TARGET_DEFAULT_MO)
#endif
117 
/**
 * cpu_req_mo:
 * @type: TCGBar
 *
 * If tcg_req_mo indicates a barrier for @type is required
 * for the guest memory model, issue a host memory barrier.
 */
/* do { } while (0) keeps the expansion a single statement. */
#define cpu_req_mo(type)          \
    do {                          \
        if (tcg_req_mo(type)) {   \
            smp_mb();             \
        }                         \
    } while (0)
131 
#endif /* ACCEL_TCG_INTERNAL_TARGET_H */
133