/*
 * Internal execution defines for qemu (target specific)
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * SPDX-License-Identifier: LGPL-2.1-or-later
 */

#ifndef ACCEL_TCG_INTERNAL_TARGET_H
#define ACCEL_TCG_INTERNAL_TARGET_H

#include "exec/exec-all.h"
#include "exec/translate-all.h"

/*
 * Access to the various translation structures needs to be serialised
 * via locks for consistency.  In user-mode emulation, access to the
 * memory-related structures is protected with mmap_lock.
 * In !user-mode we use per-page locks.
 * A usage sketch follows the assertion helpers below.
 */
#ifdef CONFIG_USER_ONLY
#define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
#else
#define assert_memory_lock()
#endif

#if defined(CONFIG_SOFTMMU) && defined(CONFIG_DEBUG_TCG)
void assert_no_pages_locked(void);
#else
static inline void assert_no_pages_locked(void) { }
#endif
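
/*
 * Usage sketch for the assertions above.  This is illustrative only:
 * update_page_tables() is a hypothetical helper, not part of QEMU, and
 * the exact placement of the checks varies in the real code.
 *
 *     static void update_page_tables(tb_page_addr_t addr)
 *     {
 *         assert_memory_lock();        // user-mode: mmap_lock must be held
 *         // ... modify the per-page TB lists ...
 *         assert_no_pages_locked();    // all per-page locks dropped again
 *     }
 */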

#ifdef CONFIG_USER_ONLY
static inline void page_table_config_init(void) { }
#else
void page_table_config_init(void);
#endif

#ifdef CONFIG_USER_ONLY
/*
 * For user-only, page_protect sets the page read-only.
 * Since most execution is already on read-only pages, and we'd need to
 * account for other TBs on the same page, defer undoing any page protection
 * until we receive the write fault.
 */
static inline void tb_lock_page0(tb_page_addr_t p0)
{
    page_protect(p0);
}

static inline void tb_lock_page1(tb_page_addr_t p0, tb_page_addr_t p1)
{
    page_protect(p1);
}

static inline void tb_unlock_page1(tb_page_addr_t p0, tb_page_addr_t p1) { }
static inline void tb_unlock_pages(TranslationBlock *tb) { }
#else
void tb_lock_page0(tb_page_addr_t);
void tb_lock_page1(tb_page_addr_t, tb_page_addr_t);
void tb_unlock_page1(tb_page_addr_t, tb_page_addr_t);
void tb_unlock_pages(TranslationBlock *);
#endif
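
/*
 * Pairing sketch for the page-lock helpers above (illustrative pseudo-flow;
 * the surrounding translation logic is simplified and the variable names
 * are hypothetical):
 *
 *     tb_lock_page0(phys_page0);
 *     if (the TB crosses a page boundary) {
 *         tb_lock_page1(phys_page0, phys_page1);
 *     }
 *     // ... link the TB into the per-page lists ...
 *     tb_unlock_pages(tb);
 *
 * In user-mode the "lock" write-protects the page, and the protection is
 * only undone lazily when a write fault is taken, as described above.
 */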

#ifdef CONFIG_SOFTMMU
/* Invalidate any TBs that overlap the given range of guest RAM. */
void tb_invalidate_phys_range_fast(ram_addr_t ram_addr,
                                   unsigned size,
                                   uintptr_t retaddr);
/* Retranslate the current TB to end at the I/O insn, then restart execution. */
G_NORETURN void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
#endif /* CONFIG_SOFTMMU */

bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc);

/* Return the current PC from CPU, which may be cached in TB. */
static inline vaddr log_pc(CPUState *cpu, const TranslationBlock *tb)
{
    if (tb_cflags(tb) & CF_PCREL) {
        return cpu->cc->get_pc(cpu);
    } else {
        return tb->pc;
    }
}

/**
 * tcg_req_mo:
 * @type: TCGBar
 *
 * Filter @type to the barrier that is required for the guest
 * memory ordering vs the host memory ordering.  A non-zero
 * result indicates that some barrier is required.
 *
 * If TCG_GUEST_DEFAULT_MO is not defined, assume that the
 * guest requires strict ordering.
 *
 * This is a macro so that it's constant even without optimization.
 */
#ifdef TCG_GUEST_DEFAULT_MO
# define tcg_req_mo(type) \
    ((type) & TCG_GUEST_DEFAULT_MO & ~TCG_TARGET_DEFAULT_MO)
#else
# define tcg_req_mo(type) ((type) & ~TCG_TARGET_DEFAULT_MO)
#endif
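
/*
 * Worked example (assuming an x86-like TSO host whose TCG_TARGET_DEFAULT_MO
 * is TCG_MO_ALL & ~TCG_MO_ST_LD, and a strongly ordered guest whose
 * TCG_GUEST_DEFAULT_MO is TCG_MO_ALL):
 *
 *     tcg_req_mo(TCG_MO_ALL) == TCG_MO_ST_LD
 *
 * i.e. only store-followed-by-load ordering still needs an explicit host
 * barrier.  On a weakly ordered host with TCG_TARGET_DEFAULT_MO == 0, all
 * of @type survives the filter.
 */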

/**
 * cpu_req_mo:
 * @type: TCGBar
 *
 * If tcg_req_mo indicates a barrier for @type is required
 * for the guest memory model, issue a host memory barrier.
 */
#define cpu_req_mo(type)          \
    do {                          \
        if (tcg_req_mo(type)) {   \
            smp_mb();             \
        }                         \
    } while (0)
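
/*
 * Usage sketch (illustrative only; do_guest_load() and load_from_memory()
 * are hypothetical stand-ins for the real load slow path): a guest load
 * issues the required barrier before touching memory.
 *
 *     static uint64_t do_guest_load(CPUState *cpu, vaddr addr)
 *     {
 *         cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); // order prior ld/st vs this load
 *         return load_from_memory(cpu, addr);      // hypothetical access
 *     }
 */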

#endif /* ACCEL_TCG_INTERNAL_TARGET_H */