xref: /openbmc/qemu/include/exec/cpu-all.h (revision 407bc4bf9027f7ac4333e47cd900d773b99a23e3)
/*
 * defines common to all virtual CPUs
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#ifndef CPU_ALL_H
#define CPU_ALL_H

#include "exec/page-protection.h"
#include "exec/cpu-common.h"
#include "exec/memory.h"
#include "exec/tswap.h"
#include "hw/core/cpu.h"

/* some important defines:
 *
 * HOST_BIG_ENDIAN : 1 if the host cpu is big endian, 0 if it is
 * little endian.
 *
 * TARGET_BIG_ENDIAN : same for the target cpu
 */

#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
#define BSWAP_NEEDED
#endif

/* Target-endianness CPU memory access functions. These fit into the
 * {ld,st}{type}{sign}{size}{endian}_p naming scheme described in bswap.h.
 */
#if TARGET_BIG_ENDIAN
#define lduw_p(p) lduw_be_p(p)
#define ldsw_p(p) ldsw_be_p(p)
#define ldl_p(p) ldl_be_p(p)
#define ldq_p(p) ldq_be_p(p)
#define stw_p(p, v) stw_be_p(p, v)
#define stl_p(p, v) stl_be_p(p, v)
#define stq_p(p, v) stq_be_p(p, v)
#define ldn_p(p, sz) ldn_be_p(p, sz)
#define stn_p(p, sz, v) stn_be_p(p, sz, v)
#else
#define lduw_p(p) lduw_le_p(p)
#define ldsw_p(p) ldsw_le_p(p)
#define ldl_p(p) ldl_le_p(p)
#define ldq_p(p) ldq_le_p(p)
#define stw_p(p, v) stw_le_p(p, v)
#define stl_p(p, v) stl_le_p(p, v)
#define stq_p(p, v) stq_le_p(p, v)
#define ldn_p(p, sz) ldn_le_p(p, sz)
#define stn_p(p, sz, v) stn_le_p(p, sz, v)
#endif
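/*
 * Illustrative sketch (not part of this header): with the macros above, a
 * caller reads or writes a guest-endian value in host memory without
 * spelling out the endianness.  "desc" is a hypothetical buffer used only
 * for this example.
 *
 *     uint8_t desc[8];
 *     uint32_t len = ldl_p(desc);       // 32-bit load, target endianness
 *     stw_p(desc + 4, 0x1234);          // 16-bit store, target endianness
 *     uint64_t raw = ldn_p(desc, 8);    // run-time size: 1, 2, 4 or 8 bytes
 */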

/* MMU memory access macros */

#if !defined(CONFIG_USER_ONLY)

#include "exec/hwaddr.h"

#define SUFFIX
#define ARG1         as
#define ARG1_DECL    AddressSpace *as
#define TARGET_ENDIANNESS
#include "exec/memory_ldst.h.inc"

#define SUFFIX       _cached_slow
#define ARG1         cache
#define ARG1_DECL    MemoryRegionCache *cache
#define TARGET_ENDIANNESS
#include "exec/memory_ldst.h.inc"

static inline void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_notdirty(as, addr, val,
                               MEMTXATTRS_UNSPECIFIED, NULL);
}

#define SUFFIX
#define ARG1         as
#define ARG1_DECL    AddressSpace *as
#define TARGET_ENDIANNESS
#include "exec/memory_ldst_phys.h.inc"

/* Inline fast path for direct RAM access.  */
#define ENDIANNESS
#include "exec/memory_ldst_cached.h.inc"

#define SUFFIX       _cached
#define ARG1         cache
#define ARG1_DECL    MemoryRegionCache *cache
#define TARGET_ENDIANNESS
#include "exec/memory_ldst_phys.h.inc"
#endif
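/*
 * Illustrative sketch (not part of this header): the generated accessors
 * take the AddressSpace (or, for the _cached variants, a MemoryRegionCache)
 * as their first argument and use target endianness.  "as", "cache",
 * "paddr" and "off" are placeholders for this example.
 *
 *     uint32_t v = ldl_phys(as, paddr);           // guest-physical load
 *     stl_phys(as, paddr, v + 1);                 // guest-physical store
 *     stl_phys_notdirty(as, paddr, v);            // skip dirty tracking
 *     uint32_t c = ldl_phys_cached(cache, off);   // via MemoryRegionCache
 */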

/* page related stuff */
#include "exec/cpu-defs.h"
#ifdef TARGET_PAGE_BITS_VARY
# include "exec/page-vary.h"
extern const TargetPageBits target_page;
# ifdef CONFIG_DEBUG_TCG
#  define TARGET_PAGE_BITS   ({ assert(target_page.decided); \
                                target_page.bits; })
#  define TARGET_PAGE_MASK   ({ assert(target_page.decided); \
                                (target_long)target_page.mask; })
# else
#  define TARGET_PAGE_BITS   target_page.bits
#  define TARGET_PAGE_MASK   ((target_long)target_page.mask)
# endif
# define TARGET_PAGE_SIZE    (-(int)TARGET_PAGE_MASK)
#else
# define TARGET_PAGE_BITS_MIN TARGET_PAGE_BITS
# define TARGET_PAGE_SIZE    (1 << TARGET_PAGE_BITS)
# define TARGET_PAGE_MASK    ((target_long)-1 << TARGET_PAGE_BITS)
#endif

#define TARGET_PAGE_ALIGN(addr) ROUND_UP((addr), TARGET_PAGE_SIZE)
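/*
 * Worked example (illustrative only): with TARGET_PAGE_BITS == 12,
 * TARGET_PAGE_SIZE is 0x1000 and TARGET_PAGE_MASK is ~0xfff, so:
 *
 *     addr & TARGET_PAGE_MASK     // page-aligned base of addr
 *     addr & ~TARGET_PAGE_MASK    // offset of addr within its page
 *     TARGET_PAGE_ALIGN(addr)     // addr rounded up to a page boundary
 *
 * e.g. TARGET_PAGE_ALIGN(0x1234) == 0x2000 and
 * (0x1234 & TARGET_PAGE_MASK) == 0x1000.
 */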

CPUArchState *cpu_copy(CPUArchState *env);

/* Flags for use in CPUState::interrupt_request.

   The numbers assigned here are non-sequential in order to preserve
   binary compatibility with the vmstate dump.  Bit 0 (0x0001) was
   previously used for CPU_INTERRUPT_EXIT, and is cleared when loading
   the vmstate dump.  */

/* External hardware interrupt pending.  This is typically used for
   interrupts from devices.  */
#define CPU_INTERRUPT_HARD        0x0002

/* Exit the current TB.  This is typically used when some system-level device
   changes the memory mapping, e.g. when the A20 line is toggled.  */
#define CPU_INTERRUPT_EXITTB      0x0004

/* Halt the CPU.  */
#define CPU_INTERRUPT_HALT        0x0020

/* Debug event pending.  */
#define CPU_INTERRUPT_DEBUG       0x0080

/* Reset signal.  */
#define CPU_INTERRUPT_RESET       0x0400

/* Several target-specific external hardware interrupts.  Each target/cpu.h
   should define proper names based on these defines.  */
#define CPU_INTERRUPT_TGT_EXT_0   0x0008
#define CPU_INTERRUPT_TGT_EXT_1   0x0010
#define CPU_INTERRUPT_TGT_EXT_2   0x0040
#define CPU_INTERRUPT_TGT_EXT_3   0x0200
#define CPU_INTERRUPT_TGT_EXT_4   0x1000

/* Several target-specific internal interrupts.  These differ from the
   preceding target-specific interrupts in that they are intended to
   originate from within the cpu itself, typically in response to some
   instruction being executed.  These, therefore, are not masked while
   single-stepping within the debugger.  */
#define CPU_INTERRUPT_TGT_INT_0   0x0100
#define CPU_INTERRUPT_TGT_INT_1   0x0800
#define CPU_INTERRUPT_TGT_INT_2   0x2000

/* First unused bit: 0x4000.  */

/* The set of all bits that should be masked when single-stepping.  */
#define CPU_INTERRUPT_SSTEP_MASK \
    (CPU_INTERRUPT_HARD          \
     | CPU_INTERRUPT_TGT_EXT_0   \
     | CPU_INTERRUPT_TGT_EXT_1   \
     | CPU_INTERRUPT_TGT_EXT_2   \
     | CPU_INTERRUPT_TGT_EXT_3   \
     | CPU_INTERRUPT_TGT_EXT_4)
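/*
 * Illustrative sketch (not part of this header): these bits are normally
 * raised and cleared through the CPUState helpers rather than by writing
 * the field directly.  "cs" is a hypothetical CPUState pointer.
 *
 *     cpu_interrupt(cs, CPU_INTERRUPT_HARD);         // assert the line
 *     cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);   // deassert it again
 *
 *     if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
 *         // a hardware interrupt is pending for this vCPU
 *     }
 */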

#include "cpu.h"

#ifdef CONFIG_USER_ONLY

static inline int cpu_mmu_index(CPUState *cs, bool ifetch);

/*
 * Allow some level of source compatibility with softmmu.  We do not
 * support any of the more exotic features, so only invalid pages may
 * be signaled by probe_access_flags().
 */
#define TLB_INVALID_MASK    (1 << (TARGET_PAGE_BITS_MIN - 1))
#define TLB_MMIO            (1 << (TARGET_PAGE_BITS_MIN - 2))
#define TLB_WATCHPOINT      0

static inline int cpu_mmu_index(CPUState *cs, bool ifetch)
{
    return MMU_USER_IDX;
}
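/*
 * Illustrative sketch (not part of this header): callers use the returned
 * index to select the MMU context for an access; "cs" is a hypothetical
 * CPUState pointer.  For user-only builds the result is always MMU_USER_IDX.
 *
 *     int mmu_idx  = cpu_mmu_index(cs, false);   // data access
 *     int code_idx = cpu_mmu_index(cs, true);    // instruction fetch
 */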
#else

/*
 * Flags stored in the low bits of the TLB virtual address.
 * These are defined so that fast path ram access is all zeros.
 * The flags all must be between TARGET_PAGE_BITS and
 * maximum address alignment bit.
 *
 * Use TARGET_PAGE_BITS_MIN so that these bits are constant
 * when TARGET_PAGE_BITS_VARY is in effect.
 *
 * The count, if not the placement, of these bits is known
 * to tcg/tcg-op-ldst.c, check_max_alignment().
 */
/* Zero if TLB entry is valid.  */
#define TLB_INVALID_MASK    (1 << (TARGET_PAGE_BITS_MIN - 1))
/* Set if TLB entry references a clean RAM page.  The iotlb entry will
   contain the page physical address.  */
#define TLB_NOTDIRTY        (1 << (TARGET_PAGE_BITS_MIN - 2))
/* Set if TLB entry is an IO callback.  */
#define TLB_MMIO            (1 << (TARGET_PAGE_BITS_MIN - 3))
/* Set if writes through this TLB entry are ignored.  */
#define TLB_DISCARD_WRITE   (1 << (TARGET_PAGE_BITS_MIN - 4))
/* Set if the slow path must be used; more flags in CPUTLBEntryFull. */
#define TLB_FORCE_SLOW      (1 << (TARGET_PAGE_BITS_MIN - 5))

/*
 * Use this mask to check intersection with an alignment mask
 * in a TCG backend.
 */
#define TLB_FLAGS_MASK \
    (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO \
    | TLB_FORCE_SLOW | TLB_DISCARD_WRITE)

/*
 * Flags stored in CPUTLBEntryFull.slow_flags[x].
 * TLB_FORCE_SLOW must be set in CPUTLBEntry.addr_idx[x].
 */
/* Set if TLB entry requires byte swap.  */
#define TLB_BSWAP            (1 << 0)
/* Set if TLB entry contains a watchpoint.  */
#define TLB_WATCHPOINT       (1 << 1)
/* Set if TLB entry requires aligned accesses.  */
#define TLB_CHECK_ALIGNED    (1 << 2)

#define TLB_SLOW_FLAGS_MASK  (TLB_BSWAP | TLB_WATCHPOINT | TLB_CHECK_ALIGNED)

/* The two sets of flags must not overlap. */
QEMU_BUILD_BUG_ON(TLB_FLAGS_MASK & TLB_SLOW_FLAGS_MASK);
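/*
 * Conceptual sketch (illustrative only): the first group of flags lives in
 * the low bits of the CPUTLBEntry address words and is what the inline fast
 * path tests; the second group is consulted only after TLB_FORCE_SLOW has
 * pushed the access onto the slow path.  Roughly:
 *
 *     if (tlb_addr & TLB_FLAGS_MASK) {
 *         // slow path; if TLB_FORCE_SLOW is set, also look at
 *         // CPUTLBEntryFull.slow_flags for TLB_BSWAP, TLB_WATCHPOINT
 *         // and TLB_CHECK_ALIGNED
 *     } else {
 *         // direct RAM access, no further per-page checks needed
 *     }
 */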

/**
 * tlb_hit_page: return true if page aligned @addr is a hit against the
 * TLB entry @tlb_addr
 *
 * @addr: virtual address to test (must be page aligned)
 * @tlb_addr: TLB entry address (a CPUTLBEntry addr_read/write/code value)
 */
static inline bool tlb_hit_page(uint64_t tlb_addr, vaddr addr)
{
    return addr == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK));
}

/**
 * tlb_hit: return true if @addr is a hit against the TLB entry @tlb_addr
 *
 * @addr: virtual address to test (need not be page aligned)
 * @tlb_addr: TLB entry address (a CPUTLBEntry addr_read/write/code value)
 */
static inline bool tlb_hit(uint64_t tlb_addr, vaddr addr)
{
    return tlb_hit_page(tlb_addr, addr & TARGET_PAGE_MASK);
}
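/*
 * Illustrative sketch (not part of this header): a lookup would typically
 * test the incoming virtual address against a cached entry before choosing
 * between the fast path and a refill.  "entry" and "addr" are placeholders.
 *
 *     CPUTLBEntry *entry = ...;                  // hypothetical TLB slot
 *     if (tlb_hit(entry->addr_read, addr)) {
 *         // hit: the host address is addr + entry->addend
 *     } else {
 *         // miss: refill the entry (tlb_fill path) and retry
 *     }
 */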

#endif /* !CONFIG_USER_ONLY */

/* Validate correct placement of CPUArchState. */
QEMU_BUILD_BUG_ON(offsetof(ArchCPU, parent_obj) != 0);
QEMU_BUILD_BUG_ON(offsetof(ArchCPU, env) != sizeof(CPUState));
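/*
 * Illustrative layout (sketch of the assumption being checked): every
 * target's ArchCPU is expected to start with the shared CPUState and place
 * its CPUArchState immediately after it, i.e. roughly:
 *
 *     struct ArchCPU {
 *         CPUState parent_obj;    // offset 0
 *         CPUArchState env;       // offset sizeof(CPUState)
 *         ...                     // target-specific members follow
 *     };
 */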

#endif /* CPU_ALL_H */