/*
 * defines common to all virtual CPUs
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#ifndef CPU_ALL_H
#define CPU_ALL_H

#include "exec/page-protection.h"
#include "exec/cpu-common.h"
#include "exec/memory.h"
#include "exec/tswap.h"
#include "hw/core/cpu.h"

/* some important defines:
 *
 * HOST_BIG_ENDIAN : non-zero if the host cpu is big endian,
 * zero if it is little endian.
 *
 * TARGET_BIG_ENDIAN : same for the target cpu
 */

#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
#define BSWAP_NEEDED
#endif

/* Target-endianness CPU memory access functions. These fit into the
 * {ld,st}{type}{sign}{size}{endian}_p naming scheme described in bswap.h.
 */
#if TARGET_BIG_ENDIAN
#define lduw_p(p) lduw_be_p(p)
#define ldsw_p(p) ldsw_be_p(p)
#define ldl_p(p) ldl_be_p(p)
#define ldq_p(p) ldq_be_p(p)
#define stw_p(p, v) stw_be_p(p, v)
#define stl_p(p, v) stl_be_p(p, v)
#define stq_p(p, v) stq_be_p(p, v)
#define ldn_p(p, sz) ldn_be_p(p, sz)
#define stn_p(p, sz, v) stn_be_p(p, sz, v)
#else
#define lduw_p(p) lduw_le_p(p)
#define ldsw_p(p) ldsw_le_p(p)
#define ldl_p(p) ldl_le_p(p)
#define ldq_p(p) ldq_le_p(p)
#define stw_p(p, v) stw_le_p(p, v)
#define stl_p(p, v) stl_le_p(p, v)
#define stq_p(p, v) stq_le_p(p, v)
#define ldn_p(p, sz) ldn_le_p(p, sz)
#define stn_p(p, sz, v) stn_le_p(p, sz, v)
#endif
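
/*
 * Illustrative sketch (not part of the API): regardless of host byte
 * order, stl_p()/ldl_p() store and load in *target* byte order:
 *
 *     uint8_t buf[4];
 *     stl_p(buf, 0x12345678);
 *     // TARGET_BIG_ENDIAN:  buf = { 0x12, 0x34, 0x56, 0x78 }
 *     // otherwise:          buf = { 0x78, 0x56, 0x34, 0x12 }
 *     assert(ldl_p(buf) == 0x12345678);
 */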

/* MMU memory access macros */

#if defined(CONFIG_USER_ONLY)
#include "user/abitypes.h"

/*
 * If non-zero, the guest virtual address space is a contiguous subset
 * of the host virtual address space, i.e. '-R reserved_va' is in effect
 * either from the command-line or by default.  The value is the last
 * byte of the guest address space, e.g. UINT32_MAX.
 *
 * If zero, the host and guest virtual address spaces are intermingled.
 */
extern unsigned long reserved_va;

/*
 * Limit the guest addresses as best we can.
 *
 * When not using -R reserved_va, we cannot really limit the guest
 * to less address space than the host.  For 32-bit guests, this
 * acts as a sanity check that we're not giving the guest an address
 * that it cannot even represent.  For 64-bit guests... the address
 * might not be what the real kernel would give, but it is at least
 * representable in the guest.
 *
 * TODO: Improve address allocation to avoid this problem, and to
 * avoid setting bits at the top of guest addresses that might need
 * to be used for tags.
 */
#define GUEST_ADDR_MAX_                                                 \
    ((MIN_CONST(TARGET_VIRT_ADDR_SPACE_BITS, TARGET_ABI_BITS) <= 32) ?  \
     UINT32_MAX : ~0ul)
#define GUEST_ADDR_MAX    (reserved_va ? : GUEST_ADDR_MAX_)
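
/*
 * A hedged sketch of the intended use (helper name hypothetical):
 * candidate guest addresses can be validated against GUEST_ADDR_MAX
 * before being handed to the guest:
 *
 *     static inline bool guest_addr_ok_example(unsigned long addr)
 *     {
 *         return addr <= GUEST_ADDR_MAX;
 *     }
 *
 * Note the GNU "a ?: b" extension above: a non-zero reserved_va is
 * itself the last valid guest byte; otherwise fall back to the
 * compile-time GUEST_ADDR_MAX_.
 */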

#else

#include "exec/hwaddr.h"

#define SUFFIX
#define ARG1         as
#define ARG1_DECL    AddressSpace *as
#define TARGET_ENDIANNESS
#include "exec/memory_ldst.h.inc"

#define SUFFIX       _cached_slow
#define ARG1         cache
#define ARG1_DECL    MemoryRegionCache *cache
#define TARGET_ENDIANNESS
#include "exec/memory_ldst.h.inc"

static inline void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_notdirty(as, addr, val,
                               MEMTXATTRS_UNSPECIFIED, NULL);
}

#define SUFFIX
#define ARG1         as
#define ARG1_DECL    AddressSpace *as
#define TARGET_ENDIANNESS
#include "exec/memory_ldst_phys.h.inc"

/* Inline fast path for direct RAM access.  */
#define ENDIANNESS
#include "exec/memory_ldst_cached.h.inc"

#define SUFFIX       _cached
#define ARG1         cache
#define ARG1_DECL    MemoryRegionCache *cache
#define TARGET_ENDIANNESS
#include "exec/memory_ldst_phys.h.inc"
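
/*
 * The .inc expansions above stamp out target-endian physical-memory
 * accessors such as ldl_phys(as, addr) and stl_phys(as, addr, val),
 * plus _cached variants taking a MemoryRegionCache.  A hedged usage
 * sketch (helper name hypothetical):
 *
 *     uint32_t bump_word(AddressSpace *as, hwaddr addr)
 *     {
 *         uint32_t v = ldl_phys(as, addr);    // target-endian load
 *         stl_phys(as, addr, v + 1);          // target-endian store
 *         return v;
 *     }
 */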
#endif

/* page related stuff */

#ifdef TARGET_PAGE_BITS_VARY
# include "exec/page-vary.h"
extern const TargetPageBits target_page;
# ifdef CONFIG_DEBUG_TCG
#  define TARGET_PAGE_BITS   ({ assert(target_page.decided); \
                                target_page.bits; })
#  define TARGET_PAGE_MASK   ({ assert(target_page.decided); \
                                (target_long)target_page.mask; })
# else
#  define TARGET_PAGE_BITS   target_page.bits
#  define TARGET_PAGE_MASK   ((target_long)target_page.mask)
# endif
# define TARGET_PAGE_SIZE    (-(int)TARGET_PAGE_MASK)
#else
# define TARGET_PAGE_BITS_MIN TARGET_PAGE_BITS
# define TARGET_PAGE_SIZE    (1 << TARGET_PAGE_BITS)
# define TARGET_PAGE_MASK    ((target_long)-1 << TARGET_PAGE_BITS)
#endif

#define TARGET_PAGE_ALIGN(addr) ROUND_UP((addr), TARGET_PAGE_SIZE)
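
/*
 * Worked example (assuming TARGET_PAGE_BITS == 12, i.e. 4 KiB pages):
 *
 *     TARGET_PAGE_SIZE            == 0x1000
 *     TARGET_PAGE_MASK            == ~(target_long)0xfff
 *     TARGET_PAGE_ALIGN(0x1234)   == 0x2000
 *     0x1234 & TARGET_PAGE_MASK   == 0x1000
 *
 * In the variable-page-size case, TARGET_PAGE_SIZE is recovered as
 * -(int)TARGET_PAGE_MASK because negating the two's-complement mask
 * of a power of two yields that power of two.
 */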

#if defined(CONFIG_USER_ONLY)
void page_dump(FILE *f);

typedef int (*walk_memory_regions_fn)(void *, target_ulong,
                                      target_ulong, unsigned long);
int walk_memory_regions(void *, walk_memory_regions_fn);

int page_get_flags(target_ulong address);

/**
 * page_set_flags:
 * @start: first byte of range
 * @last: last byte of range
 * @flags: flags to set
 * Context: holding mmap lock
 *
 * Modify the flags of the pages covering [@start, @last] and
 * invalidate any translated code in the range if necessary.
 * The flag PAGE_WRITE_ORG is set automatically based on PAGE_WRITE.
 */
void page_set_flags(target_ulong start, target_ulong last, int flags);
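
/*
 * Hedged usage sketch (values hypothetical): after emulating an
 * mprotect() that makes a region read-only, a user-mode loader
 * might do:
 *
 *     mmap_lock();
 *     page_set_flags(start, last, PAGE_VALID | PAGE_READ);
 *     mmap_unlock();
 *
 * The PAGE_* bits come from "exec/page-protection.h", included above.
 */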

void page_reset_target_data(target_ulong start, target_ulong last);

/**
 * page_check_range:
 * @start: first byte of range
 * @len: length of range
 * @flags: flags required for each page
 *
 * Return true if every page in [@start, @start+@len) has @flags set.
 * Return false if any page is unmapped.  Thus testing flags == 0 is
 * equivalent to testing for flags == PAGE_VALID.
 */
bool page_check_range(target_ulong start, target_ulong len, int flags);
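
/*
 * Illustrative check (a sketch; names hypothetical): verify that a
 * guest buffer about to be read by a syscall handler is readable:
 *
 *     if (!page_check_range(guest_addr, size, PAGE_READ)) {
 *         return -TARGET_EFAULT;
 *     }
 */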

/**
 * page_check_range_empty:
 * @start: first byte of range
 * @last: last byte of range
 * Context: holding mmap lock
 *
 * Return true if the entire range [@start, @last] is unmapped.
 * The memory lock must be held so that the caller can ensure
 * the result stays true until a new mapping can be installed.
 */
bool page_check_range_empty(target_ulong start, target_ulong last);

/**
 * page_find_range_empty:
 * @min: first byte of search range
 * @max: last byte of search range
 * @len: size of the hole required
 * @align: alignment of the hole required (power of 2)
 *
 * If there is a range [x, x+@len) within [@min, @max] such that
 * x % @align == 0, then return x.  Otherwise return -1.
 * The memory lock must be held, as the caller will want to ensure
 * the returned range stays empty until a new mapping can be installed.
 */
target_ulong page_find_range_empty(target_ulong min, target_ulong max,
                                   target_ulong len, target_ulong align);
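
/*
 * Hedged sketch of how an emulated mmap(NULL, ...) might choose a
 * placement (variable names hypothetical):
 *
 *     target_ulong addr = page_find_range_empty(mmap_min, GUEST_ADDR_MAX,
 *                                               len, TARGET_PAGE_SIZE);
 *     if (addr == (target_ulong)-1) {
 *         return -TARGET_ENOMEM;   // no sufficiently large hole
 *     }
 */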

/**
 * page_get_target_data:
 * @address: guest virtual address
 *
 * Return TARGET_PAGE_DATA_SIZE bytes of out-of-band data to associate
 * with the guest page at @address, allocating it if necessary.  The
 * caller should already have verified that the address is valid.
 *
 * The memory will be freed when the guest page is deallocated,
 * e.g. with the munmap system call.
 */
void *page_get_target_data(target_ulong address)
    __attribute__((returns_nonnull));
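
/*
 * Illustrative consumer (a sketch; the Arm MTE emulation stores
 * per-page allocation tags this way in user-only mode, though the
 * layout below is hypothetical):
 *
 *     uint8_t *tags = page_get_target_data(guest_addr);
 *     tags[(guest_addr & ~TARGET_PAGE_MASK) / 16] = tag;
 */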
#endif

CPUArchState *cpu_copy(CPUArchState *env);

/* Flags for use in ENV->INTERRUPT_PENDING.

   The numbers assigned here are non-sequential in order to preserve
   binary compatibility with the vmstate dump.  Bit 0 (0x0001) was
   previously used for CPU_INTERRUPT_EXIT, and is cleared when loading
   the vmstate dump.  */

/* External hardware interrupt pending.  This is typically used for
   interrupts from devices.  */
#define CPU_INTERRUPT_HARD        0x0002

/* Exit the current TB.  This is typically used when some system-level device
   makes some change to the memory mapping, e.g. an A20 line change.  */
#define CPU_INTERRUPT_EXITTB      0x0004

/* Halt the CPU.  */
#define CPU_INTERRUPT_HALT        0x0020

/* Debug event pending.  */
#define CPU_INTERRUPT_DEBUG       0x0080

/* Reset signal.  */
#define CPU_INTERRUPT_RESET       0x0400

/* Several target-specific external hardware interrupts.  Each target/cpu.h
   should define proper names based on these defines.  */
#define CPU_INTERRUPT_TGT_EXT_0   0x0008
#define CPU_INTERRUPT_TGT_EXT_1   0x0010
#define CPU_INTERRUPT_TGT_EXT_2   0x0040
#define CPU_INTERRUPT_TGT_EXT_3   0x0200
#define CPU_INTERRUPT_TGT_EXT_4   0x1000

/* Several target-specific internal interrupts.  These differ from the
   preceding target-specific interrupts in that they are intended to
   originate from within the cpu itself, typically in response to some
   instruction being executed.  These, therefore, are not masked while
   single-stepping within the debugger.  */
#define CPU_INTERRUPT_TGT_INT_0   0x0100
#define CPU_INTERRUPT_TGT_INT_1   0x0800
#define CPU_INTERRUPT_TGT_INT_2   0x2000

/* First unused bit: 0x4000.  */

/* The set of all bits that should be masked when single-stepping.  */
#define CPU_INTERRUPT_SSTEP_MASK \
    (CPU_INTERRUPT_HARD          \
     | CPU_INTERRUPT_TGT_EXT_0   \
     | CPU_INTERRUPT_TGT_EXT_1   \
     | CPU_INTERRUPT_TGT_EXT_2   \
     | CPU_INTERRUPT_TGT_EXT_3   \
     | CPU_INTERRUPT_TGT_EXT_4)
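
/*
 * Sketch of how a device model raises and clears one of these bits
 * (cpu_interrupt() and cpu_reset_interrupt() are declared in
 * "hw/core/cpu.h", included above):
 *
 *     cpu_interrupt(cs, CPU_INTERRUPT_HARD);        // assert IRQ
 *     cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);  // deassert IRQ
 */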

#ifdef CONFIG_USER_ONLY

/*
 * Allow some level of source compatibility with softmmu.  We do not
 * support any of the more exotic features, so only invalid pages may
 * be signaled by probe_access_flags().
 */
#define TLB_INVALID_MASK    (1 << (TARGET_PAGE_BITS_MIN - 1))
#define TLB_MMIO            (1 << (TARGET_PAGE_BITS_MIN - 2))
#define TLB_WATCHPOINT      0

static inline int cpu_mmu_index(CPUState *cs, bool ifetch)
{
    return MMU_USER_IDX;
}
#else

/*
 * Flags stored in the low bits of the TLB virtual address.
 * These are defined so that fast path ram access is all zeros.
 * All flags must lie between TARGET_PAGE_BITS and the maximum
 * address alignment bit.
 *
 * Use TARGET_PAGE_BITS_MIN so that these bits are constant
 * when TARGET_PAGE_BITS_VARY is in effect.
 *
 * The count, if not the placement, of these bits is known to
 * tcg/tcg-op-ldst.c, check_max_alignment().
 */
/* Zero if TLB entry is valid.  */
#define TLB_INVALID_MASK    (1 << (TARGET_PAGE_BITS_MIN - 1))
/* Set if TLB entry references a clean RAM page.  The iotlb entry will
   contain the page physical address.  */
#define TLB_NOTDIRTY        (1 << (TARGET_PAGE_BITS_MIN - 2))
/* Set if TLB entry is an IO callback.  */
#define TLB_MMIO            (1 << (TARGET_PAGE_BITS_MIN - 3))
/* Set if writes to the TLB entry's page are ignored.  */
#define TLB_DISCARD_WRITE   (1 << (TARGET_PAGE_BITS_MIN - 4))
/* Set if the slow path must be used; more flags in CPUTLBEntryFull. */
#define TLB_FORCE_SLOW      (1 << (TARGET_PAGE_BITS_MIN - 5))

/*
 * Use this mask to check interception with an alignment mask
 * in a TCG backend.
 */
#define TLB_FLAGS_MASK \
    (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO \
    | TLB_FORCE_SLOW | TLB_DISCARD_WRITE)

/*
 * Flags stored in CPUTLBEntryFull.slow_flags[x].
 * TLB_FORCE_SLOW must be set in CPUTLBEntry.addr_idx[x].
 */
/* Set if TLB entry requires byte swap.  */
#define TLB_BSWAP            (1 << 0)
/* Set if TLB entry contains a watchpoint.  */
#define TLB_WATCHPOINT       (1 << 1)
/* Set if TLB entry requires aligned accesses.  */
#define TLB_CHECK_ALIGNED    (1 << 2)

#define TLB_SLOW_FLAGS_MASK  (TLB_BSWAP | TLB_WATCHPOINT | TLB_CHECK_ALIGNED)

/* The two sets of flags must not overlap. */
QEMU_BUILD_BUG_ON(TLB_FLAGS_MASK & TLB_SLOW_FLAGS_MASK);
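
/*
 * Worked layout (assuming TARGET_PAGE_BITS_MIN == 12):
 *
 *     TLB_INVALID_MASK  == 1 << 11 == 0x800
 *     TLB_NOTDIRTY      == 1 << 10 == 0x400
 *     TLB_MMIO          == 1 <<  9 == 0x200
 *     TLB_DISCARD_WRITE == 1 <<  8 == 0x100
 *     TLB_FORCE_SLOW    == 1 <<  7 == 0x080
 *
 * All five sit below the page offset bits, so a page-aligned address
 * with no flags set compares equal on the fast path.
 */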

/**
 * tlb_hit_page: return true if page-aligned @addr is a hit against the
 * TLB entry @tlb_addr
 *
 * @addr: virtual address to test (must be page aligned)
 * @tlb_addr: TLB entry address (a CPUTLBEntry addr_read/write/code value)
 */
static inline bool tlb_hit_page(uint64_t tlb_addr, vaddr addr)
{
    return addr == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK));
}

/**
 * tlb_hit: return true if @addr is a hit against the TLB entry @tlb_addr
 *
 * @addr: virtual address to test (need not be page aligned)
 * @tlb_addr: TLB entry address (a CPUTLBEntry addr_read/write/code value)
 */
static inline bool tlb_hit(uint64_t tlb_addr, vaddr addr)
{
    return tlb_hit_page(tlb_addr, addr & TARGET_PAGE_MASK);
}

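/*
 * Worked example (12-bit pages, hypothetical values): for a valid
 * entry with tlb_addr == 0x1000, tlb_hit(0x1000, 0x1234) compares
 * 0x1000 == 0x1000 and hits.  Invalidating the entry sets
 * TLB_INVALID_MASK (tlb_addr == 0x1800); the comparison mask keeps
 * the 0x800 bit, so 0x1000 != 0x1800 and the lookup falls back to
 * the slow path.
 */
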
#endif /* !CONFIG_USER_ONLY */

/* Validate correct placement of CPUArchState. */
#include "cpu.h"
QEMU_BUILD_BUG_ON(offsetof(ArchCPU, parent_obj) != 0);
QEMU_BUILD_BUG_ON(offsetof(ArchCPU, env) != sizeof(CPUState));

#endif /* CPU_ALL_H */