/*
 * defines common to all virtual CPUs
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#ifndef CPU_ALL_H
#define CPU_ALL_H

#include "exec/page-protection.h"
#include "exec/cpu-common.h"
#include "exec/cpu-interrupt.h"
#include "exec/memory.h"
#include "exec/tswap.h"
#include "hw/core/cpu.h"

/*
 * Target-endianness CPU memory access functions.  These fit into the
 * {ld,st}{type}{sign}{size}{endian}_p naming scheme described in bswap.h.
 * An illustrative usage sketch follows the definitions below.
 */
#if TARGET_BIG_ENDIAN
#define lduw_p(p) lduw_be_p(p)
#define ldsw_p(p) ldsw_be_p(p)
#define ldl_p(p) ldl_be_p(p)
#define ldq_p(p) ldq_be_p(p)
#define stw_p(p, v) stw_be_p(p, v)
#define stl_p(p, v) stl_be_p(p, v)
#define stq_p(p, v) stq_be_p(p, v)
#define ldn_p(p, sz) ldn_be_p(p, sz)
#define stn_p(p, sz, v) stn_be_p(p, sz, v)
#else
#define lduw_p(p) lduw_le_p(p)
#define ldsw_p(p) ldsw_le_p(p)
#define ldl_p(p) ldl_le_p(p)
#define ldq_p(p) ldq_le_p(p)
#define stw_p(p, v) stw_le_p(p, v)
#define stl_p(p, v) stl_le_p(p, v)
#define stq_p(p, v) stq_le_p(p, v)
#define ldn_p(p, sz) ldn_le_p(p, sz)
#define stn_p(p, sz, v) stn_le_p(p, sz, v)
#endif
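
/*
 * Illustrative sketch, not part of the original header: how the
 * target-endian accessors above are typically used on a host buffer.
 * The function name is hypothetical.
 */
#if 0
static inline void example_copy_guest_u32(void *dst, const void *src)
{
    uint32_t v = ldl_p(src);    /* load 4 bytes in guest endianness */
    stl_p(dst, v);              /* store 4 bytes in guest endianness */
}
#endif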

/* MMU memory access macros; see the illustrative sketch below. */

#if !defined(CONFIG_USER_ONLY)

#include "exec/hwaddr.h"

#define SUFFIX
#define ARG1         as
#define ARG1_DECL    AddressSpace *as
#define TARGET_ENDIANNESS
#include "exec/memory_ldst.h.inc"

#define SUFFIX       _cached_slow
#define ARG1         cache
#define ARG1_DECL    MemoryRegionCache *cache
#define TARGET_ENDIANNESS
#include "exec/memory_ldst.h.inc"

/*
 * Store a 32-bit value to guest physical memory without marking the
 * page dirty, e.g. when a target's page-table walker updates PTE
 * accessed/dirty bits and must not trigger TB invalidation.
 */
static inline void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_notdirty(as, addr, val,
                               MEMTXATTRS_UNSPECIFIED, NULL);
}

#define SUFFIX
#define ARG1         as
#define ARG1_DECL    AddressSpace *as
#define TARGET_ENDIANNESS
#include "exec/memory_ldst_phys.h.inc"

/* Inline fast path for direct RAM access.  */
#define ENDIANNESS
#include "exec/memory_ldst_cached.h.inc"

#define SUFFIX       _cached
#define ARG1         cache
#define ARG1_DECL    MemoryRegionCache *cache
#define TARGET_ENDIANNESS
#include "exec/memory_ldst_phys.h.inc"
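
/*
 * Illustrative sketch, not part of the original header: the includes
 * above generate target-endian physical-memory accessors such as
 * ldl_phys() and stl_phys().  The function name below is hypothetical.
 */
#if 0
static inline void example_swap_phys_words(AddressSpace *as,
                                           hwaddr a, hwaddr b)
{
    uint32_t va = ldl_phys(as, a);  /* target-endian 32-bit load */
    uint32_t vb = ldl_phys(as, b);
    stl_phys(as, a, vb);            /* target-endian 32-bit store */
    stl_phys(as, b, va);
}
#endif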
#endif /* !CONFIG_USER_ONLY */

/* page related stuff */
#include "exec/cpu-defs.h"
#include "exec/target_page.h"

CPUArchState *cpu_copy(CPUArchState *env);

#include "cpu.h"

#ifdef CONFIG_USER_ONLY

static inline int cpu_mmu_index(CPUState *cs, bool ifetch);

/*
 * Allow some level of source compatibility with softmmu.  We do not
 * support any of the more exotic features, so only invalid pages may
 * be signaled by probe_access_flags(); see the sketch after
 * cpu_mmu_index() below.
 */
#define TLB_INVALID_MASK    (1 << (TARGET_PAGE_BITS_MIN - 1))
#define TLB_MMIO            (1 << (TARGET_PAGE_BITS_MIN - 2))
#define TLB_WATCHPOINT      0

static inline int cpu_mmu_index(CPUState *cs, bool ifetch)
{
    return MMU_USER_IDX;
}
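
/*
 * Illustrative sketch, not part of the original header: with user-only
 * emulation the only flag probe_access_flags() can report is
 * TLB_INVALID_MASK, so a non-faulting probe reduces to a validity
 * check.  The function name is hypothetical and the probe_access_flags()
 * prototype is assumed from exec-all.h.
 */
#if 0
static inline bool example_page_is_mapped(CPUArchState *env, CPUState *cs,
                                          vaddr addr)
{
    void *host;
    int flags = probe_access_flags(env, addr, 1, MMU_DATA_LOAD,
                                   cpu_mmu_index(cs, false),
                                   true, &host, 0);
    return !(flags & TLB_INVALID_MASK);
}
#endif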
#else

/*
 * Flags stored in the low bits of the TLB virtual address.
 * These are defined so that the fast path RAM access is all zeros.
 * All of the flags must lie between TARGET_PAGE_BITS and the
 * maximum address alignment bit.
 *
 * Use TARGET_PAGE_BITS_MIN so that these bits are constant
 * when TARGET_PAGE_BITS_VARY is in effect.
 *
 * The count, if not the placement, of these bits is known
 * to tcg/tcg-op-ldst.c, check_max_alignment().
 */
/* Zero if TLB entry is valid.  */
#define TLB_INVALID_MASK    (1 << (TARGET_PAGE_BITS_MIN - 1))
/* Set if TLB entry references a clean RAM page.  The iotlb entry will
   contain the page physical address.  */
#define TLB_NOTDIRTY        (1 << (TARGET_PAGE_BITS_MIN - 2))
/* Set if TLB entry is an IO callback.  */
#define TLB_MMIO            (1 << (TARGET_PAGE_BITS_MIN - 3))
/* Set if writes to the TLB entry's page are ignored.  */
#define TLB_DISCARD_WRITE   (1 << (TARGET_PAGE_BITS_MIN - 4))
/* Set if the slow path must be used; more flags in CPUTLBEntryFull. */
#define TLB_FORCE_SLOW      (1 << (TARGET_PAGE_BITS_MIN - 5))

/*
 * Use this mask when a TCG backend checks for these flags together
 * with an alignment mask; see the sketch below.
 */
#define TLB_FLAGS_MASK \
    (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO \
    | TLB_FORCE_SLOW | TLB_DISCARD_WRITE)
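
/*
 * Illustrative sketch, not part of the original header: because every
 * flag bit lies between the maximum alignment bit and TARGET_PAGE_BITS,
 * a TCG backend's fast path can fold the flag check into the page
 * comparison.  Masking the guest address with (TARGET_PAGE_MASK | a_mask)
 * clears the flag bits, so a TLB comparator with any flag set can never
 * match and the access falls through to the slow path.  The function
 * name is hypothetical.
 */
#if 0
static inline bool example_fast_path_hit(uint64_t addr,
                                         uint64_t tlb_comparator,
                                         uint64_t a_mask)
{
    return (addr & (TARGET_PAGE_MASK | a_mask)) == tlb_comparator;
}
#endif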

/*
 * Flags stored in CPUTLBEntryFull.slow_flags[x].
 * TLB_FORCE_SLOW must be set in CPUTLBEntry.addr_idx[x];
 * see the sketch below.
 */
/* Set if TLB entry requires byte swap.  */
#define TLB_BSWAP            (1 << 0)
/* Set if TLB entry contains a watchpoint.  */
#define TLB_WATCHPOINT       (1 << 1)
/* Set if TLB entry requires aligned accesses.  */
#define TLB_CHECK_ALIGNED    (1 << 2)

#define TLB_SLOW_FLAGS_MASK  (TLB_BSWAP | TLB_WATCHPOINT | TLB_CHECK_ALIGNED)
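
/*
 * Illustrative sketch, not part of the original header: a slow-path
 * helper would first see TLB_FORCE_SLOW in the comparator, then consult
 * the per-access slow flags.  Field and type names follow
 * CPUTLBEntryFull; the helper itself is hypothetical.
 */
#if 0
static inline bool example_needs_bswap(const CPUTLBEntryFull *full,
                                       MMUAccessType access_type)
{
    return full->slow_flags[access_type] & TLB_BSWAP;
}
#endif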

/* The two sets of flags must not overlap. */
QEMU_BUILD_BUG_ON(TLB_FLAGS_MASK & TLB_SLOW_FLAGS_MASK);

#endif /* !CONFIG_USER_ONLY */

/* Validate correct placement of CPUArchState; see the sketch below. */
QEMU_BUILD_BUG_ON(offsetof(ArchCPU, parent_obj) != 0);
QEMU_BUILD_BUG_ON(offsetof(ArchCPU, env) != sizeof(CPUState));
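
/*
 * Illustrative sketch, not part of the original header: the layout
 * asserted above lets QEMU convert between CPUArchState and CPUState
 * with plain pointer arithmetic, roughly as below.  The function name
 * is hypothetical; the real helpers are env_cpu()/env_archcpu().
 */
#if 0
static inline CPUState *example_env_cpu(CPUArchState *env)
{
    /* env sits exactly sizeof(CPUState) bytes into ArchCPU, whose
     * first member is the parent CPUState, so one subtraction works. */
    return (CPUState *)((char *)env - sizeof(CPUState));
}
#endif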

#endif /* CPU_ALL_H */