/*
 * Software MMU support (per-target)
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 *
 */

/*
 * Generate inline load/store functions for all MMU modes (typically
 * at least _user and _kernel) as well as _data versions, for all data
 * sizes.
 *
 * Used by target op helpers.
 *
 * The syntax for the accessors is:
 *
 * load:  cpu_ld{sign}{size}{end}_{mmusuffix}(env, ptr)
 *        cpu_ld{sign}{size}{end}_{mmusuffix}_ra(env, ptr, retaddr)
 *        cpu_ld{sign}{size}{end}_mmuidx_ra(env, ptr, mmu_idx, retaddr)
 *        cpu_ld{sign}{size}{end}_mmu(env, ptr, oi, retaddr)
 *
 * store: cpu_st{size}{end}_{mmusuffix}(env, ptr, val)
 *        cpu_st{size}{end}_{mmusuffix}_ra(env, ptr, val, retaddr)
 *        cpu_st{size}{end}_mmuidx_ra(env, ptr, val, mmu_idx, retaddr)
 *        cpu_st{size}{end}_mmu(env, ptr, val, oi, retaddr)
 *
 * sign is:
 * (empty): for 32 and 64 bit sizes
 *   u    : unsigned
 *   s    : signed
 *
 * size is:
 *   b: 8 bits
 *   w: 16 bits
 *   l: 32 bits
 *   q: 64 bits
 *
 * end is:
 * (empty): for target native endian, or for 8 bit access
 *     _be: for forced big endian
 *     _le: for forced little endian
 *
 * mmusuffix is one of the generic suffixes "data" or "code", or "mmuidx".
 * The "mmuidx" suffix carries an extra mmu_idx argument that specifies
 * the index to use; the "data" and "code" suffixes take the index from
 * cpu_mmu_index().
 *
 * The "mmu" suffix carries the full MemOpIdx, with both mmu_idx and the
 * MemOp including alignment requirements.  The alignment will be enforced.
 */
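
/*
 * A minimal usage sketch of the naming scheme (a hypothetical helper,
 * not part of this header): a signed 16-bit big-endian load for the
 * "data" MMU index, without and with a return address for unwinding:
 *
 *     int16_t x = cpu_ldsw_be_data(env, ptr);
 *     int16_t y = cpu_ldsw_be_data_ra(env, ptr, GETPC());
 *
 * GETPC() must be invoked in the outermost helper called from
 * generated code, so that a fault can be unwound to the guest insn.
 */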
#ifndef CPU_LDST_H
#define CPU_LDST_H

#ifndef CONFIG_TCG
#error Can only include this header with TCG
#endif

#include "exec/memopidx.h"
#include "exec/vaddr.h"
#include "exec/abi_ptr.h"
#include "exec/mmu-access-type.h"
#include "qemu/int128.h"

#if defined(CONFIG_USER_ONLY)
#include "user/guest-host.h"
#endif /* CONFIG_USER_ONLY */

uint32_t cpu_ldub_data(CPUArchState *env, abi_ptr ptr);
int cpu_ldsb_data(CPUArchState *env, abi_ptr ptr);
uint32_t cpu_lduw_be_data(CPUArchState *env, abi_ptr ptr);
int cpu_ldsw_be_data(CPUArchState *env, abi_ptr ptr);
uint32_t cpu_ldl_be_data(CPUArchState *env, abi_ptr ptr);
uint64_t cpu_ldq_be_data(CPUArchState *env, abi_ptr ptr);
uint32_t cpu_lduw_le_data(CPUArchState *env, abi_ptr ptr);
int cpu_ldsw_le_data(CPUArchState *env, abi_ptr ptr);
uint32_t cpu_ldl_le_data(CPUArchState *env, abi_ptr ptr);
uint64_t cpu_ldq_le_data(CPUArchState *env, abi_ptr ptr);

uint32_t cpu_ldub_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
int cpu_ldsb_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
uint32_t cpu_lduw_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
int cpu_ldsw_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
uint32_t cpu_ldl_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
uint64_t cpu_ldq_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
uint32_t cpu_lduw_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
int cpu_ldsw_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
uint32_t cpu_ldl_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
uint64_t cpu_ldq_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);

void cpu_stb_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
void cpu_stw_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
void cpu_stl_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
void cpu_stq_be_data(CPUArchState *env, abi_ptr ptr, uint64_t val);
void cpu_stw_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
void cpu_stl_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
void cpu_stq_le_data(CPUArchState *env, abi_ptr ptr, uint64_t val);

void cpu_stb_data_ra(CPUArchState *env, abi_ptr ptr,
                     uint32_t val, uintptr_t ra);
void cpu_stw_be_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint32_t val, uintptr_t ra);
void cpu_stl_be_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint32_t val, uintptr_t ra);
void cpu_stq_be_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint64_t val, uintptr_t ra);
void cpu_stw_le_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint32_t val, uintptr_t ra);
void cpu_stl_le_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint32_t val, uintptr_t ra);
void cpu_stq_le_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint64_t val, uintptr_t ra);

uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
                            int mmu_idx, uintptr_t ra);
int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
                       int mmu_idx, uintptr_t ra);
uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
                               int mmu_idx, uintptr_t ra);
int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
                          int mmu_idx, uintptr_t ra);
uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
                              int mmu_idx, uintptr_t ra);
uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
                              int mmu_idx, uintptr_t ra);
uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
                               int mmu_idx, uintptr_t ra);
int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
                          int mmu_idx, uintptr_t ra);
uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
                              int mmu_idx, uintptr_t ra);
uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
                              int mmu_idx, uintptr_t ra);

void cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
                       int mmu_idx, uintptr_t ra);
void cpu_stw_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
                          int mmu_idx, uintptr_t ra);
void cpu_stl_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
                          int mmu_idx, uintptr_t ra);
void cpu_stq_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint64_t val,
                          int mmu_idx, uintptr_t ra);
void cpu_stw_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
                          int mmu_idx, uintptr_t ra);
void cpu_stl_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
                          int mmu_idx, uintptr_t ra);
void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint64_t val,
                          int mmu_idx, uintptr_t ra);

uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr ptr, MemOpIdx oi, uintptr_t ra);
uint16_t cpu_ldw_mmu(CPUArchState *env, abi_ptr ptr, MemOpIdx oi, uintptr_t ra);
uint32_t cpu_ldl_mmu(CPUArchState *env, abi_ptr ptr, MemOpIdx oi, uintptr_t ra);
uint64_t cpu_ldq_mmu(CPUArchState *env, abi_ptr ptr, MemOpIdx oi, uintptr_t ra);
Int128 cpu_ld16_mmu(CPUArchState *env, abi_ptr addr, MemOpIdx oi, uintptr_t ra);

void cpu_stb_mmu(CPUArchState *env, abi_ptr ptr, uint8_t val,
                 MemOpIdx oi, uintptr_t ra);
void cpu_stw_mmu(CPUArchState *env, abi_ptr ptr, uint16_t val,
                 MemOpIdx oi, uintptr_t ra);
void cpu_stl_mmu(CPUArchState *env, abi_ptr ptr, uint32_t val,
                 MemOpIdx oi, uintptr_t ra);
void cpu_stq_mmu(CPUArchState *env, abi_ptr ptr, uint64_t val,
                 MemOpIdx oi, uintptr_t ra);
void cpu_st16_mmu(CPUArchState *env, abi_ptr addr, Int128 val,
                  MemOpIdx oi, uintptr_t ra);
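
/*
 * A minimal sketch of building the MemOpIdx for the "_mmu" accessors,
 * assuming make_memop_idx() from "exec/memopidx.h" and the MO_TEUQ and
 * MO_ALIGN MemOp flags: a target-endian 64-bit load with alignment
 * enforced could be issued as:
 *
 *     MemOpIdx oi = make_memop_idx(MO_TEUQ | MO_ALIGN, mmu_idx);
 *     uint64_t v = cpu_ldq_mmu(env, ptr, oi, GETPC());
 */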

uint32_t cpu_atomic_cmpxchgb_mmu(CPUArchState *env, abi_ptr addr,
                                 uint32_t cmpv, uint32_t newv,
                                 MemOpIdx oi, uintptr_t retaddr);
uint32_t cpu_atomic_cmpxchgw_le_mmu(CPUArchState *env, abi_ptr addr,
                                    uint32_t cmpv, uint32_t newv,
                                    MemOpIdx oi, uintptr_t retaddr);
uint32_t cpu_atomic_cmpxchgl_le_mmu(CPUArchState *env, abi_ptr addr,
                                    uint32_t cmpv, uint32_t newv,
                                    MemOpIdx oi, uintptr_t retaddr);
uint64_t cpu_atomic_cmpxchgq_le_mmu(CPUArchState *env, abi_ptr addr,
                                    uint64_t cmpv, uint64_t newv,
                                    MemOpIdx oi, uintptr_t retaddr);
uint32_t cpu_atomic_cmpxchgw_be_mmu(CPUArchState *env, abi_ptr addr,
                                    uint32_t cmpv, uint32_t newv,
                                    MemOpIdx oi, uintptr_t retaddr);
uint32_t cpu_atomic_cmpxchgl_be_mmu(CPUArchState *env, abi_ptr addr,
                                    uint32_t cmpv, uint32_t newv,
                                    MemOpIdx oi, uintptr_t retaddr);
uint64_t cpu_atomic_cmpxchgq_be_mmu(CPUArchState *env, abi_ptr addr,
                                    uint64_t cmpv, uint64_t newv,
                                    MemOpIdx oi, uintptr_t retaddr);
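
/*
 * A hedged usage sketch (hypothetical target helper, not part of this
 * header): the cmpxchg helpers return the value previously in memory,
 * so a guest compare-and-swap detects success by comparing the result
 * against cmpv:
 *
 *     uint32_t old = cpu_atomic_cmpxchgl_le_mmu(env, addr, cmpv, newv,
 *                                               oi, GETPC());
 *     bool success = (old == cmpv);
 */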

#define GEN_ATOMIC_HELPER(NAME, TYPE, SUFFIX)           \
    TYPE cpu_atomic_ ## NAME ## SUFFIX ## _mmu          \
    (CPUArchState *env, abi_ptr addr, TYPE val,         \
     MemOpIdx oi, uintptr_t retaddr);

#ifdef CONFIG_ATOMIC64
#define GEN_ATOMIC_HELPER_ALL(NAME)          \
    GEN_ATOMIC_HELPER(NAME, uint32_t, b)     \
    GEN_ATOMIC_HELPER(NAME, uint32_t, w_le)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, w_be)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, l_le)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, l_be)  \
    GEN_ATOMIC_HELPER(NAME, uint64_t, q_le)  \
    GEN_ATOMIC_HELPER(NAME, uint64_t, q_be)
#else
#define GEN_ATOMIC_HELPER_ALL(NAME)          \
    GEN_ATOMIC_HELPER(NAME, uint32_t, b)     \
    GEN_ATOMIC_HELPER(NAME, uint32_t, w_le)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, w_be)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, l_le)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, l_be)
#endif

GEN_ATOMIC_HELPER_ALL(fetch_add)
GEN_ATOMIC_HELPER_ALL(fetch_sub)
GEN_ATOMIC_HELPER_ALL(fetch_and)
GEN_ATOMIC_HELPER_ALL(fetch_or)
GEN_ATOMIC_HELPER_ALL(fetch_xor)
GEN_ATOMIC_HELPER_ALL(fetch_smin)
GEN_ATOMIC_HELPER_ALL(fetch_umin)
GEN_ATOMIC_HELPER_ALL(fetch_smax)
GEN_ATOMIC_HELPER_ALL(fetch_umax)

GEN_ATOMIC_HELPER_ALL(add_fetch)
GEN_ATOMIC_HELPER_ALL(sub_fetch)
GEN_ATOMIC_HELPER_ALL(and_fetch)
GEN_ATOMIC_HELPER_ALL(or_fetch)
GEN_ATOMIC_HELPER_ALL(xor_fetch)
GEN_ATOMIC_HELPER_ALL(smin_fetch)
GEN_ATOMIC_HELPER_ALL(umin_fetch)
GEN_ATOMIC_HELPER_ALL(smax_fetch)
GEN_ATOMIC_HELPER_ALL(umax_fetch)

GEN_ATOMIC_HELPER_ALL(xchg)

#undef GEN_ATOMIC_HELPER_ALL
#undef GEN_ATOMIC_HELPER
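
/*
 * For reference, GEN_ATOMIC_HELPER_ALL(fetch_add) above declares, among
 * others:
 *
 *     uint32_t cpu_atomic_fetch_addl_le_mmu(CPUArchState *env,
 *                                           abi_ptr addr, uint32_t val,
 *                                           MemOpIdx oi, uintptr_t retaddr);
 *
 * i.e. an atomic 32-bit little-endian add returning the pre-update
 * value, while the "add_fetch" family returns the post-update value.
 */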

Int128 cpu_atomic_cmpxchgo_le_mmu(CPUArchState *env, abi_ptr addr,
                                  Int128 cmpv, Int128 newv,
                                  MemOpIdx oi, uintptr_t retaddr);
Int128 cpu_atomic_cmpxchgo_be_mmu(CPUArchState *env, abi_ptr addr,
                                  Int128 cmpv, Int128 newv,
                                  MemOpIdx oi, uintptr_t retaddr);

#if TARGET_BIG_ENDIAN
# define cpu_lduw_data        cpu_lduw_be_data
# define cpu_ldsw_data        cpu_ldsw_be_data
# define cpu_ldl_data         cpu_ldl_be_data
# define cpu_ldq_data         cpu_ldq_be_data
# define cpu_lduw_data_ra     cpu_lduw_be_data_ra
# define cpu_ldsw_data_ra     cpu_ldsw_be_data_ra
# define cpu_ldl_data_ra      cpu_ldl_be_data_ra
# define cpu_ldq_data_ra      cpu_ldq_be_data_ra
# define cpu_lduw_mmuidx_ra   cpu_lduw_be_mmuidx_ra
# define cpu_ldsw_mmuidx_ra   cpu_ldsw_be_mmuidx_ra
# define cpu_ldl_mmuidx_ra    cpu_ldl_be_mmuidx_ra
# define cpu_ldq_mmuidx_ra    cpu_ldq_be_mmuidx_ra
# define cpu_stw_data         cpu_stw_be_data
# define cpu_stl_data         cpu_stl_be_data
# define cpu_stq_data         cpu_stq_be_data
# define cpu_stw_data_ra      cpu_stw_be_data_ra
# define cpu_stl_data_ra      cpu_stl_be_data_ra
# define cpu_stq_data_ra      cpu_stq_be_data_ra
# define cpu_stw_mmuidx_ra    cpu_stw_be_mmuidx_ra
# define cpu_stl_mmuidx_ra    cpu_stl_be_mmuidx_ra
# define cpu_stq_mmuidx_ra    cpu_stq_be_mmuidx_ra
#else
# define cpu_lduw_data        cpu_lduw_le_data
# define cpu_ldsw_data        cpu_ldsw_le_data
# define cpu_ldl_data         cpu_ldl_le_data
# define cpu_ldq_data         cpu_ldq_le_data
# define cpu_lduw_data_ra     cpu_lduw_le_data_ra
# define cpu_ldsw_data_ra     cpu_ldsw_le_data_ra
# define cpu_ldl_data_ra      cpu_ldl_le_data_ra
# define cpu_ldq_data_ra      cpu_ldq_le_data_ra
# define cpu_lduw_mmuidx_ra   cpu_lduw_le_mmuidx_ra
# define cpu_ldsw_mmuidx_ra   cpu_ldsw_le_mmuidx_ra
# define cpu_ldl_mmuidx_ra    cpu_ldl_le_mmuidx_ra
# define cpu_ldq_mmuidx_ra    cpu_ldq_le_mmuidx_ra
# define cpu_stw_data         cpu_stw_le_data
# define cpu_stl_data         cpu_stl_le_data
# define cpu_stq_data         cpu_stq_le_data
# define cpu_stw_data_ra      cpu_stw_le_data_ra
# define cpu_stl_data_ra      cpu_stl_le_data_ra
# define cpu_stq_data_ra      cpu_stq_le_data_ra
# define cpu_stw_mmuidx_ra    cpu_stw_le_mmuidx_ra
# define cpu_stl_mmuidx_ra    cpu_stl_le_mmuidx_ra
# define cpu_stq_mmuidx_ra    cpu_stq_le_mmuidx_ra
#endif

uint8_t cpu_ldb_code_mmu(CPUArchState *env, abi_ptr addr,
                         MemOpIdx oi, uintptr_t ra);
uint16_t cpu_ldw_code_mmu(CPUArchState *env, abi_ptr addr,
                          MemOpIdx oi, uintptr_t ra);
uint32_t cpu_ldl_code_mmu(CPUArchState *env, abi_ptr addr,
                          MemOpIdx oi, uintptr_t ra);
uint64_t cpu_ldq_code_mmu(CPUArchState *env, abi_ptr addr,
                          MemOpIdx oi, uintptr_t ra);

uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr);
uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr);
uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr);
uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr);
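
/*
 * A minimal sketch of the "code" accessors in use (hypothetical
 * translator code, not part of this header): fetching a fixed-width
 * 32-bit instruction at translation time typically looks like:
 *
 *     uint32_t insn = cpu_ldl_code(env, pc);
 *
 * These accessors use the MMU index appropriate for instruction
 * fetch rather than the one for data access.
 */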

/**
 * tlb_vaddr_to_host:
 * @env: CPUArchState
 * @addr: guest virtual address to look up
 * @access_type: MMU_DATA_LOAD (0) for read, MMU_DATA_STORE (1) for
 *               write, MMU_INST_FETCH (2) for execute
 * @mmu_idx: MMU index to use for lookup
 *
 * Look up the specified guest virtual address in the TCG softmmu TLB.
 * If we can translate to a host virtual address suitable for direct
 * RAM access, without causing a guest exception, then return it.
 * Otherwise (TLB entry is for an I/O access, guest software
 * TLB fill required, etc) return NULL.
 */
#ifdef CONFIG_USER_ONLY
static inline void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
                                      MMUAccessType access_type, int mmu_idx)
{
    return g2h(env_cpu(env), addr);
}
#else
void *tlb_vaddr_to_host(CPUArchState *env, vaddr addr,
                        MMUAccessType access_type, int mmu_idx);
#endif
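
/*
 * A hedged sketch of the intended fast-path pattern (hypothetical
 * helper, not part of this header): probe for a direct host pointer,
 * and fall back to the byte accessors when the probe fails:
 *
 *     void *p = tlb_vaddr_to_host(env, addr, MMU_DATA_STORE, mmu_idx);
 *     if (p) {
 *         memset(p, 0, len);    // direct host RAM access
 *     } else {
 *         for (int i = 0; i < len; i++) {    // slow path, may fault
 *             cpu_stb_mmuidx_ra(env, addr + i, 0, mmu_idx, GETPC());
 *         }
 *     }
 */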

/*
 * For user-only, helpers that use guest to host address translation
 * must protect the actual host memory access by recording 'retaddr'
 * for the signal handler.  This is required for a race condition in
 * which another thread unmaps the page between a probe and the
 * actual access.
 */
#ifdef CONFIG_USER_ONLY
extern __thread uintptr_t helper_retaddr;

static inline void set_helper_retaddr(uintptr_t ra)
{
    helper_retaddr = ra;
    /*
     * Ensure that this write is visible to the SIGSEGV handler that
     * may be invoked due to a subsequent invalid memory operation.
     */
    signal_barrier();
}

static inline void clear_helper_retaddr(void)
{
    /*
     * Ensure that previous memory operations have succeeded before
     * removing the data visible to the signal handler.
     */
    signal_barrier();
    helper_retaddr = 0;
}
#else
#define set_helper_retaddr(ra)   do { } while (0)
#define clear_helper_retaddr()   do { } while (0)
#endif
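
/*
 * A minimal usage sketch (hypothetical user-only helper, not part of
 * this header): bracket a direct host access with the retaddr so a
 * SIGSEGV raised by the access can be unwound to the guest insn:
 *
 *     set_helper_retaddr(GETPC());
 *     memcpy(hostbuf, g2h(env_cpu(env), addr), len);
 *     clear_helper_retaddr();
 */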

#endif /* CPU_LDST_H */