/*
 * Software MMU support (per-target)
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 *
 */

/*
 * Generate inline load/store functions for all MMU modes (typically
 * at least _user and _kernel) as well as _data versions, for all data
 * sizes.
 *
 * Used by target op helpers.
 *
 * The syntax for the accessors is:
 *
 * load:  cpu_ld{sign}{size}{end}_{mmusuffix}(env, ptr)
 *        cpu_ld{sign}{size}{end}_{mmusuffix}_ra(env, ptr, retaddr)
 *        cpu_ld{sign}{size}{end}_mmuidx_ra(env, ptr, mmu_idx, retaddr)
 *        cpu_ld{sign}{size}{end}_mmu(env, ptr, oi, retaddr)
 *
 * store: cpu_st{size}{end}_{mmusuffix}(env, ptr, val)
 *        cpu_st{size}{end}_{mmusuffix}_ra(env, ptr, val, retaddr)
 *        cpu_st{size}{end}_mmuidx_ra(env, ptr, val, mmu_idx, retaddr)
 *        cpu_st{size}{end}_mmu(env, ptr, val, oi, retaddr)
 *
 * sign is:
 * (empty): for 32 and 64 bit sizes
 *   u    : unsigned
 *   s    : signed
 *
 * size is:
 *   b: 8 bits
 *   w: 16 bits
 *   l: 32 bits
 *   q: 64 bits
 *
 * end is:
 * (empty): for target native endian, or for 8 bit access
 *     _be: for forced big endian
 *     _le: for forced little endian
 *
 * mmusuffix is one of the generic suffixes "data" or "code", or "mmuidx".
 * The "mmuidx" suffix carries an extra mmu_idx argument that specifies
 * the index to use; the "data" and "code" suffixes take the index from
 * cpu_mmu_index().
 *
 * The "mmu" suffix carries the full MemOpIdx, with both mmu_idx and the
 * MemOp including alignment requirements.  The alignment will be enforced.
 */
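
/*
 * For example, some concrete instances of the naming scheme above
 * (all declared later in this header):
 *
 *   cpu_ldub_data(env, ptr)
 *       load 8-bit unsigned, mmu_idx taken from cpu_mmu_index()
 *   cpu_ldsw_be_data_ra(env, ptr, ra)
 *       load 16-bit signed, forced big endian, with unwind return address
 *   cpu_ldl_le_mmuidx_ra(env, ptr, mmu_idx, ra)
 *       load 32-bit, forced little endian, explicit mmu_idx
 *   cpu_stq_mmu(env, ptr, val, oi, ra)
 *       store 64-bit, with endianness, mmu_idx and alignment all in 'oi'
 */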

#ifndef CPU_LDST_H
#define CPU_LDST_H

#ifndef CONFIG_TCG
#error Can only include this header with TCG
#endif

#include "exec/memopidx.h"
#include "exec/abi_ptr.h"
#include "exec/mmu-access-type.h"
#include "qemu/int128.h"

#if defined(CONFIG_USER_ONLY)

#include "user/guest-base.h"

#ifndef TARGET_TAGGED_ADDRESSES
static inline abi_ptr cpu_untagged_addr(CPUState *cs, abi_ptr x)
{
    return x;
}
#endif

/* All direct uses of g2h and h2g need to go away for usermode softmmu. */
static inline void *g2h_untagged(abi_ptr x)
{
    return (void *)((uintptr_t)(x) + guest_base);
}

static inline void *g2h(CPUState *cs, abi_ptr x)
{
    return g2h_untagged(cpu_untagged_addr(cs, x));
}

static inline bool guest_addr_valid_untagged(abi_ulong x)
{
    return x <= GUEST_ADDR_MAX;
}

static inline bool guest_range_valid_untagged(abi_ulong start, abi_ulong len)
{
    return len - 1 <= GUEST_ADDR_MAX && start <= GUEST_ADDR_MAX - len + 1;
}

#define h2g_valid(x) \
    (HOST_LONG_BITS <= TARGET_VIRT_ADDR_SPACE_BITS || \
     (uintptr_t)(x) - guest_base <= GUEST_ADDR_MAX)

#define h2g_nocheck(x) ({ \
    uintptr_t __ret = (uintptr_t)(x) - guest_base; \
    (abi_ptr)__ret; \
})

#define h2g(x) ({ \
    /* Check if given address fits target address space */ \
    assert(h2g_valid(x)); \
    h2g_nocheck(x); \
})

#endif /* CONFIG_USER_ONLY */

uint32_t cpu_ldub_data(CPUArchState *env, abi_ptr ptr);
int cpu_ldsb_data(CPUArchState *env, abi_ptr ptr);
uint32_t cpu_lduw_be_data(CPUArchState *env, abi_ptr ptr);
int cpu_ldsw_be_data(CPUArchState *env, abi_ptr ptr);
uint32_t cpu_ldl_be_data(CPUArchState *env, abi_ptr ptr);
uint64_t cpu_ldq_be_data(CPUArchState *env, abi_ptr ptr);
uint32_t cpu_lduw_le_data(CPUArchState *env, abi_ptr ptr);
int cpu_ldsw_le_data(CPUArchState *env, abi_ptr ptr);
uint32_t cpu_ldl_le_data(CPUArchState *env, abi_ptr ptr);
uint64_t cpu_ldq_le_data(CPUArchState *env, abi_ptr ptr);

uint32_t cpu_ldub_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
int cpu_ldsb_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
uint32_t cpu_lduw_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
int cpu_ldsw_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
uint32_t cpu_ldl_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
uint64_t cpu_ldq_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
uint32_t cpu_lduw_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
int cpu_ldsw_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
uint32_t cpu_ldl_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
uint64_t cpu_ldq_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);

void cpu_stb_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
void cpu_stw_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
void cpu_stl_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
void cpu_stq_be_data(CPUArchState *env, abi_ptr ptr, uint64_t val);
void cpu_stw_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
void cpu_stl_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
void cpu_stq_le_data(CPUArchState *env, abi_ptr ptr, uint64_t val);

void cpu_stb_data_ra(CPUArchState *env, abi_ptr ptr,
                     uint32_t val, uintptr_t ra);
void cpu_stw_be_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint32_t val, uintptr_t ra);
void cpu_stl_be_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint32_t val, uintptr_t ra);
void cpu_stq_be_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint64_t val, uintptr_t ra);
void cpu_stw_le_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint32_t val, uintptr_t ra);
void cpu_stl_le_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint32_t val, uintptr_t ra);
void cpu_stq_le_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint64_t val, uintptr_t ra);
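
/*
 * Usage sketch (illustration only; HELPER(demo_bswap32) is hypothetical,
 * not part of this header): a target op helper that byte-swaps a 32-bit
 * word in guest memory with the "_data_ra" accessors.  Passing GETPC()
 * as the return address lets a fault taken inside the access unwind to
 * the TB that called the helper:
 *
 *     uint32_t HELPER(demo_bswap32)(CPUArchState *env, target_ulong addr)
 *     {
 *         uintptr_t ra = GETPC();
 *         uint32_t val = bswap32(cpu_ldl_data_ra(env, addr, ra));
 *
 *         cpu_stl_data_ra(env, addr, val, ra);
 *         return val;
 *     }
 */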

uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
                            int mmu_idx, uintptr_t ra);
int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
                       int mmu_idx, uintptr_t ra);
uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
                               int mmu_idx, uintptr_t ra);
int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
                          int mmu_idx, uintptr_t ra);
uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
                              int mmu_idx, uintptr_t ra);
uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
                              int mmu_idx, uintptr_t ra);
uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
                               int mmu_idx, uintptr_t ra);
int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
                          int mmu_idx, uintptr_t ra);
uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
                              int mmu_idx, uintptr_t ra);
uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr,
                              int mmu_idx, uintptr_t ra);

void cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
                       int mmu_idx, uintptr_t ra);
void cpu_stw_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
                          int mmu_idx, uintptr_t ra);
void cpu_stl_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
                          int mmu_idx, uintptr_t ra);
void cpu_stq_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint64_t val,
                          int mmu_idx, uintptr_t ra);
void cpu_stw_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
                          int mmu_idx, uintptr_t ra);
void cpu_stl_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val,
                          int mmu_idx, uintptr_t ra);
void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint64_t val,
                          int mmu_idx, uintptr_t ra);

uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr ptr, MemOpIdx oi, uintptr_t ra);
uint16_t cpu_ldw_mmu(CPUArchState *env, abi_ptr ptr, MemOpIdx oi, uintptr_t ra);
uint32_t cpu_ldl_mmu(CPUArchState *env, abi_ptr ptr, MemOpIdx oi, uintptr_t ra);
uint64_t cpu_ldq_mmu(CPUArchState *env, abi_ptr ptr, MemOpIdx oi, uintptr_t ra);
Int128 cpu_ld16_mmu(CPUArchState *env, abi_ptr addr, MemOpIdx oi, uintptr_t ra);

void cpu_stb_mmu(CPUArchState *env, abi_ptr ptr, uint8_t val,
                 MemOpIdx oi, uintptr_t ra);
void cpu_stw_mmu(CPUArchState *env, abi_ptr ptr, uint16_t val,
                 MemOpIdx oi, uintptr_t ra);
void cpu_stl_mmu(CPUArchState *env, abi_ptr ptr, uint32_t val,
                 MemOpIdx oi, uintptr_t ra);
void cpu_stq_mmu(CPUArchState *env, abi_ptr ptr, uint64_t val,
                 MemOpIdx oi, uintptr_t ra);
void cpu_st16_mmu(CPUArchState *env, abi_ptr addr, Int128 val,
                  MemOpIdx oi, uintptr_t ra);

uint32_t cpu_atomic_cmpxchgb_mmu(CPUArchState *env, abi_ptr addr,
                                 uint32_t cmpv, uint32_t newv,
                                 MemOpIdx oi, uintptr_t retaddr);
uint32_t cpu_atomic_cmpxchgw_le_mmu(CPUArchState *env, abi_ptr addr,
                                    uint32_t cmpv, uint32_t newv,
                                    MemOpIdx oi, uintptr_t retaddr);
uint32_t cpu_atomic_cmpxchgl_le_mmu(CPUArchState *env, abi_ptr addr,
                                    uint32_t cmpv, uint32_t newv,
                                    MemOpIdx oi, uintptr_t retaddr);
uint64_t cpu_atomic_cmpxchgq_le_mmu(CPUArchState *env, abi_ptr addr,
                                    uint64_t cmpv, uint64_t newv,
                                    MemOpIdx oi, uintptr_t retaddr);
uint32_t cpu_atomic_cmpxchgw_be_mmu(CPUArchState *env, abi_ptr addr,
                                    uint32_t cmpv, uint32_t newv,
                                    MemOpIdx oi, uintptr_t retaddr);
uint32_t cpu_atomic_cmpxchgl_be_mmu(CPUArchState *env, abi_ptr addr,
                                    uint32_t cmpv, uint32_t newv,
                                    MemOpIdx oi, uintptr_t retaddr);
uint64_t cpu_atomic_cmpxchgq_be_mmu(CPUArchState *env, abi_ptr addr,
                                    uint64_t cmpv, uint64_t newv,
                                    MemOpIdx oi, uintptr_t retaddr);
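
/*
 * Usage sketch (illustration only): the "_mmu" and cpu_atomic_* helpers
 * take a MemOpIdx, built with make_memop_idx() from a MemOp (size,
 * endianness, alignment) and an mmu_idx; any alignment encoded in the
 * MemOp is enforced:
 *
 *     MemOpIdx oi = make_memop_idx(MO_TEUQ | MO_ALIGN, mmu_idx);
 *     uint64_t val = cpu_ldq_mmu(env, addr, oi, GETPC());
 *
 *     MemOpIdx oi32 = make_memop_idx(MO_LEUL | MO_ALIGN, mmu_idx);
 *     uint32_t old = cpu_atomic_cmpxchgl_le_mmu(env, addr, cmpv, newv,
 *                                               oi32, GETPC());
 */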

#define GEN_ATOMIC_HELPER(NAME, TYPE, SUFFIX)    \
    TYPE cpu_atomic_ ## NAME ## SUFFIX ## _mmu   \
    (CPUArchState *env, abi_ptr addr, TYPE val,  \
     MemOpIdx oi, uintptr_t retaddr);

#ifdef CONFIG_ATOMIC64
#define GEN_ATOMIC_HELPER_ALL(NAME)          \
    GEN_ATOMIC_HELPER(NAME, uint32_t, b)     \
    GEN_ATOMIC_HELPER(NAME, uint32_t, w_le)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, w_be)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, l_le)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, l_be)  \
    GEN_ATOMIC_HELPER(NAME, uint64_t, q_le)  \
    GEN_ATOMIC_HELPER(NAME, uint64_t, q_be)
#else
#define GEN_ATOMIC_HELPER_ALL(NAME)          \
    GEN_ATOMIC_HELPER(NAME, uint32_t, b)     \
    GEN_ATOMIC_HELPER(NAME, uint32_t, w_le)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, w_be)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, l_le)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, l_be)
#endif

GEN_ATOMIC_HELPER_ALL(fetch_add)
GEN_ATOMIC_HELPER_ALL(fetch_sub)
GEN_ATOMIC_HELPER_ALL(fetch_and)
GEN_ATOMIC_HELPER_ALL(fetch_or)
GEN_ATOMIC_HELPER_ALL(fetch_xor)
GEN_ATOMIC_HELPER_ALL(fetch_smin)
GEN_ATOMIC_HELPER_ALL(fetch_umin)
GEN_ATOMIC_HELPER_ALL(fetch_smax)
GEN_ATOMIC_HELPER_ALL(fetch_umax)

GEN_ATOMIC_HELPER_ALL(add_fetch)
GEN_ATOMIC_HELPER_ALL(sub_fetch)
GEN_ATOMIC_HELPER_ALL(and_fetch)
GEN_ATOMIC_HELPER_ALL(or_fetch)
GEN_ATOMIC_HELPER_ALL(xor_fetch)
GEN_ATOMIC_HELPER_ALL(smin_fetch)
GEN_ATOMIC_HELPER_ALL(umin_fetch)
GEN_ATOMIC_HELPER_ALL(smax_fetch)
GEN_ATOMIC_HELPER_ALL(umax_fetch)

GEN_ATOMIC_HELPER_ALL(xchg)

#undef GEN_ATOMIC_HELPER_ALL
#undef GEN_ATOMIC_HELPER

Int128 cpu_atomic_cmpxchgo_le_mmu(CPUArchState *env, abi_ptr addr,
                                  Int128 cmpv, Int128 newv,
                                  MemOpIdx oi, uintptr_t retaddr);
Int128 cpu_atomic_cmpxchgo_be_mmu(CPUArchState *env, abi_ptr addr,
                                  Int128 cmpv, Int128 newv,
                                  MemOpIdx oi, uintptr_t retaddr);

#if TARGET_BIG_ENDIAN
# define cpu_lduw_data        cpu_lduw_be_data
# define cpu_ldsw_data        cpu_ldsw_be_data
# define cpu_ldl_data         cpu_ldl_be_data
# define cpu_ldq_data         cpu_ldq_be_data
# define cpu_lduw_data_ra     cpu_lduw_be_data_ra
# define cpu_ldsw_data_ra     cpu_ldsw_be_data_ra
# define cpu_ldl_data_ra      cpu_ldl_be_data_ra
# define cpu_ldq_data_ra      cpu_ldq_be_data_ra
# define cpu_lduw_mmuidx_ra   cpu_lduw_be_mmuidx_ra
# define cpu_ldsw_mmuidx_ra   cpu_ldsw_be_mmuidx_ra
# define cpu_ldl_mmuidx_ra    cpu_ldl_be_mmuidx_ra
# define cpu_ldq_mmuidx_ra    cpu_ldq_be_mmuidx_ra
# define cpu_stw_data         cpu_stw_be_data
# define cpu_stl_data         cpu_stl_be_data
# define cpu_stq_data         cpu_stq_be_data
# define cpu_stw_data_ra      cpu_stw_be_data_ra
# define cpu_stl_data_ra      cpu_stl_be_data_ra
# define cpu_stq_data_ra      cpu_stq_be_data_ra
# define cpu_stw_mmuidx_ra    cpu_stw_be_mmuidx_ra
# define cpu_stl_mmuidx_ra    cpu_stl_be_mmuidx_ra
# define cpu_stq_mmuidx_ra    cpu_stq_be_mmuidx_ra
#else
# define cpu_lduw_data        cpu_lduw_le_data
# define cpu_ldsw_data        cpu_ldsw_le_data
# define cpu_ldl_data         cpu_ldl_le_data
# define cpu_ldq_data         cpu_ldq_le_data
# define cpu_lduw_data_ra     cpu_lduw_le_data_ra
# define cpu_ldsw_data_ra     cpu_ldsw_le_data_ra
# define cpu_ldl_data_ra      cpu_ldl_le_data_ra
# define cpu_ldq_data_ra      cpu_ldq_le_data_ra
# define cpu_lduw_mmuidx_ra   cpu_lduw_le_mmuidx_ra
# define cpu_ldsw_mmuidx_ra   cpu_ldsw_le_mmuidx_ra
# define cpu_ldl_mmuidx_ra    cpu_ldl_le_mmuidx_ra
# define cpu_ldq_mmuidx_ra    cpu_ldq_le_mmuidx_ra
# define cpu_stw_data         cpu_stw_le_data
# define cpu_stl_data         cpu_stl_le_data
# define cpu_stq_data         cpu_stq_le_data
# define cpu_stw_data_ra      cpu_stw_le_data_ra
# define cpu_stl_data_ra      cpu_stl_le_data_ra
# define cpu_stq_data_ra      cpu_stq_le_data_ra
# define cpu_stw_mmuidx_ra    cpu_stw_le_mmuidx_ra
# define cpu_stl_mmuidx_ra    cpu_stl_le_mmuidx_ra
# define cpu_stq_mmuidx_ra    cpu_stq_le_mmuidx_ra
#endif

uint8_t cpu_ldb_code_mmu(CPUArchState *env, abi_ptr addr,
                         MemOpIdx oi, uintptr_t ra);
uint16_t cpu_ldw_code_mmu(CPUArchState *env, abi_ptr addr,
                          MemOpIdx oi, uintptr_t ra);
uint32_t cpu_ldl_code_mmu(CPUArchState *env, abi_ptr addr,
                          MemOpIdx oi, uintptr_t ra);
uint64_t cpu_ldq_code_mmu(CPUArchState *env, abi_ptr addr,
                          MemOpIdx oi, uintptr_t ra);

uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr);
uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr);
uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr);
uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr);

/**
 * tlb_vaddr_to_host:
 * @env: CPUArchState
 * @addr: guest virtual address to look up
 * @access_type: MMU_DATA_LOAD (0), MMU_DATA_STORE (1) or MMU_INST_FETCH (2)
 * @mmu_idx: MMU index to use for lookup
 *
 * Look up the specified guest virtual address in the TCG softmmu TLB.
 * If it can be translated to a host virtual address suitable for direct
 * RAM access, without causing a guest exception, return that address.
 * Otherwise (TLB entry is for an I/O access, guest software
 * TLB fill required, etc) return NULL.
 */
#ifdef CONFIG_USER_ONLY
static inline void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
                                      MMUAccessType access_type, int mmu_idx)
{
    return g2h(env_cpu(env), addr);
}
#else
void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
                        MMUAccessType access_type, int mmu_idx);
#endif

/*
 * For user-only, helpers that use guest to host address translation
 * must protect the actual host memory access by recording 'retaddr'
 * for the signal handler.  This is required for a race condition in
 * which another thread unmaps the page between a probe and the
 * actual access.
 */
#ifdef CONFIG_USER_ONLY
extern __thread uintptr_t helper_retaddr;

static inline void set_helper_retaddr(uintptr_t ra)
{
    helper_retaddr = ra;

    /*
     * Ensure that this write is visible to the SIGSEGV handler that
     * may be invoked due to a subsequent invalid memory operation.
     */
    signal_barrier();
}

static inline void clear_helper_retaddr(void)
{
    /*
     * Ensure that previous memory operations have succeeded before
     * removing the data visible to the signal handler.
     */
    signal_barrier();
    helper_retaddr = 0;
}
#else
#define set_helper_retaddr(ra)   do { } while (0)
#define clear_helper_retaddr()   do { } while (0)
#endif
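
/*
 * Usage sketch (illustration only, not a declaration): a typical fast
 * path probes for a direct host pointer and brackets the raw access
 * (here ldl_p(), the target-endian host-pointer load) with
 * set/clear_helper_retaddr(), so that a SIGSEGV raised by a concurrent
 * munmap in a user-only build unwinds via 'ra'; in system mode the
 * set/clear calls compile to nothing:
 *
 *     void *host = tlb_vaddr_to_host(env, addr, MMU_DATA_LOAD, mmu_idx);
 *
 *     if (host) {
 *         set_helper_retaddr(ra);
 *         val = ldl_p(host);
 *         clear_helper_retaddr();
 *     } else {
 *         val = cpu_ldl_mmuidx_ra(env, addr, mmu_idx, ra);
 *     }
 */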

#endif /* CPU_LDST_H */