/*
 * Software MMU support
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 *
 */

/*
 * Generate inline load/store functions for all MMU modes (typically
 * at least _user and _kernel) as well as _data versions, for all data
 * sizes.
 *
 * Used by target op helpers.
 *
 * The syntax for the accessors is:
 *
 * load:  cpu_ld{sign}{size}{end}_{mmusuffix}(env, ptr)
 *        cpu_ld{sign}{size}{end}_{mmusuffix}_ra(env, ptr, retaddr)
 *        cpu_ld{sign}{size}{end}_mmuidx_ra(env, ptr, mmu_idx, retaddr)
 *
 * store: cpu_st{size}{end}_{mmusuffix}(env, ptr, val)
 *        cpu_st{size}{end}_{mmusuffix}_ra(env, ptr, val, retaddr)
 *        cpu_st{size}{end}_mmuidx_ra(env, ptr, val, mmu_idx, retaddr)
 *
 * sign is:
 * (empty): for 32 and 64 bit sizes
 *   u    : unsigned
 *   s    : signed
 *
 * size is:
 *   b: 8 bits
 *   w: 16 bits
 *   l: 32 bits
 *   q: 64 bits
 *
 * end is:
 * (empty): for target native endian, or for 8 bit access
 *     _be: for forced big endian
 *     _le: for forced little endian
 *
 * mmusuffix is one of the generic suffixes "data" or "code", or "mmuidx".
 * The "mmuidx" suffix carries an extra mmu_idx argument that specifies
 * the index to use; the "data" and "code" suffixes take the index from
 * cpu_mmu_index().
 */
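/*
 * As an illustrative reading of the scheme above (not itself part of
 * the API): cpu_ldsw_be_data_ra decomposes as ld + s (signed) +
 * w (16 bits) + _be (big endian) + _data + _ra, so a target helper
 * might write
 *
 *     int v = cpu_ldsw_be_data_ra(env, ptr, GETPC());
 *
 * to perform a sign-extended 16-bit big-endian load with the mmu
 * index taken from cpu_mmu_index(), attributing any fault to the
 * helper's return address.
 */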
#ifndef CPU_LDST_H
#define CPU_LDST_H

#if defined(CONFIG_USER_ONLY)
/* sparc32plus has a 64-bit long but only a 32-bit address space;
 * this can produce bad results from g2h() and h2g().
 */
#if TARGET_VIRT_ADDR_SPACE_BITS <= 32
typedef uint32_t abi_ptr;
#define TARGET_ABI_FMT_ptr "%x"
#else
typedef uint64_t abi_ptr;
#define TARGET_ABI_FMT_ptr "%"PRIx64
#endif

#ifndef TARGET_TAGGED_ADDRESSES
static inline abi_ptr cpu_untagged_addr(CPUState *cs, abi_ptr x)
{
    return x;
}
#endif

/* All direct uses of g2h and h2g need to go away for usermode softmmu.  */
static inline void *g2h_untagged(abi_ptr x)
{
    return (void *)((uintptr_t)(x) + guest_base);
}

static inline void *g2h(CPUState *cs, abi_ptr x)
{
    return g2h_untagged(cpu_untagged_addr(cs, x));
}

static inline bool guest_addr_valid_untagged(abi_ulong x)
{
    return x <= GUEST_ADDR_MAX;
}

static inline bool guest_range_valid_untagged(abi_ulong start, abi_ulong len)
{
    return len - 1 <= GUEST_ADDR_MAX && start <= GUEST_ADDR_MAX - len + 1;
}

#define h2g_valid(x) \
    (HOST_LONG_BITS <= TARGET_VIRT_ADDR_SPACE_BITS || \
     (uintptr_t)(x) - guest_base <= GUEST_ADDR_MAX)

#define h2g_nocheck(x) ({ \
    uintptr_t __ret = (uintptr_t)(x) - guest_base; \
    (abi_ptr)__ret; \
})

#define h2g(x) ({ \
    /* Check if given address fits target address space */ \
    assert(h2g_valid(x)); \
    h2g_nocheck(x); \
})
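/*
 * Illustrative sketch (not part of the API): in a user-only build,
 * g2h() maps a guest address to the host pointer that backs it, and
 * h2g() maps a host pointer back, asserting that the result fits the
 * target address space. Assuming "cs" is a CPUState * and "ga" is a
 * guest address:
 *
 *     void *host = g2h(cs, ga);
 *     abi_ptr back = h2g(host);
 *
 * Here "back" equals cpu_untagged_addr(cs, ga), since g2h() strips
 * any address tag before adding guest_base.
 */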
#else
typedef target_ulong abi_ptr;
#define TARGET_ABI_FMT_ptr TARGET_ABI_FMT_lx
#endif

uint32_t cpu_ldub_data(CPUArchState *env, abi_ptr ptr);
int cpu_ldsb_data(CPUArchState *env, abi_ptr ptr);

uint32_t cpu_lduw_be_data(CPUArchState *env, abi_ptr ptr);
int cpu_ldsw_be_data(CPUArchState *env, abi_ptr ptr);
uint32_t cpu_ldl_be_data(CPUArchState *env, abi_ptr ptr);
uint64_t cpu_ldq_be_data(CPUArchState *env, abi_ptr ptr);

uint32_t cpu_lduw_le_data(CPUArchState *env, abi_ptr ptr);
int cpu_ldsw_le_data(CPUArchState *env, abi_ptr ptr);
uint32_t cpu_ldl_le_data(CPUArchState *env, abi_ptr ptr);
uint64_t cpu_ldq_le_data(CPUArchState *env, abi_ptr ptr);

uint32_t cpu_ldub_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
int cpu_ldsb_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);

uint32_t cpu_lduw_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
int cpu_ldsw_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
uint32_t cpu_ldl_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
uint64_t cpu_ldq_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);

uint32_t cpu_lduw_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
int cpu_ldsw_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
uint32_t cpu_ldl_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
uint64_t cpu_ldq_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);

void cpu_stb_data(CPUArchState *env, abi_ptr ptr, uint32_t val);

void cpu_stw_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
void cpu_stl_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
void cpu_stq_be_data(CPUArchState *env, abi_ptr ptr, uint64_t val);

void cpu_stw_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
void cpu_stl_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
void cpu_stq_le_data(CPUArchState *env, abi_ptr ptr, uint64_t val);

void cpu_stb_data_ra(CPUArchState *env, abi_ptr ptr,
                     uint32_t val, uintptr_t ra);

void cpu_stw_be_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint32_t val, uintptr_t ra);
void cpu_stl_be_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint32_t val, uintptr_t ra);
void cpu_stq_be_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint64_t val, uintptr_t ra);

void cpu_stw_le_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint32_t val, uintptr_t ra);
void cpu_stl_le_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint32_t val, uintptr_t ra);
void cpu_stq_le_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint64_t val, uintptr_t ra);

#if defined(CONFIG_USER_ONLY)

extern __thread uintptr_t helper_retaddr;

static inline void set_helper_retaddr(uintptr_t ra)
{
    helper_retaddr = ra;
    /*
     * Ensure that this write is visible to the SIGSEGV handler that
     * may be invoked due to a subsequent invalid memory operation.
     */
    signal_barrier();
}

static inline void clear_helper_retaddr(void)
{
    /*
     * Ensure that previous memory operations have succeeded before
     * removing the data visible to the signal handler.
     */
    signal_barrier();
    helper_retaddr = 0;
}
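/*
 * Illustrative sketch (assuming "env" and "addr" are in scope inside a
 * top-level user-only helper): the set/clear pair brackets a direct
 * host access so the SIGSEGV handler can attribute a fault to the
 * guest code that caused it:
 *
 *     set_helper_retaddr(GETPC());
 *     ret = ldub_p(g2h(env_cpu(env), addr));
 *     clear_helper_retaddr();
 */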
/*
 * Provide the same *_mmuidx_ra interface as for softmmu.
 * The mmu_idx argument is ignored.
 */

static inline uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                                          int mmu_idx, uintptr_t ra)
{
    return cpu_ldub_data_ra(env, addr, ra);
}

static inline int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                                     int mmu_idx, uintptr_t ra)
{
    return cpu_ldsb_data_ra(env, addr, ra);
}

static inline uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                                             int mmu_idx, uintptr_t ra)
{
    return cpu_lduw_be_data_ra(env, addr, ra);
}

static inline int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                                        int mmu_idx, uintptr_t ra)
{
    return cpu_ldsw_be_data_ra(env, addr, ra);
}

static inline uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                                            int mmu_idx, uintptr_t ra)
{
    return cpu_ldl_be_data_ra(env, addr, ra);
}

static inline uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                                            int mmu_idx, uintptr_t ra)
{
    return cpu_ldq_be_data_ra(env, addr, ra);
}

static inline uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                                             int mmu_idx, uintptr_t ra)
{
    return cpu_lduw_le_data_ra(env, addr, ra);
}

static inline int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                                        int mmu_idx, uintptr_t ra)
{
    return cpu_ldsw_le_data_ra(env, addr, ra);
}

static inline uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                                            int mmu_idx, uintptr_t ra)
{
    return cpu_ldl_le_data_ra(env, addr, ra);
}

static inline uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                                            int mmu_idx, uintptr_t ra)
{
    return cpu_ldq_le_data_ra(env, addr, ra);
}

static inline void cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                                     uint32_t val, int mmu_idx, uintptr_t ra)
{
    cpu_stb_data_ra(env, addr, val, ra);
}

static inline void cpu_stw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                                        uint32_t val, int mmu_idx,
                                        uintptr_t ra)
{
    cpu_stw_be_data_ra(env, addr, val, ra);
}

static inline void cpu_stl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                                        uint32_t val, int mmu_idx,
                                        uintptr_t ra)
{
    cpu_stl_be_data_ra(env, addr, val, ra);
}

static inline void cpu_stq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                                        uint64_t val, int mmu_idx,
                                        uintptr_t ra)
{
    cpu_stq_be_data_ra(env, addr, val, ra);
}

static inline void cpu_stw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                                        uint32_t val, int mmu_idx,
                                        uintptr_t ra)
{
    cpu_stw_le_data_ra(env, addr, val, ra);
}

static inline void cpu_stl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                                        uint32_t val, int mmu_idx,
                                        uintptr_t ra)
{
    cpu_stl_le_data_ra(env, addr, val, ra);
}

static inline void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                                        uint64_t val, int mmu_idx,
                                        uintptr_t ra)
{
    cpu_stq_le_data_ra(env, addr, val, ra);
}

#else

/* Needed for TCG_OVERSIZED_GUEST */
#include "tcg/tcg.h"

static inline target_ulong tlb_addr_write(const CPUTLBEntry *entry)
{
#if TCG_OVERSIZED_GUEST
    return entry->addr_write;
#else
    return qatomic_read(&entry->addr_write);
#endif
}

/* Find the TLB index corresponding to the mmu_idx + address pair.  */
static inline uintptr_t tlb_index(CPUArchState *env, uintptr_t mmu_idx,
                                  target_ulong addr)
{
    uintptr_t size_mask = env_tlb(env)->f[mmu_idx].mask >> CPU_TLB_ENTRY_BITS;

    return (addr >> TARGET_PAGE_BITS) & size_mask;
}

/* Find the TLB entry corresponding to the mmu_idx + address pair.  */
static inline CPUTLBEntry *tlb_entry(CPUArchState *env, uintptr_t mmu_idx,
                                     target_ulong addr)
{
    return &env_tlb(env)->f[mmu_idx].table[tlb_index(env, mmu_idx, addr)];
}

uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                            int mmu_idx, uintptr_t ra);
int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                       int mmu_idx, uintptr_t ra);

uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                               int mmu_idx, uintptr_t ra);
int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                          int mmu_idx, uintptr_t ra);
uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                              int mmu_idx, uintptr_t ra);
uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                              int mmu_idx, uintptr_t ra);

uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                               int mmu_idx, uintptr_t ra);
int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                          int mmu_idx, uintptr_t ra);
uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                              int mmu_idx, uintptr_t ra);
uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                              int mmu_idx, uintptr_t ra);

void cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
                       int mmu_idx, uintptr_t retaddr);

void cpu_stw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
                          int mmu_idx, uintptr_t retaddr);
void cpu_stl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
                          int mmu_idx, uintptr_t retaddr);
void cpu_stq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
                          int mmu_idx, uintptr_t retaddr);

void cpu_stw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
                          int mmu_idx, uintptr_t retaddr);
void cpu_stl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
                          int mmu_idx, uintptr_t retaddr);
void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
                          int mmu_idx, uintptr_t retaddr);
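/*
 * Illustrative sketch (assuming "addr" and "mmu_idx" are in scope):
 * tlb_index() and tlb_entry() above compose to probe for a TLB hit on
 * a write before falling back to the slow path, along the lines of:
 *
 *     CPUTLBEntry *e = tlb_entry(env, mmu_idx, addr);
 *     if (tlb_hit(tlb_addr_write(e), addr)) {
 *         ... fast-path RAM access ...
 *     }
 */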
#endif /* defined(CONFIG_USER_ONLY) */

#ifdef TARGET_WORDS_BIGENDIAN
# define cpu_lduw_data        cpu_lduw_be_data
# define cpu_ldsw_data        cpu_ldsw_be_data
# define cpu_ldl_data         cpu_ldl_be_data
# define cpu_ldq_data         cpu_ldq_be_data
# define cpu_lduw_data_ra     cpu_lduw_be_data_ra
# define cpu_ldsw_data_ra     cpu_ldsw_be_data_ra
# define cpu_ldl_data_ra      cpu_ldl_be_data_ra
# define cpu_ldq_data_ra      cpu_ldq_be_data_ra
# define cpu_lduw_mmuidx_ra   cpu_lduw_be_mmuidx_ra
# define cpu_ldsw_mmuidx_ra   cpu_ldsw_be_mmuidx_ra
# define cpu_ldl_mmuidx_ra    cpu_ldl_be_mmuidx_ra
# define cpu_ldq_mmuidx_ra    cpu_ldq_be_mmuidx_ra
# define cpu_stw_data         cpu_stw_be_data
# define cpu_stl_data         cpu_stl_be_data
# define cpu_stq_data         cpu_stq_be_data
# define cpu_stw_data_ra      cpu_stw_be_data_ra
# define cpu_stl_data_ra      cpu_stl_be_data_ra
# define cpu_stq_data_ra      cpu_stq_be_data_ra
# define cpu_stw_mmuidx_ra    cpu_stw_be_mmuidx_ra
# define cpu_stl_mmuidx_ra    cpu_stl_be_mmuidx_ra
# define cpu_stq_mmuidx_ra    cpu_stq_be_mmuidx_ra
#else
# define cpu_lduw_data        cpu_lduw_le_data
# define cpu_ldsw_data        cpu_ldsw_le_data
# define cpu_ldl_data         cpu_ldl_le_data
# define cpu_ldq_data         cpu_ldq_le_data
# define cpu_lduw_data_ra     cpu_lduw_le_data_ra
# define cpu_ldsw_data_ra     cpu_ldsw_le_data_ra
# define cpu_ldl_data_ra      cpu_ldl_le_data_ra
# define cpu_ldq_data_ra      cpu_ldq_le_data_ra
# define cpu_lduw_mmuidx_ra   cpu_lduw_le_mmuidx_ra
# define cpu_ldsw_mmuidx_ra   cpu_ldsw_le_mmuidx_ra
# define cpu_ldl_mmuidx_ra    cpu_ldl_le_mmuidx_ra
# define cpu_ldq_mmuidx_ra    cpu_ldq_le_mmuidx_ra
# define cpu_stw_data         cpu_stw_le_data
# define cpu_stl_data         cpu_stl_le_data
# define cpu_stq_data         cpu_stq_le_data
# define cpu_stw_data_ra      cpu_stw_le_data_ra
# define cpu_stl_data_ra      cpu_stl_le_data_ra
# define cpu_stq_data_ra      cpu_stq_le_data_ra
# define cpu_stw_mmuidx_ra    cpu_stw_le_mmuidx_ra
# define cpu_stl_mmuidx_ra    cpu_stl_le_mmuidx_ra
# define cpu_stq_mmuidx_ra    cpu_stq_le_mmuidx_ra
#endif

uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr);
uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr);
uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr);
uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr);

static inline int cpu_ldsb_code(CPUArchState *env, abi_ptr addr)
{
    return (int8_t)cpu_ldub_code(env, addr);
}

static inline int cpu_ldsw_code(CPUArchState *env, abi_ptr addr)
{
    return (int16_t)cpu_lduw_code(env, addr);
}

/**
 * tlb_vaddr_to_host:
 * @env: CPUArchState
 * @addr: guest virtual address to look up
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @mmu_idx: MMU index to use for lookup
 *
 * Look up the specified guest virtual address in the TCG softmmu TLB.
 * If we can translate a host virtual address suitable for direct RAM
 * access, without causing a guest exception, then return it.
 * Otherwise (TLB entry is for an I/O access, guest software
 * TLB fill required, etc.) return NULL.
 */
#ifdef CONFIG_USER_ONLY
static inline void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
                                      MMUAccessType access_type, int mmu_idx)
{
    return g2h(env_cpu(env), addr);
}
#else
void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
                        MMUAccessType access_type, int mmu_idx);
#endif

#endif /* CPU_LDST_H */