/*
 * User emulator execution
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "hw/core/tcg-cpu-ops.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#include "qemu/bitops.h"
#include "exec/cpu_ldst.h"
#include "exec/translate-all.h"
#include "exec/helper-proto.h"
#include "qemu/atomic128.h"
#include "trace/trace-root.h"
#include "tcg/tcg-ldst.h"
#include "internal.h"

__thread uintptr_t helper_retaddr;

//#define DEBUG_SIGNAL

/*
 * Adjust the pc to pass to cpu_restore_state; return the memop type.
 */
MMUAccessType adjust_signal_pc(uintptr_t *pc, bool is_write)
{
    switch (helper_retaddr) {
    default:
        /*
         * Fault during host memory operation within a helper function.
         * The helper's host return address, saved here, gives us a
         * pointer into the generated code that will unwind to the
         * correct guest pc.
         */
        *pc = helper_retaddr;
        break;

    case 0:
        /*
         * Fault during host memory operation within generated code.
         * (Or, an unrelated bug within qemu, but we can't tell from here.)
         *
         * We take the host pc from the signal frame.  However, we cannot
         * use that value directly.  Within cpu_restore_state_from_tb, we
         * assume PC comes from GETPC(), as used by the helper functions,
         * so we adjust the address by -GETPC_ADJ to form an address that
         * is within the call insn, so that the address does not accidentally
         * match the beginning of the next guest insn.  However, when the
         * pc comes from the signal frame it points to the actual faulting
         * host memory insn and not the return from a call insn.
         *
         * Therefore, adjust to compensate for what will be done later
         * by cpu_restore_state_from_tb.
         */
        *pc += GETPC_ADJ;
        break;

    case 1:
        /*
         * Fault during host read for translation, or loosely, "execution".
         *
         * The guest pc is already pointing to the start of the TB for which
         * code is being generated.  If the guest translator manages the
         * page crossings correctly, this is exactly the correct address
         * (and if the translator doesn't handle page boundaries correctly
         * there's little we can do about that here).  Therefore, do not
         * trigger the unwinder.
         *
         * Like tb_gen_code, release the memory lock before cpu_loop_exit.
         */
        mmap_unlock();
        *pc = 0;
        return MMU_INST_FETCH;
    }

    return is_write ? MMU_DATA_STORE : MMU_DATA_LOAD;
}
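
/*
 * For illustration only (a sketch, not one of this file's interfaces):
 * a hypothetical target helper that touches host memory directly would
 * bracket the access with set_helper_retaddr(GETPC()) and
 * clear_helper_retaddr(), so that a fault taken inside the access is
 * routed through the "default" case of adjust_signal_pc() above:
 *
 *     void helper_example(CPUArchState *env, target_ulong addr)
 *     {
 *         char *host = g2h(env_cpu(env), addr);
 *
 *         set_helper_retaddr(GETPC());
 *         *host = 0;                 // may SIGSEGV; unwinds via above
 *         clear_helper_retaddr();
 *     }
 */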

/**
 * handle_sigsegv_accerr_write:
 * @cpu: the cpu context
 * @old_set: the sigset_t from the signal ucontext_t
 * @host_pc: the host pc, adjusted for the signal
 * @guest_addr: the guest address of the fault
 *
 * Return true if the write fault has been handled, and should be re-tried.
 *
 * Note that it is important that we don't call page_unprotect() unless
 * this is really a "write to nonwriteable page" fault, because
 * page_unprotect() assumes that if it is called for an access to
 * a page that's writeable this means we had two threads racing and
 * another thread got there first and already made the page writeable;
 * so we will retry the access. If we were to call page_unprotect()
 * for some other kind of fault that should really be passed to the
 * guest, we'd end up in an infinite loop of retrying the faulting access.
 */
bool handle_sigsegv_accerr_write(CPUState *cpu, sigset_t *old_set,
                                 uintptr_t host_pc, abi_ptr guest_addr)
{
    switch (page_unprotect(guest_addr, host_pc)) {
    case 0:
        /*
         * Fault not caused by a page marked unwritable to protect
         * cached translations, must be the guest binary's problem.
         */
        return false;
    case 1:
        /*
         * Fault caused by protection of cached translation; TBs
         * invalidated, so resume execution.
         */
        return true;
    case 2:
        /*
         * Fault caused by protection of cached translation, and the
         * currently executing TB was modified and must be exited immediately.
         */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit_noexc(cpu);
        /* NORETURN */
    default:
        g_assert_not_reached();
    }
}

static int probe_access_internal(CPUArchState *env, target_ulong addr,
                                 int fault_size, MMUAccessType access_type,
                                 bool nonfault, uintptr_t ra)
{
    int acc_flag;
    bool maperr;

    switch (access_type) {
    case MMU_DATA_STORE:
        acc_flag = PAGE_WRITE_ORG;
        break;
    case MMU_DATA_LOAD:
        acc_flag = PAGE_READ;
        break;
    case MMU_INST_FETCH:
        acc_flag = PAGE_EXEC;
        break;
    default:
        g_assert_not_reached();
    }

    if (guest_addr_valid_untagged(addr)) {
        int page_flags = page_get_flags(addr);
        if (page_flags & acc_flag) {
            return 0; /* success */
        }
        maperr = !(page_flags & PAGE_VALID);
    } else {
        maperr = true;
    }

    if (nonfault) {
        return TLB_INVALID_MASK;
    }

    cpu_loop_exit_sigsegv(env_cpu(env), addr, access_type, maperr, ra);
}

int probe_access_flags(CPUArchState *env, target_ulong addr,
                       MMUAccessType access_type, int mmu_idx,
                       bool nonfault, void **phost, uintptr_t ra)
{
    int flags;

    flags = probe_access_internal(env, addr, 0, access_type, nonfault, ra);
    *phost = flags ? NULL : g2h(env_cpu(env), addr);
    return flags;
}

void *probe_access(CPUArchState *env, target_ulong addr, int size,
                   MMUAccessType access_type, int mmu_idx, uintptr_t ra)
{
    int flags;

    g_assert(-(addr | TARGET_PAGE_MASK) >= size);
    flags = probe_access_internal(env, addr, size, access_type, false, ra);
    g_assert(flags == 0);

    return size ? g2h(env_cpu(env), addr) : NULL;
}
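
/*
 * Usage sketch (hypothetical caller, for illustration): passing
 * nonfault=true turns a would-be SIGSEGV into a TLB_INVALID_MASK
 * result that the caller can test:
 *
 *     void *host;
 *     int flags = probe_access_flags(env, addr, MMU_DATA_LOAD,
 *                                    mmu_idx, true, &host, GETPC());
 *     if (flags & TLB_INVALID_MASK) {
 *         // page not accessible; recover without raising a signal
 *     } else {
 *         // host now points at the guest page contents
 *     }
 */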

/* The softmmu versions of these helpers are in cputlb.c. */

/*
 * Verify that we have passed the correct MemOp to the correct function.
 *
 * We could present one function to target code, and dispatch based on
 * the MemOp, but so far we have worked hard to avoid an indirect function
 * call along the memory path.
 */
static void validate_memop(MemOpIdx oi, MemOp expected)
{
#ifdef CONFIG_DEBUG_TCG
    MemOp have = get_memop(oi) & (MO_SIZE | MO_BSWAP);
    assert(have == expected);
#endif
}

void helper_unaligned_ld(CPUArchState *env, target_ulong addr)
{
    cpu_loop_exit_sigbus(env_cpu(env), addr, MMU_DATA_LOAD, GETPC());
}

void helper_unaligned_st(CPUArchState *env, target_ulong addr)
{
    cpu_loop_exit_sigbus(env_cpu(env), addr, MMU_DATA_STORE, GETPC());
}

static void *cpu_mmu_lookup(CPUArchState *env, target_ulong addr,
                            MemOpIdx oi, uintptr_t ra, MMUAccessType type)
{
    MemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    void *ret;

    /* Enforce guest required alignment. */
    if (unlikely(addr & ((1 << a_bits) - 1))) {
        cpu_loop_exit_sigbus(env_cpu(env), addr, type, ra);
    }

    ret = g2h(env_cpu(env), addr);
    set_helper_retaddr(ra);
    return ret;
}

uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint8_t ret;

    validate_memop(oi, MO_UB);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = ldub_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint16_t ret;

    validate_memop(oi, MO_BEUW);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = lduw_be_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint32_t ret;

    validate_memop(oi, MO_BEUL);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = ldl_be_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint64_t ret;

    validate_memop(oi, MO_BEUQ);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = ldq_be_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint16_t ret;

    validate_memop(oi, MO_LEUW);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = lduw_le_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint32_t ret;

    validate_memop(oi, MO_LEUL);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = ldl_le_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint64_t ret;

    validate_memop(oi, MO_LEUQ);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = ldq_le_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}
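
/*
 * Call sketch (hypothetical, for illustration): callers construct the
 * MemOpIdx with make_memop_idx(); user-only does not use the mmu index,
 * so 0 is conventional:
 *
 *     MemOpIdx oi = make_memop_idx(MO_LEUL | MO_ALIGN, 0);
 *     uint32_t val = cpu_ldl_le_mmu(env, addr, oi, GETPC());
 */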

void cpu_stb_mmu(CPUArchState *env, abi_ptr addr, uint8_t val,
                 MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_UB);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stb_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stw_be_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_BEUW);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stw_be_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stl_be_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_BEUL);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stl_be_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stq_be_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_BEUQ);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stq_be_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stw_le_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_LEUW);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stw_le_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stl_le_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_LEUL);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stl_le_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stq_le_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_LEUQ);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stq_le_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = ldub_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = lduw_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = ldl_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr ptr)
{
    uint64_t ret;

    set_helper_retaddr(1);
    ret = ldq_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

#include "ldst_common.c.inc"
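
/*
 * Note for illustration: the cpu_ld*_code routines above set the
 * helper_retaddr sentinel to 1, so a fault during the fetch takes the
 * MMU_INST_FETCH path in adjust_signal_pc().  A target translator
 * fetches roughly like this (hypothetical DisasContext named ctx):
 *
 *     uint32_t insn = cpu_ldl_code(env, ctx->base.pc_next);
 */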

/*
 * Do not allow unaligned operations to proceed.  Return the host address.
 *
 * @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE.
 */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
                               MemOpIdx oi, int size, int prot,
                               uintptr_t retaddr)
{
    MemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    void *ret;

    /* Enforce guest required alignment. */
    if (unlikely(addr & ((1 << a_bits) - 1))) {
        MMUAccessType t = prot == PAGE_READ ? MMU_DATA_LOAD : MMU_DATA_STORE;
        cpu_loop_exit_sigbus(env_cpu(env), addr, t, retaddr);
    }

    /* Enforce qemu required alignment. */
    if (unlikely(addr & (size - 1))) {
        cpu_loop_exit_atomic(env_cpu(env), retaddr);
    }

    ret = g2h(env_cpu(env), addr);
    set_helper_retaddr(retaddr);
    return ret;
}

#include "atomic_common.c.inc"

/*
 * First set of functions passes in OI and RETADDR.
 * This makes them callable from other helpers.
 */

#define ATOMIC_NAME(X) \
    glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)
#define ATOMIC_MMU_CLEANUP do { clear_helper_retaddr(); } while (0)

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#if HAVE_ATOMIC128 || HAVE_CMPXCHG128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif
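
/*
 * For reference: each inclusion of atomic_template.h above expands
 * ATOMIC_NAME into a family of entry points; e.g. DATA_SIZE 4 yields,
 * among others:
 *
 *     uint32_t cpu_atomic_cmpxchgl_le_mmu(CPUArchState *env,
 *                                         target_ulong addr,
 *                                         uint32_t cmpv, uint32_t newv,
 *                                         MemOpIdx oi, uintptr_t retaddr);
 */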