/*
 * User emulator execution
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "hw/core/tcg-cpu-ops.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#include "qemu/bitops.h"
#include "qemu/rcu.h"
#include "exec/cpu_ldst.h"
#include "exec/translate-all.h"
#include "exec/helper-proto.h"
#include "qemu/atomic128.h"
#include "trace/trace-root.h"
#include "tcg/tcg-ldst.h"
#include "internal.h"

__thread uintptr_t helper_retaddr;

//#define DEBUG_SIGNAL

/*
 * Adjust the pc to pass to cpu_restore_state; return the memop type.
 */
MMUAccessType adjust_signal_pc(uintptr_t *pc, bool is_write)
{
    switch (helper_retaddr) {
    default:
        /*
         * Fault during host memory operation within a helper function.
         * The helper's host return address, saved here, gives us a
         * pointer into the generated code that will unwind to the
         * correct guest pc.
         */
        *pc = helper_retaddr;
        break;

    case 0:
        /*
         * Fault during host memory operation within generated code.
         * (Or an unrelated bug within qemu, but we can't tell from here.)
         *
         * We take the host pc from the signal frame.  However, we cannot
         * use that value directly.  Within cpu_restore_state_from_tb, we
         * assume PC comes from GETPC(), as used by the helper functions,
         * so we adjust the address by -GETPC_ADJ to form an address that
         * is within the call insn, so that the address does not accidentally
         * match the beginning of the next guest insn.  However, when the
         * pc comes from the signal frame it points to the actual faulting
         * host memory insn and not the return from a call insn.
         *
         * Therefore, adjust to compensate for what will be done later
         * by cpu_restore_state_from_tb.
         */
        *pc += GETPC_ADJ;
        break;

    case 1:
        /*
         * Fault during host read for translation, or loosely, "execution".
         *
         * The guest pc is already pointing to the start of the TB for which
         * code is being generated.  If the guest translator manages the
         * page crossings correctly, this is exactly the correct address
         * (and if the translator doesn't handle page boundaries correctly
         * there's little we can do about that here).  Therefore, do not
         * trigger the unwinder.
         */
        *pc = 0;
        return MMU_INST_FETCH;
    }

    return is_write ? MMU_DATA_STORE : MMU_DATA_LOAD;
}
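/*
 * Illustrative sketch (not part of this file): the host SIGSEGV handler
 * is expected to use adjust_signal_pc() roughly as follows, where 'uc' is
 * the ucontext_t passed to the handler, and host_signal_pc() and
 * host_signal_write() are names from the per-host signal glue -- treat
 * the exact plumbing as an assumption here:
 *
 *     uintptr_t pc = host_signal_pc(uc);
 *     bool is_write = host_signal_write(info, uc);
 *     MMUAccessType acc = adjust_signal_pc(&pc, is_write);
 *
 * The adjusted pc is then suitable for the unwinder, and a write fault
 * with SEGV_ACCERR may be routed to handle_sigsegv_accerr_write() below.
 */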
/**
 * handle_sigsegv_accerr_write:
 * @cpu: the cpu context
 * @old_set: the sigset_t from the signal ucontext_t
 * @host_pc: the host pc, adjusted for the signal
 * @guest_addr: the guest address of the fault
 *
 * Return true if the write fault has been handled, and should be re-tried.
 *
 * Note that it is important that we don't call page_unprotect() unless
 * this is really a "write to nonwritable page" fault, because
 * page_unprotect() assumes that if it is called for an access to
 * a page that's writable this means we had two threads racing and
 * another thread got there first and already made the page writable;
 * so we will retry the access.  If we were to call page_unprotect()
 * for some other kind of fault that should really be passed to the
 * guest, we'd end up in an infinite loop of retrying the faulting access.
 */
bool handle_sigsegv_accerr_write(CPUState *cpu, sigset_t *old_set,
                                 uintptr_t host_pc, abi_ptr guest_addr)
{
    switch (page_unprotect(guest_addr, host_pc)) {
    case 0:
        /*
         * Fault not caused by a page marked unwritable to protect
         * cached translations, must be the guest binary's problem.
         */
        return false;
    case 1:
        /*
         * Fault caused by protection of cached translation; TBs
         * invalidated, so resume execution.
         */
        return true;
    case 2:
        /*
         * Fault caused by protection of cached translation, and the
         * currently executing TB was modified and must be exited immediately.
         */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit_noexc(cpu);
        /* NORETURN */
    default:
        g_assert_not_reached();
    }
}

typedef struct PageFlagsNode {
    struct rcu_head rcu;
    IntervalTreeNode itree;
    int flags;
} PageFlagsNode;

static IntervalTreeRoot pageflags_root;

static PageFlagsNode *pageflags_find(target_ulong start, target_ulong last)
{
    IntervalTreeNode *n;

    n = interval_tree_iter_first(&pageflags_root, start, last);
    return n ? container_of(n, PageFlagsNode, itree) : NULL;
}

static PageFlagsNode *pageflags_next(PageFlagsNode *p, target_ulong start,
                                     target_ulong last)
{
    IntervalTreeNode *n;

    n = interval_tree_iter_next(&p->itree, start, last);
    return n ? container_of(n, PageFlagsNode, itree) : NULL;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    IntervalTreeNode *n;
    int rc = 0;

    mmap_lock();
    for (n = interval_tree_iter_first(&pageflags_root, 0, -1);
         n != NULL;
         n = interval_tree_iter_next(n, 0, -1)) {
        PageFlagsNode *p = container_of(n, PageFlagsNode, itree);

        rc = fn(priv, n->start, n->last + 1, p->flags);
        if (rc != 0) {
            break;
        }
    }
    mmap_unlock();

    return rc;
}

static int dump_region(void *priv, target_ulong start,
                       target_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx" "TARGET_FMT_lx" %c%c%c\n",
            start, end, end - start,
            ((prot & PAGE_READ) ? 'r' : '-'),
            ((prot & PAGE_WRITE) ? 'w' : '-'),
            ((prot & PAGE_EXEC) ? 'x' : '-'));
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    const int length = sizeof(target_ulong) * 2;

    fprintf(f, "%-*s %-*s %-*s %s\n",
            length, "start", length, "end", length, "size", "prot");
    walk_memory_regions(f, dump_region);
}
int page_get_flags(target_ulong address)
{
    PageFlagsNode *p = pageflags_find(address, address);

    /*
     * See util/interval-tree.c re lockless lookups: no false positives
     * but there are false negatives.  If we find nothing, retry with
     * the mmap lock acquired.
     */
    if (p) {
        return p->flags;
    }
    if (have_mmap_lock()) {
        return 0;
    }

    mmap_lock();
    p = pageflags_find(address, address);
    mmap_unlock();
    return p ? p->flags : 0;
}

/* A subroutine of page_set_flags: insert a new node for [start,last]. */
static void pageflags_create(target_ulong start, target_ulong last, int flags)
{
    PageFlagsNode *p = g_new(PageFlagsNode, 1);

    p->itree.start = start;
    p->itree.last = last;
    p->flags = flags;
    interval_tree_insert(&p->itree, &pageflags_root);
}

/* A subroutine of page_set_flags: remove everything in [start,last]. */
static bool pageflags_unset(target_ulong start, target_ulong last)
{
    bool inval_tb = false;

    while (true) {
        PageFlagsNode *p = pageflags_find(start, last);
        target_ulong p_last;

        if (!p) {
            break;
        }

        if (p->flags & PAGE_EXEC) {
            inval_tb = true;
        }

        interval_tree_remove(&p->itree, &pageflags_root);
        p_last = p->itree.last;

        if (p->itree.start < start) {
            /* Truncate the node from the end, or split out the middle. */
            p->itree.last = start - 1;
            interval_tree_insert(&p->itree, &pageflags_root);
            if (last < p_last) {
                pageflags_create(last + 1, p_last, p->flags);
                break;
            }
        } else if (p_last <= last) {
            /* Range completely covers node -- remove it. */
            g_free_rcu(p, rcu);
        } else {
            /* Truncate the node from the start. */
            p->itree.start = last + 1;
            interval_tree_insert(&p->itree, &pageflags_root);
            break;
        }
    }

    return inval_tb;
}

/*
 * A subroutine of page_set_flags: nothing overlaps [start,last],
 * but check adjacent mappings and maybe merge into a single range.
 */
static void pageflags_create_merge(target_ulong start, target_ulong last,
                                   int flags)
{
    PageFlagsNode *next = NULL, *prev = NULL;

    if (start > 0) {
        prev = pageflags_find(start - 1, start - 1);
        if (prev) {
            if (prev->flags == flags) {
                interval_tree_remove(&prev->itree, &pageflags_root);
            } else {
                prev = NULL;
            }
        }
    }
    if (last + 1 != 0) {
        next = pageflags_find(last + 1, last + 1);
        if (next) {
            if (next->flags == flags) {
                interval_tree_remove(&next->itree, &pageflags_root);
            } else {
                next = NULL;
            }
        }
    }

    if (prev) {
        if (next) {
            prev->itree.last = next->itree.last;
            g_free_rcu(next, rcu);
        } else {
            prev->itree.last = last;
        }
        interval_tree_insert(&prev->itree, &pageflags_root);
    } else if (next) {
        next->itree.start = start;
        interval_tree_insert(&next->itree, &pageflags_root);
    } else {
        pageflags_create(start, last, flags);
    }
}

/*
 * Allow the target to decide if PAGE_TARGET_[12] may be reset.
 * By default, they are not kept.
 */
#ifndef PAGE_TARGET_STICKY
#define PAGE_TARGET_STICKY  0
#endif
#define PAGE_STICKY  (PAGE_ANON | PAGE_PASSTHROUGH | PAGE_TARGET_STICKY)
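/*
 * Worked example (illustrative, hypothetical addresses, 4k target pages):
 * suppose [0x10000,0x14fff] is mapped anonymous read-write, so one node
 * carries PAGE_READ|PAGE_WRITE|PAGE_WRITE_ORG|PAGE_ANON|PAGE_VALID.
 * A guest mprotect(PROT_READ) over [0x12000,0x12fff] reaches the routine
 * below as set_flags = PAGE_READ|PAGE_VALID with clear_flags = ~PAGE_STICKY.
 * Because PAGE_ANON is sticky, it survives in merge_flags, so the node is
 * split into three: two outer ranges keeping the original flags, and a
 * middle range that drops PAGE_WRITE and PAGE_WRITE_ORG but keeps PAGE_ANON.
 */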
/* A subroutine of page_set_flags: add flags to [start,last]. */
static bool pageflags_set_clear(target_ulong start, target_ulong last,
                                int set_flags, int clear_flags)
{
    PageFlagsNode *p;
    target_ulong p_start, p_last;
    int p_flags, merge_flags;
    bool inval_tb = false;

 restart:
    p = pageflags_find(start, last);
    if (!p) {
        if (set_flags) {
            pageflags_create_merge(start, last, set_flags);
        }
        goto done;
    }

    p_start = p->itree.start;
    p_last = p->itree.last;
    p_flags = p->flags;
    /* Using mprotect on a page does not change sticky bits. */
    merge_flags = (p_flags & ~clear_flags) | set_flags;

    /*
     * Need to flush if an overlapping executable region
     * removes exec, or adds write.
     */
    if ((p_flags & PAGE_EXEC)
        && (!(merge_flags & PAGE_EXEC)
            || (merge_flags & ~p_flags & PAGE_WRITE))) {
        inval_tb = true;
    }

    /*
     * If there is an exact range match, update and return without
     * attempting to merge with adjacent regions.
     */
    if (start == p_start && last == p_last) {
        if (merge_flags) {
            p->flags = merge_flags;
        } else {
            interval_tree_remove(&p->itree, &pageflags_root);
            g_free_rcu(p, rcu);
        }
        goto done;
    }

    /*
     * If sticky bits affect the original mapping, then we must be more
     * careful about the existing intervals and the separate flags.
     */
    if (set_flags != merge_flags) {
        if (p_start < start) {
            interval_tree_remove(&p->itree, &pageflags_root);
            p->itree.last = start - 1;
            interval_tree_insert(&p->itree, &pageflags_root);

            if (last < p_last) {
                if (merge_flags) {
                    pageflags_create(start, last, merge_flags);
                }
                pageflags_create(last + 1, p_last, p_flags);
            } else {
                if (merge_flags) {
                    pageflags_create(start, p_last, merge_flags);
                }
                if (p_last < last) {
                    start = p_last + 1;
                    goto restart;
                }
            }
        } else {
            if (start < p_start && set_flags) {
                pageflags_create(start, p_start - 1, set_flags);
            }
            if (last < p_last) {
                interval_tree_remove(&p->itree, &pageflags_root);
                p->itree.start = last + 1;
                interval_tree_insert(&p->itree, &pageflags_root);
                if (merge_flags) {
                    pageflags_create(start, last, merge_flags);
                }
            } else {
                if (merge_flags) {
                    p->flags = merge_flags;
                } else {
                    interval_tree_remove(&p->itree, &pageflags_root);
                    g_free_rcu(p, rcu);
                }
                if (p_last < last) {
                    start = p_last + 1;
                    goto restart;
                }
            }
        }
        goto done;
    }

    /* If flags are not changing for this range, incorporate it. */
    if (set_flags == p_flags) {
        if (start < p_start) {
            interval_tree_remove(&p->itree, &pageflags_root);
            p->itree.start = start;
            interval_tree_insert(&p->itree, &pageflags_root);
        }
        if (p_last < last) {
            start = p_last + 1;
            goto restart;
        }
        goto done;
    }
    /* Maybe split out head and/or tail ranges with the original flags. */
    interval_tree_remove(&p->itree, &pageflags_root);
    if (p_start < start) {
        p->itree.last = start - 1;
        interval_tree_insert(&p->itree, &pageflags_root);

        if (p_last < last) {
            goto restart;
        }
        if (last < p_last) {
            pageflags_create(last + 1, p_last, p_flags);
        }
    } else if (last < p_last) {
        p->itree.start = last + 1;
        interval_tree_insert(&p->itree, &pageflags_root);
    } else {
        g_free_rcu(p, rcu);
        goto restart;
    }
    if (set_flags) {
        pageflags_create(start, last, set_flags);
    }

 done:
    return inval_tb;
}

/*
 * Modify the flags of a page and invalidate the code if necessary.
 * The flag PAGE_WRITE_ORG is positioned automatically depending
 * on PAGE_WRITE.  The mmap_lock should already be held.
 */
void page_set_flags(target_ulong start, target_ulong last, int flags)
{
    bool reset = false;
    bool inval_tb = false;

    /*
     * This function should never be called with addresses outside the
     * guest address space.  If this assert fires, it probably indicates
     * a missing call to h2g_valid.
     */
    assert(start <= last);
    assert(last <= GUEST_ADDR_MAX);
    /* Only set PAGE_ANON with new mappings. */
    assert(!(flags & PAGE_ANON) || (flags & PAGE_RESET));
    assert_memory_lock();

    start &= TARGET_PAGE_MASK;
    last |= ~TARGET_PAGE_MASK;

    if (!(flags & PAGE_VALID)) {
        flags = 0;
    } else {
        reset = flags & PAGE_RESET;
        flags &= ~PAGE_RESET;
        if (flags & PAGE_WRITE) {
            flags |= PAGE_WRITE_ORG;
        }
    }

    if (!flags || reset) {
        page_reset_target_data(start, last);
        inval_tb |= pageflags_unset(start, last);
    }
    if (flags) {
        inval_tb |= pageflags_set_clear(start, last, flags,
                                        ~(reset ? 0 : PAGE_STICKY));
    }
    if (inval_tb) {
        tb_invalidate_phys_range(start, last);
    }
}
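/*
 * Caller sketch (illustrative; the exact flag plumbing lives in the
 * per-target mmap emulation, not here): after a successful anonymous
 * mmap, the new interval is expected to be recorded roughly as
 *
 *     page_set_flags(start, start + len - 1,
 *                    prot | PAGE_VALID | PAGE_RESET | PAGE_ANON);
 *
 * with PAGE_RESET marking a fresh mapping so that sticky flags from any
 * prior mapping of the same range are discarded.
 */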
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    target_ulong last;
    int locked;  /* tri-state: =0: unlocked, +1: global, -1: local */
    int ret;

    if (len == 0) {
        return 0;  /* trivial length */
    }

    last = start + len - 1;
    if (last < start) {
        return -1;  /* wrap around */
    }

    locked = have_mmap_lock();
    while (true) {
        PageFlagsNode *p = pageflags_find(start, last);
        int missing;

        if (!p) {
            if (!locked) {
                /*
                 * Lockless lookups have false negatives.
                 * Retry with the lock held.
                 */
                mmap_lock();
                locked = -1;
                p = pageflags_find(start, last);
            }
            if (!p) {
                ret = -1;  /* entire region invalid */
                break;
            }
        }
        if (start < p->itree.start) {
            ret = -1;  /* initial bytes invalid */
            break;
        }

        missing = flags & ~p->flags;
        if (missing & PAGE_READ) {
            ret = -1;  /* page not readable */
            break;
        }
        if (missing & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                ret = -1;  /* page not writable */
                break;
            }
            /* Asking about writable, but has been protected: undo. */
            if (!page_unprotect(start, 0)) {
                ret = -1;
                break;
            }
            /* TODO: page_unprotect should take a range, not a single page. */
            if (last - start < TARGET_PAGE_SIZE) {
                ret = 0;  /* ok */
                break;
            }
            start += TARGET_PAGE_SIZE;
            continue;
        }

        if (last <= p->itree.last) {
            ret = 0;  /* ok */
            break;
        }
        start = p->itree.last + 1;
    }

    /* Release the lock if acquired locally. */
    if (locked < 0) {
        mmap_unlock();
    }
    return ret;
}

void page_protect(tb_page_addr_t address)
{
    PageFlagsNode *p;
    target_ulong start, last;
    int prot;

    assert_memory_lock();

    if (qemu_host_page_size <= TARGET_PAGE_SIZE) {
        start = address & TARGET_PAGE_MASK;
        last = start + TARGET_PAGE_SIZE - 1;
    } else {
        start = address & qemu_host_page_mask;
        last = start + qemu_host_page_size - 1;
    }

    p = pageflags_find(start, last);
    if (!p) {
        return;
    }
    prot = p->flags;

    if (unlikely(p->itree.last < last)) {
        /* More than one protection region covers the one host page. */
        assert(TARGET_PAGE_SIZE < qemu_host_page_size);
        while ((p = pageflags_next(p, start, last)) != NULL) {
            prot |= p->flags;
        }
    }

    if (prot & PAGE_WRITE) {
        pageflags_set_clear(start, last, 0, PAGE_WRITE);
        mprotect(g2h_untagged(start), qemu_host_page_size,
                 prot & (PAGE_READ | PAGE_EXEC) ? PROT_READ : PROT_NONE);
    }
}
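/*
 * Sizing note (illustrative): with 64k host pages and 4k target pages,
 * page_protect() above must write-protect the entire 64k host page that
 * contains the translated 4k guest page, i.e. 16 guest pages lose
 * PAGE_WRITE at once.  page_unprotect() below undoes this at the same
 * host-page granularity, which is why it walks every covered guest page.
 */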
708 */ 709 current_tb_invalidated |= 710 tb_invalidate_phys_page_unwind(addr, pc); 711 } 712 } 713 if (prot & PAGE_EXEC) { 714 prot = (prot & ~PAGE_EXEC) | PAGE_READ; 715 } 716 mprotect((void *)g2h_untagged(start), len, prot & PAGE_BITS); 717 } 718 mmap_unlock(); 719 720 /* If current TB was invalidated return to main loop */ 721 return current_tb_invalidated ? 2 : 1; 722 } 723 724 static int probe_access_internal(CPUArchState *env, target_ulong addr, 725 int fault_size, MMUAccessType access_type, 726 bool nonfault, uintptr_t ra) 727 { 728 int acc_flag; 729 bool maperr; 730 731 switch (access_type) { 732 case MMU_DATA_STORE: 733 acc_flag = PAGE_WRITE_ORG; 734 break; 735 case MMU_DATA_LOAD: 736 acc_flag = PAGE_READ; 737 break; 738 case MMU_INST_FETCH: 739 acc_flag = PAGE_EXEC; 740 break; 741 default: 742 g_assert_not_reached(); 743 } 744 745 if (guest_addr_valid_untagged(addr)) { 746 int page_flags = page_get_flags(addr); 747 if (page_flags & acc_flag) { 748 return 0; /* success */ 749 } 750 maperr = !(page_flags & PAGE_VALID); 751 } else { 752 maperr = true; 753 } 754 755 if (nonfault) { 756 return TLB_INVALID_MASK; 757 } 758 759 cpu_loop_exit_sigsegv(env_cpu(env), addr, access_type, maperr, ra); 760 } 761 762 int probe_access_flags(CPUArchState *env, target_ulong addr, int size, 763 MMUAccessType access_type, int mmu_idx, 764 bool nonfault, void **phost, uintptr_t ra) 765 { 766 int flags; 767 768 g_assert(-(addr | TARGET_PAGE_MASK) >= size); 769 flags = probe_access_internal(env, addr, size, access_type, nonfault, ra); 770 *phost = flags ? NULL : g2h(env_cpu(env), addr); 771 return flags; 772 } 773 774 void *probe_access(CPUArchState *env, target_ulong addr, int size, 775 MMUAccessType access_type, int mmu_idx, uintptr_t ra) 776 { 777 int flags; 778 779 g_assert(-(addr | TARGET_PAGE_MASK) >= size); 780 flags = probe_access_internal(env, addr, size, access_type, false, ra); 781 g_assert(flags == 0); 782 783 return size ? g2h(env_cpu(env), addr) : NULL; 784 } 785 786 tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr, 787 void **hostp) 788 { 789 int flags; 790 791 flags = probe_access_internal(env, addr, 1, MMU_INST_FETCH, false, 0); 792 g_assert(flags == 0); 793 794 if (hostp) { 795 *hostp = g2h_untagged(addr); 796 } 797 return addr; 798 } 799 800 #ifdef TARGET_PAGE_DATA_SIZE 801 /* 802 * Allocate chunks of target data together. For the only current user, 803 * if we allocate one hunk per page, we have overhead of 40/128 or 40%. 804 * Therefore, allocate memory for 64 pages at a time for overhead < 1%. 805 */ 806 #define TPD_PAGES 64 807 #define TBD_MASK (TARGET_PAGE_MASK * TPD_PAGES) 808 809 typedef struct TargetPageDataNode { 810 struct rcu_head rcu; 811 IntervalTreeNode itree; 812 char data[TPD_PAGES][TARGET_PAGE_DATA_SIZE] __attribute__((aligned)); 813 } TargetPageDataNode; 814 815 static IntervalTreeRoot targetdata_root; 816 817 void page_reset_target_data(target_ulong start, target_ulong last) 818 { 819 IntervalTreeNode *n, *next; 820 821 assert_memory_lock(); 822 823 start &= TARGET_PAGE_MASK; 824 last |= ~TARGET_PAGE_MASK; 825 826 for (n = interval_tree_iter_first(&targetdata_root, start, last), 827 next = n ? interval_tree_iter_next(n, start, last) : NULL; 828 n != NULL; 829 n = next, 830 next = next ? 
#ifdef TARGET_PAGE_DATA_SIZE
/*
 * Allocate chunks of target data together.  For the only current user,
 * if we allocate one hunk per page, we have overhead of 40/128, or
 * roughly 31%.  Therefore, allocate memory for 64 pages at a time
 * for overhead < 1%.
 */
#define TPD_PAGES  64
#define TBD_MASK   (TARGET_PAGE_MASK * TPD_PAGES)

typedef struct TargetPageDataNode {
    struct rcu_head rcu;
    IntervalTreeNode itree;
    char data[TPD_PAGES][TARGET_PAGE_DATA_SIZE] __attribute__((aligned));
} TargetPageDataNode;

static IntervalTreeRoot targetdata_root;

void page_reset_target_data(target_ulong start, target_ulong last)
{
    IntervalTreeNode *n, *next;

    assert_memory_lock();

    start &= TARGET_PAGE_MASK;
    last |= ~TARGET_PAGE_MASK;

    for (n = interval_tree_iter_first(&targetdata_root, start, last),
         next = n ? interval_tree_iter_next(n, start, last) : NULL;
         n != NULL;
         n = next,
         next = next ? interval_tree_iter_next(n, start, last) : NULL) {
        target_ulong n_start, n_last, p_ofs, p_len;
        TargetPageDataNode *t = container_of(n, TargetPageDataNode, itree);

        if (n->start >= start && n->last <= last) {
            interval_tree_remove(n, &targetdata_root);
            g_free_rcu(t, rcu);
            continue;
        }

        if (n->start < start) {
            n_start = start;
            p_ofs = (start - n->start) >> TARGET_PAGE_BITS;
        } else {
            n_start = n->start;
            p_ofs = 0;
        }
        n_last = MIN(last, n->last);
        p_len = (n_last + 1 - n_start) >> TARGET_PAGE_BITS;

        memset(t->data[p_ofs], 0, p_len * TARGET_PAGE_DATA_SIZE);
    }
}

void *page_get_target_data(target_ulong address)
{
    IntervalTreeNode *n;
    TargetPageDataNode *t;
    target_ulong page, region;

    page = address & TARGET_PAGE_MASK;
    region = address & TBD_MASK;

    n = interval_tree_iter_first(&targetdata_root, page, page);
    if (!n) {
        /*
         * See util/interval-tree.c re lockless lookups: no false positives
         * but there are false negatives.  If we find nothing, retry with
         * the mmap lock acquired.  We also need the lock for the
         * allocation + insert.
         */
        mmap_lock();
        n = interval_tree_iter_first(&targetdata_root, page, page);
        if (!n) {
            t = g_new0(TargetPageDataNode, 1);
            n = &t->itree;
            n->start = region;
            n->last = region | ~TBD_MASK;
            interval_tree_insert(n, &targetdata_root);
        }
        mmap_unlock();
    }

    t = container_of(n, TargetPageDataNode, itree);
    return t->data[(page - region) >> TARGET_PAGE_BITS];
}
#else
void page_reset_target_data(target_ulong start, target_ulong last) { }
#endif /* TARGET_PAGE_DATA_SIZE */

/* The softmmu versions of these helpers are in cputlb.c. */
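/*
 * All of the load/store helpers below follow the same bracketing protocol
 * around the raw host access (a sketch, mirroring the code that follows):
 *
 *     void *host = g2h(env_cpu(env), addr);
 *     set_helper_retaddr(ra);   // publish unwind info for the SEGV handler
 *     val = ldub_p(host);       // may fault; see adjust_signal_pc() above
 *     clear_helper_retaddr();
 *
 * While helper_retaddr is non-zero, a fault on the host access is
 * attributed to this helper and unwound through 'ra' rather than through
 * the pc in the signal frame.
 */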
static void *cpu_mmu_lookup(CPUArchState *env, abi_ptr addr,
                            MemOp mop, uintptr_t ra, MMUAccessType type)
{
    int a_bits = get_alignment_bits(mop);
    void *ret;

    /* Enforce guest required alignment. */
    if (unlikely(addr & ((1 << a_bits) - 1))) {
        cpu_loop_exit_sigbus(env_cpu(env), addr, type, ra);
    }

    ret = g2h(env_cpu(env), addr);
    set_helper_retaddr(ra);
    return ret;
}

#include "ldst_atomicity.c.inc"

static uint8_t do_ld1_mmu(CPUArchState *env, abi_ptr addr,
                          MemOp mop, uintptr_t ra)
{
    void *haddr;
    uint8_t ret;

    tcg_debug_assert((mop & MO_SIZE) == MO_8);
    haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
    ret = ldub_p(haddr);
    clear_helper_retaddr();
    return ret;
}

tcg_target_ulong helper_ldub_mmu(CPUArchState *env, uint64_t addr,
                                 MemOpIdx oi, uintptr_t ra)
{
    return do_ld1_mmu(env, addr, get_memop(oi), ra);
}

tcg_target_ulong helper_ldsb_mmu(CPUArchState *env, uint64_t addr,
                                 MemOpIdx oi, uintptr_t ra)
{
    return (int8_t)do_ld1_mmu(env, addr, get_memop(oi), ra);
}

uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr,
                    MemOpIdx oi, uintptr_t ra)
{
    uint8_t ret = do_ld1_mmu(env, addr, get_memop(oi), ra);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

static uint16_t do_ld2_he_mmu(CPUArchState *env, abi_ptr addr,
                              MemOp mop, uintptr_t ra)
{
    void *haddr;
    uint16_t ret;

    tcg_debug_assert((mop & MO_SIZE) == MO_16);
    haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
    ret = load_atom_2(env, ra, haddr, mop);
    clear_helper_retaddr();
    return ret;
}

tcg_target_ulong helper_lduw_mmu(CPUArchState *env, uint64_t addr,
                                 MemOpIdx oi, uintptr_t ra)
{
    MemOp mop = get_memop(oi);
    uint16_t ret = do_ld2_he_mmu(env, addr, mop, ra);

    if (mop & MO_BSWAP) {
        ret = bswap16(ret);
    }
    return ret;
}

tcg_target_ulong helper_ldsw_mmu(CPUArchState *env, uint64_t addr,
                                 MemOpIdx oi, uintptr_t ra)
{
    MemOp mop = get_memop(oi);
    int16_t ret = do_ld2_he_mmu(env, addr, mop, ra);

    if (mop & MO_BSWAP) {
        ret = bswap16(ret);
    }
    return ret;
}

uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    MemOp mop = get_memop(oi);
    uint16_t ret;

    tcg_debug_assert((mop & MO_BSWAP) == MO_BE);
    ret = do_ld2_he_mmu(env, addr, mop, ra);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return cpu_to_be16(ret);
}

uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    MemOp mop = get_memop(oi);
    uint16_t ret;

    tcg_debug_assert((mop & MO_BSWAP) == MO_LE);
    ret = do_ld2_he_mmu(env, addr, mop, ra);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return cpu_to_le16(ret);
}
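/*
 * Endianness convention (worked example): the do_ld*_he_mmu routines
 * return the value in host byte order, and the MO_BSWAP-aware wrappers
 * correct for the requested guest order.  E.g. with bytes { 0x12, 0x34 }
 * at addr on a little-endian host, do_ld2_he_mmu yields 0x3412;
 * helper_lduw_mmu for a big-endian memop swaps it to 0x1234, and
 * cpu_ldw_be_mmu reaches the same result via cpu_to_be16() on the
 * host-endian value.  The wider loads below follow the same pattern.
 */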
static uint32_t do_ld4_he_mmu(CPUArchState *env, abi_ptr addr,
                              MemOp mop, uintptr_t ra)
{
    void *haddr;
    uint32_t ret;

    tcg_debug_assert((mop & MO_SIZE) == MO_32);
    haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
    ret = load_atom_4(env, ra, haddr, mop);
    clear_helper_retaddr();
    return ret;
}

tcg_target_ulong helper_ldul_mmu(CPUArchState *env, uint64_t addr,
                                 MemOpIdx oi, uintptr_t ra)
{
    MemOp mop = get_memop(oi);
    uint32_t ret = do_ld4_he_mmu(env, addr, mop, ra);

    if (mop & MO_BSWAP) {
        ret = bswap32(ret);
    }
    return ret;
}

tcg_target_ulong helper_ldsl_mmu(CPUArchState *env, uint64_t addr,
                                 MemOpIdx oi, uintptr_t ra)
{
    MemOp mop = get_memop(oi);
    int32_t ret = do_ld4_he_mmu(env, addr, mop, ra);

    if (mop & MO_BSWAP) {
        ret = bswap32(ret);
    }
    return ret;
}

uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    MemOp mop = get_memop(oi);
    uint32_t ret;

    tcg_debug_assert((mop & MO_BSWAP) == MO_BE);
    ret = do_ld4_he_mmu(env, addr, mop, ra);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return cpu_to_be32(ret);
}

uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    MemOp mop = get_memop(oi);
    uint32_t ret;

    tcg_debug_assert((mop & MO_BSWAP) == MO_LE);
    ret = do_ld4_he_mmu(env, addr, mop, ra);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return cpu_to_le32(ret);
}

static uint64_t do_ld8_he_mmu(CPUArchState *env, abi_ptr addr,
                              MemOp mop, uintptr_t ra)
{
    void *haddr;
    uint64_t ret;

    tcg_debug_assert((mop & MO_SIZE) == MO_64);
    haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
    ret = load_atom_8(env, ra, haddr, mop);
    clear_helper_retaddr();
    return ret;
}

uint64_t helper_ldq_mmu(CPUArchState *env, uint64_t addr,
                        MemOpIdx oi, uintptr_t ra)
{
    MemOp mop = get_memop(oi);
    uint64_t ret = do_ld8_he_mmu(env, addr, mop, ra);

    if (mop & MO_BSWAP) {
        ret = bswap64(ret);
    }
    return ret;
}

uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    MemOp mop = get_memop(oi);
    uint64_t ret;

    tcg_debug_assert((mop & MO_BSWAP) == MO_BE);
    ret = do_ld8_he_mmu(env, addr, mop, ra);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return cpu_to_be64(ret);
}

uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    MemOp mop = get_memop(oi);
    uint64_t ret;

    tcg_debug_assert((mop & MO_BSWAP) == MO_LE);
    ret = do_ld8_he_mmu(env, addr, mop, ra);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return cpu_to_le64(ret);
}

static Int128 do_ld16_he_mmu(CPUArchState *env, abi_ptr addr,
                             MemOp mop, uintptr_t ra)
{
    void *haddr;
    Int128 ret;

    tcg_debug_assert((mop & MO_SIZE) == MO_128);
    haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
    ret = load_atom_16(env, ra, haddr, mop);
    clear_helper_retaddr();
    return ret;
}

Int128 helper_ld16_mmu(CPUArchState *env, uint64_t addr,
                       MemOpIdx oi, uintptr_t ra)
{
    MemOp mop = get_memop(oi);
    Int128 ret = do_ld16_he_mmu(env, addr, mop, ra);

    if (mop & MO_BSWAP) {
        ret = bswap128(ret);
    }
    return ret;
}

Int128 helper_ld_i128(CPUArchState *env, uint64_t addr, MemOpIdx oi)
{
    return helper_ld16_mmu(env, addr, oi, GETPC());
}
Int128 cpu_ld16_be_mmu(CPUArchState *env, abi_ptr addr,
                       MemOpIdx oi, uintptr_t ra)
{
    MemOp mop = get_memop(oi);
    Int128 ret;

    tcg_debug_assert((mop & MO_BSWAP) == MO_BE);
    ret = do_ld16_he_mmu(env, addr, mop, ra);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    if (!HOST_BIG_ENDIAN) {
        ret = bswap128(ret);
    }
    return ret;
}

Int128 cpu_ld16_le_mmu(CPUArchState *env, abi_ptr addr,
                       MemOpIdx oi, uintptr_t ra)
{
    MemOp mop = get_memop(oi);
    Int128 ret;

    tcg_debug_assert((mop & MO_BSWAP) == MO_LE);
    ret = do_ld16_he_mmu(env, addr, mop, ra);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    if (HOST_BIG_ENDIAN) {
        ret = bswap128(ret);
    }
    return ret;
}

static void do_st1_mmu(CPUArchState *env, abi_ptr addr, uint8_t val,
                       MemOp mop, uintptr_t ra)
{
    void *haddr;

    tcg_debug_assert((mop & MO_SIZE) == MO_8);
    haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);
    stb_p(haddr, val);
    clear_helper_retaddr();
}

void helper_stb_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    do_st1_mmu(env, addr, val, get_memop(oi), ra);
}

void cpu_stb_mmu(CPUArchState *env, abi_ptr addr, uint8_t val,
                 MemOpIdx oi, uintptr_t ra)
{
    do_st1_mmu(env, addr, val, get_memop(oi), ra);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

static void do_st2_he_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
                          MemOp mop, uintptr_t ra)
{
    void *haddr;

    tcg_debug_assert((mop & MO_SIZE) == MO_16);
    haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);
    store_atom_2(env, ra, haddr, mop, val);
    clear_helper_retaddr();
}

void helper_stw_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    MemOp mop = get_memop(oi);

    if (mop & MO_BSWAP) {
        val = bswap16(val);
    }
    do_st2_he_mmu(env, addr, val, mop, ra);
}

void cpu_stw_be_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    MemOp mop = get_memop(oi);

    tcg_debug_assert((mop & MO_BSWAP) == MO_BE);
    do_st2_he_mmu(env, addr, be16_to_cpu(val), mop, ra);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stw_le_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    MemOp mop = get_memop(oi);

    tcg_debug_assert((mop & MO_BSWAP) == MO_LE);
    do_st2_he_mmu(env, addr, le16_to_cpu(val), mop, ra);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}
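/*
 * Stores are the mirror image (worked example): the value is swapped into
 * host order first, then do_st*_he_mmu writes it with a host-endian store.
 * Storing 0x1234 big-endian on a little-endian host: be16_to_cpu(0x1234)
 * is numerically 0x3412, and the host-endian store of 0x3412 writes the
 * bytes { 0x12, 0x34 }, the big-endian representation.
 */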
static void do_st4_he_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
                          MemOp mop, uintptr_t ra)
{
    void *haddr;

    tcg_debug_assert((mop & MO_SIZE) == MO_32);
    haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);
    store_atom_4(env, ra, haddr, mop, val);
    clear_helper_retaddr();
}

void helper_stl_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    MemOp mop = get_memop(oi);

    if (mop & MO_BSWAP) {
        val = bswap32(val);
    }
    do_st4_he_mmu(env, addr, val, mop, ra);
}

void cpu_stl_be_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    MemOp mop = get_memop(oi);

    tcg_debug_assert((mop & MO_BSWAP) == MO_BE);
    do_st4_he_mmu(env, addr, be32_to_cpu(val), mop, ra);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stl_le_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    MemOp mop = get_memop(oi);

    tcg_debug_assert((mop & MO_BSWAP) == MO_LE);
    do_st4_he_mmu(env, addr, le32_to_cpu(val), mop, ra);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

static void do_st8_he_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
                          MemOp mop, uintptr_t ra)
{
    void *haddr;

    tcg_debug_assert((mop & MO_SIZE) == MO_64);
    haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);
    store_atom_8(env, ra, haddr, mop, val);
    clear_helper_retaddr();
}

void helper_stq_mmu(CPUArchState *env, uint64_t addr, uint64_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    MemOp mop = get_memop(oi);

    if (mop & MO_BSWAP) {
        val = bswap64(val);
    }
    do_st8_he_mmu(env, addr, val, mop, ra);
}

void cpu_stq_be_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    MemOp mop = get_memop(oi);

    tcg_debug_assert((mop & MO_BSWAP) == MO_BE);
    do_st8_he_mmu(env, addr, cpu_to_be64(val), mop, ra);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stq_le_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    MemOp mop = get_memop(oi);

    tcg_debug_assert((mop & MO_BSWAP) == MO_LE);
    do_st8_he_mmu(env, addr, cpu_to_le64(val), mop, ra);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

static void do_st16_he_mmu(CPUArchState *env, abi_ptr addr, Int128 val,
                           MemOp mop, uintptr_t ra)
{
    void *haddr;

    tcg_debug_assert((mop & MO_SIZE) == MO_128);
    haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);
    store_atom_16(env, ra, haddr, mop, val);
    clear_helper_retaddr();
}

void helper_st16_mmu(CPUArchState *env, uint64_t addr, Int128 val,
                     MemOpIdx oi, uintptr_t ra)
{
    MemOp mop = get_memop(oi);

    if (mop & MO_BSWAP) {
        val = bswap128(val);
    }
    do_st16_he_mmu(env, addr, val, mop, ra);
}

void helper_st_i128(CPUArchState *env, uint64_t addr, Int128 val, MemOpIdx oi)
{
    helper_st16_mmu(env, addr, val, oi, GETPC());
}

void cpu_st16_be_mmu(CPUArchState *env, abi_ptr addr,
                     Int128 val, MemOpIdx oi, uintptr_t ra)
{
    MemOp mop = get_memop(oi);

    tcg_debug_assert((mop & MO_BSWAP) == MO_BE);
    if (!HOST_BIG_ENDIAN) {
        val = bswap128(val);
    }
    do_st16_he_mmu(env, addr, val, mop, ra);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_st16_le_mmu(CPUArchState *env, abi_ptr addr,
                     Int128 val, MemOpIdx oi, uintptr_t ra)
{
    MemOp mop = get_memop(oi);

    tcg_debug_assert((mop & MO_BSWAP) == MO_LE);
    if (HOST_BIG_ENDIAN) {
        val = bswap128(val);
    }
    do_st16_he_mmu(env, addr, val, mop, ra);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}
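/*
 * The code loaders below pass the sentinel value 1 to set_helper_retaddr()
 * rather than a real return address: a fault while reading guest memory
 * for translation is recognised by adjust_signal_pc() (case 1 above) as
 * MMU_INST_FETCH and does not go through the unwinder.
 */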
uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = ldub_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = lduw_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = ldl_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr ptr)
{
    uint64_t ret;

    set_helper_retaddr(1);
    ret = ldq_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

uint8_t cpu_ldb_code_mmu(CPUArchState *env, abi_ptr addr,
                         MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint8_t ret;

    haddr = cpu_mmu_lookup(env, addr, get_memop(oi), ra, MMU_INST_FETCH);
    ret = ldub_p(haddr);
    clear_helper_retaddr();
    return ret;
}

uint16_t cpu_ldw_code_mmu(CPUArchState *env, abi_ptr addr,
                          MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint16_t ret;

    haddr = cpu_mmu_lookup(env, addr, get_memop(oi), ra, MMU_INST_FETCH);
    ret = lduw_p(haddr);
    clear_helper_retaddr();
    if (get_memop(oi) & MO_BSWAP) {
        ret = bswap16(ret);
    }
    return ret;
}

uint32_t cpu_ldl_code_mmu(CPUArchState *env, abi_ptr addr,
                          MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint32_t ret;

    haddr = cpu_mmu_lookup(env, addr, get_memop(oi), ra, MMU_INST_FETCH);
    ret = ldl_p(haddr);
    clear_helper_retaddr();
    if (get_memop(oi) & MO_BSWAP) {
        ret = bswap32(ret);
    }
    return ret;
}

uint64_t cpu_ldq_code_mmu(CPUArchState *env, abi_ptr addr,
                          MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint64_t ret;

    haddr = cpu_mmu_lookup(env, addr, get_memop(oi), ra, MMU_INST_FETCH);
    ret = ldq_p(haddr);
    clear_helper_retaddr();
    if (get_memop(oi) & MO_BSWAP) {
        ret = bswap64(ret);
    }
    return ret;
}

#include "ldst_common.c.inc"

/*
 * Do not allow unaligned operations to proceed.  Return the host address.
 *
 * @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE.
 */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
                               MemOpIdx oi, int size, int prot,
                               uintptr_t retaddr)
{
    MemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    void *ret;

    /* Enforce guest required alignment. */
    if (unlikely(addr & ((1 << a_bits) - 1))) {
        MMUAccessType t = prot == PAGE_READ ? MMU_DATA_LOAD : MMU_DATA_STORE;
        cpu_loop_exit_sigbus(env_cpu(env), addr, t, retaddr);
    }

    /* Enforce qemu required alignment. */
    if (unlikely(addr & (size - 1))) {
        cpu_loop_exit_atomic(env_cpu(env), retaddr);
    }

    ret = g2h(env_cpu(env), addr);
    set_helper_retaddr(retaddr);
    return ret;
}

#include "atomic_common.c.inc"

/*
 * First set of functions passes in OI and RETADDR.
 * This makes them callable from other helpers.
 */

#define ATOMIC_NAME(X) \
    glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)
#define ATOMIC_MMU_CLEANUP do { clear_helper_retaddr(); } while (0)

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#if HAVE_ATOMIC128 || HAVE_CMPXCHG128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif
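/*
 * Note on the includes above (hedged): each DATA_SIZE instantiation of
 * "atomic_template.h" expands ATOMIC_NAME to define the user-mode atomic
 * helpers; e.g. DATA_SIZE == 4 is expected to produce names such as
 * cpu_atomic_cmpxchgl_be_mmu and cpu_atomic_cmpxchgl_le_mmu, each
 * bracketed by atomic_mmu_lookup() and ATOMIC_MMU_CLEANUP.
 */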