1 /* 2 * Host code generation 3 * 4 * Copyright (c) 2003 Fabrice Bellard 5 * 6 * This library is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU Lesser General Public 8 * License as published by the Free Software Foundation; either 9 * version 2.1 of the License, or (at your option) any later version. 10 * 11 * This library is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 * Lesser General Public License for more details. 15 * 16 * You should have received a copy of the GNU Lesser General Public 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 18 */ 19 20 #include "qemu/osdep.h" 21 22 #define NO_CPU_IO_DEFS 23 #include "cpu.h" 24 #include "trace.h" 25 #include "disas/disas.h" 26 #include "exec/exec-all.h" 27 #include "tcg.h" 28 #if defined(CONFIG_USER_ONLY) 29 #include "qemu.h" 30 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) 31 #include <sys/param.h> 32 #if __FreeBSD_version >= 700104 33 #define HAVE_KINFO_GETVMMAP 34 #define sigqueue sigqueue_freebsd /* avoid redefinition */ 35 #include <sys/proc.h> 36 #include <machine/profile.h> 37 #define _KERNEL 38 #include <sys/user.h> 39 #undef _KERNEL 40 #undef sigqueue 41 #include <libutil.h> 42 #endif 43 #endif 44 #else 45 #include "exec/ram_addr.h" 46 #endif 47 48 #include "exec/cputlb.h" 49 #include "exec/tb-hash.h" 50 #include "translate-all.h" 51 #include "qemu/bitmap.h" 52 #include "qemu/error-report.h" 53 #include "qemu/qemu-print.h" 54 #include "qemu/timer.h" 55 #include "qemu/main-loop.h" 56 #include "exec/log.h" 57 #include "sysemu/cpus.h" 58 #include "sysemu/tcg.h" 59 60 /* #define DEBUG_TB_INVALIDATE */ 61 /* #define DEBUG_TB_FLUSH */ 62 /* make various TB consistency checks */ 63 /* #define DEBUG_TB_CHECK */ 64 65 #ifdef DEBUG_TB_INVALIDATE 66 #define DEBUG_TB_INVALIDATE_GATE 1 67 #else 68 #define DEBUG_TB_INVALIDATE_GATE 0 69 #endif 70 71 #ifdef DEBUG_TB_FLUSH 72 #define DEBUG_TB_FLUSH_GATE 1 73 #else 74 #define DEBUG_TB_FLUSH_GATE 0 75 #endif 76 77 #if !defined(CONFIG_USER_ONLY) 78 /* TB consistency checks only implemented for usermode emulation. */ 79 #undef DEBUG_TB_CHECK 80 #endif 81 82 #ifdef DEBUG_TB_CHECK 83 #define DEBUG_TB_CHECK_GATE 1 84 #else 85 #define DEBUG_TB_CHECK_GATE 0 86 #endif 87 88 /* Access to the various translations structures need to be serialised via locks 89 * for consistency. 90 * In user-mode emulation access to the memory related structures are protected 91 * with mmap_lock. 92 * In !user-mode we use per-page locks. 93 */ 94 #ifdef CONFIG_SOFTMMU 95 #define assert_memory_lock() 96 #else 97 #define assert_memory_lock() tcg_debug_assert(have_mmap_lock()) 98 #endif 99 100 #define SMC_BITMAP_USE_THRESHOLD 10 101 102 typedef struct PageDesc { 103 /* list of TBs intersecting this ram page */ 104 uintptr_t first_tb; 105 #ifdef CONFIG_SOFTMMU 106 /* in order to optimize self modifying code, we count the number 107 of lookups we do to a given page to use a bitmap */ 108 unsigned long *code_bitmap; 109 unsigned int code_write_count; 110 #else 111 unsigned long flags; 112 #endif 113 #ifndef CONFIG_USER_ONLY 114 QemuSpin lock; 115 #endif 116 } PageDesc; 117 118 /** 119 * struct page_entry - page descriptor entry 120 * @pd: pointer to the &struct PageDesc of the page this entry represents 121 * @index: page index of the page 122 * @locked: whether the page is locked 123 * 124 * This struct helps us keep track of the locked state of a page, without 125 * bloating &struct PageDesc. 126 * 127 * A page lock protects accesses to all fields of &struct PageDesc. 128 * 129 * See also: &struct page_collection. 130 */ 131 struct page_entry { 132 PageDesc *pd; 133 tb_page_addr_t index; 134 bool locked; 135 }; 136 137 /** 138 * struct page_collection - tracks a set of pages (i.e. &struct page_entry's) 139 * @tree: Binary search tree (BST) of the pages, with key == page index 140 * @max: Pointer to the page in @tree with the highest page index 141 * 142 * To avoid deadlock we lock pages in ascending order of page index. 143 * When operating on a set of pages, we need to keep track of them so that 144 * we can lock them in order and also unlock them later. For this we collect 145 * pages (i.e. &struct page_entry's) in a binary search @tree. Given that the 146 * @tree implementation we use does not provide an O(1) operation to obtain the 147 * highest-ranked element, we use @max to keep track of the inserted page 148 * with the highest index. This is valuable because if a page is not in 149 * the tree and its index is higher than @max's, then we can lock it 150 * without breaking the locking order rule. 151 * 152 * Note on naming: 'struct page_set' would be shorter, but we already have a few 153 * page_set_*() helpers, so page_collection is used instead to avoid confusion. 154 * 155 * See also: page_collection_lock(). 156 */ 157 struct page_collection { 158 GTree *tree; 159 struct page_entry *max; 160 }; 161 162 /* list iterators for lists of tagged pointers in TranslationBlock */ 163 #define TB_FOR_EACH_TAGGED(head, tb, n, field) \ 164 for (n = (head) & 1, tb = (TranslationBlock *)((head) & ~1); \ 165 tb; tb = (TranslationBlock *)tb->field[n], n = (uintptr_t)tb & 1, \ 166 tb = (TranslationBlock *)((uintptr_t)tb & ~1)) 167 168 #define PAGE_FOR_EACH_TB(pagedesc, tb, n) \ 169 TB_FOR_EACH_TAGGED((pagedesc)->first_tb, tb, n, page_next) 170 171 #define TB_FOR_EACH_JMP(head_tb, tb, n) \ 172 TB_FOR_EACH_TAGGED((head_tb)->jmp_list_head, tb, n, jmp_list_next) 173 174 /* In system mode we want L1_MAP to be based on ram offsets, 175 while in user mode we want it to be based on virtual addresses. */ 176 #if !defined(CONFIG_USER_ONLY) 177 #if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS 178 # define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS 179 #else 180 # define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS 181 #endif 182 #else 183 # define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS 184 #endif 185 186 /* Size of the L2 (and L3, etc) page tables. */ 187 #define V_L2_BITS 10 188 #define V_L2_SIZE (1 << V_L2_BITS) 189 190 /* Make sure all possible CPU event bits fit in tb->trace_vcpu_dstate */ 191 QEMU_BUILD_BUG_ON(CPU_TRACE_DSTATE_MAX_EVENTS > 192 sizeof_field(TranslationBlock, trace_vcpu_dstate) 193 * BITS_PER_BYTE); 194 195 /* 196 * L1 Mapping properties 197 */ 198 static int v_l1_size; 199 static int v_l1_shift; 200 static int v_l2_levels; 201 202 /* The bottom level has pointers to PageDesc, and is indexed by 203 * anything from 4 to (V_L2_BITS + 3) bits, depending on target page size. 204 */ 205 #define V_L1_MIN_BITS 4 206 #define V_L1_MAX_BITS (V_L2_BITS + 3) 207 #define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS) 208 209 static void *l1_map[V_L1_MAX_SIZE]; 210 211 /* code generation context */ 212 TCGContext tcg_init_ctx; 213 __thread TCGContext *tcg_ctx; 214 TBContext tb_ctx; 215 bool parallel_cpus; 216 217 static void page_table_config_init(void) 218 { 219 uint32_t v_l1_bits; 220 221 assert(TARGET_PAGE_BITS); 222 /* The bits remaining after N lower levels of page tables. */ 223 v_l1_bits = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS; 224 if (v_l1_bits < V_L1_MIN_BITS) { 225 v_l1_bits += V_L2_BITS; 226 } 227 228 v_l1_size = 1 << v_l1_bits; 229 v_l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits; 230 v_l2_levels = v_l1_shift / V_L2_BITS - 1; 231 232 assert(v_l1_bits <= V_L1_MAX_BITS); 233 assert(v_l1_shift % V_L2_BITS == 0); 234 assert(v_l2_levels >= 0); 235 } 236 237 void cpu_gen_init(void) 238 { 239 tcg_context_init(&tcg_init_ctx); 240 } 241 242 /* Encode VAL as a signed leb128 sequence at P. 243 Return P incremented past the encoded value. */ 244 static uint8_t *encode_sleb128(uint8_t *p, target_long val) 245 { 246 int more, byte; 247 248 do { 249 byte = val & 0x7f; 250 val >>= 7; 251 more = !((val == 0 && (byte & 0x40) == 0) 252 || (val == -1 && (byte & 0x40) != 0)); 253 if (more) { 254 byte |= 0x80; 255 } 256 *p++ = byte; 257 } while (more); 258 259 return p; 260 } 261 262 /* Decode a signed leb128 sequence at *PP; increment *PP past the 263 decoded value. Return the decoded value. */ 264 static target_long decode_sleb128(uint8_t **pp) 265 { 266 uint8_t *p = *pp; 267 target_long val = 0; 268 int byte, shift = 0; 269 270 do { 271 byte = *p++; 272 val |= (target_ulong)(byte & 0x7f) << shift; 273 shift += 7; 274 } while (byte & 0x80); 275 if (shift < TARGET_LONG_BITS && (byte & 0x40)) { 276 val |= -(target_ulong)1 << shift; 277 } 278 279 *pp = p; 280 return val; 281 } 282 283 /* Encode the data collected about the instructions while compiling TB. 284 Place the data at BLOCK, and return the number of bytes consumed. 285 286 The logical table consists of TARGET_INSN_START_WORDS target_ulong's, 287 which come from the target's insn_start data, followed by a uintptr_t 288 which comes from the host pc of the end of the code implementing the insn. 289 290 Each line of the table is encoded as sleb128 deltas from the previous 291 line. The seed for the first line is { tb->pc, 0..., tb->tc.ptr }. 292 That is, the first column is seeded with the guest pc, the last column 293 with the host pc, and the middle columns with zeros. */ 294 295 static int encode_search(TranslationBlock *tb, uint8_t *block) 296 { 297 uint8_t *highwater = tcg_ctx->code_gen_highwater; 298 uint8_t *p = block; 299 int i, j, n; 300 301 for (i = 0, n = tb->icount; i < n; ++i) { 302 target_ulong prev; 303 304 for (j = 0; j < TARGET_INSN_START_WORDS; ++j) { 305 if (i == 0) { 306 prev = (j == 0 ? tb->pc : 0); 307 } else { 308 prev = tcg_ctx->gen_insn_data[i - 1][j]; 309 } 310 p = encode_sleb128(p, tcg_ctx->gen_insn_data[i][j] - prev); 311 } 312 prev = (i == 0 ? 0 : tcg_ctx->gen_insn_end_off[i - 1]); 313 p = encode_sleb128(p, tcg_ctx->gen_insn_end_off[i] - prev); 314 315 /* Test for (pending) buffer overflow. The assumption is that any 316 one row beginning below the high water mark cannot overrun 317 the buffer completely. Thus we can test for overflow after 318 encoding a row without having to check during encoding. */ 319 if (unlikely(p > highwater)) { 320 return -1; 321 } 322 } 323 324 return p - block; 325 } 326 327 /* The cpu state corresponding to 'searched_pc' is restored. 328 * When reset_icount is true, current TB will be interrupted and 329 * icount should be recalculated. 330 */ 331 static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb, 332 uintptr_t searched_pc, bool reset_icount) 333 { 334 target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc }; 335 uintptr_t host_pc = (uintptr_t)tb->tc.ptr; 336 CPUArchState *env = cpu->env_ptr; 337 uint8_t *p = tb->tc.ptr + tb->tc.size; 338 int i, j, num_insns = tb->icount; 339 #ifdef CONFIG_PROFILER 340 TCGProfile *prof = &tcg_ctx->prof; 341 int64_t ti = profile_getclock(); 342 #endif 343 344 searched_pc -= GETPC_ADJ; 345 346 if (searched_pc < host_pc) { 347 return -1; 348 } 349 350 /* Reconstruct the stored insn data while looking for the point at 351 which the end of the insn exceeds the searched_pc. */ 352 for (i = 0; i < num_insns; ++i) { 353 for (j = 0; j < TARGET_INSN_START_WORDS; ++j) { 354 data[j] += decode_sleb128(&p); 355 } 356 host_pc += decode_sleb128(&p); 357 if (host_pc > searched_pc) { 358 goto found; 359 } 360 } 361 return -1; 362 363 found: 364 if (reset_icount && (tb_cflags(tb) & CF_USE_ICOUNT)) { 365 assert(use_icount); 366 /* Reset the cycle counter to the start of the block 367 and shift if to the number of actually executed instructions */ 368 cpu_neg(cpu)->icount_decr.u16.low += num_insns - i; 369 } 370 restore_state_to_opc(env, tb, data); 371 372 #ifdef CONFIG_PROFILER 373 atomic_set(&prof->restore_time, 374 prof->restore_time + profile_getclock() - ti); 375 atomic_set(&prof->restore_count, prof->restore_count + 1); 376 #endif 377 return 0; 378 } 379 380 bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc, bool will_exit) 381 { 382 TranslationBlock *tb; 383 bool r = false; 384 uintptr_t check_offset; 385 386 /* The host_pc has to be in the region of current code buffer. If 387 * it is not we will not be able to resolve it here. The two cases 388 * where host_pc will not be correct are: 389 * 390 * - fault during translation (instruction fetch) 391 * - fault from helper (not using GETPC() macro) 392 * 393 * Either way we need return early as we can't resolve it here. 394 * 395 * We are using unsigned arithmetic so if host_pc < 396 * tcg_init_ctx.code_gen_buffer check_offset will wrap to way 397 * above the code_gen_buffer_size 398 */ 399 check_offset = host_pc - (uintptr_t) tcg_init_ctx.code_gen_buffer; 400 401 if (check_offset < tcg_init_ctx.code_gen_buffer_size) { 402 tb = tcg_tb_lookup(host_pc); 403 if (tb) { 404 cpu_restore_state_from_tb(cpu, tb, host_pc, will_exit); 405 if (tb_cflags(tb) & CF_NOCACHE) { 406 /* one-shot translation, invalidate it immediately */ 407 tb_phys_invalidate(tb, -1); 408 tcg_tb_remove(tb); 409 } 410 r = true; 411 } 412 } 413 414 return r; 415 } 416 417 static void page_init(void) 418 { 419 page_size_init(); 420 page_table_config_init(); 421 422 #if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY) 423 { 424 #ifdef HAVE_KINFO_GETVMMAP 425 struct kinfo_vmentry *freep; 426 int i, cnt; 427 428 freep = kinfo_getvmmap(getpid(), &cnt); 429 if (freep) { 430 mmap_lock(); 431 for (i = 0; i < cnt; i++) { 432 unsigned long startaddr, endaddr; 433 434 startaddr = freep[i].kve_start; 435 endaddr = freep[i].kve_end; 436 if (h2g_valid(startaddr)) { 437 startaddr = h2g(startaddr) & TARGET_PAGE_MASK; 438 439 if (h2g_valid(endaddr)) { 440 endaddr = h2g(endaddr); 441 page_set_flags(startaddr, endaddr, PAGE_RESERVED); 442 } else { 443 #if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS 444 endaddr = ~0ul; 445 page_set_flags(startaddr, endaddr, PAGE_RESERVED); 446 #endif 447 } 448 } 449 } 450 free(freep); 451 mmap_unlock(); 452 } 453 #else 454 FILE *f; 455 456 last_brk = (unsigned long)sbrk(0); 457 458 f = fopen("/compat/linux/proc/self/maps", "r"); 459 if (f) { 460 mmap_lock(); 461 462 do { 463 unsigned long startaddr, endaddr; 464 int n; 465 466 n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr); 467 468 if (n == 2 && h2g_valid(startaddr)) { 469 startaddr = h2g(startaddr) & TARGET_PAGE_MASK; 470 471 if (h2g_valid(endaddr)) { 472 endaddr = h2g(endaddr); 473 } else { 474 endaddr = ~0ul; 475 } 476 page_set_flags(startaddr, endaddr, PAGE_RESERVED); 477 } 478 } while (!feof(f)); 479 480 fclose(f); 481 mmap_unlock(); 482 } 483 #endif 484 } 485 #endif 486 } 487 488 static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc) 489 { 490 PageDesc *pd; 491 void **lp; 492 int i; 493 494 /* Level 1. Always allocated. */ 495 lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1)); 496 497 /* Level 2..N-1. */ 498 for (i = v_l2_levels; i > 0; i--) { 499 void **p = atomic_rcu_read(lp); 500 501 if (p == NULL) { 502 void *existing; 503 504 if (!alloc) { 505 return NULL; 506 } 507 p = g_new0(void *, V_L2_SIZE); 508 existing = atomic_cmpxchg(lp, NULL, p); 509 if (unlikely(existing)) { 510 g_free(p); 511 p = existing; 512 } 513 } 514 515 lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1)); 516 } 517 518 pd = atomic_rcu_read(lp); 519 if (pd == NULL) { 520 void *existing; 521 522 if (!alloc) { 523 return NULL; 524 } 525 pd = g_new0(PageDesc, V_L2_SIZE); 526 #ifndef CONFIG_USER_ONLY 527 { 528 int i; 529 530 for (i = 0; i < V_L2_SIZE; i++) { 531 qemu_spin_init(&pd[i].lock); 532 } 533 } 534 #endif 535 existing = atomic_cmpxchg(lp, NULL, pd); 536 if (unlikely(existing)) { 537 g_free(pd); 538 pd = existing; 539 } 540 } 541 542 return pd + (index & (V_L2_SIZE - 1)); 543 } 544 545 static inline PageDesc *page_find(tb_page_addr_t index) 546 { 547 return page_find_alloc(index, 0); 548 } 549 550 static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1, 551 PageDesc **ret_p2, tb_page_addr_t phys2, int alloc); 552 553 /* In user-mode page locks aren't used; mmap_lock is enough */ 554 #ifdef CONFIG_USER_ONLY 555 556 #define assert_page_locked(pd) tcg_debug_assert(have_mmap_lock()) 557 558 static inline void page_lock(PageDesc *pd) 559 { } 560 561 static inline void page_unlock(PageDesc *pd) 562 { } 563 564 static inline void page_lock_tb(const TranslationBlock *tb) 565 { } 566 567 static inline void page_unlock_tb(const TranslationBlock *tb) 568 { } 569 570 struct page_collection * 571 page_collection_lock(tb_page_addr_t start, tb_page_addr_t end) 572 { 573 return NULL; 574 } 575 576 void page_collection_unlock(struct page_collection *set) 577 { } 578 #else /* !CONFIG_USER_ONLY */ 579 580 #ifdef CONFIG_DEBUG_TCG 581 582 static __thread GHashTable *ht_pages_locked_debug; 583 584 static void ht_pages_locked_debug_init(void) 585 { 586 if (ht_pages_locked_debug) { 587 return; 588 } 589 ht_pages_locked_debug = g_hash_table_new(NULL, NULL); 590 } 591 592 static bool page_is_locked(const PageDesc *pd) 593 { 594 PageDesc *found; 595 596 ht_pages_locked_debug_init(); 597 found = g_hash_table_lookup(ht_pages_locked_debug, pd); 598 return !!found; 599 } 600 601 static void page_lock__debug(PageDesc *pd) 602 { 603 ht_pages_locked_debug_init(); 604 g_assert(!page_is_locked(pd)); 605 g_hash_table_insert(ht_pages_locked_debug, pd, pd); 606 } 607 608 static void page_unlock__debug(const PageDesc *pd) 609 { 610 bool removed; 611 612 ht_pages_locked_debug_init(); 613 g_assert(page_is_locked(pd)); 614 removed = g_hash_table_remove(ht_pages_locked_debug, pd); 615 g_assert(removed); 616 } 617 618 static void 619 do_assert_page_locked(const PageDesc *pd, const char *file, int line) 620 { 621 if (unlikely(!page_is_locked(pd))) { 622 error_report("assert_page_lock: PageDesc %p not locked @ %s:%d", 623 pd, file, line); 624 abort(); 625 } 626 } 627 628 #define assert_page_locked(pd) do_assert_page_locked(pd, __FILE__, __LINE__) 629 630 void assert_no_pages_locked(void) 631 { 632 ht_pages_locked_debug_init(); 633 g_assert(g_hash_table_size(ht_pages_locked_debug) == 0); 634 } 635 636 #else /* !CONFIG_DEBUG_TCG */ 637 638 #define assert_page_locked(pd) 639 640 static inline void page_lock__debug(const PageDesc *pd) 641 { 642 } 643 644 static inline void page_unlock__debug(const PageDesc *pd) 645 { 646 } 647 648 #endif /* CONFIG_DEBUG_TCG */ 649 650 static inline void page_lock(PageDesc *pd) 651 { 652 page_lock__debug(pd); 653 qemu_spin_lock(&pd->lock); 654 } 655 656 static inline void page_unlock(PageDesc *pd) 657 { 658 qemu_spin_unlock(&pd->lock); 659 page_unlock__debug(pd); 660 } 661 662 /* lock the page(s) of a TB in the correct acquisition order */ 663 static inline void page_lock_tb(const TranslationBlock *tb) 664 { 665 page_lock_pair(NULL, tb->page_addr[0], NULL, tb->page_addr[1], 0); 666 } 667 668 static inline void page_unlock_tb(const TranslationBlock *tb) 669 { 670 PageDesc *p1 = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS); 671 672 page_unlock(p1); 673 if (unlikely(tb->page_addr[1] != -1)) { 674 PageDesc *p2 = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS); 675 676 if (p2 != p1) { 677 page_unlock(p2); 678 } 679 } 680 } 681 682 static inline struct page_entry * 683 page_entry_new(PageDesc *pd, tb_page_addr_t index) 684 { 685 struct page_entry *pe = g_malloc(sizeof(*pe)); 686 687 pe->index = index; 688 pe->pd = pd; 689 pe->locked = false; 690 return pe; 691 } 692 693 static void page_entry_destroy(gpointer p) 694 { 695 struct page_entry *pe = p; 696 697 g_assert(pe->locked); 698 page_unlock(pe->pd); 699 g_free(pe); 700 } 701 702 /* returns false on success */ 703 static bool page_entry_trylock(struct page_entry *pe) 704 { 705 bool busy; 706 707 busy = qemu_spin_trylock(&pe->pd->lock); 708 if (!busy) { 709 g_assert(!pe->locked); 710 pe->locked = true; 711 page_lock__debug(pe->pd); 712 } 713 return busy; 714 } 715 716 static void do_page_entry_lock(struct page_entry *pe) 717 { 718 page_lock(pe->pd); 719 g_assert(!pe->locked); 720 pe->locked = true; 721 } 722 723 static gboolean page_entry_lock(gpointer key, gpointer value, gpointer data) 724 { 725 struct page_entry *pe = value; 726 727 do_page_entry_lock(pe); 728 return FALSE; 729 } 730 731 static gboolean page_entry_unlock(gpointer key, gpointer value, gpointer data) 732 { 733 struct page_entry *pe = value; 734 735 if (pe->locked) { 736 pe->locked = false; 737 page_unlock(pe->pd); 738 } 739 return FALSE; 740 } 741 742 /* 743 * Trylock a page, and if successful, add the page to a collection. 744 * Returns true ("busy") if the page could not be locked; false otherwise. 745 */ 746 static bool page_trylock_add(struct page_collection *set, tb_page_addr_t addr) 747 { 748 tb_page_addr_t index = addr >> TARGET_PAGE_BITS; 749 struct page_entry *pe; 750 PageDesc *pd; 751 752 pe = g_tree_lookup(set->tree, &index); 753 if (pe) { 754 return false; 755 } 756 757 pd = page_find(index); 758 if (pd == NULL) { 759 return false; 760 } 761 762 pe = page_entry_new(pd, index); 763 g_tree_insert(set->tree, &pe->index, pe); 764 765 /* 766 * If this is either (1) the first insertion or (2) a page whose index 767 * is higher than any other so far, just lock the page and move on. 768 */ 769 if (set->max == NULL || pe->index > set->max->index) { 770 set->max = pe; 771 do_page_entry_lock(pe); 772 return false; 773 } 774 /* 775 * Try to acquire out-of-order lock; if busy, return busy so that we acquire 776 * locks in order. 777 */ 778 return page_entry_trylock(pe); 779 } 780 781 static gint tb_page_addr_cmp(gconstpointer ap, gconstpointer bp, gpointer udata) 782 { 783 tb_page_addr_t a = *(const tb_page_addr_t *)ap; 784 tb_page_addr_t b = *(const tb_page_addr_t *)bp; 785 786 if (a == b) { 787 return 0; 788 } else if (a < b) { 789 return -1; 790 } 791 return 1; 792 } 793 794 /* 795 * Lock a range of pages ([@start,@end[) as well as the pages of all 796 * intersecting TBs. 797 * Locking order: acquire locks in ascending order of page index. 798 */ 799 struct page_collection * 800 page_collection_lock(tb_page_addr_t start, tb_page_addr_t end) 801 { 802 struct page_collection *set = g_malloc(sizeof(*set)); 803 tb_page_addr_t index; 804 PageDesc *pd; 805 806 start >>= TARGET_PAGE_BITS; 807 end >>= TARGET_PAGE_BITS; 808 g_assert(start <= end); 809 810 set->tree = g_tree_new_full(tb_page_addr_cmp, NULL, NULL, 811 page_entry_destroy); 812 set->max = NULL; 813 assert_no_pages_locked(); 814 815 retry: 816 g_tree_foreach(set->tree, page_entry_lock, NULL); 817 818 for (index = start; index <= end; index++) { 819 TranslationBlock *tb; 820 int n; 821 822 pd = page_find(index); 823 if (pd == NULL) { 824 continue; 825 } 826 if (page_trylock_add(set, index << TARGET_PAGE_BITS)) { 827 g_tree_foreach(set->tree, page_entry_unlock, NULL); 828 goto retry; 829 } 830 assert_page_locked(pd); 831 PAGE_FOR_EACH_TB(pd, tb, n) { 832 if (page_trylock_add(set, tb->page_addr[0]) || 833 (tb->page_addr[1] != -1 && 834 page_trylock_add(set, tb->page_addr[1]))) { 835 /* drop all locks, and reacquire in order */ 836 g_tree_foreach(set->tree, page_entry_unlock, NULL); 837 goto retry; 838 } 839 } 840 } 841 return set; 842 } 843 844 void page_collection_unlock(struct page_collection *set) 845 { 846 /* entries are unlocked and freed via page_entry_destroy */ 847 g_tree_destroy(set->tree); 848 g_free(set); 849 } 850 851 #endif /* !CONFIG_USER_ONLY */ 852 853 static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1, 854 PageDesc **ret_p2, tb_page_addr_t phys2, int alloc) 855 { 856 PageDesc *p1, *p2; 857 tb_page_addr_t page1; 858 tb_page_addr_t page2; 859 860 assert_memory_lock(); 861 g_assert(phys1 != -1); 862 863 page1 = phys1 >> TARGET_PAGE_BITS; 864 page2 = phys2 >> TARGET_PAGE_BITS; 865 866 p1 = page_find_alloc(page1, alloc); 867 if (ret_p1) { 868 *ret_p1 = p1; 869 } 870 if (likely(phys2 == -1)) { 871 page_lock(p1); 872 return; 873 } else if (page1 == page2) { 874 page_lock(p1); 875 if (ret_p2) { 876 *ret_p2 = p1; 877 } 878 return; 879 } 880 p2 = page_find_alloc(page2, alloc); 881 if (ret_p2) { 882 *ret_p2 = p2; 883 } 884 if (page1 < page2) { 885 page_lock(p1); 886 page_lock(p2); 887 } else { 888 page_lock(p2); 889 page_lock(p1); 890 } 891 } 892 893 #if defined(CONFIG_USER_ONLY) 894 /* Currently it is not recommended to allocate big chunks of data in 895 user mode. It will change when a dedicated libc will be used. */ 896 /* ??? 64-bit hosts ought to have no problem mmaping data outside the 897 region in which the guest needs to run. Revisit this. */ 898 #define USE_STATIC_CODE_GEN_BUFFER 899 #endif 900 901 /* Minimum size of the code gen buffer. This number is randomly chosen, 902 but not so small that we can't have a fair number of TB's live. */ 903 #define MIN_CODE_GEN_BUFFER_SIZE (1024u * 1024) 904 905 /* Maximum size of the code gen buffer we'd like to use. Unless otherwise 906 indicated, this is constrained by the range of direct branches on the 907 host cpu, as used by the TCG implementation of goto_tb. */ 908 #if defined(__x86_64__) 909 # define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024) 910 #elif defined(__sparc__) 911 # define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024) 912 #elif defined(__powerpc64__) 913 # define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024) 914 #elif defined(__powerpc__) 915 # define MAX_CODE_GEN_BUFFER_SIZE (32u * 1024 * 1024) 916 #elif defined(__aarch64__) 917 # define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024) 918 #elif defined(__s390x__) 919 /* We have a +- 4GB range on the branches; leave some slop. */ 920 # define MAX_CODE_GEN_BUFFER_SIZE (3ul * 1024 * 1024 * 1024) 921 #elif defined(__mips__) 922 /* We have a 256MB branch region, but leave room to make sure the 923 main executable is also within that region. */ 924 # define MAX_CODE_GEN_BUFFER_SIZE (128ul * 1024 * 1024) 925 #else 926 # define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1) 927 #endif 928 929 #define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024) 930 931 #define DEFAULT_CODE_GEN_BUFFER_SIZE \ 932 (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \ 933 ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE) 934 935 static inline size_t size_code_gen_buffer(size_t tb_size) 936 { 937 /* Size the buffer. */ 938 if (tb_size == 0) { 939 #ifdef USE_STATIC_CODE_GEN_BUFFER 940 tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE; 941 #else 942 /* ??? Needs adjustments. */ 943 /* ??? If we relax the requirement that CONFIG_USER_ONLY use the 944 static buffer, we could size this on RESERVED_VA, on the text 945 segment size of the executable, or continue to use the default. */ 946 tb_size = (unsigned long)(ram_size / 4); 947 #endif 948 } 949 if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) { 950 tb_size = MIN_CODE_GEN_BUFFER_SIZE; 951 } 952 if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) { 953 tb_size = MAX_CODE_GEN_BUFFER_SIZE; 954 } 955 return tb_size; 956 } 957 958 #ifdef __mips__ 959 /* In order to use J and JAL within the code_gen_buffer, we require 960 that the buffer not cross a 256MB boundary. */ 961 static inline bool cross_256mb(void *addr, size_t size) 962 { 963 return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful; 964 } 965 966 /* We weren't able to allocate a buffer without crossing that boundary, 967 so make do with the larger portion of the buffer that doesn't cross. 968 Returns the new base of the buffer, and adjusts code_gen_buffer_size. */ 969 static inline void *split_cross_256mb(void *buf1, size_t size1) 970 { 971 void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful); 972 size_t size2 = buf1 + size1 - buf2; 973 974 size1 = buf2 - buf1; 975 if (size1 < size2) { 976 size1 = size2; 977 buf1 = buf2; 978 } 979 980 tcg_ctx->code_gen_buffer_size = size1; 981 return buf1; 982 } 983 #endif 984 985 #ifdef USE_STATIC_CODE_GEN_BUFFER 986 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE] 987 __attribute__((aligned(CODE_GEN_ALIGN))); 988 989 static inline void *alloc_code_gen_buffer(void) 990 { 991 void *buf = static_code_gen_buffer; 992 void *end = static_code_gen_buffer + sizeof(static_code_gen_buffer); 993 size_t size; 994 995 /* page-align the beginning and end of the buffer */ 996 buf = QEMU_ALIGN_PTR_UP(buf, qemu_real_host_page_size); 997 end = QEMU_ALIGN_PTR_DOWN(end, qemu_real_host_page_size); 998 999 size = end - buf; 1000 1001 /* Honor a command-line option limiting the size of the buffer. */ 1002 if (size > tcg_ctx->code_gen_buffer_size) { 1003 size = QEMU_ALIGN_DOWN(tcg_ctx->code_gen_buffer_size, 1004 qemu_real_host_page_size); 1005 } 1006 tcg_ctx->code_gen_buffer_size = size; 1007 1008 #ifdef __mips__ 1009 if (cross_256mb(buf, size)) { 1010 buf = split_cross_256mb(buf, size); 1011 size = tcg_ctx->code_gen_buffer_size; 1012 } 1013 #endif 1014 1015 if (qemu_mprotect_rwx(buf, size)) { 1016 abort(); 1017 } 1018 qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE); 1019 1020 return buf; 1021 } 1022 #elif defined(_WIN32) 1023 static inline void *alloc_code_gen_buffer(void) 1024 { 1025 size_t size = tcg_ctx->code_gen_buffer_size; 1026 return VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT, 1027 PAGE_EXECUTE_READWRITE); 1028 } 1029 #else 1030 static inline void *alloc_code_gen_buffer(void) 1031 { 1032 int prot = PROT_WRITE | PROT_READ | PROT_EXEC; 1033 int flags = MAP_PRIVATE | MAP_ANONYMOUS; 1034 uintptr_t start = 0; 1035 size_t size = tcg_ctx->code_gen_buffer_size; 1036 void *buf; 1037 1038 /* Constrain the position of the buffer based on the host cpu. 1039 Note that these addresses are chosen in concert with the 1040 addresses assigned in the relevant linker script file. */ 1041 # if defined(__PIE__) || defined(__PIC__) 1042 /* Don't bother setting a preferred location if we're building 1043 a position-independent executable. We're more likely to get 1044 an address near the main executable if we let the kernel 1045 choose the address. */ 1046 # elif defined(__x86_64__) && defined(MAP_32BIT) 1047 /* Force the memory down into low memory with the executable. 1048 Leave the choice of exact location with the kernel. */ 1049 flags |= MAP_32BIT; 1050 /* Cannot expect to map more than 800MB in low memory. */ 1051 if (size > 800u * 1024 * 1024) { 1052 tcg_ctx->code_gen_buffer_size = size = 800u * 1024 * 1024; 1053 } 1054 # elif defined(__sparc__) 1055 start = 0x40000000ul; 1056 # elif defined(__s390x__) 1057 start = 0x90000000ul; 1058 # elif defined(__mips__) 1059 # if _MIPS_SIM == _ABI64 1060 start = 0x128000000ul; 1061 # else 1062 start = 0x08000000ul; 1063 # endif 1064 # endif 1065 1066 buf = mmap((void *)start, size, prot, flags, -1, 0); 1067 if (buf == MAP_FAILED) { 1068 return NULL; 1069 } 1070 1071 #ifdef __mips__ 1072 if (cross_256mb(buf, size)) { 1073 /* Try again, with the original still mapped, to avoid re-acquiring 1074 that 256mb crossing. This time don't specify an address. */ 1075 size_t size2; 1076 void *buf2 = mmap(NULL, size, prot, flags, -1, 0); 1077 switch ((int)(buf2 != MAP_FAILED)) { 1078 case 1: 1079 if (!cross_256mb(buf2, size)) { 1080 /* Success! Use the new buffer. */ 1081 munmap(buf, size); 1082 break; 1083 } 1084 /* Failure. Work with what we had. */ 1085 munmap(buf2, size); 1086 /* fallthru */ 1087 default: 1088 /* Split the original buffer. Free the smaller half. */ 1089 buf2 = split_cross_256mb(buf, size); 1090 size2 = tcg_ctx->code_gen_buffer_size; 1091 if (buf == buf2) { 1092 munmap(buf + size2, size - size2); 1093 } else { 1094 munmap(buf, size - size2); 1095 } 1096 size = size2; 1097 break; 1098 } 1099 buf = buf2; 1100 } 1101 #endif 1102 1103 /* Request large pages for the buffer. */ 1104 qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE); 1105 1106 return buf; 1107 } 1108 #endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */ 1109 1110 static inline void code_gen_alloc(size_t tb_size) 1111 { 1112 tcg_ctx->code_gen_buffer_size = size_code_gen_buffer(tb_size); 1113 tcg_ctx->code_gen_buffer = alloc_code_gen_buffer(); 1114 if (tcg_ctx->code_gen_buffer == NULL) { 1115 fprintf(stderr, "Could not allocate dynamic translator buffer\n"); 1116 exit(1); 1117 } 1118 } 1119 1120 static bool tb_cmp(const void *ap, const void *bp) 1121 { 1122 const TranslationBlock *a = ap; 1123 const TranslationBlock *b = bp; 1124 1125 return a->pc == b->pc && 1126 a->cs_base == b->cs_base && 1127 a->flags == b->flags && 1128 (tb_cflags(a) & CF_HASH_MASK) == (tb_cflags(b) & CF_HASH_MASK) && 1129 a->trace_vcpu_dstate == b->trace_vcpu_dstate && 1130 a->page_addr[0] == b->page_addr[0] && 1131 a->page_addr[1] == b->page_addr[1]; 1132 } 1133 1134 static void tb_htable_init(void) 1135 { 1136 unsigned int mode = QHT_MODE_AUTO_RESIZE; 1137 1138 qht_init(&tb_ctx.htable, tb_cmp, CODE_GEN_HTABLE_SIZE, mode); 1139 } 1140 1141 /* Must be called before using the QEMU cpus. 'tb_size' is the size 1142 (in bytes) allocated to the translation buffer. Zero means default 1143 size. */ 1144 void tcg_exec_init(unsigned long tb_size) 1145 { 1146 tcg_allowed = true; 1147 cpu_gen_init(); 1148 page_init(); 1149 tb_htable_init(); 1150 code_gen_alloc(tb_size); 1151 #if defined(CONFIG_SOFTMMU) 1152 /* There's no guest base to take into account, so go ahead and 1153 initialize the prologue now. */ 1154 tcg_prologue_init(tcg_ctx); 1155 #endif 1156 } 1157 1158 /* 1159 * Allocate a new translation block. Flush the translation buffer if 1160 * too many translation blocks or too much generated code. 1161 */ 1162 static TranslationBlock *tb_alloc(target_ulong pc) 1163 { 1164 TranslationBlock *tb; 1165 1166 assert_memory_lock(); 1167 1168 tb = tcg_tb_alloc(tcg_ctx); 1169 if (unlikely(tb == NULL)) { 1170 return NULL; 1171 } 1172 return tb; 1173 } 1174 1175 /* call with @p->lock held */ 1176 static inline void invalidate_page_bitmap(PageDesc *p) 1177 { 1178 assert_page_locked(p); 1179 #ifdef CONFIG_SOFTMMU 1180 g_free(p->code_bitmap); 1181 p->code_bitmap = NULL; 1182 p->code_write_count = 0; 1183 #endif 1184 } 1185 1186 /* Set to NULL all the 'first_tb' fields in all PageDescs. */ 1187 static void page_flush_tb_1(int level, void **lp) 1188 { 1189 int i; 1190 1191 if (*lp == NULL) { 1192 return; 1193 } 1194 if (level == 0) { 1195 PageDesc *pd = *lp; 1196 1197 for (i = 0; i < V_L2_SIZE; ++i) { 1198 page_lock(&pd[i]); 1199 pd[i].first_tb = (uintptr_t)NULL; 1200 invalidate_page_bitmap(pd + i); 1201 page_unlock(&pd[i]); 1202 } 1203 } else { 1204 void **pp = *lp; 1205 1206 for (i = 0; i < V_L2_SIZE; ++i) { 1207 page_flush_tb_1(level - 1, pp + i); 1208 } 1209 } 1210 } 1211 1212 static void page_flush_tb(void) 1213 { 1214 int i, l1_sz = v_l1_size; 1215 1216 for (i = 0; i < l1_sz; i++) { 1217 page_flush_tb_1(v_l2_levels, l1_map + i); 1218 } 1219 } 1220 1221 static gboolean tb_host_size_iter(gpointer key, gpointer value, gpointer data) 1222 { 1223 const TranslationBlock *tb = value; 1224 size_t *size = data; 1225 1226 *size += tb->tc.size; 1227 return false; 1228 } 1229 1230 /* flush all the translation blocks */ 1231 static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count) 1232 { 1233 mmap_lock(); 1234 /* If it is already been done on request of another CPU, 1235 * just retry. 1236 */ 1237 if (tb_ctx.tb_flush_count != tb_flush_count.host_int) { 1238 goto done; 1239 } 1240 1241 if (DEBUG_TB_FLUSH_GATE) { 1242 size_t nb_tbs = tcg_nb_tbs(); 1243 size_t host_size = 0; 1244 1245 tcg_tb_foreach(tb_host_size_iter, &host_size); 1246 printf("qemu: flush code_size=%zu nb_tbs=%zu avg_tb_size=%zu\n", 1247 tcg_code_size(), nb_tbs, nb_tbs > 0 ? host_size / nb_tbs : 0); 1248 } 1249 1250 CPU_FOREACH(cpu) { 1251 cpu_tb_jmp_cache_clear(cpu); 1252 } 1253 1254 qht_reset_size(&tb_ctx.htable, CODE_GEN_HTABLE_SIZE); 1255 page_flush_tb(); 1256 1257 tcg_region_reset_all(); 1258 /* XXX: flush processor icache at this point if cache flush is 1259 expensive */ 1260 atomic_mb_set(&tb_ctx.tb_flush_count, tb_ctx.tb_flush_count + 1); 1261 1262 done: 1263 mmap_unlock(); 1264 } 1265 1266 void tb_flush(CPUState *cpu) 1267 { 1268 if (tcg_enabled()) { 1269 unsigned tb_flush_count = atomic_mb_read(&tb_ctx.tb_flush_count); 1270 async_safe_run_on_cpu(cpu, do_tb_flush, 1271 RUN_ON_CPU_HOST_INT(tb_flush_count)); 1272 } 1273 } 1274 1275 /* 1276 * Formerly ifdef DEBUG_TB_CHECK. These debug functions are user-mode-only, 1277 * so in order to prevent bit rot we compile them unconditionally in user-mode, 1278 * and let the optimizer get rid of them by wrapping their user-only callers 1279 * with if (DEBUG_TB_CHECK_GATE). 1280 */ 1281 #ifdef CONFIG_USER_ONLY 1282 1283 static void do_tb_invalidate_check(void *p, uint32_t hash, void *userp) 1284 { 1285 TranslationBlock *tb = p; 1286 target_ulong addr = *(target_ulong *)userp; 1287 1288 if (!(addr + TARGET_PAGE_SIZE <= tb->pc || addr >= tb->pc + tb->size)) { 1289 printf("ERROR invalidate: address=" TARGET_FMT_lx 1290 " PC=%08lx size=%04x\n", addr, (long)tb->pc, tb->size); 1291 } 1292 } 1293 1294 /* verify that all the pages have correct rights for code 1295 * 1296 * Called with mmap_lock held. 1297 */ 1298 static void tb_invalidate_check(target_ulong address) 1299 { 1300 address &= TARGET_PAGE_MASK; 1301 qht_iter(&tb_ctx.htable, do_tb_invalidate_check, &address); 1302 } 1303 1304 static void do_tb_page_check(void *p, uint32_t hash, void *userp) 1305 { 1306 TranslationBlock *tb = p; 1307 int flags1, flags2; 1308 1309 flags1 = page_get_flags(tb->pc); 1310 flags2 = page_get_flags(tb->pc + tb->size - 1); 1311 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) { 1312 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n", 1313 (long)tb->pc, tb->size, flags1, flags2); 1314 } 1315 } 1316 1317 /* verify that all the pages have correct rights for code */ 1318 static void tb_page_check(void) 1319 { 1320 qht_iter(&tb_ctx.htable, do_tb_page_check, NULL); 1321 } 1322 1323 #endif /* CONFIG_USER_ONLY */ 1324 1325 /* 1326 * user-mode: call with mmap_lock held 1327 * !user-mode: call with @pd->lock held 1328 */ 1329 static inline void tb_page_remove(PageDesc *pd, TranslationBlock *tb) 1330 { 1331 TranslationBlock *tb1; 1332 uintptr_t *pprev; 1333 unsigned int n1; 1334 1335 assert_page_locked(pd); 1336 pprev = &pd->first_tb; 1337 PAGE_FOR_EACH_TB(pd, tb1, n1) { 1338 if (tb1 == tb) { 1339 *pprev = tb1->page_next[n1]; 1340 return; 1341 } 1342 pprev = &tb1->page_next[n1]; 1343 } 1344 g_assert_not_reached(); 1345 } 1346 1347 /* remove @orig from its @n_orig-th jump list */ 1348 static inline void tb_remove_from_jmp_list(TranslationBlock *orig, int n_orig) 1349 { 1350 uintptr_t ptr, ptr_locked; 1351 TranslationBlock *dest; 1352 TranslationBlock *tb; 1353 uintptr_t *pprev; 1354 int n; 1355 1356 /* mark the LSB of jmp_dest[] so that no further jumps can be inserted */ 1357 ptr = atomic_or_fetch(&orig->jmp_dest[n_orig], 1); 1358 dest = (TranslationBlock *)(ptr & ~1); 1359 if (dest == NULL) { 1360 return; 1361 } 1362 1363 qemu_spin_lock(&dest->jmp_lock); 1364 /* 1365 * While acquiring the lock, the jump might have been removed if the 1366 * destination TB was invalidated; check again. 1367 */ 1368 ptr_locked = atomic_read(&orig->jmp_dest[n_orig]); 1369 if (ptr_locked != ptr) { 1370 qemu_spin_unlock(&dest->jmp_lock); 1371 /* 1372 * The only possibility is that the jump was unlinked via 1373 * tb_jump_unlink(dest). Seeing here another destination would be a bug, 1374 * because we set the LSB above. 1375 */ 1376 g_assert(ptr_locked == 1 && dest->cflags & CF_INVALID); 1377 return; 1378 } 1379 /* 1380 * We first acquired the lock, and since the destination pointer matches, 1381 * we know for sure that @orig is in the jmp list. 1382 */ 1383 pprev = &dest->jmp_list_head; 1384 TB_FOR_EACH_JMP(dest, tb, n) { 1385 if (tb == orig && n == n_orig) { 1386 *pprev = tb->jmp_list_next[n]; 1387 /* no need to set orig->jmp_dest[n]; setting the LSB was enough */ 1388 qemu_spin_unlock(&dest->jmp_lock); 1389 return; 1390 } 1391 pprev = &tb->jmp_list_next[n]; 1392 } 1393 g_assert_not_reached(); 1394 } 1395 1396 /* reset the jump entry 'n' of a TB so that it is not chained to 1397 another TB */ 1398 static inline void tb_reset_jump(TranslationBlock *tb, int n) 1399 { 1400 uintptr_t addr = (uintptr_t)(tb->tc.ptr + tb->jmp_reset_offset[n]); 1401 tb_set_jmp_target(tb, n, addr); 1402 } 1403 1404 /* remove any jumps to the TB */ 1405 static inline void tb_jmp_unlink(TranslationBlock *dest) 1406 { 1407 TranslationBlock *tb; 1408 int n; 1409 1410 qemu_spin_lock(&dest->jmp_lock); 1411 1412 TB_FOR_EACH_JMP(dest, tb, n) { 1413 tb_reset_jump(tb, n); 1414 atomic_and(&tb->jmp_dest[n], (uintptr_t)NULL | 1); 1415 /* No need to clear the list entry; setting the dest ptr is enough */ 1416 } 1417 dest->jmp_list_head = (uintptr_t)NULL; 1418 1419 qemu_spin_unlock(&dest->jmp_lock); 1420 } 1421 1422 /* 1423 * In user-mode, call with mmap_lock held. 1424 * In !user-mode, if @rm_from_page_list is set, call with the TB's pages' 1425 * locks held. 1426 */ 1427 static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list) 1428 { 1429 CPUState *cpu; 1430 PageDesc *p; 1431 uint32_t h; 1432 tb_page_addr_t phys_pc; 1433 1434 assert_memory_lock(); 1435 1436 /* make sure no further incoming jumps will be chained to this TB */ 1437 qemu_spin_lock(&tb->jmp_lock); 1438 atomic_set(&tb->cflags, tb->cflags | CF_INVALID); 1439 qemu_spin_unlock(&tb->jmp_lock); 1440 1441 /* remove the TB from the hash list */ 1442 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK); 1443 h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb_cflags(tb) & CF_HASH_MASK, 1444 tb->trace_vcpu_dstate); 1445 if (!(tb->cflags & CF_NOCACHE) && 1446 !qht_remove(&tb_ctx.htable, tb, h)) { 1447 return; 1448 } 1449 1450 /* remove the TB from the page list */ 1451 if (rm_from_page_list) { 1452 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS); 1453 tb_page_remove(p, tb); 1454 invalidate_page_bitmap(p); 1455 if (tb->page_addr[1] != -1) { 1456 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS); 1457 tb_page_remove(p, tb); 1458 invalidate_page_bitmap(p); 1459 } 1460 } 1461 1462 /* remove the TB from the hash list */ 1463 h = tb_jmp_cache_hash_func(tb->pc); 1464 CPU_FOREACH(cpu) { 1465 if (atomic_read(&cpu->tb_jmp_cache[h]) == tb) { 1466 atomic_set(&cpu->tb_jmp_cache[h], NULL); 1467 } 1468 } 1469 1470 /* suppress this TB from the two jump lists */ 1471 tb_remove_from_jmp_list(tb, 0); 1472 tb_remove_from_jmp_list(tb, 1); 1473 1474 /* suppress any remaining jumps to this TB */ 1475 tb_jmp_unlink(tb); 1476 1477 atomic_set(&tcg_ctx->tb_phys_invalidate_count, 1478 tcg_ctx->tb_phys_invalidate_count + 1); 1479 } 1480 1481 static void tb_phys_invalidate__locked(TranslationBlock *tb) 1482 { 1483 do_tb_phys_invalidate(tb, true); 1484 } 1485 1486 /* invalidate one TB 1487 * 1488 * Called with mmap_lock held in user-mode. 1489 */ 1490 void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr) 1491 { 1492 if (page_addr == -1 && tb->page_addr[0] != -1) { 1493 page_lock_tb(tb); 1494 do_tb_phys_invalidate(tb, true); 1495 page_unlock_tb(tb); 1496 } else { 1497 do_tb_phys_invalidate(tb, false); 1498 } 1499 } 1500 1501 #ifdef CONFIG_SOFTMMU 1502 /* call with @p->lock held */ 1503 static void build_page_bitmap(PageDesc *p) 1504 { 1505 int n, tb_start, tb_end; 1506 TranslationBlock *tb; 1507 1508 assert_page_locked(p); 1509 p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE); 1510 1511 PAGE_FOR_EACH_TB(p, tb, n) { 1512 /* NOTE: this is subtle as a TB may span two physical pages */ 1513 if (n == 0) { 1514 /* NOTE: tb_end may be after the end of the page, but 1515 it is not a problem */ 1516 tb_start = tb->pc & ~TARGET_PAGE_MASK; 1517 tb_end = tb_start + tb->size; 1518 if (tb_end > TARGET_PAGE_SIZE) { 1519 tb_end = TARGET_PAGE_SIZE; 1520 } 1521 } else { 1522 tb_start = 0; 1523 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK); 1524 } 1525 bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start); 1526 } 1527 } 1528 #endif 1529 1530 /* add the tb in the target page and protect it if necessary 1531 * 1532 * Called with mmap_lock held for user-mode emulation. 1533 * Called with @p->lock held in !user-mode. 1534 */ 1535 static inline void tb_page_add(PageDesc *p, TranslationBlock *tb, 1536 unsigned int n, tb_page_addr_t page_addr) 1537 { 1538 #ifndef CONFIG_USER_ONLY 1539 bool page_already_protected; 1540 #endif 1541 1542 assert_page_locked(p); 1543 1544 tb->page_addr[n] = page_addr; 1545 tb->page_next[n] = p->first_tb; 1546 #ifndef CONFIG_USER_ONLY 1547 page_already_protected = p->first_tb != (uintptr_t)NULL; 1548 #endif 1549 p->first_tb = (uintptr_t)tb | n; 1550 invalidate_page_bitmap(p); 1551 1552 #if defined(CONFIG_USER_ONLY) 1553 if (p->flags & PAGE_WRITE) { 1554 target_ulong addr; 1555 PageDesc *p2; 1556 int prot; 1557 1558 /* force the host page as non writable (writes will have a 1559 page fault + mprotect overhead) */ 1560 page_addr &= qemu_host_page_mask; 1561 prot = 0; 1562 for (addr = page_addr; addr < page_addr + qemu_host_page_size; 1563 addr += TARGET_PAGE_SIZE) { 1564 1565 p2 = page_find(addr >> TARGET_PAGE_BITS); 1566 if (!p2) { 1567 continue; 1568 } 1569 prot |= p2->flags; 1570 p2->flags &= ~PAGE_WRITE; 1571 } 1572 mprotect(g2h(page_addr), qemu_host_page_size, 1573 (prot & PAGE_BITS) & ~PAGE_WRITE); 1574 if (DEBUG_TB_INVALIDATE_GATE) { 1575 printf("protecting code page: 0x" TB_PAGE_ADDR_FMT "\n", page_addr); 1576 } 1577 } 1578 #else 1579 /* if some code is already present, then the pages are already 1580 protected. So we handle the case where only the first TB is 1581 allocated in a physical page */ 1582 if (!page_already_protected) { 1583 tlb_protect_code(page_addr); 1584 } 1585 #endif 1586 } 1587 1588 /* add a new TB and link it to the physical page tables. phys_page2 is 1589 * (-1) to indicate that only one page contains the TB. 1590 * 1591 * Called with mmap_lock held for user-mode emulation. 1592 * 1593 * Returns a pointer @tb, or a pointer to an existing TB that matches @tb. 1594 * Note that in !user-mode, another thread might have already added a TB 1595 * for the same block of guest code that @tb corresponds to. In that case, 1596 * the caller should discard the original @tb, and use instead the returned TB. 1597 */ 1598 static TranslationBlock * 1599 tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc, 1600 tb_page_addr_t phys_page2) 1601 { 1602 PageDesc *p; 1603 PageDesc *p2 = NULL; 1604 1605 assert_memory_lock(); 1606 1607 if (phys_pc == -1) { 1608 /* 1609 * If the TB is not associated with a physical RAM page then 1610 * it must be a temporary one-insn TB, and we have nothing to do 1611 * except fill in the page_addr[] fields. 1612 */ 1613 assert(tb->cflags & CF_NOCACHE); 1614 tb->page_addr[0] = tb->page_addr[1] = -1; 1615 return tb; 1616 } 1617 1618 /* 1619 * Add the TB to the page list, acquiring first the pages's locks. 1620 * We keep the locks held until after inserting the TB in the hash table, 1621 * so that if the insertion fails we know for sure that the TBs are still 1622 * in the page descriptors. 1623 * Note that inserting into the hash table first isn't an option, since 1624 * we can only insert TBs that are fully initialized. 1625 */ 1626 page_lock_pair(&p, phys_pc, &p2, phys_page2, 1); 1627 tb_page_add(p, tb, 0, phys_pc & TARGET_PAGE_MASK); 1628 if (p2) { 1629 tb_page_add(p2, tb, 1, phys_page2); 1630 } else { 1631 tb->page_addr[1] = -1; 1632 } 1633 1634 if (!(tb->cflags & CF_NOCACHE)) { 1635 void *existing_tb = NULL; 1636 uint32_t h; 1637 1638 /* add in the hash table */ 1639 h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->cflags & CF_HASH_MASK, 1640 tb->trace_vcpu_dstate); 1641 qht_insert(&tb_ctx.htable, tb, h, &existing_tb); 1642 1643 /* remove TB from the page(s) if we couldn't insert it */ 1644 if (unlikely(existing_tb)) { 1645 tb_page_remove(p, tb); 1646 invalidate_page_bitmap(p); 1647 if (p2) { 1648 tb_page_remove(p2, tb); 1649 invalidate_page_bitmap(p2); 1650 } 1651 tb = existing_tb; 1652 } 1653 } 1654 1655 if (p2 && p2 != p) { 1656 page_unlock(p2); 1657 } 1658 page_unlock(p); 1659 1660 #ifdef CONFIG_USER_ONLY 1661 if (DEBUG_TB_CHECK_GATE) { 1662 tb_page_check(); 1663 } 1664 #endif 1665 return tb; 1666 } 1667 1668 /* Called with mmap_lock held for user mode emulation. */ 1669 TranslationBlock *tb_gen_code(CPUState *cpu, 1670 target_ulong pc, target_ulong cs_base, 1671 uint32_t flags, int cflags) 1672 { 1673 CPUArchState *env = cpu->env_ptr; 1674 TranslationBlock *tb, *existing_tb; 1675 tb_page_addr_t phys_pc, phys_page2; 1676 target_ulong virt_page2; 1677 tcg_insn_unit *gen_code_buf; 1678 int gen_code_size, search_size, max_insns; 1679 #ifdef CONFIG_PROFILER 1680 TCGProfile *prof = &tcg_ctx->prof; 1681 int64_t ti; 1682 #endif 1683 assert_memory_lock(); 1684 1685 phys_pc = get_page_addr_code(env, pc); 1686 1687 if (phys_pc == -1) { 1688 /* Generate a temporary TB with 1 insn in it */ 1689 cflags &= ~CF_COUNT_MASK; 1690 cflags |= CF_NOCACHE | 1; 1691 } 1692 1693 cflags &= ~CF_CLUSTER_MASK; 1694 cflags |= cpu->cluster_index << CF_CLUSTER_SHIFT; 1695 1696 max_insns = cflags & CF_COUNT_MASK; 1697 if (max_insns == 0) { 1698 max_insns = CF_COUNT_MASK; 1699 } 1700 if (max_insns > TCG_MAX_INSNS) { 1701 max_insns = TCG_MAX_INSNS; 1702 } 1703 if (cpu->singlestep_enabled || singlestep) { 1704 max_insns = 1; 1705 } 1706 1707 buffer_overflow: 1708 tb = tb_alloc(pc); 1709 if (unlikely(!tb)) { 1710 /* flush must be done */ 1711 tb_flush(cpu); 1712 mmap_unlock(); 1713 /* Make the execution loop process the flush as soon as possible. */ 1714 cpu->exception_index = EXCP_INTERRUPT; 1715 cpu_loop_exit(cpu); 1716 } 1717 1718 gen_code_buf = tcg_ctx->code_gen_ptr; 1719 tb->tc.ptr = gen_code_buf; 1720 tb->pc = pc; 1721 tb->cs_base = cs_base; 1722 tb->flags = flags; 1723 tb->cflags = cflags; 1724 tb->trace_vcpu_dstate = *cpu->trace_dstate; 1725 tcg_ctx->tb_cflags = cflags; 1726 tb_overflow: 1727 1728 #ifdef CONFIG_PROFILER 1729 /* includes aborted translations because of exceptions */ 1730 atomic_set(&prof->tb_count1, prof->tb_count1 + 1); 1731 ti = profile_getclock(); 1732 #endif 1733 1734 tcg_func_start(tcg_ctx); 1735 1736 tcg_ctx->cpu = env_cpu(env); 1737 gen_intermediate_code(cpu, tb, max_insns); 1738 tcg_ctx->cpu = NULL; 1739 1740 trace_translate_block(tb, tb->pc, tb->tc.ptr); 1741 1742 /* generate machine code */ 1743 tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID; 1744 tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID; 1745 tcg_ctx->tb_jmp_reset_offset = tb->jmp_reset_offset; 1746 if (TCG_TARGET_HAS_direct_jump) { 1747 tcg_ctx->tb_jmp_insn_offset = tb->jmp_target_arg; 1748 tcg_ctx->tb_jmp_target_addr = NULL; 1749 } else { 1750 tcg_ctx->tb_jmp_insn_offset = NULL; 1751 tcg_ctx->tb_jmp_target_addr = tb->jmp_target_arg; 1752 } 1753 1754 #ifdef CONFIG_PROFILER 1755 atomic_set(&prof->tb_count, prof->tb_count + 1); 1756 atomic_set(&prof->interm_time, prof->interm_time + profile_getclock() - ti); 1757 ti = profile_getclock(); 1758 #endif 1759 1760 gen_code_size = tcg_gen_code(tcg_ctx, tb); 1761 if (unlikely(gen_code_size < 0)) { 1762 switch (gen_code_size) { 1763 case -1: 1764 /* 1765 * Overflow of code_gen_buffer, or the current slice of it. 1766 * 1767 * TODO: We don't need to re-do gen_intermediate_code, nor 1768 * should we re-do the tcg optimization currently hidden 1769 * inside tcg_gen_code. All that should be required is to 1770 * flush the TBs, allocate a new TB, re-initialize it per 1771 * above, and re-do the actual code generation. 1772 */ 1773 goto buffer_overflow; 1774 1775 case -2: 1776 /* 1777 * The code generated for the TranslationBlock is too large. 1778 * The maximum size allowed by the unwind info is 64k. 1779 * There may be stricter constraints from relocations 1780 * in the tcg backend. 1781 * 1782 * Try again with half as many insns as we attempted this time. 1783 * If a single insn overflows, there's a bug somewhere... 1784 */ 1785 max_insns = tb->icount; 1786 assert(max_insns > 1); 1787 max_insns /= 2; 1788 goto tb_overflow; 1789 1790 default: 1791 g_assert_not_reached(); 1792 } 1793 } 1794 search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size); 1795 if (unlikely(search_size < 0)) { 1796 goto buffer_overflow; 1797 } 1798 tb->tc.size = gen_code_size; 1799 1800 #ifdef CONFIG_PROFILER 1801 atomic_set(&prof->code_time, prof->code_time + profile_getclock() - ti); 1802 atomic_set(&prof->code_in_len, prof->code_in_len + tb->size); 1803 atomic_set(&prof->code_out_len, prof->code_out_len + gen_code_size); 1804 atomic_set(&prof->search_out_len, prof->search_out_len + search_size); 1805 #endif 1806 1807 #ifdef DEBUG_DISAS 1808 if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) && 1809 qemu_log_in_addr_range(tb->pc)) { 1810 qemu_log_lock(); 1811 qemu_log("OUT: [size=%d]\n", gen_code_size); 1812 if (tcg_ctx->data_gen_ptr) { 1813 size_t code_size = tcg_ctx->data_gen_ptr - tb->tc.ptr; 1814 size_t data_size = gen_code_size - code_size; 1815 size_t i; 1816 1817 log_disas(tb->tc.ptr, code_size); 1818 1819 for (i = 0; i < data_size; i += sizeof(tcg_target_ulong)) { 1820 if (sizeof(tcg_target_ulong) == 8) { 1821 qemu_log("0x%08" PRIxPTR ": .quad 0x%016" PRIx64 "\n", 1822 (uintptr_t)tcg_ctx->data_gen_ptr + i, 1823 *(uint64_t *)(tcg_ctx->data_gen_ptr + i)); 1824 } else { 1825 qemu_log("0x%08" PRIxPTR ": .long 0x%08x\n", 1826 (uintptr_t)tcg_ctx->data_gen_ptr + i, 1827 *(uint32_t *)(tcg_ctx->data_gen_ptr + i)); 1828 } 1829 } 1830 } else { 1831 log_disas(tb->tc.ptr, gen_code_size); 1832 } 1833 qemu_log("\n"); 1834 qemu_log_flush(); 1835 qemu_log_unlock(); 1836 } 1837 #endif 1838 1839 atomic_set(&tcg_ctx->code_gen_ptr, (void *) 1840 ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size, 1841 CODE_GEN_ALIGN)); 1842 1843 /* init jump list */ 1844 qemu_spin_init(&tb->jmp_lock); 1845 tb->jmp_list_head = (uintptr_t)NULL; 1846 tb->jmp_list_next[0] = (uintptr_t)NULL; 1847 tb->jmp_list_next[1] = (uintptr_t)NULL; 1848 tb->jmp_dest[0] = (uintptr_t)NULL; 1849 tb->jmp_dest[1] = (uintptr_t)NULL; 1850 1851 /* init original jump addresses which have been set during tcg_gen_code() */ 1852 if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) { 1853 tb_reset_jump(tb, 0); 1854 } 1855 if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) { 1856 tb_reset_jump(tb, 1); 1857 } 1858 1859 /* check next page if needed */ 1860 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK; 1861 phys_page2 = -1; 1862 if ((pc & TARGET_PAGE_MASK) != virt_page2) { 1863 phys_page2 = get_page_addr_code(env, virt_page2); 1864 } 1865 /* 1866 * No explicit memory barrier is required -- tb_link_page() makes the 1867 * TB visible in a consistent state. 1868 */ 1869 existing_tb = tb_link_page(tb, phys_pc, phys_page2); 1870 /* if the TB already exists, discard what we just translated */ 1871 if (unlikely(existing_tb != tb)) { 1872 uintptr_t orig_aligned = (uintptr_t)gen_code_buf; 1873 1874 orig_aligned -= ROUND_UP(sizeof(*tb), qemu_icache_linesize); 1875 atomic_set(&tcg_ctx->code_gen_ptr, (void *)orig_aligned); 1876 return existing_tb; 1877 } 1878 tcg_tb_insert(tb); 1879 return tb; 1880 } 1881 1882 /* 1883 * @p must be non-NULL. 1884 * user-mode: call with mmap_lock held. 1885 * !user-mode: call with all @pages locked. 1886 */ 1887 static void 1888 tb_invalidate_phys_page_range__locked(struct page_collection *pages, 1889 PageDesc *p, tb_page_addr_t start, 1890 tb_page_addr_t end, 1891 int is_cpu_write_access) 1892 { 1893 TranslationBlock *tb; 1894 tb_page_addr_t tb_start, tb_end; 1895 int n; 1896 #ifdef TARGET_HAS_PRECISE_SMC 1897 CPUState *cpu = current_cpu; 1898 CPUArchState *env = NULL; 1899 int current_tb_not_found = is_cpu_write_access; 1900 TranslationBlock *current_tb = NULL; 1901 int current_tb_modified = 0; 1902 target_ulong current_pc = 0; 1903 target_ulong current_cs_base = 0; 1904 uint32_t current_flags = 0; 1905 #endif /* TARGET_HAS_PRECISE_SMC */ 1906 1907 assert_page_locked(p); 1908 1909 #if defined(TARGET_HAS_PRECISE_SMC) 1910 if (cpu != NULL) { 1911 env = cpu->env_ptr; 1912 } 1913 #endif 1914 1915 /* we remove all the TBs in the range [start, end[ */ 1916 /* XXX: see if in some cases it could be faster to invalidate all 1917 the code */ 1918 PAGE_FOR_EACH_TB(p, tb, n) { 1919 assert_page_locked(p); 1920 /* NOTE: this is subtle as a TB may span two physical pages */ 1921 if (n == 0) { 1922 /* NOTE: tb_end may be after the end of the page, but 1923 it is not a problem */ 1924 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK); 1925 tb_end = tb_start + tb->size; 1926 } else { 1927 tb_start = tb->page_addr[1]; 1928 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK); 1929 } 1930 if (!(tb_end <= start || tb_start >= end)) { 1931 #ifdef TARGET_HAS_PRECISE_SMC 1932 if (current_tb_not_found) { 1933 current_tb_not_found = 0; 1934 current_tb = NULL; 1935 if (cpu->mem_io_pc) { 1936 /* now we have a real cpu fault */ 1937 current_tb = tcg_tb_lookup(cpu->mem_io_pc); 1938 } 1939 } 1940 if (current_tb == tb && 1941 (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) { 1942 /* If we are modifying the current TB, we must stop 1943 its execution. We could be more precise by checking 1944 that the modification is after the current PC, but it 1945 would require a specialized function to partially 1946 restore the CPU state */ 1947 1948 current_tb_modified = 1; 1949 cpu_restore_state_from_tb(cpu, current_tb, 1950 cpu->mem_io_pc, true); 1951 cpu_get_tb_cpu_state(env, ¤t_pc, ¤t_cs_base, 1952 ¤t_flags); 1953 } 1954 #endif /* TARGET_HAS_PRECISE_SMC */ 1955 tb_phys_invalidate__locked(tb); 1956 } 1957 } 1958 #if !defined(CONFIG_USER_ONLY) 1959 /* if no code remaining, no need to continue to use slow writes */ 1960 if (!p->first_tb) { 1961 invalidate_page_bitmap(p); 1962 tlb_unprotect_code(start); 1963 } 1964 #endif 1965 #ifdef TARGET_HAS_PRECISE_SMC 1966 if (current_tb_modified) { 1967 page_collection_unlock(pages); 1968 /* Force execution of one insn next time. */ 1969 cpu->cflags_next_tb = 1 | curr_cflags(); 1970 mmap_unlock(); 1971 cpu_loop_exit_noexc(cpu); 1972 } 1973 #endif 1974 } 1975 1976 /* 1977 * Invalidate all TBs which intersect with the target physical address range 1978 * [start;end[. NOTE: start and end must refer to the *same* physical page. 1979 * 'is_cpu_write_access' should be true if called from a real cpu write 1980 * access: the virtual CPU will exit the current TB if code is modified inside 1981 * this TB. 1982 * 1983 * Called with mmap_lock held for user-mode emulation 1984 */ 1985 void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end, 1986 int is_cpu_write_access) 1987 { 1988 struct page_collection *pages; 1989 PageDesc *p; 1990 1991 assert_memory_lock(); 1992 1993 p = page_find(start >> TARGET_PAGE_BITS); 1994 if (p == NULL) { 1995 return; 1996 } 1997 pages = page_collection_lock(start, end); 1998 tb_invalidate_phys_page_range__locked(pages, p, start, end, 1999 is_cpu_write_access); 2000 page_collection_unlock(pages); 2001 } 2002 2003 /* 2004 * Invalidate all TBs which intersect with the target physical address range 2005 * [start;end[. NOTE: start and end may refer to *different* physical pages. 2006 * 'is_cpu_write_access' should be true if called from a real cpu write 2007 * access: the virtual CPU will exit the current TB if code is modified inside 2008 * this TB. 2009 * 2010 * Called with mmap_lock held for user-mode emulation. 2011 */ 2012 #ifdef CONFIG_SOFTMMU 2013 void tb_invalidate_phys_range(ram_addr_t start, ram_addr_t end) 2014 #else 2015 void tb_invalidate_phys_range(target_ulong start, target_ulong end) 2016 #endif 2017 { 2018 struct page_collection *pages; 2019 tb_page_addr_t next; 2020 2021 assert_memory_lock(); 2022 2023 pages = page_collection_lock(start, end); 2024 for (next = (start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE; 2025 start < end; 2026 start = next, next += TARGET_PAGE_SIZE) { 2027 PageDesc *pd = page_find(start >> TARGET_PAGE_BITS); 2028 tb_page_addr_t bound = MIN(next, end); 2029 2030 if (pd == NULL) { 2031 continue; 2032 } 2033 tb_invalidate_phys_page_range__locked(pages, pd, start, bound, 0); 2034 } 2035 page_collection_unlock(pages); 2036 } 2037 2038 #ifdef CONFIG_SOFTMMU 2039 /* len must be <= 8 and start must be a multiple of len. 2040 * Called via softmmu_template.h when code areas are written to with 2041 * iothread mutex not held. 2042 * 2043 * Call with all @pages in the range [@start, @start + len[ locked. 2044 */ 2045 void tb_invalidate_phys_page_fast(struct page_collection *pages, 2046 tb_page_addr_t start, int len) 2047 { 2048 PageDesc *p; 2049 2050 assert_memory_lock(); 2051 2052 p = page_find(start >> TARGET_PAGE_BITS); 2053 if (!p) { 2054 return; 2055 } 2056 2057 assert_page_locked(p); 2058 if (!p->code_bitmap && 2059 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) { 2060 build_page_bitmap(p); 2061 } 2062 if (p->code_bitmap) { 2063 unsigned int nr; 2064 unsigned long b; 2065 2066 nr = start & ~TARGET_PAGE_MASK; 2067 b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1)); 2068 if (b & ((1 << len) - 1)) { 2069 goto do_invalidate; 2070 } 2071 } else { 2072 do_invalidate: 2073 tb_invalidate_phys_page_range__locked(pages, p, start, start + len, 1); 2074 } 2075 } 2076 #else 2077 /* Called with mmap_lock held. If pc is not 0 then it indicates the 2078 * host PC of the faulting store instruction that caused this invalidate. 2079 * Returns true if the caller needs to abort execution of the current 2080 * TB (because it was modified by this store and the guest CPU has 2081 * precise-SMC semantics). 2082 */ 2083 static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc) 2084 { 2085 TranslationBlock *tb; 2086 PageDesc *p; 2087 int n; 2088 #ifdef TARGET_HAS_PRECISE_SMC 2089 TranslationBlock *current_tb = NULL; 2090 CPUState *cpu = current_cpu; 2091 CPUArchState *env = NULL; 2092 int current_tb_modified = 0; 2093 target_ulong current_pc = 0; 2094 target_ulong current_cs_base = 0; 2095 uint32_t current_flags = 0; 2096 #endif 2097 2098 assert_memory_lock(); 2099 2100 addr &= TARGET_PAGE_MASK; 2101 p = page_find(addr >> TARGET_PAGE_BITS); 2102 if (!p) { 2103 return false; 2104 } 2105 2106 #ifdef TARGET_HAS_PRECISE_SMC 2107 if (p->first_tb && pc != 0) { 2108 current_tb = tcg_tb_lookup(pc); 2109 } 2110 if (cpu != NULL) { 2111 env = cpu->env_ptr; 2112 } 2113 #endif 2114 assert_page_locked(p); 2115 PAGE_FOR_EACH_TB(p, tb, n) { 2116 #ifdef TARGET_HAS_PRECISE_SMC 2117 if (current_tb == tb && 2118 (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) { 2119 /* If we are modifying the current TB, we must stop 2120 its execution. We could be more precise by checking 2121 that the modification is after the current PC, but it 2122 would require a specialized function to partially 2123 restore the CPU state */ 2124 2125 current_tb_modified = 1; 2126 cpu_restore_state_from_tb(cpu, current_tb, pc, true); 2127 cpu_get_tb_cpu_state(env, ¤t_pc, ¤t_cs_base, 2128 ¤t_flags); 2129 } 2130 #endif /* TARGET_HAS_PRECISE_SMC */ 2131 tb_phys_invalidate(tb, addr); 2132 } 2133 p->first_tb = (uintptr_t)NULL; 2134 #ifdef TARGET_HAS_PRECISE_SMC 2135 if (current_tb_modified) { 2136 /* Force execution of one insn next time. */ 2137 cpu->cflags_next_tb = 1 | curr_cflags(); 2138 return true; 2139 } 2140 #endif 2141 2142 return false; 2143 } 2144 #endif 2145 2146 /* user-mode: call with mmap_lock held */ 2147 void tb_check_watchpoint(CPUState *cpu) 2148 { 2149 TranslationBlock *tb; 2150 2151 assert_memory_lock(); 2152 2153 tb = tcg_tb_lookup(cpu->mem_io_pc); 2154 if (tb) { 2155 /* We can use retranslation to find the PC. */ 2156 cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc, true); 2157 tb_phys_invalidate(tb, -1); 2158 } else { 2159 /* The exception probably happened in a helper. The CPU state should 2160 have been saved before calling it. Fetch the PC from there. */ 2161 CPUArchState *env = cpu->env_ptr; 2162 target_ulong pc, cs_base; 2163 tb_page_addr_t addr; 2164 uint32_t flags; 2165 2166 cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags); 2167 addr = get_page_addr_code(env, pc); 2168 if (addr != -1) { 2169 tb_invalidate_phys_range(addr, addr + 1); 2170 } 2171 } 2172 } 2173 2174 #ifndef CONFIG_USER_ONLY 2175 /* in deterministic execution mode, instructions doing device I/Os 2176 * must be at the end of the TB. 2177 * 2178 * Called by softmmu_template.h, with iothread mutex not held. 2179 */ 2180 void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr) 2181 { 2182 #if defined(TARGET_MIPS) || defined(TARGET_SH4) 2183 CPUArchState *env = cpu->env_ptr; 2184 #endif 2185 TranslationBlock *tb; 2186 uint32_t n; 2187 2188 tb = tcg_tb_lookup(retaddr); 2189 if (!tb) { 2190 cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p", 2191 (void *)retaddr); 2192 } 2193 cpu_restore_state_from_tb(cpu, tb, retaddr, true); 2194 2195 /* On MIPS and SH, delay slot instructions can only be restarted if 2196 they were already the first instruction in the TB. If this is not 2197 the first instruction in a TB then re-execute the preceding 2198 branch. */ 2199 n = 1; 2200 #if defined(TARGET_MIPS) 2201 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 2202 && env->active_tc.PC != tb->pc) { 2203 env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4); 2204 cpu_neg(cpu)->icount_decr.u16.low++; 2205 env->hflags &= ~MIPS_HFLAG_BMASK; 2206 n = 2; 2207 } 2208 #elif defined(TARGET_SH4) 2209 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0 2210 && env->pc != tb->pc) { 2211 env->pc -= 2; 2212 cpu_neg(cpu)->icount_decr.u16.low++; 2213 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL); 2214 n = 2; 2215 } 2216 #endif 2217 2218 /* Generate a new TB executing the I/O insn. */ 2219 cpu->cflags_next_tb = curr_cflags() | CF_LAST_IO | n; 2220 2221 if (tb_cflags(tb) & CF_NOCACHE) { 2222 if (tb->orig_tb) { 2223 /* Invalidate original TB if this TB was generated in 2224 * cpu_exec_nocache() */ 2225 tb_phys_invalidate(tb->orig_tb, -1); 2226 } 2227 tcg_tb_remove(tb); 2228 } 2229 2230 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not 2231 * the first in the TB) then we end up generating a whole new TB and 2232 * repeating the fault, which is horribly inefficient. 2233 * Better would be to execute just this insn uncached, or generate a 2234 * second new TB. 2235 */ 2236 cpu_loop_exit_noexc(cpu); 2237 } 2238 2239 static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr) 2240 { 2241 unsigned int i, i0 = tb_jmp_cache_hash_page(page_addr); 2242 2243 for (i = 0; i < TB_JMP_PAGE_SIZE; i++) { 2244 atomic_set(&cpu->tb_jmp_cache[i0 + i], NULL); 2245 } 2246 } 2247 2248 void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr) 2249 { 2250 /* Discard jump cache entries for any tb which might potentially 2251 overlap the flushed page. */ 2252 tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE); 2253 tb_jmp_cache_clear_page(cpu, addr); 2254 } 2255 2256 static void print_qht_statistics(struct qht_stats hst) 2257 { 2258 uint32_t hgram_opts; 2259 size_t hgram_bins; 2260 char *hgram; 2261 2262 if (!hst.head_buckets) { 2263 return; 2264 } 2265 qemu_printf("TB hash buckets %zu/%zu (%0.2f%% head buckets used)\n", 2266 hst.used_head_buckets, hst.head_buckets, 2267 (double)hst.used_head_buckets / hst.head_buckets * 100); 2268 2269 hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS; 2270 hgram_opts |= QDIST_PR_100X | QDIST_PR_PERCENT; 2271 if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) { 2272 hgram_opts |= QDIST_PR_NODECIMAL; 2273 } 2274 hgram = qdist_pr(&hst.occupancy, 10, hgram_opts); 2275 qemu_printf("TB hash occupancy %0.2f%% avg chain occ. Histogram: %s\n", 2276 qdist_avg(&hst.occupancy) * 100, hgram); 2277 g_free(hgram); 2278 2279 hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS; 2280 hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain); 2281 if (hgram_bins > 10) { 2282 hgram_bins = 10; 2283 } else { 2284 hgram_bins = 0; 2285 hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE; 2286 } 2287 hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts); 2288 qemu_printf("TB hash avg chain %0.3f buckets. Histogram: %s\n", 2289 qdist_avg(&hst.chain), hgram); 2290 g_free(hgram); 2291 } 2292 2293 struct tb_tree_stats { 2294 size_t nb_tbs; 2295 size_t host_size; 2296 size_t target_size; 2297 size_t max_target_size; 2298 size_t direct_jmp_count; 2299 size_t direct_jmp2_count; 2300 size_t cross_page; 2301 }; 2302 2303 static gboolean tb_tree_stats_iter(gpointer key, gpointer value, gpointer data) 2304 { 2305 const TranslationBlock *tb = value; 2306 struct tb_tree_stats *tst = data; 2307 2308 tst->nb_tbs++; 2309 tst->host_size += tb->tc.size; 2310 tst->target_size += tb->size; 2311 if (tb->size > tst->max_target_size) { 2312 tst->max_target_size = tb->size; 2313 } 2314 if (tb->page_addr[1] != -1) { 2315 tst->cross_page++; 2316 } 2317 if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) { 2318 tst->direct_jmp_count++; 2319 if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) { 2320 tst->direct_jmp2_count++; 2321 } 2322 } 2323 return false; 2324 } 2325 2326 void dump_exec_info(void) 2327 { 2328 struct tb_tree_stats tst = {}; 2329 struct qht_stats hst; 2330 size_t nb_tbs, flush_full, flush_part, flush_elide; 2331 2332 tcg_tb_foreach(tb_tree_stats_iter, &tst); 2333 nb_tbs = tst.nb_tbs; 2334 /* XXX: avoid using doubles ? */ 2335 qemu_printf("Translation buffer state:\n"); 2336 /* 2337 * Report total code size including the padding and TB structs; 2338 * otherwise users might think "-tb-size" is not honoured. 2339 * For avg host size we use the precise numbers from tb_tree_stats though. 2340 */ 2341 qemu_printf("gen code size %zu/%zu\n", 2342 tcg_code_size(), tcg_code_capacity()); 2343 qemu_printf("TB count %zu\n", nb_tbs); 2344 qemu_printf("TB avg target size %zu max=%zu bytes\n", 2345 nb_tbs ? tst.target_size / nb_tbs : 0, 2346 tst.max_target_size); 2347 qemu_printf("TB avg host size %zu bytes (expansion ratio: %0.1f)\n", 2348 nb_tbs ? tst.host_size / nb_tbs : 0, 2349 tst.target_size ? (double)tst.host_size / tst.target_size : 0); 2350 qemu_printf("cross page TB count %zu (%zu%%)\n", tst.cross_page, 2351 nb_tbs ? (tst.cross_page * 100) / nb_tbs : 0); 2352 qemu_printf("direct jump count %zu (%zu%%) (2 jumps=%zu %zu%%)\n", 2353 tst.direct_jmp_count, 2354 nb_tbs ? (tst.direct_jmp_count * 100) / nb_tbs : 0, 2355 tst.direct_jmp2_count, 2356 nb_tbs ? (tst.direct_jmp2_count * 100) / nb_tbs : 0); 2357 2358 qht_statistics_init(&tb_ctx.htable, &hst); 2359 print_qht_statistics(hst); 2360 qht_statistics_destroy(&hst); 2361 2362 qemu_printf("\nStatistics:\n"); 2363 qemu_printf("TB flush count %u\n", 2364 atomic_read(&tb_ctx.tb_flush_count)); 2365 qemu_printf("TB invalidate count %zu\n", 2366 tcg_tb_phys_invalidate_count()); 2367 2368 tlb_flush_counts(&flush_full, &flush_part, &flush_elide); 2369 qemu_printf("TLB full flushes %zu\n", flush_full); 2370 qemu_printf("TLB partial flushes %zu\n", flush_part); 2371 qemu_printf("TLB elided flushes %zu\n", flush_elide); 2372 tcg_dump_info(); 2373 } 2374 2375 void dump_opcount_info(void) 2376 { 2377 tcg_dump_op_count(); 2378 } 2379 2380 #else /* CONFIG_USER_ONLY */ 2381 2382 void cpu_interrupt(CPUState *cpu, int mask) 2383 { 2384 g_assert(qemu_mutex_iothread_locked()); 2385 cpu->interrupt_request |= mask; 2386 atomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1); 2387 } 2388 2389 /* 2390 * Walks guest process memory "regions" one by one 2391 * and calls callback function 'fn' for each region. 2392 */ 2393 struct walk_memory_regions_data { 2394 walk_memory_regions_fn fn; 2395 void *priv; 2396 target_ulong start; 2397 int prot; 2398 }; 2399 2400 static int walk_memory_regions_end(struct walk_memory_regions_data *data, 2401 target_ulong end, int new_prot) 2402 { 2403 if (data->start != -1u) { 2404 int rc = data->fn(data->priv, data->start, end, data->prot); 2405 if (rc != 0) { 2406 return rc; 2407 } 2408 } 2409 2410 data->start = (new_prot ? end : -1u); 2411 data->prot = new_prot; 2412 2413 return 0; 2414 } 2415 2416 static int walk_memory_regions_1(struct walk_memory_regions_data *data, 2417 target_ulong base, int level, void **lp) 2418 { 2419 target_ulong pa; 2420 int i, rc; 2421 2422 if (*lp == NULL) { 2423 return walk_memory_regions_end(data, base, 0); 2424 } 2425 2426 if (level == 0) { 2427 PageDesc *pd = *lp; 2428 2429 for (i = 0; i < V_L2_SIZE; ++i) { 2430 int prot = pd[i].flags; 2431 2432 pa = base | (i << TARGET_PAGE_BITS); 2433 if (prot != data->prot) { 2434 rc = walk_memory_regions_end(data, pa, prot); 2435 if (rc != 0) { 2436 return rc; 2437 } 2438 } 2439 } 2440 } else { 2441 void **pp = *lp; 2442 2443 for (i = 0; i < V_L2_SIZE; ++i) { 2444 pa = base | ((target_ulong)i << 2445 (TARGET_PAGE_BITS + V_L2_BITS * level)); 2446 rc = walk_memory_regions_1(data, pa, level - 1, pp + i); 2447 if (rc != 0) { 2448 return rc; 2449 } 2450 } 2451 } 2452 2453 return 0; 2454 } 2455 2456 int walk_memory_regions(void *priv, walk_memory_regions_fn fn) 2457 { 2458 struct walk_memory_regions_data data; 2459 uintptr_t i, l1_sz = v_l1_size; 2460 2461 data.fn = fn; 2462 data.priv = priv; 2463 data.start = -1u; 2464 data.prot = 0; 2465 2466 for (i = 0; i < l1_sz; i++) { 2467 target_ulong base = i << (v_l1_shift + TARGET_PAGE_BITS); 2468 int rc = walk_memory_regions_1(&data, base, v_l2_levels, l1_map + i); 2469 if (rc != 0) { 2470 return rc; 2471 } 2472 } 2473 2474 return walk_memory_regions_end(&data, 0, 0); 2475 } 2476 2477 static int dump_region(void *priv, target_ulong start, 2478 target_ulong end, unsigned long prot) 2479 { 2480 FILE *f = (FILE *)priv; 2481 2482 (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx 2483 " "TARGET_FMT_lx" %c%c%c\n", 2484 start, end, end - start, 2485 ((prot & PAGE_READ) ? 'r' : '-'), 2486 ((prot & PAGE_WRITE) ? 'w' : '-'), 2487 ((prot & PAGE_EXEC) ? 'x' : '-')); 2488 2489 return 0; 2490 } 2491 2492 /* dump memory mappings */ 2493 void page_dump(FILE *f) 2494 { 2495 const int length = sizeof(target_ulong) * 2; 2496 (void) fprintf(f, "%-*s %-*s %-*s %s\n", 2497 length, "start", length, "end", length, "size", "prot"); 2498 walk_memory_regions(f, dump_region); 2499 } 2500 2501 int page_get_flags(target_ulong address) 2502 { 2503 PageDesc *p; 2504 2505 p = page_find(address >> TARGET_PAGE_BITS); 2506 if (!p) { 2507 return 0; 2508 } 2509 return p->flags; 2510 } 2511 2512 /* Modify the flags of a page and invalidate the code if necessary. 2513 The flag PAGE_WRITE_ORG is positioned automatically depending 2514 on PAGE_WRITE. The mmap_lock should already be held. */ 2515 void page_set_flags(target_ulong start, target_ulong end, int flags) 2516 { 2517 target_ulong addr, len; 2518 2519 /* This function should never be called with addresses outside the 2520 guest address space. If this assert fires, it probably indicates 2521 a missing call to h2g_valid. */ 2522 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS 2523 assert(end <= ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS)); 2524 #endif 2525 assert(start < end); 2526 assert_memory_lock(); 2527 2528 start = start & TARGET_PAGE_MASK; 2529 end = TARGET_PAGE_ALIGN(end); 2530 2531 if (flags & PAGE_WRITE) { 2532 flags |= PAGE_WRITE_ORG; 2533 } 2534 2535 for (addr = start, len = end - start; 2536 len != 0; 2537 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) { 2538 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1); 2539 2540 /* If the write protection bit is set, then we invalidate 2541 the code inside. */ 2542 if (!(p->flags & PAGE_WRITE) && 2543 (flags & PAGE_WRITE) && 2544 p->first_tb) { 2545 tb_invalidate_phys_page(addr, 0); 2546 } 2547 p->flags = flags; 2548 } 2549 } 2550 2551 int page_check_range(target_ulong start, target_ulong len, int flags) 2552 { 2553 PageDesc *p; 2554 target_ulong end; 2555 target_ulong addr; 2556 2557 /* This function should never be called with addresses outside the 2558 guest address space. If this assert fires, it probably indicates 2559 a missing call to h2g_valid. */ 2560 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS 2561 assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS)); 2562 #endif 2563 2564 if (len == 0) { 2565 return 0; 2566 } 2567 if (start + len - 1 < start) { 2568 /* We've wrapped around. */ 2569 return -1; 2570 } 2571 2572 /* must do before we loose bits in the next step */ 2573 end = TARGET_PAGE_ALIGN(start + len); 2574 start = start & TARGET_PAGE_MASK; 2575 2576 for (addr = start, len = end - start; 2577 len != 0; 2578 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) { 2579 p = page_find(addr >> TARGET_PAGE_BITS); 2580 if (!p) { 2581 return -1; 2582 } 2583 if (!(p->flags & PAGE_VALID)) { 2584 return -1; 2585 } 2586 2587 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) { 2588 return -1; 2589 } 2590 if (flags & PAGE_WRITE) { 2591 if (!(p->flags & PAGE_WRITE_ORG)) { 2592 return -1; 2593 } 2594 /* unprotect the page if it was put read-only because it 2595 contains translated code */ 2596 if (!(p->flags & PAGE_WRITE)) { 2597 if (!page_unprotect(addr, 0)) { 2598 return -1; 2599 } 2600 } 2601 } 2602 } 2603 return 0; 2604 } 2605 2606 /* called from signal handler: invalidate the code and unprotect the 2607 * page. Return 0 if the fault was not handled, 1 if it was handled, 2608 * and 2 if it was handled but the caller must cause the TB to be 2609 * immediately exited. (We can only return 2 if the 'pc' argument is 2610 * non-zero.) 2611 */ 2612 int page_unprotect(target_ulong address, uintptr_t pc) 2613 { 2614 unsigned int prot; 2615 bool current_tb_invalidated; 2616 PageDesc *p; 2617 target_ulong host_start, host_end, addr; 2618 2619 /* Technically this isn't safe inside a signal handler. However we 2620 know this only ever happens in a synchronous SEGV handler, so in 2621 practice it seems to be ok. */ 2622 mmap_lock(); 2623 2624 p = page_find(address >> TARGET_PAGE_BITS); 2625 if (!p) { 2626 mmap_unlock(); 2627 return 0; 2628 } 2629 2630 /* if the page was really writable, then we change its 2631 protection back to writable */ 2632 if (p->flags & PAGE_WRITE_ORG) { 2633 current_tb_invalidated = false; 2634 if (p->flags & PAGE_WRITE) { 2635 /* If the page is actually marked WRITE then assume this is because 2636 * this thread raced with another one which got here first and 2637 * set the page to PAGE_WRITE and did the TB invalidate for us. 2638 */ 2639 #ifdef TARGET_HAS_PRECISE_SMC 2640 TranslationBlock *current_tb = tcg_tb_lookup(pc); 2641 if (current_tb) { 2642 current_tb_invalidated = tb_cflags(current_tb) & CF_INVALID; 2643 } 2644 #endif 2645 } else { 2646 host_start = address & qemu_host_page_mask; 2647 host_end = host_start + qemu_host_page_size; 2648 2649 prot = 0; 2650 for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) { 2651 p = page_find(addr >> TARGET_PAGE_BITS); 2652 p->flags |= PAGE_WRITE; 2653 prot |= p->flags; 2654 2655 /* and since the content will be modified, we must invalidate 2656 the corresponding translated code. */ 2657 current_tb_invalidated |= tb_invalidate_phys_page(addr, pc); 2658 #ifdef CONFIG_USER_ONLY 2659 if (DEBUG_TB_CHECK_GATE) { 2660 tb_invalidate_check(addr); 2661 } 2662 #endif 2663 } 2664 mprotect((void *)g2h(host_start), qemu_host_page_size, 2665 prot & PAGE_BITS); 2666 } 2667 mmap_unlock(); 2668 /* If current TB was invalidated return to main loop */ 2669 return current_tb_invalidated ? 2 : 1; 2670 } 2671 mmap_unlock(); 2672 return 0; 2673 } 2674 #endif /* CONFIG_USER_ONLY */ 2675 2676 /* This is a wrapper for common code that can not use CONFIG_SOFTMMU */ 2677 void tcg_flush_softmmu_tlb(CPUState *cs) 2678 { 2679 #ifdef CONFIG_SOFTMMU 2680 tlb_flush(cs); 2681 #endif 2682 } 2683