/*
 *  Host code generation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#ifdef _WIN32
#include <windows.h>
#endif
#include "qemu/osdep.h"


#include "qemu-common.h"
#define NO_CPU_IO_DEFS
#include "cpu.h"
#include "trace.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else
#include "exec/address-spaces.h"
#endif

#include "exec/cputlb.h"
#include "exec/tb-hash.h"
#include "translate-all.h"
#include "qemu/bitmap.h"
#include "qemu/error-report.h"
#include "qemu/timer.h"
#include "qemu/main-loop.h"
#include "exec/log.h"
#include "sysemu/cpus.h"

/* #define DEBUG_TB_INVALIDATE */
/* #define DEBUG_TB_FLUSH */
/* make various TB consistency checks */
/* #define DEBUG_TB_CHECK */

#ifdef DEBUG_TB_INVALIDATE
#define DEBUG_TB_INVALIDATE_GATE 1
#else
#define DEBUG_TB_INVALIDATE_GATE 0
#endif

#ifdef DEBUG_TB_FLUSH
#define DEBUG_TB_FLUSH_GATE 1
#else
#define DEBUG_TB_FLUSH_GATE 0
#endif

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#ifdef DEBUG_TB_CHECK
#define DEBUG_TB_CHECK_GATE 1
#else
#define DEBUG_TB_CHECK_GATE 0
#endif

/* Access to the various translation structures needs to be serialised
 * via locks for consistency.
 * In user-mode emulation access to the memory-related structures is
 * protected with mmap_lock.
 * In !user-mode we use per-page locks.
 */
#ifdef CONFIG_SOFTMMU
#define assert_memory_lock()
#else
#define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    uintptr_t first_tb;
#ifdef CONFIG_SOFTMMU
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned long *code_bitmap;
    unsigned int code_write_count;
#else
    unsigned long flags;
#endif
#ifndef CONFIG_USER_ONLY
    QemuSpin lock;
#endif
} PageDesc;

/**
 * struct page_entry - page descriptor entry
 * @pd:     pointer to the &struct PageDesc of the page this entry represents
 * @index:  page index of the page
 * @locked: whether the page is locked
 *
 * This struct helps us keep track of the locked state of a page, without
 * bloating &struct PageDesc.
 *
 * A page lock protects accesses to all fields of &struct PageDesc.
 *
 * See also: &struct page_collection.
 */
struct page_entry {
    PageDesc *pd;
    tb_page_addr_t index;
    bool locked;
};

/**
 * struct page_collection - tracks a set of pages (i.e. &struct page_entry's)
 * @tree:   Binary search tree (BST) of the pages, with key == page index
 * @max:    Pointer to the page in @tree with the highest page index
 *
 * To avoid deadlock we lock pages in ascending order of page index.
 * When operating on a set of pages, we need to keep track of them so that
 * we can lock them in order and also unlock them later. For this we collect
 * pages (i.e. &struct page_entry's) in a binary search @tree. Given that the
 * @tree implementation we use does not provide an O(1) operation to obtain the
 * highest-ranked element, we use @max to keep track of the inserted page
 * with the highest index. This is valuable because if a page is not in
 * the tree and its index is higher than @max's, then we can lock it
 * without breaking the locking order rule.
 *
 * Note on naming: 'struct page_set' would be shorter, but we already have a few
 * page_set_*() helpers, so page_collection is used instead to avoid confusion.
 *
 * See also: page_collection_lock().
 */
struct page_collection {
    GTree *tree;
    struct page_entry *max;
};

/* list iterators for lists of tagged pointers in TranslationBlock */
#define TB_FOR_EACH_TAGGED(head, tb, n, field)                          \
    for (n = (head) & 1, tb = (TranslationBlock *)((head) & ~1);        \
         tb; tb = (TranslationBlock *)tb->field[n], n = (uintptr_t)tb & 1, \
             tb = (TranslationBlock *)((uintptr_t)tb & ~1))

#define PAGE_FOR_EACH_TB(pagedesc, tb, n)                       \
    TB_FOR_EACH_TAGGED((pagedesc)->first_tb, tb, n, page_next)

#define TB_FOR_EACH_JMP(head_tb, tb, n)                                 \
    TB_FOR_EACH_TAGGED((head_tb)->jmp_list_head, tb, n, jmp_list_next)
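/*
 * A short note on the tagged pointers iterated over above: each link in
 * these intrusive lists stores a TranslationBlock pointer with an index
 * (0 or 1) folded into bit 0.  For the page lists this selects which of
 * the TB's up-to-two physical pages the link belongs to; for the jump
 * lists it selects which of the TB's two outgoing jumps is being
 * followed.  The macros mask out bit 0 to recover the pointer and keep
 * the index in @n so that the matching page_next[] or jmp_list_next[]
 * slot is used on the next iteration step.
 */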
/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define V_L2_BITS 10
#define V_L2_SIZE (1 << V_L2_BITS)

/* Make sure all possible CPU event bits fit in tb->trace_vcpu_dstate */
QEMU_BUILD_BUG_ON(CPU_TRACE_DSTATE_MAX_EVENTS >
                  sizeof_field(TranslationBlock, trace_vcpu_dstate)
                  * BITS_PER_BYTE);

/*
 * L1 Mapping properties
 */
static int v_l1_size;
static int v_l1_shift;
static int v_l2_levels;

/* The bottom level has pointers to PageDesc, and is indexed by
 * anything from 4 to (V_L2_BITS + 3) bits, depending on target page size.
 */
#define V_L1_MIN_BITS 4
#define V_L1_MAX_BITS (V_L2_BITS + 3)
#define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS)

static void *l1_map[V_L1_MAX_SIZE];

/* code generation context */
TCGContext tcg_init_ctx;
__thread TCGContext *tcg_ctx;
TBContext tb_ctx;
bool parallel_cpus;

static void page_table_config_init(void)
{
    uint32_t v_l1_bits;

    assert(TARGET_PAGE_BITS);
    /* The bits remaining after N lower levels of page tables.  */
    v_l1_bits = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS;
    if (v_l1_bits < V_L1_MIN_BITS) {
        v_l1_bits += V_L2_BITS;
    }

    v_l1_size = 1 << v_l1_bits;
    v_l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits;
    v_l2_levels = v_l1_shift / V_L2_BITS - 1;

    assert(v_l1_bits <= V_L1_MAX_BITS);
    assert(v_l1_shift % V_L2_BITS == 0);
    assert(v_l2_levels >= 0);
}
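/*
 * Worked example of the configuration above (the concrete numbers are only
 * illustrative): with L1_MAP_ADDR_SPACE_BITS == 47, TARGET_PAGE_BITS == 12
 * and V_L2_BITS == 10, there are 35 bits of page index to cover.
 * 35 % 10 == 5, which is >= V_L1_MIN_BITS, so v_l1_bits == 5: a 32-entry
 * L1 table (v_l1_size == 32, v_l1_shift == 30) followed by
 * v_l2_levels == 2 intermediate 1024-entry levels, with the low 10 bits
 * of the page index selecting the PageDesc in the bottom-level array
 * (see page_find_alloc() below).
 */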
void cpu_gen_init(void)
{
    tcg_context_init(&tcg_init_ctx);
}

/* Encode VAL as a signed leb128 sequence at P.
   Return P incremented past the encoded value.  */
static uint8_t *encode_sleb128(uint8_t *p, target_long val)
{
    int more, byte;

    do {
        byte = val & 0x7f;
        val >>= 7;
        more = !((val == 0 && (byte & 0x40) == 0)
                 || (val == -1 && (byte & 0x40) != 0));
        if (more) {
            byte |= 0x80;
        }
        *p++ = byte;
    } while (more);

    return p;
}

/* Decode a signed leb128 sequence at *PP; increment *PP past the
   decoded value.  Return the decoded value.  */
static target_long decode_sleb128(uint8_t **pp)
{
    uint8_t *p = *pp;
    target_long val = 0;
    int byte, shift = 0;

    do {
        byte = *p++;
        val |= (target_ulong)(byte & 0x7f) << shift;
        shift += 7;
    } while (byte & 0x80);
    if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
        val |= -(target_ulong)1 << shift;
    }

    *pp = p;
    return val;
}
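/*
 * Worked example of the encoding above: encode_sleb128() emits the value
 * seven bits at a time, least-significant group first, setting bit 7 on
 * every byte except the last.  300 (0b1_0010_1100) therefore encodes as
 * { 0xac, 0x02 }, while -3 fits in the single byte 0x7d, because bit 6 of
 * the last byte carries the sign.  decode_sleb128() reverses this by
 * accumulating the 7-bit groups and sign-extending from bit 6 of the
 * final byte.
 */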
/* Encode the data collected about the instructions while compiling TB.
   Place the data at BLOCK, and return the number of bytes consumed.

   The logical table consists of TARGET_INSN_START_WORDS target_ulong's,
   which come from the target's insn_start data, followed by a uintptr_t
   which comes from the host pc of the end of the code implementing the insn.

   Each line of the table is encoded as sleb128 deltas from the previous
   line.  The seed for the first line is { tb->pc, 0..., tb->tc.ptr }.
   That is, the first column is seeded with the guest pc, the last column
   with the host pc, and the middle columns with zeros.  */

static int encode_search(TranslationBlock *tb, uint8_t *block)
{
    uint8_t *highwater = tcg_ctx->code_gen_highwater;
    uint8_t *p = block;
    int i, j, n;

    for (i = 0, n = tb->icount; i < n; ++i) {
        target_ulong prev;

        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            if (i == 0) {
                prev = (j == 0 ? tb->pc : 0);
            } else {
                prev = tcg_ctx->gen_insn_data[i - 1][j];
            }
            p = encode_sleb128(p, tcg_ctx->gen_insn_data[i][j] - prev);
        }
        prev = (i == 0 ? 0 : tcg_ctx->gen_insn_end_off[i - 1]);
        p = encode_sleb128(p, tcg_ctx->gen_insn_end_off[i] - prev);

        /* Test for (pending) buffer overflow.  The assumption is that any
           one row beginning below the high water mark cannot overrun
           the buffer completely.  Thus we can test for overflow after
           encoding a row without having to check during encoding.  */
        if (unlikely(p > highwater)) {
            return -1;
        }
    }

    return p - block;
}

/* The cpu state corresponding to 'searched_pc' is restored.
 * When reset_icount is true, current TB will be interrupted and
 * icount should be recalculated.
 */
static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                                     uintptr_t searched_pc, bool reset_icount)
{
    target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc };
    uintptr_t host_pc = (uintptr_t)tb->tc.ptr;
    CPUArchState *env = cpu->env_ptr;
    uint8_t *p = tb->tc.ptr + tb->tc.size;
    int i, j, num_insns = tb->icount;
#ifdef CONFIG_PROFILER
    TCGProfile *prof = &tcg_ctx->prof;
    int64_t ti = profile_getclock();
#endif

    searched_pc -= GETPC_ADJ;

    if (searched_pc < host_pc) {
        return -1;
    }

    /* Reconstruct the stored insn data while looking for the point at
       which the end of the insn exceeds the searched_pc.  */
    for (i = 0; i < num_insns; ++i) {
        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            data[j] += decode_sleb128(&p);
        }
        host_pc += decode_sleb128(&p);
        if (host_pc > searched_pc) {
            goto found;
        }
    }
    return -1;

 found:
    if (reset_icount && (tb_cflags(tb) & CF_USE_ICOUNT)) {
        assert(use_icount);
        /* Reset the cycle counter to the start of the block
           and shift it to the number of actually executed instructions */
        cpu->icount_decr.u16.low += num_insns - i;
    }
    restore_state_to_opc(env, tb, data);

#ifdef CONFIG_PROFILER
    atomic_set(&prof->restore_time,
               prof->restore_time + profile_getclock() - ti);
    atomic_set(&prof->restore_count, prof->restore_count + 1);
#endif
    return 0;
}

bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc, bool will_exit)
{
    TranslationBlock *tb;
    bool r = false;
    uintptr_t check_offset;

    /* The host_pc has to be in the region of current code buffer.  If
     * it is not we will not be able to resolve it here.  The two cases
     * where host_pc will not be correct are:
     *
     *  - fault during translation (instruction fetch)
     *  - fault from helper (not using GETPC() macro)
     *
     * Either way we need to return early as we can't resolve it here.
     *
     * We are using unsigned arithmetic so if host_pc <
     * tcg_init_ctx.code_gen_buffer check_offset will wrap to way
     * above the code_gen_buffer_size
     */
    check_offset = host_pc - (uintptr_t) tcg_init_ctx.code_gen_buffer;

    if (check_offset < tcg_init_ctx.code_gen_buffer_size) {
        tb = tcg_tb_lookup(host_pc);
        if (tb) {
            cpu_restore_state_from_tb(cpu, tb, host_pc, will_exit);
            if (tb_cflags(tb) & CF_NOCACHE) {
                /* one-shot translation, invalidate it immediately */
                tb_phys_invalidate(tb, -1);
                tcg_tb_remove(tb);
            }
            r = true;
        }
    }

    return r;
}

static void page_init(void)
{
    page_size_init();
    page_table_config_init();

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));

    /* Level 2..N-1.  */
    for (i = v_l2_levels; i > 0; i--) {
        void **p = atomic_rcu_read(lp);

        if (p == NULL) {
            void *existing;

            if (!alloc) {
                return NULL;
            }
            p = g_new0(void *, V_L2_SIZE);
            existing = atomic_cmpxchg(lp, NULL, p);
            if (unlikely(existing)) {
                g_free(p);
                p = existing;
            }
        }

        lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
    }

    pd = atomic_rcu_read(lp);
    if (pd == NULL) {
        void *existing;

        if (!alloc) {
            return NULL;
        }
        pd = g_new0(PageDesc, V_L2_SIZE);
#ifndef CONFIG_USER_ONLY
        {
            int i;

            for (i = 0; i < V_L2_SIZE; i++) {
                qemu_spin_init(&pd[i].lock);
            }
        }
#endif
        existing = atomic_cmpxchg(lp, NULL, pd);
        if (unlikely(existing)) {
            g_free(pd);
            pd = existing;
        }
    }

    return pd + (index & (V_L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
                           PageDesc **ret_p2, tb_page_addr_t phys2, int alloc);

/* In user-mode page locks aren't used; mmap_lock is enough */
#ifdef CONFIG_USER_ONLY

#define assert_page_locked(pd) tcg_debug_assert(have_mmap_lock())

static inline void page_lock(PageDesc *pd)
{ }

static inline void page_unlock(PageDesc *pd)
{ }

static inline void page_lock_tb(const TranslationBlock *tb)
{ }

static inline void page_unlock_tb(const TranslationBlock *tb)
{ }

struct page_collection *
page_collection_lock(tb_page_addr_t start, tb_page_addr_t end)
{
    return NULL;
}

void page_collection_unlock(struct page_collection *set)
{ }
#else /* !CONFIG_USER_ONLY */

#ifdef CONFIG_DEBUG_TCG

static __thread GHashTable *ht_pages_locked_debug;

static void ht_pages_locked_debug_init(void)
{
    if (ht_pages_locked_debug) {
        return;
    }
    ht_pages_locked_debug = g_hash_table_new(NULL, NULL);
}

static bool page_is_locked(const PageDesc *pd)
{
    PageDesc *found;

    ht_pages_locked_debug_init();
    found = g_hash_table_lookup(ht_pages_locked_debug, pd);
    return !!found;
}

static void page_lock__debug(PageDesc *pd)
{
    ht_pages_locked_debug_init();
    g_assert(!page_is_locked(pd));
    g_hash_table_insert(ht_pages_locked_debug, pd, pd);
}

static void page_unlock__debug(const PageDesc *pd)
{
    bool removed;

    ht_pages_locked_debug_init();
    g_assert(page_is_locked(pd));
    removed = g_hash_table_remove(ht_pages_locked_debug, pd);
    g_assert(removed);
}

static void
do_assert_page_locked(const PageDesc *pd, const char *file, int line)
{
    if (unlikely(!page_is_locked(pd))) {
        error_report("assert_page_lock: PageDesc %p not locked @ %s:%d",
                     pd, file, line);
        abort();
    }
}

#define assert_page_locked(pd) do_assert_page_locked(pd, __FILE__, __LINE__)

void assert_no_pages_locked(void)
{
    ht_pages_locked_debug_init();
    g_assert(g_hash_table_size(ht_pages_locked_debug) == 0);
}

#else /* !CONFIG_DEBUG_TCG */

#define assert_page_locked(pd)

static inline void page_lock__debug(const PageDesc *pd)
{
}

static inline void page_unlock__debug(const PageDesc *pd)
{
}

#endif /* CONFIG_DEBUG_TCG */

static inline void page_lock(PageDesc *pd)
{
    page_lock__debug(pd);
    qemu_spin_lock(&pd->lock);
}

static inline void page_unlock(PageDesc *pd)
{
    qemu_spin_unlock(&pd->lock);
    page_unlock__debug(pd);
}

/* lock the page(s) of a TB in the correct acquisition order */
static inline void page_lock_tb(const TranslationBlock *tb)
{
    page_lock_pair(NULL, tb->page_addr[0], NULL, tb->page_addr[1], 0);
}

static inline void page_unlock_tb(const TranslationBlock *tb)
{
    page_unlock(page_find(tb->page_addr[0] >> TARGET_PAGE_BITS));
    if (unlikely(tb->page_addr[1] != -1)) {
        page_unlock(page_find(tb->page_addr[1] >> TARGET_PAGE_BITS));
    }
}

static inline struct page_entry *
page_entry_new(PageDesc *pd, tb_page_addr_t index)
{
    struct page_entry *pe = g_malloc(sizeof(*pe));

    pe->index = index;
    pe->pd = pd;
    pe->locked = false;
    return pe;
}

static void page_entry_destroy(gpointer p)
{
    struct page_entry *pe = p;

    g_assert(pe->locked);
    page_unlock(pe->pd);
    g_free(pe);
}

/* returns false on success */
static bool page_entry_trylock(struct page_entry *pe)
{
    bool busy;

    busy = qemu_spin_trylock(&pe->pd->lock);
    if (!busy) {
        g_assert(!pe->locked);
        pe->locked = true;
        page_lock__debug(pe->pd);
    }
    return busy;
}

static void do_page_entry_lock(struct page_entry *pe)
{
    page_lock(pe->pd);
    g_assert(!pe->locked);
    pe->locked = true;
}

static gboolean page_entry_lock(gpointer key, gpointer value, gpointer data)
{
    struct page_entry *pe = value;

    do_page_entry_lock(pe);
    return FALSE;
}

static gboolean page_entry_unlock(gpointer key, gpointer value, gpointer data)
{
    struct page_entry *pe = value;

    if (pe->locked) {
        pe->locked = false;
        page_unlock(pe->pd);
    }
    return FALSE;
}

/*
 * Trylock a page, and if successful, add the page to a collection.
 * Returns true ("busy") if the page could not be locked; false otherwise.
 */
static bool page_trylock_add(struct page_collection *set, tb_page_addr_t addr)
{
    tb_page_addr_t index = addr >> TARGET_PAGE_BITS;
    struct page_entry *pe;
    PageDesc *pd;

    pe = g_tree_lookup(set->tree, &index);
    if (pe) {
        return false;
    }

    pd = page_find(index);
    if (pd == NULL) {
        return false;
    }

    pe = page_entry_new(pd, index);
    g_tree_insert(set->tree, &pe->index, pe);

    /*
     * If this is either (1) the first insertion or (2) a page whose index
     * is higher than any other so far, just lock the page and move on.
     */
    if (set->max == NULL || pe->index > set->max->index) {
        set->max = pe;
        do_page_entry_lock(pe);
        return false;
    }
    /*
     * Try to acquire out-of-order lock; if busy, return busy so that we acquire
     * locks in order.
     */
    return page_entry_trylock(pe);
}
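/*
 * Illustration of the ordering rule enforced above (example indices only):
 * if the collection already holds pages 5 and 7 (so set->max is 7) and a TB
 * turns out to also span page 3, locking 3 outright could deadlock against
 * a thread that locks in ascending order, so we only trylock it.  If the
 * trylock fails, page_collection_lock() below drops every lock held so far
 * and retries, reacquiring 3, 5 and 7 in ascending index order.
 */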
static gint tb_page_addr_cmp(gconstpointer ap, gconstpointer bp, gpointer udata)
{
    tb_page_addr_t a = *(const tb_page_addr_t *)ap;
    tb_page_addr_t b = *(const tb_page_addr_t *)bp;

    if (a == b) {
        return 0;
    } else if (a < b) {
        return -1;
    }
    return 1;
}

/*
 * Lock a range of pages ([@start,@end[) as well as the pages of all
 * intersecting TBs.
 * Locking order: acquire locks in ascending order of page index.
 */
struct page_collection *
page_collection_lock(tb_page_addr_t start, tb_page_addr_t end)
{
    struct page_collection *set = g_malloc(sizeof(*set));
    tb_page_addr_t index;
    PageDesc *pd;

    start >>= TARGET_PAGE_BITS;
    end >>= TARGET_PAGE_BITS;
    g_assert(start <= end);

    set->tree = g_tree_new_full(tb_page_addr_cmp, NULL, NULL,
                                page_entry_destroy);
    set->max = NULL;
    assert_no_pages_locked();

 retry:
    g_tree_foreach(set->tree, page_entry_lock, NULL);

    for (index = start; index <= end; index++) {
        TranslationBlock *tb;
        int n;

        pd = page_find(index);
        if (pd == NULL) {
            continue;
        }
        if (page_trylock_add(set, index << TARGET_PAGE_BITS)) {
            g_tree_foreach(set->tree, page_entry_unlock, NULL);
            goto retry;
        }
        assert_page_locked(pd);
        PAGE_FOR_EACH_TB(pd, tb, n) {
            if (page_trylock_add(set, tb->page_addr[0]) ||
                (tb->page_addr[1] != -1 &&
                 page_trylock_add(set, tb->page_addr[1]))) {
                /* drop all locks, and reacquire in order */
                g_tree_foreach(set->tree, page_entry_unlock, NULL);
                goto retry;
            }
        }
    }
    return set;
}

void page_collection_unlock(struct page_collection *set)
{
    /* entries are unlocked and freed via page_entry_destroy */
    g_tree_destroy(set->tree);
    g_free(set);
}

#endif /* !CONFIG_USER_ONLY */

static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
                           PageDesc **ret_p2, tb_page_addr_t phys2, int alloc)
{
    PageDesc *p1, *p2;

    assert_memory_lock();
    g_assert(phys1 != -1 && phys1 != phys2);
    p1 = page_find_alloc(phys1 >> TARGET_PAGE_BITS, alloc);
    if (ret_p1) {
        *ret_p1 = p1;
    }
    if (likely(phys2 == -1)) {
        page_lock(p1);
        return;
    }
    p2 = page_find_alloc(phys2 >> TARGET_PAGE_BITS, alloc);
    if (ret_p2) {
        *ret_p2 = p2;
    }
    if (phys1 < phys2) {
        page_lock(p1);
        page_lock(p2);
    } else {
        page_lock(p2);
        page_lock(p1);
    }
}

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode.  It will change when a dedicated libc is used.  */
/* ??? 64-bit hosts ought to have no problem mmapping data outside the
   region in which the guest needs to run.  Revisit this.  */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

/* Minimum size of the code gen buffer.  This number is randomly chosen,
   but not so small that we can't have a fair number of TB's live.  */
#define MIN_CODE_GEN_BUFFER_SIZE     (1024u * 1024)

/* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
   indicated, this is constrained by the range of direct branches on the
   host cpu, as used by the TCG implementation of goto_tb.  */
#if defined(__x86_64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__sparc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__powerpc64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__powerpc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (32u * 1024 * 1024)
#elif defined(__aarch64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__s390x__)
  /* We have a +- 4GB range on the branches; leave some slop.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (3ul * 1024 * 1024 * 1024)
#elif defined(__mips__)
  /* We have a 256MB branch region, but leave room to make sure the
     main executable is also within that region.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (128ul * 1024 * 1024)
#else
# define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)

#define DEFAULT_CODE_GEN_BUFFER_SIZE \
  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)

static inline size_t size_code_gen_buffer(size_t tb_size)
{
    /* Size the buffer.  */
    if (tb_size == 0) {
#ifdef USE_STATIC_CODE_GEN_BUFFER
        tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* ??? Needs adjustments.  */
        /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
           static buffer, we could size this on RESERVED_VA, on the text
           segment size of the executable, or continue to use the default.  */
        tb_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
    }
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
    }
    return tb_size;
}

#ifdef __mips__
/* In order to use J and JAL within the code_gen_buffer, we require
   that the buffer not cross a 256MB boundary.  */
static inline bool cross_256mb(void *addr, size_t size)
{
    return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful;
}

/* We weren't able to allocate a buffer without crossing that boundary,
   so make do with the larger portion of the buffer that doesn't cross.
   Returns the new base of the buffer, and adjusts code_gen_buffer_size.  */
static inline void *split_cross_256mb(void *buf1, size_t size1)
{
    void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful);
    size_t size2 = buf1 + size1 - buf2;

    size1 = buf2 - buf1;
    if (size1 < size2) {
        size1 = size2;
        buf1 = buf2;
    }

    tcg_ctx->code_gen_buffer_size = size1;
    return buf1;
}
#endif
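/*
 * Numeric illustration of the MIPS helpers above (the addresses are made
 * up): a 64MB buffer at 0x0ff0_0000 ends at 0x13f0_0000 and thus straddles
 * the 256MB line at 0x1000_0000, so cross_256mb() reports true.
 * split_cross_256mb() then keeps the larger piece, the 63MB starting at
 * 0x1000_0000, and shrinks code_gen_buffer_size to match, so that J/JAL
 * can reach everything in the buffer.
 */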
#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));

static inline void *alloc_code_gen_buffer(void)
{
    void *buf = static_code_gen_buffer;
    void *end = static_code_gen_buffer + sizeof(static_code_gen_buffer);
    size_t size;

    /* page-align the beginning and end of the buffer */
    buf = QEMU_ALIGN_PTR_UP(buf, qemu_real_host_page_size);
    end = QEMU_ALIGN_PTR_DOWN(end, qemu_real_host_page_size);

    size = end - buf;

    /* Honor a command-line option limiting the size of the buffer.  */
    if (size > tcg_ctx->code_gen_buffer_size) {
        size = QEMU_ALIGN_DOWN(tcg_ctx->code_gen_buffer_size,
                               qemu_real_host_page_size);
    }
    tcg_ctx->code_gen_buffer_size = size;

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        buf = split_cross_256mb(buf, size);
        size = tcg_ctx->code_gen_buffer_size;
    }
#endif

    if (qemu_mprotect_rwx(buf, size)) {
        abort();
    }
    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);

    return buf;
}
#elif defined(_WIN32)
static inline void *alloc_code_gen_buffer(void)
{
    size_t size = tcg_ctx->code_gen_buffer_size;
    return VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT,
                        PAGE_EXECUTE_READWRITE);
}
#else
static inline void *alloc_code_gen_buffer(void)
{
    int prot = PROT_WRITE | PROT_READ | PROT_EXEC;
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
    uintptr_t start = 0;
    size_t size = tcg_ctx->code_gen_buffer_size;
    void *buf;

    /* Constrain the position of the buffer based on the host cpu.
       Note that these addresses are chosen in concert with the
       addresses assigned in the relevant linker script file.  */
# if defined(__PIE__) || defined(__PIC__)
    /* Don't bother setting a preferred location if we're building
       a position-independent executable.  We're more likely to get
       an address near the main executable if we let the kernel
       choose the address.  */
# elif defined(__x86_64__) && defined(MAP_32BIT)
    /* Force the memory down into low memory with the executable.
       Leave the choice of exact location with the kernel.  */
    flags |= MAP_32BIT;
    /* Cannot expect to map more than 800MB in low memory.  */
    if (size > 800u * 1024 * 1024) {
        tcg_ctx->code_gen_buffer_size = size = 800u * 1024 * 1024;
    }
# elif defined(__sparc__)
    start = 0x40000000ul;
# elif defined(__s390x__)
    start = 0x90000000ul;
# elif defined(__mips__)
#  if _MIPS_SIM == _ABI64
    start = 0x128000000ul;
#  else
    start = 0x08000000ul;
#  endif
# endif

    buf = mmap((void *)start, size, prot, flags, -1, 0);
    if (buf == MAP_FAILED) {
        return NULL;
    }

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        /* Try again, with the original still mapped, to avoid re-acquiring
           that 256mb crossing.  This time don't specify an address.  */
        size_t size2;
        void *buf2 = mmap(NULL, size, prot, flags, -1, 0);
        switch ((int)(buf2 != MAP_FAILED)) {
        case 1:
            if (!cross_256mb(buf2, size)) {
                /* Success!  Use the new buffer.  */
                munmap(buf, size);
                break;
            }
            /* Failure.  Work with what we had.  */
            munmap(buf2, size);
            /* fallthru */
        default:
            /* Split the original buffer.  Free the smaller half.  */
            buf2 = split_cross_256mb(buf, size);
            size2 = tcg_ctx->code_gen_buffer_size;
            if (buf == buf2) {
                munmap(buf + size2, size - size2);
            } else {
                munmap(buf, size - size2);
            }
            size = size2;
            break;
        }
        buf = buf2;
    }
#endif

    /* Request large pages for the buffer.  */
    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);

    return buf;
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */

static inline void code_gen_alloc(size_t tb_size)
{
    tcg_ctx->code_gen_buffer_size = size_code_gen_buffer(tb_size);
    tcg_ctx->code_gen_buffer = alloc_code_gen_buffer();
    if (tcg_ctx->code_gen_buffer == NULL) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }
}

static bool tb_cmp(const void *ap, const void *bp)
{
    const TranslationBlock *a = ap;
    const TranslationBlock *b = bp;

    return a->pc == b->pc &&
        a->cs_base == b->cs_base &&
        a->flags == b->flags &&
        (tb_cflags(a) & CF_HASH_MASK) == (tb_cflags(b) & CF_HASH_MASK) &&
        a->trace_vcpu_dstate == b->trace_vcpu_dstate &&
        a->page_addr[0] == b->page_addr[0] &&
        a->page_addr[1] == b->page_addr[1];
}
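/*
 * Descriptive note: tb_cmp() is the equality callback for the qht set up
 * below, so two TBs are interchangeable for lookup purposes only when every
 * input to code generation matches: guest pc, cs_base and flags, the cflags
 * bits covered by CF_HASH_MASK, the per-vCPU trace state, and the physical
 * page(s) the guest code was read from.
 */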
static void tb_htable_init(void)
{
    unsigned int mode = QHT_MODE_AUTO_RESIZE;

    qht_init(&tb_ctx.htable, tb_cmp, CODE_GEN_HTABLE_SIZE, mode);
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    tcg_allowed = true;
    cpu_gen_init();
    page_init();
    tb_htable_init();
    code_gen_alloc(tb_size);
#if defined(CONFIG_SOFTMMU)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now. */
    tcg_prologue_init(tcg_ctx);
#endif
}

/*
 * Allocate a new translation block. Flush the translation buffer if
 * too many translation blocks or too much generated code.
 */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    assert_memory_lock();

    tb = tcg_tb_alloc(tcg_ctx);
    if (unlikely(tb == NULL)) {
        return NULL;
    }
    return tb;
}

/* call with @p->lock held */
static inline void invalidate_page_bitmap(PageDesc *p)
{
    assert_page_locked(p);
#ifdef CONFIG_SOFTMMU
    g_free(p->code_bitmap);
    p->code_bitmap = NULL;
    p->code_write_count = 0;
#endif
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */
static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            page_lock(&pd[i]);
            pd[i].first_tb = (uintptr_t)NULL;
            invalidate_page_bitmap(pd + i);
            page_unlock(&pd[i]);
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i, l1_sz = v_l1_size;

    for (i = 0; i < l1_sz; i++) {
        page_flush_tb_1(v_l2_levels, l1_map + i);
    }
}

static gboolean tb_host_size_iter(gpointer key, gpointer value, gpointer data)
{
    const TranslationBlock *tb = value;
    size_t *size = data;

    *size += tb->tc.size;
    return false;
}

/* flush all the translation blocks */
static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
{
    mmap_lock();
    /* If it has already been done on request of another CPU,
     * just retry.
     */
    if (tb_ctx.tb_flush_count != tb_flush_count.host_int) {
        goto done;
    }

    if (DEBUG_TB_FLUSH_GATE) {
        size_t nb_tbs = tcg_nb_tbs();
        size_t host_size = 0;

        tcg_tb_foreach(tb_host_size_iter, &host_size);
        printf("qemu: flush code_size=%zu nb_tbs=%zu avg_tb_size=%zu\n",
               tcg_code_size(), nb_tbs, nb_tbs > 0 ? host_size / nb_tbs : 0);
    }

    CPU_FOREACH(cpu) {
        cpu_tb_jmp_cache_clear(cpu);
    }

    qht_reset_size(&tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
    page_flush_tb();

    tcg_region_reset_all();
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    atomic_mb_set(&tb_ctx.tb_flush_count, tb_ctx.tb_flush_count + 1);

done:
    mmap_unlock();
}

void tb_flush(CPUState *cpu)
{
    if (tcg_enabled()) {
        unsigned tb_flush_count = atomic_mb_read(&tb_ctx.tb_flush_count);
        async_safe_run_on_cpu(cpu, do_tb_flush,
                              RUN_ON_CPU_HOST_INT(tb_flush_count));
    }
}

/*
 * Formerly ifdef DEBUG_TB_CHECK. These debug functions are user-mode-only,
 * so in order to prevent bit rot we compile them unconditionally in user-mode,
 * and let the optimizer get rid of them by wrapping their user-only callers
 * with if (DEBUG_TB_CHECK_GATE).
 */
#ifdef CONFIG_USER_ONLY

static void
do_tb_invalidate_check(struct qht *ht, void *p, uint32_t hash, void *userp)
{
    TranslationBlock *tb = p;
    target_ulong addr = *(target_ulong *)userp;

    if (!(addr + TARGET_PAGE_SIZE <= tb->pc || addr >= tb->pc + tb->size)) {
        printf("ERROR invalidate: address=" TARGET_FMT_lx
               " PC=%08lx size=%04x\n", addr, (long)tb->pc, tb->size);
    }
}

/* verify that all the pages have correct rights for code
 *
 * Called with mmap_lock held.
 */
1282 */ 1283 static void tb_invalidate_check(target_ulong address) 1284 { 1285 address &= TARGET_PAGE_MASK; 1286 qht_iter(&tb_ctx.htable, do_tb_invalidate_check, &address); 1287 } 1288 1289 static void 1290 do_tb_page_check(struct qht *ht, void *p, uint32_t hash, void *userp) 1291 { 1292 TranslationBlock *tb = p; 1293 int flags1, flags2; 1294 1295 flags1 = page_get_flags(tb->pc); 1296 flags2 = page_get_flags(tb->pc + tb->size - 1); 1297 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) { 1298 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n", 1299 (long)tb->pc, tb->size, flags1, flags2); 1300 } 1301 } 1302 1303 /* verify that all the pages have correct rights for code */ 1304 static void tb_page_check(void) 1305 { 1306 qht_iter(&tb_ctx.htable, do_tb_page_check, NULL); 1307 } 1308 1309 #endif /* CONFIG_USER_ONLY */ 1310 1311 /* 1312 * user-mode: call with mmap_lock held 1313 * !user-mode: call with @pd->lock held 1314 */ 1315 static inline void tb_page_remove(PageDesc *pd, TranslationBlock *tb) 1316 { 1317 TranslationBlock *tb1; 1318 uintptr_t *pprev; 1319 unsigned int n1; 1320 1321 assert_page_locked(pd); 1322 pprev = &pd->first_tb; 1323 PAGE_FOR_EACH_TB(pd, tb1, n1) { 1324 if (tb1 == tb) { 1325 *pprev = tb1->page_next[n1]; 1326 return; 1327 } 1328 pprev = &tb1->page_next[n1]; 1329 } 1330 g_assert_not_reached(); 1331 } 1332 1333 /* remove @orig from its @n_orig-th jump list */ 1334 static inline void tb_remove_from_jmp_list(TranslationBlock *orig, int n_orig) 1335 { 1336 uintptr_t ptr, ptr_locked; 1337 TranslationBlock *dest; 1338 TranslationBlock *tb; 1339 uintptr_t *pprev; 1340 int n; 1341 1342 /* mark the LSB of jmp_dest[] so that no further jumps can be inserted */ 1343 ptr = atomic_or_fetch(&orig->jmp_dest[n_orig], 1); 1344 dest = (TranslationBlock *)(ptr & ~1); 1345 if (dest == NULL) { 1346 return; 1347 } 1348 1349 qemu_spin_lock(&dest->jmp_lock); 1350 /* 1351 * While acquiring the lock, the jump might have been removed if the 1352 * destination TB was invalidated; check again. 1353 */ 1354 ptr_locked = atomic_read(&orig->jmp_dest[n_orig]); 1355 if (ptr_locked != ptr) { 1356 qemu_spin_unlock(&dest->jmp_lock); 1357 /* 1358 * The only possibility is that the jump was unlinked via 1359 * tb_jump_unlink(dest). Seeing here another destination would be a bug, 1360 * because we set the LSB above. 1361 */ 1362 g_assert(ptr_locked == 1 && dest->cflags & CF_INVALID); 1363 return; 1364 } 1365 /* 1366 * We first acquired the lock, and since the destination pointer matches, 1367 * we know for sure that @orig is in the jmp list. 
1368 */ 1369 pprev = &dest->jmp_list_head; 1370 TB_FOR_EACH_JMP(dest, tb, n) { 1371 if (tb == orig && n == n_orig) { 1372 *pprev = tb->jmp_list_next[n]; 1373 /* no need to set orig->jmp_dest[n]; setting the LSB was enough */ 1374 qemu_spin_unlock(&dest->jmp_lock); 1375 return; 1376 } 1377 pprev = &tb->jmp_list_next[n]; 1378 } 1379 g_assert_not_reached(); 1380 } 1381 1382 /* reset the jump entry 'n' of a TB so that it is not chained to 1383 another TB */ 1384 static inline void tb_reset_jump(TranslationBlock *tb, int n) 1385 { 1386 uintptr_t addr = (uintptr_t)(tb->tc.ptr + tb->jmp_reset_offset[n]); 1387 tb_set_jmp_target(tb, n, addr); 1388 } 1389 1390 /* remove any jumps to the TB */ 1391 static inline void tb_jmp_unlink(TranslationBlock *dest) 1392 { 1393 TranslationBlock *tb; 1394 int n; 1395 1396 qemu_spin_lock(&dest->jmp_lock); 1397 1398 TB_FOR_EACH_JMP(dest, tb, n) { 1399 tb_reset_jump(tb, n); 1400 atomic_and(&tb->jmp_dest[n], (uintptr_t)NULL | 1); 1401 /* No need to clear the list entry; setting the dest ptr is enough */ 1402 } 1403 dest->jmp_list_head = (uintptr_t)NULL; 1404 1405 qemu_spin_unlock(&dest->jmp_lock); 1406 } 1407 1408 /* 1409 * In user-mode, call with mmap_lock held. 1410 * In !user-mode, if @rm_from_page_list is set, call with the TB's pages' 1411 * locks held. 1412 */ 1413 static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list) 1414 { 1415 CPUState *cpu; 1416 PageDesc *p; 1417 uint32_t h; 1418 tb_page_addr_t phys_pc; 1419 1420 assert_memory_lock(); 1421 1422 /* make sure no further incoming jumps will be chained to this TB */ 1423 qemu_spin_lock(&tb->jmp_lock); 1424 atomic_set(&tb->cflags, tb->cflags | CF_INVALID); 1425 qemu_spin_unlock(&tb->jmp_lock); 1426 1427 /* remove the TB from the hash list */ 1428 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK); 1429 h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb_cflags(tb) & CF_HASH_MASK, 1430 tb->trace_vcpu_dstate); 1431 if (!qht_remove(&tb_ctx.htable, tb, h)) { 1432 return; 1433 } 1434 1435 /* remove the TB from the page list */ 1436 if (rm_from_page_list) { 1437 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS); 1438 tb_page_remove(p, tb); 1439 invalidate_page_bitmap(p); 1440 if (tb->page_addr[1] != -1) { 1441 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS); 1442 tb_page_remove(p, tb); 1443 invalidate_page_bitmap(p); 1444 } 1445 } 1446 1447 /* remove the TB from the hash list */ 1448 h = tb_jmp_cache_hash_func(tb->pc); 1449 CPU_FOREACH(cpu) { 1450 if (atomic_read(&cpu->tb_jmp_cache[h]) == tb) { 1451 atomic_set(&cpu->tb_jmp_cache[h], NULL); 1452 } 1453 } 1454 1455 /* suppress this TB from the two jump lists */ 1456 tb_remove_from_jmp_list(tb, 0); 1457 tb_remove_from_jmp_list(tb, 1); 1458 1459 /* suppress any remaining jumps to this TB */ 1460 tb_jmp_unlink(tb); 1461 1462 atomic_set(&tcg_ctx->tb_phys_invalidate_count, 1463 tcg_ctx->tb_phys_invalidate_count + 1); 1464 } 1465 1466 static void tb_phys_invalidate__locked(TranslationBlock *tb) 1467 { 1468 do_tb_phys_invalidate(tb, true); 1469 } 1470 1471 /* invalidate one TB 1472 * 1473 * Called with mmap_lock held in user-mode. 
1474 */ 1475 void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr) 1476 { 1477 if (page_addr == -1) { 1478 page_lock_tb(tb); 1479 do_tb_phys_invalidate(tb, true); 1480 page_unlock_tb(tb); 1481 } else { 1482 do_tb_phys_invalidate(tb, false); 1483 } 1484 } 1485 1486 #ifdef CONFIG_SOFTMMU 1487 /* call with @p->lock held */ 1488 static void build_page_bitmap(PageDesc *p) 1489 { 1490 int n, tb_start, tb_end; 1491 TranslationBlock *tb; 1492 1493 assert_page_locked(p); 1494 p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE); 1495 1496 PAGE_FOR_EACH_TB(p, tb, n) { 1497 /* NOTE: this is subtle as a TB may span two physical pages */ 1498 if (n == 0) { 1499 /* NOTE: tb_end may be after the end of the page, but 1500 it is not a problem */ 1501 tb_start = tb->pc & ~TARGET_PAGE_MASK; 1502 tb_end = tb_start + tb->size; 1503 if (tb_end > TARGET_PAGE_SIZE) { 1504 tb_end = TARGET_PAGE_SIZE; 1505 } 1506 } else { 1507 tb_start = 0; 1508 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK); 1509 } 1510 bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start); 1511 } 1512 } 1513 #endif 1514 1515 /* add the tb in the target page and protect it if necessary 1516 * 1517 * Called with mmap_lock held for user-mode emulation. 1518 * Called with @p->lock held in !user-mode. 1519 */ 1520 static inline void tb_page_add(PageDesc *p, TranslationBlock *tb, 1521 unsigned int n, tb_page_addr_t page_addr) 1522 { 1523 #ifndef CONFIG_USER_ONLY 1524 bool page_already_protected; 1525 #endif 1526 1527 assert_page_locked(p); 1528 1529 tb->page_addr[n] = page_addr; 1530 tb->page_next[n] = p->first_tb; 1531 #ifndef CONFIG_USER_ONLY 1532 page_already_protected = p->first_tb != (uintptr_t)NULL; 1533 #endif 1534 p->first_tb = (uintptr_t)tb | n; 1535 invalidate_page_bitmap(p); 1536 1537 #if defined(CONFIG_USER_ONLY) 1538 if (p->flags & PAGE_WRITE) { 1539 target_ulong addr; 1540 PageDesc *p2; 1541 int prot; 1542 1543 /* force the host page as non writable (writes will have a 1544 page fault + mprotect overhead) */ 1545 page_addr &= qemu_host_page_mask; 1546 prot = 0; 1547 for (addr = page_addr; addr < page_addr + qemu_host_page_size; 1548 addr += TARGET_PAGE_SIZE) { 1549 1550 p2 = page_find(addr >> TARGET_PAGE_BITS); 1551 if (!p2) { 1552 continue; 1553 } 1554 prot |= p2->flags; 1555 p2->flags &= ~PAGE_WRITE; 1556 } 1557 mprotect(g2h(page_addr), qemu_host_page_size, 1558 (prot & PAGE_BITS) & ~PAGE_WRITE); 1559 if (DEBUG_TB_INVALIDATE_GATE) { 1560 printf("protecting code page: 0x" TB_PAGE_ADDR_FMT "\n", page_addr); 1561 } 1562 } 1563 #else 1564 /* if some code is already present, then the pages are already 1565 protected. So we handle the case where only the first TB is 1566 allocated in a physical page */ 1567 if (!page_already_protected) { 1568 tlb_protect_code(page_addr); 1569 } 1570 #endif 1571 } 1572 1573 /* add a new TB and link it to the physical page tables. phys_page2 is 1574 * (-1) to indicate that only one page contains the TB. 1575 * 1576 * Called with mmap_lock held for user-mode emulation. 1577 * 1578 * Returns a pointer @tb, or a pointer to an existing TB that matches @tb. 1579 * Note that in !user-mode, another thread might have already added a TB 1580 * for the same block of guest code that @tb corresponds to. In that case, 1581 * the caller should discard the original @tb, and use instead the returned TB. 
1582 */ 1583 static TranslationBlock * 1584 tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc, 1585 tb_page_addr_t phys_page2) 1586 { 1587 PageDesc *p; 1588 PageDesc *p2 = NULL; 1589 void *existing_tb = NULL; 1590 uint32_t h; 1591 1592 assert_memory_lock(); 1593 1594 /* 1595 * Add the TB to the page list, acquiring first the pages's locks. 1596 * We keep the locks held until after inserting the TB in the hash table, 1597 * so that if the insertion fails we know for sure that the TBs are still 1598 * in the page descriptors. 1599 * Note that inserting into the hash table first isn't an option, since 1600 * we can only insert TBs that are fully initialized. 1601 */ 1602 page_lock_pair(&p, phys_pc, &p2, phys_page2, 1); 1603 tb_page_add(p, tb, 0, phys_pc & TARGET_PAGE_MASK); 1604 if (p2) { 1605 tb_page_add(p2, tb, 1, phys_page2); 1606 } else { 1607 tb->page_addr[1] = -1; 1608 } 1609 1610 /* add in the hash table */ 1611 h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->cflags & CF_HASH_MASK, 1612 tb->trace_vcpu_dstate); 1613 qht_insert(&tb_ctx.htable, tb, h, &existing_tb); 1614 1615 /* remove TB from the page(s) if we couldn't insert it */ 1616 if (unlikely(existing_tb)) { 1617 tb_page_remove(p, tb); 1618 invalidate_page_bitmap(p); 1619 if (p2) { 1620 tb_page_remove(p2, tb); 1621 invalidate_page_bitmap(p2); 1622 } 1623 tb = existing_tb; 1624 } 1625 1626 if (p2) { 1627 page_unlock(p2); 1628 } 1629 page_unlock(p); 1630 1631 #ifdef CONFIG_USER_ONLY 1632 if (DEBUG_TB_CHECK_GATE) { 1633 tb_page_check(); 1634 } 1635 #endif 1636 return tb; 1637 } 1638 1639 /* Called with mmap_lock held for user mode emulation. */ 1640 TranslationBlock *tb_gen_code(CPUState *cpu, 1641 target_ulong pc, target_ulong cs_base, 1642 uint32_t flags, int cflags) 1643 { 1644 CPUArchState *env = cpu->env_ptr; 1645 TranslationBlock *tb, *existing_tb; 1646 tb_page_addr_t phys_pc, phys_page2; 1647 target_ulong virt_page2; 1648 tcg_insn_unit *gen_code_buf; 1649 int gen_code_size, search_size; 1650 #ifdef CONFIG_PROFILER 1651 TCGProfile *prof = &tcg_ctx->prof; 1652 int64_t ti; 1653 #endif 1654 assert_memory_lock(); 1655 1656 phys_pc = get_page_addr_code(env, pc); 1657 1658 buffer_overflow: 1659 tb = tb_alloc(pc); 1660 if (unlikely(!tb)) { 1661 /* flush must be done */ 1662 tb_flush(cpu); 1663 mmap_unlock(); 1664 /* Make the execution loop process the flush as soon as possible. 
        cpu->exception_index = EXCP_INTERRUPT;
        cpu_loop_exit(cpu);
    }

    gen_code_buf = tcg_ctx->code_gen_ptr;
    tb->tc.ptr = gen_code_buf;
    tb->pc = pc;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    tb->trace_vcpu_dstate = *cpu->trace_dstate;
    tcg_ctx->tb_cflags = cflags;

#ifdef CONFIG_PROFILER
    /* includes aborted translations because of exceptions */
    atomic_set(&prof->tb_count1, prof->tb_count1 + 1);
    ti = profile_getclock();
#endif

    tcg_func_start(tcg_ctx);

    tcg_ctx->cpu = ENV_GET_CPU(env);
    gen_intermediate_code(cpu, tb);
    tcg_ctx->cpu = NULL;

    trace_translate_block(tb, tb->pc, tb->tc.ptr);

    /* generate machine code */
    tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
    tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID;
    tcg_ctx->tb_jmp_reset_offset = tb->jmp_reset_offset;
    if (TCG_TARGET_HAS_direct_jump) {
        tcg_ctx->tb_jmp_insn_offset = tb->jmp_target_arg;
        tcg_ctx->tb_jmp_target_addr = NULL;
    } else {
        tcg_ctx->tb_jmp_insn_offset = NULL;
        tcg_ctx->tb_jmp_target_addr = tb->jmp_target_arg;
    }

#ifdef CONFIG_PROFILER
    atomic_set(&prof->tb_count, prof->tb_count + 1);
    atomic_set(&prof->interm_time, prof->interm_time + profile_getclock() - ti);
    ti = profile_getclock();
#endif

    /* ??? Overflow could be handled better here.  In particular, we
       don't need to re-do gen_intermediate_code, nor should we re-do
       the tcg optimization currently hidden inside tcg_gen_code.  All
       that should be required is to flush the TBs, allocate a new TB,
       re-initialize it per above, and re-do the actual code generation.  */
    gen_code_size = tcg_gen_code(tcg_ctx, tb);
    if (unlikely(gen_code_size < 0)) {
        goto buffer_overflow;
    }
    search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
    if (unlikely(search_size < 0)) {
        goto buffer_overflow;
    }
    tb->tc.size = gen_code_size;

#ifdef CONFIG_PROFILER
    atomic_set(&prof->code_time, prof->code_time + profile_getclock() - ti);
    atomic_set(&prof->code_in_len, prof->code_in_len + tb->size);
    atomic_set(&prof->code_out_len, prof->code_out_len + gen_code_size);
    atomic_set(&prof->search_out_len, prof->search_out_len + search_size);
#endif

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
        qemu_log_in_addr_range(tb->pc)) {
        qemu_log_lock();
        qemu_log("OUT: [size=%d]\n", gen_code_size);
        if (tcg_ctx->data_gen_ptr) {
            size_t code_size = tcg_ctx->data_gen_ptr - tb->tc.ptr;
            size_t data_size = gen_code_size - code_size;
            size_t i;

            log_disas(tb->tc.ptr, code_size);

            for (i = 0; i < data_size; i += sizeof(tcg_target_ulong)) {
                if (sizeof(tcg_target_ulong) == 8) {
                    qemu_log("0x%08" PRIxPTR ":  .quad  0x%016" PRIx64 "\n",
                             (uintptr_t)tcg_ctx->data_gen_ptr + i,
                             *(uint64_t *)(tcg_ctx->data_gen_ptr + i));
                } else {
                    qemu_log("0x%08" PRIxPTR ":  .long  0x%08x\n",
                             (uintptr_t)tcg_ctx->data_gen_ptr + i,
                             *(uint32_t *)(tcg_ctx->data_gen_ptr + i));
                }
            }
        } else {
            log_disas(tb->tc.ptr, gen_code_size);
        }
        qemu_log("\n");
        qemu_log_flush();
        qemu_log_unlock();
    }
#endif

    atomic_set(&tcg_ctx->code_gen_ptr, (void *)
        ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
                 CODE_GEN_ALIGN));
    /* init jump list */
    qemu_spin_init(&tb->jmp_lock);
    tb->jmp_list_head = (uintptr_t)NULL;
    tb->jmp_list_next[0] = (uintptr_t)NULL;
    tb->jmp_list_next[1] = (uintptr_t)NULL;
    tb->jmp_dest[0] = (uintptr_t)NULL;
    tb->jmp_dest[1] = (uintptr_t)NULL;

    /* init original jump addresses which have been set during tcg_gen_code() */
    if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
        tb_reset_jump(tb, 0);
    }
    if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
        tb_reset_jump(tb, 1);
    }

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    /*
     * No explicit memory barrier is required -- tb_link_page() makes the
     * TB visible in a consistent state.
     */
    existing_tb = tb_link_page(tb, phys_pc, phys_page2);
    /* if the TB already exists, discard what we just translated */
    if (unlikely(existing_tb != tb)) {
        uintptr_t orig_aligned = (uintptr_t)gen_code_buf;

        orig_aligned -= ROUND_UP(sizeof(*tb), qemu_icache_linesize);
        atomic_set(&tcg_ctx->code_gen_ptr, (void *)orig_aligned);
        return existing_tb;
    }
    tcg_tb_insert(tb);
    return tb;
}

/*
 * @p must be non-NULL.
 * user-mode: call with mmap_lock held.
 * !user-mode: call with all @pages locked.
 */
static void
tb_invalidate_phys_page_range__locked(struct page_collection *pages,
                                      PageDesc *p, tb_page_addr_t start,
                                      tb_page_addr_t end,
                                      int is_cpu_write_access)
{
    TranslationBlock *tb;
    tb_page_addr_t tb_start, tb_end;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *cpu = current_cpu;
    CPUArchState *env = NULL;
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    uint32_t current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    assert_page_locked(p);

#if defined(TARGET_HAS_PRECISE_SMC)
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all
       the code */
    PAGE_FOR_EACH_TB(p, tb, n) {
        assert_page_locked(p);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (cpu->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tcg_tb_lookup(cpu->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state_from_tb(cpu, current_tb,
                                          cpu->mem_io_pc, true);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            tb_phys_invalidate__locked(tb);
        }
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        tlb_unprotect_code(start);
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        page_collection_unlock(pages);
        /* Force execution of one insn next time.  */
        cpu->cflags_next_tb = 1 | curr_cflags();
        mmap_unlock();
        cpu_loop_exit_noexc(cpu);
    }
#endif
}

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end must refer to the *same* physical page.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 *
 * Called with mmap_lock held for user-mode emulation
 */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    struct page_collection *pages;
    PageDesc *p;

    assert_memory_lock();

    p = page_find(start >> TARGET_PAGE_BITS);
    if (p == NULL) {
        return;
    }
    pages = page_collection_lock(start, end);
    tb_invalidate_phys_page_range__locked(pages, p, start, end,
                                          is_cpu_write_access);
    page_collection_unlock(pages);
}

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end may refer to *different* physical pages.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 *
 * Called with mmap_lock held for user-mode emulation.
 */
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
{
    struct page_collection *pages;
    tb_page_addr_t next;

    assert_memory_lock();

    pages = page_collection_lock(start, end);
    for (next = (start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
         start < end;
         start = next, next += TARGET_PAGE_SIZE) {
        PageDesc *pd = page_find(start >> TARGET_PAGE_BITS);
        tb_page_addr_t bound = MIN(next, end);

        if (pd == NULL) {
            continue;
        }
        tb_invalidate_phys_page_range__locked(pages, pd, start, bound, 0);
    }
    page_collection_unlock(pages);
}

#ifdef CONFIG_SOFTMMU
/* len must be <= 8 and start must be a multiple of len.
 * Called via softmmu_template.h when code areas are written to with
 * iothread mutex not held.
 *
 * Call with all @pages in the range [@start, @start + len[ locked.
 */
1965 */ 1966 void tb_invalidate_phys_page_fast(struct page_collection *pages, 1967 tb_page_addr_t start, int len) 1968 { 1969 PageDesc *p; 1970 1971 #if 0 1972 if (1) { 1973 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n", 1974 cpu_single_env->mem_io_vaddr, len, 1975 cpu_single_env->eip, 1976 cpu_single_env->eip + 1977 (intptr_t)cpu_single_env->segs[R_CS].base); 1978 } 1979 #endif 1980 assert_memory_lock(); 1981 1982 p = page_find(start >> TARGET_PAGE_BITS); 1983 if (!p) { 1984 return; 1985 } 1986 1987 assert_page_locked(p); 1988 if (!p->code_bitmap && 1989 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) { 1990 build_page_bitmap(p); 1991 } 1992 if (p->code_bitmap) { 1993 unsigned int nr; 1994 unsigned long b; 1995 1996 nr = start & ~TARGET_PAGE_MASK; 1997 b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1)); 1998 if (b & ((1 << len) - 1)) { 1999 goto do_invalidate; 2000 } 2001 } else { 2002 do_invalidate: 2003 tb_invalidate_phys_page_range__locked(pages, p, start, start + len, 1); 2004 } 2005 } 2006 #else 2007 /* Called with mmap_lock held. If pc is not 0 then it indicates the 2008 * host PC of the faulting store instruction that caused this invalidate. 2009 * Returns true if the caller needs to abort execution of the current 2010 * TB (because it was modified by this store and the guest CPU has 2011 * precise-SMC semantics). 2012 */ 2013 static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc) 2014 { 2015 TranslationBlock *tb; 2016 PageDesc *p; 2017 int n; 2018 #ifdef TARGET_HAS_PRECISE_SMC 2019 TranslationBlock *current_tb = NULL; 2020 CPUState *cpu = current_cpu; 2021 CPUArchState *env = NULL; 2022 int current_tb_modified = 0; 2023 target_ulong current_pc = 0; 2024 target_ulong current_cs_base = 0; 2025 uint32_t current_flags = 0; 2026 #endif 2027 2028 assert_memory_lock(); 2029 2030 addr &= TARGET_PAGE_MASK; 2031 p = page_find(addr >> TARGET_PAGE_BITS); 2032 if (!p) { 2033 return false; 2034 } 2035 2036 #ifdef TARGET_HAS_PRECISE_SMC 2037 if (p->first_tb && pc != 0) { 2038 current_tb = tcg_tb_lookup(pc); 2039 } 2040 if (cpu != NULL) { 2041 env = cpu->env_ptr; 2042 } 2043 #endif 2044 assert_page_locked(p); 2045 PAGE_FOR_EACH_TB(p, tb, n) { 2046 #ifdef TARGET_HAS_PRECISE_SMC 2047 if (current_tb == tb && 2048 (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) { 2049 /* If we are modifying the current TB, we must stop 2050 its execution. We could be more precise by checking 2051 that the modification is after the current PC, but it 2052 would require a specialized function to partially 2053 restore the CPU state */ 2054 2055 current_tb_modified = 1; 2056 cpu_restore_state_from_tb(cpu, current_tb, pc, true); 2057 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base, 2058 &current_flags); 2059 } 2060 #endif /* TARGET_HAS_PRECISE_SMC */ 2061 tb_phys_invalidate(tb, addr); 2062 } 2063 p->first_tb = (uintptr_t)NULL; 2064 #ifdef TARGET_HAS_PRECISE_SMC 2065 if (current_tb_modified) { 2066 /* Force execution of one insn next time.
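Setting the CF_COUNT_MASK field of cflags_next_tb to 1 limits the next TB to a single guest instruction, so the modifying store is re-executed on its own rather than inside the TB it just patched.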
*/ 2067 cpu->cflags_next_tb = 1 | curr_cflags(); 2068 return true; 2069 } 2070 #endif 2071 2072 return false; 2073 } 2074 #endif 2075 2076 #if !defined(CONFIG_USER_ONLY) 2077 void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs) 2078 { 2079 ram_addr_t ram_addr; 2080 MemoryRegion *mr; 2081 hwaddr l = 1; 2082 2083 rcu_read_lock(); 2084 mr = address_space_translate(as, addr, &addr, &l, false, attrs); 2085 if (!(memory_region_is_ram(mr) 2086 || memory_region_is_romd(mr))) { 2087 rcu_read_unlock(); 2088 return; 2089 } 2090 ram_addr = memory_region_get_ram_addr(mr) + addr; 2091 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0); 2092 rcu_read_unlock(); 2093 } 2094 #endif /* !defined(CONFIG_USER_ONLY) */ 2095 2096 /* user-mode: call with mmap_lock held */ 2097 void tb_check_watchpoint(CPUState *cpu) 2098 { 2099 TranslationBlock *tb; 2100 2101 assert_memory_lock(); 2102 2103 tb = tcg_tb_lookup(cpu->mem_io_pc); 2104 if (tb) { 2105 /* We can use retranslation to find the PC. */ 2106 cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc, true); 2107 tb_phys_invalidate(tb, -1); 2108 } else { 2109 /* The exception probably happened in a helper. The CPU state should 2110 have been saved before calling it. Fetch the PC from there. */ 2111 CPUArchState *env = cpu->env_ptr; 2112 target_ulong pc, cs_base; 2113 tb_page_addr_t addr; 2114 uint32_t flags; 2115 2116 cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags); 2117 addr = get_page_addr_code(env, pc); 2118 tb_invalidate_phys_range(addr, addr + 1); 2119 } 2120 } 2121 2122 #ifndef CONFIG_USER_ONLY 2123 /* in deterministic execution mode, instructions doing device I/Os 2124 * must be at the end of the TB. 2125 * 2126 * Called by softmmu_template.h, with iothread mutex not held. 2127 */ 2128 void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr) 2129 { 2130 #if defined(TARGET_MIPS) || defined(TARGET_SH4) 2131 CPUArchState *env = cpu->env_ptr; 2132 #endif 2133 TranslationBlock *tb; 2134 uint32_t n; 2135 2136 tb = tcg_tb_lookup(retaddr); 2137 if (!tb) { 2138 cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p", 2139 (void *)retaddr); 2140 } 2141 cpu_restore_state_from_tb(cpu, tb, retaddr, true); 2142 2143 /* On MIPS and SH, delay slot instructions can only be restarted if 2144 they were already the first instruction in the TB. If this is not 2145 the first instruction in a TB then re-execute the preceding 2146 branch. */ 2147 n = 1; 2148 #if defined(TARGET_MIPS) 2149 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 2150 && env->active_tc.PC != tb->pc) { 2151 env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4); 2152 cpu->icount_decr.u16.low++; 2153 env->hflags &= ~MIPS_HFLAG_BMASK; 2154 n = 2; 2155 } 2156 #elif defined(TARGET_SH4) 2157 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0 2158 && env->pc != tb->pc) { 2159 env->pc -= 2; 2160 cpu->icount_decr.u16.low++; 2161 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL); 2162 n = 2; 2163 } 2164 #endif 2165 2166 /* Generate a new TB executing the I/O insn. */ 2167 cpu->cflags_next_tb = curr_cflags() | CF_LAST_IO | n; 2168 2169 if (tb_cflags(tb) & CF_NOCACHE) { 2170 if (tb->orig_tb) { 2171 /* Invalidate original TB if this TB was generated in 2172 * cpu_exec_nocache() */ 2173 tb_phys_invalidate(tb->orig_tb, -1); 2174 } 2175 tcg_tb_remove(tb); 2176 } 2177 2178 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not 2179 * the first in the TB) then we end up generating a whole new TB and 2180 * repeating the fault, which is horribly inefficient. 
2181 * Better would be to execute just this insn uncached, or generate a 2182 * second new TB. 2183 */ 2184 cpu_loop_exit_noexc(cpu); 2185 } 2186 2187 static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr) 2188 { 2189 unsigned int i, i0 = tb_jmp_cache_hash_page(page_addr); 2190 2191 for (i = 0; i < TB_JMP_PAGE_SIZE; i++) { 2192 atomic_set(&cpu->tb_jmp_cache[i0 + i], NULL); 2193 } 2194 } 2195 2196 void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr) 2197 { 2198 /* Discard jump cache entries for any tb which might potentially 2199 overlap the flushed page. */ 2200 tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE); 2201 tb_jmp_cache_clear_page(cpu, addr); 2202 } 2203 2204 static void print_qht_statistics(FILE *f, fprintf_function cpu_fprintf, 2205 struct qht_stats hst) 2206 { 2207 uint32_t hgram_opts; 2208 size_t hgram_bins; 2209 char *hgram; 2210 2211 if (!hst.head_buckets) { 2212 return; 2213 } 2214 cpu_fprintf(f, "TB hash buckets %zu/%zu (%0.2f%% head buckets used)\n", 2215 hst.used_head_buckets, hst.head_buckets, 2216 (double)hst.used_head_buckets / hst.head_buckets * 100); 2217 2218 hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS; 2219 hgram_opts |= QDIST_PR_100X | QDIST_PR_PERCENT; 2220 if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) { 2221 hgram_opts |= QDIST_PR_NODECIMAL; 2222 } 2223 hgram = qdist_pr(&hst.occupancy, 10, hgram_opts); 2224 cpu_fprintf(f, "TB hash occupancy %0.2f%% avg chain occ. Histogram: %s\n", 2225 qdist_avg(&hst.occupancy) * 100, hgram); 2226 g_free(hgram); 2227 2228 hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS; 2229 hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain); 2230 if (hgram_bins > 10) { 2231 hgram_bins = 10; 2232 } else { 2233 hgram_bins = 0; 2234 hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE; 2235 } 2236 hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts); 2237 cpu_fprintf(f, "TB hash avg chain %0.3f buckets. Histogram: %s\n", 2238 qdist_avg(&hst.chain), hgram); 2239 g_free(hgram); 2240 } 2241 2242 struct tb_tree_stats { 2243 size_t nb_tbs; 2244 size_t host_size; 2245 size_t target_size; 2246 size_t max_target_size; 2247 size_t direct_jmp_count; 2248 size_t direct_jmp2_count; 2249 size_t cross_page; 2250 }; 2251 2252 static gboolean tb_tree_stats_iter(gpointer key, gpointer value, gpointer data) 2253 { 2254 const TranslationBlock *tb = value; 2255 struct tb_tree_stats *tst = data; 2256 2257 tst->nb_tbs++; 2258 tst->host_size += tb->tc.size; 2259 tst->target_size += tb->size; 2260 if (tb->size > tst->max_target_size) { 2261 tst->max_target_size = tb->size; 2262 } 2263 if (tb->page_addr[1] != -1) { 2264 tst->cross_page++; 2265 } 2266 if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) { 2267 tst->direct_jmp_count++; 2268 if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) { 2269 tst->direct_jmp2_count++; 2270 } 2271 } 2272 return false; 2273 } 2274 2275 void dump_exec_info(FILE *f, fprintf_function cpu_fprintf) 2276 { 2277 struct tb_tree_stats tst = {}; 2278 struct qht_stats hst; 2279 size_t nb_tbs; 2280 2281 tcg_tb_foreach(tb_tree_stats_iter, &tst); 2282 nb_tbs = tst.nb_tbs; 2283 /* XXX: avoid using doubles ? */ 2284 cpu_fprintf(f, "Translation buffer state:\n"); 2285 /* 2286 * Report total code size including the padding and TB structs; 2287 * otherwise users might think "-tb-size" is not honoured. 2288 * For avg host size we use the precise numbers from tb_tree_stats though. 
2289 */ 2290 cpu_fprintf(f, "gen code size %zu/%zu\n", 2291 tcg_code_size(), tcg_code_capacity()); 2292 cpu_fprintf(f, "TB count %zu\n", nb_tbs); 2293 cpu_fprintf(f, "TB avg target size %zu max=%zu bytes\n", 2294 nb_tbs ? tst.target_size / nb_tbs : 0, 2295 tst.max_target_size); 2296 cpu_fprintf(f, "TB avg host size %zu bytes (expansion ratio: %0.1f)\n", 2297 nb_tbs ? tst.host_size / nb_tbs : 0, 2298 tst.target_size ? (double)tst.host_size / tst.target_size : 0); 2299 cpu_fprintf(f, "cross page TB count %zu (%zu%%)\n", tst.cross_page, 2300 nb_tbs ? (tst.cross_page * 100) / nb_tbs : 0); 2301 cpu_fprintf(f, "direct jump count %zu (%zu%%) (2 jumps=%zu %zu%%)\n", 2302 tst.direct_jmp_count, 2303 nb_tbs ? (tst.direct_jmp_count * 100) / nb_tbs : 0, 2304 tst.direct_jmp2_count, 2305 nb_tbs ? (tst.direct_jmp2_count * 100) / nb_tbs : 0); 2306 2307 qht_statistics_init(&tb_ctx.htable, &hst); 2308 print_qht_statistics(f, cpu_fprintf, hst); 2309 qht_statistics_destroy(&hst); 2310 2311 cpu_fprintf(f, "\nStatistics:\n"); 2312 cpu_fprintf(f, "TB flush count %u\n", 2313 atomic_read(&tb_ctx.tb_flush_count)); 2314 cpu_fprintf(f, "TB invalidate count %zu\n", tcg_tb_phys_invalidate_count()); 2315 cpu_fprintf(f, "TLB flush count %zu\n", tlb_flush_count()); 2316 tcg_dump_info(f, cpu_fprintf); 2317 } 2318 2319 void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf) 2320 { 2321 tcg_dump_op_count(f, cpu_fprintf); 2322 } 2323 2324 #else /* CONFIG_USER_ONLY */ 2325 2326 void cpu_interrupt(CPUState *cpu, int mask) 2327 { 2328 g_assert(qemu_mutex_iothread_locked()); 2329 cpu->interrupt_request |= mask; 2330 cpu->icount_decr.u16.high = -1; 2331 } 2332 2333 /* 2334 * Walks guest process memory "regions" one by one 2335 * and calls callback function 'fn' for each region. 2336 */ 2337 struct walk_memory_regions_data { 2338 walk_memory_regions_fn fn; 2339 void *priv; 2340 target_ulong start; 2341 int prot; 2342 }; 2343 2344 static int walk_memory_regions_end(struct walk_memory_regions_data *data, 2345 target_ulong end, int new_prot) 2346 { 2347 if (data->start != -1u) { 2348 int rc = data->fn(data->priv, data->start, end, data->prot); 2349 if (rc != 0) { 2350 return rc; 2351 } 2352 } 2353 2354 data->start = (new_prot ? 
end : -1u); 2355 data->prot = new_prot; 2356 2357 return 0; 2358 } 2359 2360 static int walk_memory_regions_1(struct walk_memory_regions_data *data, 2361 target_ulong base, int level, void **lp) 2362 { 2363 target_ulong pa; 2364 int i, rc; 2365 2366 if (*lp == NULL) { 2367 return walk_memory_regions_end(data, base, 0); 2368 } 2369 2370 if (level == 0) { 2371 PageDesc *pd = *lp; 2372 2373 for (i = 0; i < V_L2_SIZE; ++i) { 2374 int prot = pd[i].flags; 2375 2376 pa = base | (i << TARGET_PAGE_BITS); 2377 if (prot != data->prot) { 2378 rc = walk_memory_regions_end(data, pa, prot); 2379 if (rc != 0) { 2380 return rc; 2381 } 2382 } 2383 } 2384 } else { 2385 void **pp = *lp; 2386 2387 for (i = 0; i < V_L2_SIZE; ++i) { 2388 pa = base | ((target_ulong)i << 2389 (TARGET_PAGE_BITS + V_L2_BITS * level)); 2390 rc = walk_memory_regions_1(data, pa, level - 1, pp + i); 2391 if (rc != 0) { 2392 return rc; 2393 } 2394 } 2395 } 2396 2397 return 0; 2398 } 2399 2400 int walk_memory_regions(void *priv, walk_memory_regions_fn fn) 2401 { 2402 struct walk_memory_regions_data data; 2403 uintptr_t i, l1_sz = v_l1_size; 2404 2405 data.fn = fn; 2406 data.priv = priv; 2407 data.start = -1u; 2408 data.prot = 0; 2409 2410 for (i = 0; i < l1_sz; i++) { 2411 target_ulong base = i << (v_l1_shift + TARGET_PAGE_BITS); 2412 int rc = walk_memory_regions_1(&data, base, v_l2_levels, l1_map + i); 2413 if (rc != 0) { 2414 return rc; 2415 } 2416 } 2417 2418 return walk_memory_regions_end(&data, 0, 0); 2419 } 2420 2421 static int dump_region(void *priv, target_ulong start, 2422 target_ulong end, unsigned long prot) 2423 { 2424 FILE *f = (FILE *)priv; 2425 2426 (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx 2427 " "TARGET_FMT_lx" %c%c%c\n", 2428 start, end, end - start, 2429 ((prot & PAGE_READ) ? 'r' : '-'), 2430 ((prot & PAGE_WRITE) ? 'w' : '-'), 2431 ((prot & PAGE_EXEC) ? 'x' : '-')); 2432 2433 return 0; 2434 } 2435 2436 /* dump memory mappings */ 2437 void page_dump(FILE *f) 2438 { 2439 const int length = sizeof(target_ulong) * 2; 2440 (void) fprintf(f, "%-*s %-*s %-*s %s\n", 2441 length, "start", length, "end", length, "size", "prot"); 2442 walk_memory_regions(f, dump_region); 2443 } 2444 2445 int page_get_flags(target_ulong address) 2446 { 2447 PageDesc *p; 2448 2449 p = page_find(address >> TARGET_PAGE_BITS); 2450 if (!p) { 2451 return 0; 2452 } 2453 return p->flags; 2454 } 2455 2456 /* Modify the flags of a page and invalidate the code if necessary. 2457 The flag PAGE_WRITE_ORG is positioned automatically depending 2458 on PAGE_WRITE. The mmap_lock should already be held. */ 2459 void page_set_flags(target_ulong start, target_ulong end, int flags) 2460 { 2461 target_ulong addr, len; 2462 2463 /* This function should never be called with addresses outside the 2464 guest address space. If this assert fires, it probably indicates 2465 a missing call to h2g_valid. */ 2466 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS 2467 assert(end <= ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS)); 2468 #endif 2469 assert(start < end); 2470 assert_memory_lock(); 2471 2472 start = start & TARGET_PAGE_MASK; 2473 end = TARGET_PAGE_ALIGN(end); 2474 2475 if (flags & PAGE_WRITE) { 2476 flags |= PAGE_WRITE_ORG; 2477 } 2478 2479 for (addr = start, len = end - start; 2480 len != 0; 2481 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) { 2482 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1); 2483 2484 /* If the write protection bit is set, then we invalidate 2485 the code inside. 
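That is, a page which currently holds translated code (first_tb set) and is not writable is being made writable, so its TBs are dropped before the guest can modify the code.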
*/ 2486 if (!(p->flags & PAGE_WRITE) && 2487 (flags & PAGE_WRITE) && 2488 p->first_tb) { 2489 tb_invalidate_phys_page(addr, 0); 2490 } 2491 p->flags = flags; 2492 } 2493 } 2494 2495 int page_check_range(target_ulong start, target_ulong len, int flags) 2496 { 2497 PageDesc *p; 2498 target_ulong end; 2499 target_ulong addr; 2500 2501 /* This function should never be called with addresses outside the 2502 guest address space. If this assert fires, it probably indicates 2503 a missing call to h2g_valid. */ 2504 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS 2505 assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS)); 2506 #endif 2507 2508 if (len == 0) { 2509 return 0; 2510 } 2511 if (start + len - 1 < start) { 2512 /* We've wrapped around. */ 2513 return -1; 2514 } 2515 2516 /* must do before we lose bits in the next step */ 2517 end = TARGET_PAGE_ALIGN(start + len); 2518 start = start & TARGET_PAGE_MASK; 2519 2520 for (addr = start, len = end - start; 2521 len != 0; 2522 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) { 2523 p = page_find(addr >> TARGET_PAGE_BITS); 2524 if (!p) { 2525 return -1; 2526 } 2527 if (!(p->flags & PAGE_VALID)) { 2528 return -1; 2529 } 2530 2531 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) { 2532 return -1; 2533 } 2534 if (flags & PAGE_WRITE) { 2535 if (!(p->flags & PAGE_WRITE_ORG)) { 2536 return -1; 2537 } 2538 /* unprotect the page if it was put read-only because it 2539 contains translated code */ 2540 if (!(p->flags & PAGE_WRITE)) { 2541 if (!page_unprotect(addr, 0)) { 2542 return -1; 2543 } 2544 } 2545 } 2546 } 2547 return 0; 2548 } 2549 2550 /* called from signal handler: invalidate the code and unprotect the 2551 * page. Return 0 if the fault was not handled, 1 if it was handled, 2552 * and 2 if it was handled but the caller must cause the TB to be 2553 * immediately exited. (We can only return 2 if the 'pc' argument is 2554 * non-zero.) 2555 */ 2556 int page_unprotect(target_ulong address, uintptr_t pc) 2557 { 2558 unsigned int prot; 2559 bool current_tb_invalidated; 2560 PageDesc *p; 2561 target_ulong host_start, host_end, addr; 2562 2563 /* Technically this isn't safe inside a signal handler. However we 2564 know this only ever happens in a synchronous SEGV handler, so in 2565 practice it seems to be ok. */ 2566 mmap_lock(); 2567 2568 p = page_find(address >> TARGET_PAGE_BITS); 2569 if (!p) { 2570 mmap_unlock(); 2571 return 0; 2572 } 2573 2574 /* if the page was really writable, then we change its 2575 protection back to writable */ 2576 if (p->flags & PAGE_WRITE_ORG) { 2577 current_tb_invalidated = false; 2578 if (p->flags & PAGE_WRITE) { 2579 /* If the page is actually marked WRITE then assume this is because 2580 * this thread raced with another one which got here first and 2581 * set the page to PAGE_WRITE and did the TB invalidate for us. 2582 */ 2583 #ifdef TARGET_HAS_PRECISE_SMC 2584 TranslationBlock *current_tb = tcg_tb_lookup(pc); 2585 if (current_tb) { 2586 current_tb_invalidated = tb_cflags(current_tb) & CF_INVALID; 2587 } 2588 #endif 2589 } else { 2590 host_start = address & qemu_host_page_mask; 2591 host_end = host_start + qemu_host_page_size; 2592 2593 prot = 0; 2594 for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) { 2595 p = page_find(addr >> TARGET_PAGE_BITS); 2596 p->flags |= PAGE_WRITE; 2597 prot |= p->flags; 2598 2599 /* and since the content will be modified, we must invalidate 2600 the corresponding translated code.
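tb_invalidate_phys_page() also reports whether the TB we are currently executing was among those dropped, which determines the 2-versus-1 return value below.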
*/ 2601 current_tb_invalidated |= tb_invalidate_phys_page(addr, pc); 2602 #ifdef CONFIG_USER_ONLY 2603 if (DEBUG_TB_CHECK_GATE) { 2604 tb_invalidate_check(addr); 2605 } 2606 #endif 2607 } 2608 mprotect((void *)g2h(host_start), qemu_host_page_size, 2609 prot & PAGE_BITS); 2610 } 2611 mmap_unlock(); 2612 /* If current TB was invalidated return to main loop */ 2613 return current_tb_invalidated ? 2 : 1; 2614 } 2615 mmap_unlock(); 2616 return 0; 2617 } 2618 #endif /* CONFIG_USER_ONLY */ 2619 2620 /* This is a wrapper for common code that can not use CONFIG_SOFTMMU */ 2621 void tcg_flush_softmmu_tlb(CPUState *cs) 2622 { 2623 #ifdef CONFIG_SOFTMMU 2624 tlb_flush(cs); 2625 #endif 2626 } 2627
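/*
 * Illustrative sketches only, kept compiled out: neither helper below exists
 * in QEMU; the names are invented here to show how the APIs defined in this
 * file are meant to fit together.
 */
#if 0
/*
 * Softmmu side: a write into a page holding translated code must be done
 * with every page it can touch locked, in the ascending page-index order
 * that page_collection_lock() guarantees, before the fast invalidate runs.
 */
static void example_code_write(tb_page_addr_t ram_addr, int len)
{
    struct page_collection *pages;

    pages = page_collection_lock(ram_addr, ram_addr + len);
    tb_invalidate_phys_page_fast(pages, ram_addr, len);
    page_collection_unlock(pages);
}

/*
 * User-mode side: walk_memory_regions() hands each contiguous run of guest
 * pages with identical protection bits to a callback, just as dump_region()
 * does for page_dump(); this example callback merely totals executable bytes.
 */
static int count_exec_region(void *priv, target_ulong start,
                             target_ulong end, unsigned long prot)
{
    target_ulong *total = priv;

    if (prot & PAGE_EXEC) {
        *total += end - start;  /* regions are half-open: [start, end[ */
    }
    return 0;                   /* a non-zero return would abort the walk */
}

static target_ulong count_exec_bytes(void)
{
    target_ulong total = 0;

    walk_memory_regions(&total, count_exec_region);
    return total;
}
#endif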