/*
 * Host code generation
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"

#define NO_CPU_IO_DEFS
#include "trace.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else
#include "exec/ram_addr.h"
#endif

#include "exec/cputlb.h"
#include "exec/translate-all.h"
#include "exec/translator.h"
#include "qemu/bitmap.h"
#include "qemu/qemu-print.h"
#include "qemu/timer.h"
#include "qemu/main-loop.h"
#include "qemu/cacheinfo.h"
#include "exec/log.h"
#include "sysemu/cpus.h"
#include "sysemu/cpu-timers.h"
#include "sysemu/tcg.h"
#include "qapi/error.h"
#include "hw/core/tcg-cpu-ops.h"
#include "tb-jmp-cache.h"
#include "tb-hash.h"
#include "tb-context.h"
#include "internal.h"

/* #define DEBUG_TB_INVALIDATE */
/* #define DEBUG_TB_FLUSH */
/* make various TB consistency checks */
/* #define DEBUG_TB_CHECK */

#ifdef DEBUG_TB_INVALIDATE
#define DEBUG_TB_INVALIDATE_GATE 1
#else
#define DEBUG_TB_INVALIDATE_GATE 0
#endif

#ifdef DEBUG_TB_FLUSH
#define DEBUG_TB_FLUSH_GATE 1
#else
#define DEBUG_TB_FLUSH_GATE 0
#endif

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#ifdef DEBUG_TB_CHECK
#define DEBUG_TB_CHECK_GATE 1
#else
#define DEBUG_TB_CHECK_GATE 0
#endif

/*
 * Accesses to the various translation structures need to be serialised
 * via locks for consistency.
 * In user-mode emulation, accesses to the memory-related structures are
 * protected by the mmap_lock.
 * In !user-mode we use per-page locks.
 */
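
/*
 * Illustrative sketch (not part of the build): a user-mode caller honouring
 * the rule above brackets any walk or update of these structures with the
 * memory lock, e.g.:
 *
 *     mmap_lock();
 *     tb_invalidate_phys_range(start, end);
 *     mmap_unlock();
 *
 * In !user-mode the same operations take the per-page locks internally
 * (see page_collection_lock() later in this file), so no mmap_lock is
 * involved.
 */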
#ifdef CONFIG_SOFTMMU
#define assert_memory_lock()
#else
#define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
#endif

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    uintptr_t first_tb;
#ifdef CONFIG_USER_ONLY
    unsigned long flags;
    void *target_data;
#endif
#ifdef CONFIG_SOFTMMU
    QemuSpin lock;
#endif
} PageDesc;

/**
 * struct page_entry - page descriptor entry
 * @pd:     pointer to the &struct PageDesc of the page this entry represents
 * @index:  page index of the page
 * @locked: whether the page is locked
 *
 * This struct helps us keep track of the locked state of a page, without
 * bloating &struct PageDesc.
 *
 * A page lock protects accesses to all fields of &struct PageDesc.
 *
 * See also: &struct page_collection.
 */
struct page_entry {
    PageDesc *pd;
    tb_page_addr_t index;
    bool locked;
};

/**
 * struct page_collection - tracks a set of pages (i.e. &struct page_entry's)
 * @tree:   Binary search tree (BST) of the pages, with key == page index
 * @max:    Pointer to the page in @tree with the highest page index
 *
 * To avoid deadlock we lock pages in ascending order of page index.
 * When operating on a set of pages, we need to keep track of them so that
 * we can lock them in order and also unlock them later. For this we collect
 * pages (i.e. &struct page_entry's) in a binary search @tree. Given that the
 * @tree implementation we use does not provide an O(1) operation to obtain the
 * highest-ranked element, we use @max to keep track of the inserted page
 * with the highest index. This is valuable because if a page is not in
 * the tree and its index is higher than @max's, then we can lock it
 * without breaking the locking order rule.
 *
 * Note on naming: 'struct page_set' would be shorter, but we already have a few
 * page_set_*() helpers, so page_collection is used instead to avoid confusion.
 *
 * See also: page_collection_lock().
 */
struct page_collection {
    GTree *tree;
    struct page_entry *max;
};

/* list iterators for lists of tagged pointers in TranslationBlock */
#define TB_FOR_EACH_TAGGED(head, tb, n, field)                             \
    for (n = (head) & 1, tb = (TranslationBlock *)((head) & ~1);           \
         tb; tb = (TranslationBlock *)tb->field[n], n = (uintptr_t)tb & 1, \
             tb = (TranslationBlock *)((uintptr_t)tb & ~1))

#define PAGE_FOR_EACH_TB(pagedesc, tb, n)                       \
    TB_FOR_EACH_TAGGED((pagedesc)->first_tb, tb, n, page_next)

#define TB_FOR_EACH_JMP(head_tb, tb, n)                                 \
    TB_FOR_EACH_TAGGED((head_tb)->jmp_list_head, tb, n, jmp_list_next)
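
/*
 * Worked example for the tagged-pointer lists above (illustrative): a TB
 * that spans two physical pages is linked into both pages' first_tb lists.
 * The low bit of each link records which of the TB's two page slots the
 * link belongs to, i.e. tb_page_add() later in this file stores
 *
 *     p->first_tb = (uintptr_t)tb | n;    // n is 0 or 1
 *
 * and TB_FOR_EACH_TAGGED() strips the tag to recover the pointer, then
 * follows tb->page_next[n] (or tb->jmp_list_next[n] for the jump lists)
 * to reach the next entry.
 */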
/*
 * In system mode we want L1_MAP to be based on ram offsets,
 * while in user mode we want it to be based on virtual addresses.
 *
 * TODO: For user mode, see the caveat re host vs guest virtual
 * address spaces near GUEST_ADDR_MAX.
 */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  MIN(HOST_LONG_BITS, TARGET_ABI_BITS)
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define V_L2_BITS 10
#define V_L2_SIZE (1 << V_L2_BITS)

/* Make sure all possible CPU event bits fit in tb->trace_vcpu_dstate */
QEMU_BUILD_BUG_ON(CPU_TRACE_DSTATE_MAX_EVENTS >
                  sizeof_field(TranslationBlock, trace_vcpu_dstate)
                  * BITS_PER_BYTE);

/*
 * L1 Mapping properties
 */
static int v_l1_size;
static int v_l1_shift;
static int v_l2_levels;

/*
 * The bottom level has pointers to PageDesc, and is indexed by
 * anything from 4 to (V_L2_BITS + 3) bits, depending on target page size.
 */
#define V_L1_MIN_BITS 4
#define V_L1_MAX_BITS (V_L2_BITS + 3)
#define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS)

static void *l1_map[V_L1_MAX_SIZE];

TBContext tb_ctx;

static void page_table_config_init(void)
{
    uint32_t v_l1_bits;

    assert(TARGET_PAGE_BITS);
    /* The bits remaining after N lower levels of page tables.  */
    v_l1_bits = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS;
    if (v_l1_bits < V_L1_MIN_BITS) {
        v_l1_bits += V_L2_BITS;
    }

    v_l1_size = 1 << v_l1_bits;
    v_l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits;
    v_l2_levels = v_l1_shift / V_L2_BITS - 1;

    assert(v_l1_bits <= V_L1_MAX_BITS);
    assert(v_l1_shift % V_L2_BITS == 0);
    assert(v_l2_levels >= 0);
}

/* Encode VAL as a signed leb128 sequence at P.
   Return P incremented past the encoded value.  */
static uint8_t *encode_sleb128(uint8_t *p, target_long val)
{
    int more, byte;

    do {
        byte = val & 0x7f;
        val >>= 7;
        more = !((val == 0 && (byte & 0x40) == 0)
                 || (val == -1 && (byte & 0x40) != 0));
        if (more) {
            byte |= 0x80;
        }
        *p++ = byte;
    } while (more);

    return p;
}

/* Decode a signed leb128 sequence at *PP; increment *PP past the
   decoded value.  Return the decoded value.  */
static target_long decode_sleb128(const uint8_t **pp)
{
    const uint8_t *p = *pp;
    target_long val = 0;
    int byte, shift = 0;

    do {
        byte = *p++;
        val |= (target_ulong)(byte & 0x7f) << shift;
        shift += 7;
    } while (byte & 0x80);
    if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
        val |= -(target_ulong)1 << shift;
    }

    *pp = p;
    return val;
}
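
/*
 * Worked example (illustrative, not used by the code): encode_sleb128()
 * emits seven bits per byte, least-significant group first, with the top
 * bit flagging continuation and bit 0x40 of the last byte carrying the
 * sign:
 *
 *     200 -> 0xc8 0x01   (200 & 0x7f = 0x48, continuation bit set; then 1)
 *      -2 -> 0x7e        (bit 0x40 set, so a single byte suffices)
 *
 * decode_sleb128() reverses this, sign-extending from bit 0x40 of the
 * final byte when the result is narrower than TARGET_LONG_BITS.
 */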
/*
 * Encode the data collected about the instructions while compiling TB.
 * Place the data at BLOCK, and return the number of bytes consumed.
 *
 * The logical table consists of TARGET_INSN_START_WORDS target_ulong's,
 * which come from the target's insn_start data, followed by a uintptr_t
 * which comes from the host pc of the end of the code implementing the insn.
 *
 * Each line of the table is encoded as sleb128 deltas from the previous
 * line.  The seed for the first line is { tb->pc, 0..., tb->tc.ptr }.
 * That is, the first column is seeded with the guest pc, the last column
 * with the host pc, and the middle columns with zeros.
 */
static int encode_search(TranslationBlock *tb, uint8_t *block)
{
    uint8_t *highwater = tcg_ctx->code_gen_highwater;
    uint8_t *p = block;
    int i, j, n;

    for (i = 0, n = tb->icount; i < n; ++i) {
        target_ulong prev;

        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            if (i == 0) {
                prev = (j == 0 ? tb_pc(tb) : 0);
            } else {
                prev = tcg_ctx->gen_insn_data[i - 1][j];
            }
            p = encode_sleb128(p, tcg_ctx->gen_insn_data[i][j] - prev);
        }
        prev = (i == 0 ? 0 : tcg_ctx->gen_insn_end_off[i - 1]);
        p = encode_sleb128(p, tcg_ctx->gen_insn_end_off[i] - prev);

        /*
         * Test for (pending) buffer overflow.  The assumption is that any
         * one row beginning below the high water mark cannot overrun
         * the buffer completely.  Thus we can test for overflow after
         * encoding a row without having to check during encoding.
         */
        if (unlikely(p > highwater)) {
            return -1;
        }
    }

    return p - block;
}

/*
 * The cpu state corresponding to 'searched_pc' is restored.
 * When reset_icount is true, current TB will be interrupted and
 * icount should be recalculated.
 */
static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                                     uintptr_t searched_pc, bool reset_icount)
{
    target_ulong data[TARGET_INSN_START_WORDS] = { tb_pc(tb) };
    uintptr_t host_pc = (uintptr_t)tb->tc.ptr;
    CPUArchState *env = cpu->env_ptr;
    const uint8_t *p = tb->tc.ptr + tb->tc.size;
    int i, j, num_insns = tb->icount;
#ifdef CONFIG_PROFILER
    TCGProfile *prof = &tcg_ctx->prof;
    int64_t ti = profile_getclock();
#endif

    searched_pc -= GETPC_ADJ;

    if (searched_pc < host_pc) {
        return -1;
    }

    /*
     * Reconstruct the stored insn data while looking for the point at
     * which the end of the insn exceeds the searched_pc.
     */
    for (i = 0; i < num_insns; ++i) {
        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            data[j] += decode_sleb128(&p);
        }
        host_pc += decode_sleb128(&p);
        if (host_pc > searched_pc) {
            goto found;
        }
    }
    return -1;

 found:
    if (reset_icount && (tb_cflags(tb) & CF_USE_ICOUNT)) {
        assert(icount_enabled());
        /*
         * Reset the cycle counter to the start of the block and
         * shift it to the number of actually executed instructions.
         */
        cpu_neg(cpu)->icount_decr.u16.low += num_insns - i;
    }
    restore_state_to_opc(env, tb, data);

#ifdef CONFIG_PROFILER
    qatomic_set(&prof->restore_time,
                prof->restore_time + profile_getclock() - ti);
    qatomic_set(&prof->restore_count, prof->restore_count + 1);
#endif
    return 0;
}

bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc, bool will_exit)
{
    /*
     * The host_pc has to be in the rx region of the code buffer.
     * If it is not we will not be able to resolve it here.
     * The two cases where host_pc will not be correct are:
     *
     *  - fault during translation (instruction fetch)
     *  - fault from helper (not using GETPC() macro)
     *
     * Either way we need to return early as we can't resolve it here.
     */
    if (in_code_gen_buffer((const void *)(host_pc - tcg_splitwx_diff))) {
        TranslationBlock *tb = tcg_tb_lookup(host_pc);
        if (tb) {
            cpu_restore_state_from_tb(cpu, tb, host_pc, will_exit);
            return true;
        }
    }
    return false;
}
void page_init(void)
{
    page_size_init();
    page_table_config_init();

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}
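
/*
 * Worked example for the table geometry (illustrative numbers): with
 * L1_MAP_ADDR_SPACE_BITS == 32 and TARGET_PAGE_BITS == 12,
 * page_table_config_init() computes
 *
 *     v_l1_bits   = (32 - 12) % 10 = 0  -> bumped to 10 (>= V_L1_MIN_BITS)
 *     v_l1_size   = 1024
 *     v_l1_shift  = 32 - 12 - 10 = 10
 *     v_l2_levels = 10 / 10 - 1 = 0
 *
 * i.e. a two-level table: bits [19:10] of the page index select the l1_map
 * slot and bits [9:0] index the leaf PageDesc array that page_find_alloc()
 * below walks to.
 */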
static PageDesc *page_find_alloc(tb_page_addr_t index, bool alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));

    /* Level 2..N-1.  */
    for (i = v_l2_levels; i > 0; i--) {
        void **p = qatomic_rcu_read(lp);

        if (p == NULL) {
            void *existing;

            if (!alloc) {
                return NULL;
            }
            p = g_new0(void *, V_L2_SIZE);
            existing = qatomic_cmpxchg(lp, NULL, p);
            if (unlikely(existing)) {
                g_free(p);
                p = existing;
            }
        }

        lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
    }

    pd = qatomic_rcu_read(lp);
    if (pd == NULL) {
        void *existing;

        if (!alloc) {
            return NULL;
        }
        pd = g_new0(PageDesc, V_L2_SIZE);
#ifndef CONFIG_USER_ONLY
        {
            int i;

            for (i = 0; i < V_L2_SIZE; i++) {
                qemu_spin_init(&pd[i].lock);
            }
        }
#endif
        existing = qatomic_cmpxchg(lp, NULL, pd);
        if (unlikely(existing)) {
#ifndef CONFIG_USER_ONLY
            {
                int i;

                for (i = 0; i < V_L2_SIZE; i++) {
                    qemu_spin_destroy(&pd[i].lock);
                }
            }
#endif
            g_free(pd);
            pd = existing;
        }
    }

    return pd + (index & (V_L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, false);
}

static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
                           PageDesc **ret_p2, tb_page_addr_t phys2, bool alloc);

/* In user-mode page locks aren't used; mmap_lock is enough */
#ifdef CONFIG_USER_ONLY

#define assert_page_locked(pd) tcg_debug_assert(have_mmap_lock())

static inline void page_lock(PageDesc *pd)
{ }

static inline void page_unlock(PageDesc *pd)
{ }

static inline void page_lock_tb(const TranslationBlock *tb)
{ }

static inline void page_unlock_tb(const TranslationBlock *tb)
{ }

struct page_collection *
page_collection_lock(tb_page_addr_t start, tb_page_addr_t end)
{
    return NULL;
}

void page_collection_unlock(struct page_collection *set)
{ }
#else /* !CONFIG_USER_ONLY */

#ifdef CONFIG_DEBUG_TCG

static __thread GHashTable *ht_pages_locked_debug;

static void ht_pages_locked_debug_init(void)
{
    if (ht_pages_locked_debug) {
        return;
    }
    ht_pages_locked_debug = g_hash_table_new(NULL, NULL);
}

static bool page_is_locked(const PageDesc *pd)
{
    PageDesc *found;

    ht_pages_locked_debug_init();
    found = g_hash_table_lookup(ht_pages_locked_debug, pd);
    return !!found;
}

static void page_lock__debug(PageDesc *pd)
{
    ht_pages_locked_debug_init();
    g_assert(!page_is_locked(pd));
    g_hash_table_insert(ht_pages_locked_debug, pd, pd);
}

static void page_unlock__debug(const PageDesc *pd)
{
    bool removed;

    ht_pages_locked_debug_init();
    g_assert(page_is_locked(pd));
    removed = g_hash_table_remove(ht_pages_locked_debug, pd);
    g_assert(removed);
}

static void
do_assert_page_locked(const PageDesc *pd, const char *file, int line)
{
    if (unlikely(!page_is_locked(pd))) {
        error_report("assert_page_lock: PageDesc %p not locked @ %s:%d",
                     pd, file, line);
        abort();
    }
}

#define assert_page_locked(pd) do_assert_page_locked(pd, __FILE__, __LINE__)

void assert_no_pages_locked(void)
{
    ht_pages_locked_debug_init();
    g_assert(g_hash_table_size(ht_pages_locked_debug) == 0);
}

#else /* !CONFIG_DEBUG_TCG */

#define assert_page_locked(pd)

static inline void page_lock__debug(const PageDesc *pd)
{
}

static inline void
page_unlock__debug(const PageDesc *pd)
{
}

#endif /* CONFIG_DEBUG_TCG */

static inline void page_lock(PageDesc *pd)
{
    page_lock__debug(pd);
    qemu_spin_lock(&pd->lock);
}

static inline void page_unlock(PageDesc *pd)
{
    qemu_spin_unlock(&pd->lock);
    page_unlock__debug(pd);
}

/* lock the page(s) of a TB in the correct acquisition order */
static inline void page_lock_tb(const TranslationBlock *tb)
{
    page_lock_pair(NULL, tb->page_addr[0], NULL, tb->page_addr[1], false);
}

static inline void page_unlock_tb(const TranslationBlock *tb)
{
    PageDesc *p1 = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);

    page_unlock(p1);
    if (unlikely(tb->page_addr[1] != -1)) {
        PageDesc *p2 = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);

        if (p2 != p1) {
            page_unlock(p2);
        }
    }
}

static inline struct page_entry *
page_entry_new(PageDesc *pd, tb_page_addr_t index)
{
    struct page_entry *pe = g_malloc(sizeof(*pe));

    pe->index = index;
    pe->pd = pd;
    pe->locked = false;
    return pe;
}

static void page_entry_destroy(gpointer p)
{
    struct page_entry *pe = p;

    g_assert(pe->locked);
    page_unlock(pe->pd);
    g_free(pe);
}

/* returns false on success */
static bool page_entry_trylock(struct page_entry *pe)
{
    bool busy;

    busy = qemu_spin_trylock(&pe->pd->lock);
    if (!busy) {
        g_assert(!pe->locked);
        pe->locked = true;
        page_lock__debug(pe->pd);
    }
    return busy;
}

static void do_page_entry_lock(struct page_entry *pe)
{
    page_lock(pe->pd);
    g_assert(!pe->locked);
    pe->locked = true;
}

static gboolean page_entry_lock(gpointer key, gpointer value, gpointer data)
{
    struct page_entry *pe = value;

    do_page_entry_lock(pe);
    return FALSE;
}

static gboolean page_entry_unlock(gpointer key, gpointer value, gpointer data)
{
    struct page_entry *pe = value;

    if (pe->locked) {
        pe->locked = false;
        page_unlock(pe->pd);
    }
    return FALSE;
}

/*
 * Trylock a page, and if successful, add the page to a collection.
 * Returns true ("busy") if the page could not be locked; false otherwise.
 */
static bool page_trylock_add(struct page_collection *set, tb_page_addr_t addr)
{
    tb_page_addr_t index = addr >> TARGET_PAGE_BITS;
    struct page_entry *pe;
    PageDesc *pd;

    pe = g_tree_lookup(set->tree, &index);
    if (pe) {
        return false;
    }

    pd = page_find(index);
    if (pd == NULL) {
        return false;
    }

    pe = page_entry_new(pd, index);
    g_tree_insert(set->tree, &pe->index, pe);

    /*
     * If this is either (1) the first insertion or (2) a page whose index
     * is higher than any other so far, just lock the page and move on.
     */
    if (set->max == NULL || pe->index > set->max->index) {
        set->max = pe;
        do_page_entry_lock(pe);
        return false;
    }
    /*
     * Try to acquire out-of-order lock; if busy, return busy so that we
     * acquire locks in order.
     */
    return page_entry_trylock(pe);
}

static gint tb_page_addr_cmp(gconstpointer ap, gconstpointer bp, gpointer udata)
{
    tb_page_addr_t a = *(const tb_page_addr_t *)ap;
    tb_page_addr_t b = *(const tb_page_addr_t *)bp;

    if (a == b) {
        return 0;
    } else if (a < b) {
        return -1;
    }
    return 1;
}

/*
 * Lock a range of pages ([@start,@end[) as well as the pages of all
 * intersecting TBs.
 * Locking order: acquire locks in ascending order of page index.
 */
struct page_collection *
page_collection_lock(tb_page_addr_t start, tb_page_addr_t end)
{
    struct page_collection *set = g_malloc(sizeof(*set));
    tb_page_addr_t index;
    PageDesc *pd;

    start >>= TARGET_PAGE_BITS;
    end   >>= TARGET_PAGE_BITS;
    g_assert(start <= end);

    set->tree = g_tree_new_full(tb_page_addr_cmp, NULL, NULL,
                                page_entry_destroy);
    set->max = NULL;
    assert_no_pages_locked();

 retry:
    g_tree_foreach(set->tree, page_entry_lock, NULL);

    for (index = start; index <= end; index++) {
        TranslationBlock *tb;
        int n;

        pd = page_find(index);
        if (pd == NULL) {
            continue;
        }
        if (page_trylock_add(set, index << TARGET_PAGE_BITS)) {
            g_tree_foreach(set->tree, page_entry_unlock, NULL);
            goto retry;
        }
        assert_page_locked(pd);
        PAGE_FOR_EACH_TB(pd, tb, n) {
            if (page_trylock_add(set, tb->page_addr[0]) ||
                (tb->page_addr[1] != -1 &&
                 page_trylock_add(set, tb->page_addr[1]))) {
                /* drop all locks, and reacquire in order */
                g_tree_foreach(set->tree, page_entry_unlock, NULL);
                goto retry;
            }
        }
    }
    return set;
}

void page_collection_unlock(struct page_collection *set)
{
    /* entries are unlocked and freed via page_entry_destroy */
    g_tree_destroy(set->tree);
    g_free(set);
}

#endif /* !CONFIG_USER_ONLY */

static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
                           PageDesc **ret_p2, tb_page_addr_t phys2, bool alloc)
{
    PageDesc *p1, *p2;
    tb_page_addr_t page1;
    tb_page_addr_t page2;

    assert_memory_lock();
    g_assert(phys1 != -1);

    page1 = phys1 >> TARGET_PAGE_BITS;
    page2 = phys2 >> TARGET_PAGE_BITS;

    p1 = page_find_alloc(page1, alloc);
    if (ret_p1) {
        *ret_p1 = p1;
    }
    if (likely(phys2 == -1)) {
        page_lock(p1);
        return;
    } else if (page1 == page2) {
        page_lock(p1);
        if (ret_p2) {
            *ret_p2 = p1;
        }
        return;
    }
    p2 = page_find_alloc(page2, alloc);
    if (ret_p2) {
        *ret_p2 = p2;
    }
    if (page1 < page2) {
        page_lock(p1);
        page_lock(p2);
    } else {
        page_lock(p2);
        page_lock(p1);
    }
}

static bool tb_cmp(const void *ap, const void *bp)
{
    const TranslationBlock *a = ap;
    const TranslationBlock *b = bp;

    return tb_pc(a) == tb_pc(b) &&
        a->cs_base == b->cs_base &&
        a->flags == b->flags &&
        (tb_cflags(a) & ~CF_INVALID) == (tb_cflags(b) & ~CF_INVALID) &&
        a->trace_vcpu_dstate == b->trace_vcpu_dstate &&
        a->page_addr[0] == b->page_addr[0] &&
        a->page_addr[1] == b->page_addr[1];
}

void tb_htable_init(void)
{
    unsigned int mode = QHT_MODE_AUTO_RESIZE;

    qht_init(&tb_ctx.htable, tb_cmp, CODE_GEN_HTABLE_SIZE, mode);
}
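
/*
 * Note (explanatory): lookups in tb_ctx.htable hash the same execution
 * context that tb_cmp() starts by comparing -- tb_hash_func() mixes the
 * physical pc, virtual pc, flags, cflags and trace_vcpu_dstate -- so a TB
 * can only be found by a caller whose context matches exactly; cs_base and
 * the page addresses are then checked by tb_cmp() on hash collision.
 */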
/* Set to NULL all the 'first_tb' fields in all PageDescs. */
static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            page_lock(&pd[i]);
            pd[i].first_tb = (uintptr_t)NULL;
            page_unlock(&pd[i]);
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i, l1_sz = v_l1_size;

    for (i = 0; i < l1_sz; i++) {
        page_flush_tb_1(v_l2_levels, l1_map + i);
    }
}

static gboolean tb_host_size_iter(gpointer key, gpointer value, gpointer data)
{
    const TranslationBlock *tb = value;
    size_t *size = data;

    *size += tb->tc.size;
    return false;
}

/* flush all the translation blocks */
static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
{
    bool did_flush = false;

    mmap_lock();
    /* If it has already been done on request of another CPU, just retry. */
    if (tb_ctx.tb_flush_count != tb_flush_count.host_int) {
        goto done;
    }
    did_flush = true;

    if (DEBUG_TB_FLUSH_GATE) {
        size_t nb_tbs = tcg_nb_tbs();
        size_t host_size = 0;

        tcg_tb_foreach(tb_host_size_iter, &host_size);
        printf("qemu: flush code_size=%zu nb_tbs=%zu avg_tb_size=%zu\n",
               tcg_code_size(), nb_tbs, nb_tbs > 0 ? host_size / nb_tbs : 0);
    }

    CPU_FOREACH(cpu) {
        tcg_flush_jmp_cache(cpu);
    }

    qht_reset_size(&tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
    page_flush_tb();

    tcg_region_reset_all();
    /* XXX: flush processor icache at this point if cache flush is expensive */
    qatomic_mb_set(&tb_ctx.tb_flush_count, tb_ctx.tb_flush_count + 1);

done:
    mmap_unlock();
    if (did_flush) {
        qemu_plugin_flush_cb();
    }
}

void tb_flush(CPUState *cpu)
{
    if (tcg_enabled()) {
        unsigned tb_flush_count = qatomic_mb_read(&tb_ctx.tb_flush_count);

        if (cpu_in_exclusive_context(cpu)) {
            do_tb_flush(cpu, RUN_ON_CPU_HOST_INT(tb_flush_count));
        } else {
            async_safe_run_on_cpu(cpu, do_tb_flush,
                                  RUN_ON_CPU_HOST_INT(tb_flush_count));
        }
    }
}

/*
 * Formerly ifdef DEBUG_TB_CHECK. These debug functions are user-mode-only,
 * so in order to prevent bit rot we compile them unconditionally in user-mode,
 * and let the optimizer get rid of them by wrapping their user-only callers
 * with if (DEBUG_TB_CHECK_GATE).
 */
#ifdef CONFIG_USER_ONLY

static void do_tb_invalidate_check(void *p, uint32_t hash, void *userp)
{
    TranslationBlock *tb = p;
    target_ulong addr = *(target_ulong *)userp;

    if (!(addr + TARGET_PAGE_SIZE <= tb_pc(tb) ||
          addr >= tb_pc(tb) + tb->size)) {
        printf("ERROR invalidate: address=" TARGET_FMT_lx
               " PC=%08lx size=%04x\n", addr, (long)tb_pc(tb), tb->size);
    }
}

/*
 * verify that all the pages have correct rights for code
 *
 * Called with mmap_lock held.
 */
static void tb_invalidate_check(target_ulong address)
{
    address &= TARGET_PAGE_MASK;
    qht_iter(&tb_ctx.htable, do_tb_invalidate_check, &address);
}

static void do_tb_page_check(void *p, uint32_t hash, void *userp)
{
    TranslationBlock *tb = p;
    int flags1, flags2;

    flags1 = page_get_flags(tb_pc(tb));
    flags2 = page_get_flags(tb_pc(tb) + tb->size - 1);
    if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
        printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
               (long)tb_pc(tb), tb->size, flags1, flags2);
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    qht_iter(&tb_ctx.htable, do_tb_page_check, NULL);
}

#endif /* CONFIG_USER_ONLY */

/*
 * user-mode: call with mmap_lock held
 * !user-mode: call with @pd->lock held
 */
static inline void tb_page_remove(PageDesc *pd, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    uintptr_t *pprev;
    unsigned int n1;

    assert_page_locked(pd);
    pprev = &pd->first_tb;
    PAGE_FOR_EACH_TB(pd, tb1, n1) {
        if (tb1 == tb) {
            *pprev = tb1->page_next[n1];
            return;
        }
        pprev = &tb1->page_next[n1];
    }
    g_assert_not_reached();
}

/* remove @orig from its @n_orig-th jump list */
static inline void tb_remove_from_jmp_list(TranslationBlock *orig, int n_orig)
{
    uintptr_t ptr, ptr_locked;
    TranslationBlock *dest;
    TranslationBlock *tb;
    uintptr_t *pprev;
    int n;

    /* mark the LSB of jmp_dest[] so that no further jumps can be inserted */
    ptr = qatomic_or_fetch(&orig->jmp_dest[n_orig], 1);
    dest = (TranslationBlock *)(ptr & ~1);
    if (dest == NULL) {
        return;
    }

    qemu_spin_lock(&dest->jmp_lock);
    /*
     * While acquiring the lock, the jump might have been removed if the
     * destination TB was invalidated; check again.
     */
    ptr_locked = qatomic_read(&orig->jmp_dest[n_orig]);
    if (ptr_locked != ptr) {
        qemu_spin_unlock(&dest->jmp_lock);
        /*
         * The only possibility is that the jump was unlinked via
         * tb_jump_unlink(dest). Seeing here another destination would be a bug,
         * because we set the LSB above.
         */
        g_assert(ptr_locked == 1 && dest->cflags & CF_INVALID);
        return;
    }
    /*
     * We first acquired the lock, and since the destination pointer matches,
     * we know for sure that @orig is in the jmp list.
     */
    pprev = &dest->jmp_list_head;
    TB_FOR_EACH_JMP(dest, tb, n) {
        if (tb == orig && n == n_orig) {
            *pprev = tb->jmp_list_next[n];
            /* no need to set orig->jmp_dest[n]; setting the LSB was enough */
            qemu_spin_unlock(&dest->jmp_lock);
            return;
        }
        pprev = &tb->jmp_list_next[n];
    }
    g_assert_not_reached();
}

/*
 * Reset the jump entry 'n' of a TB so that it is not chained to another TB.
 */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    uintptr_t addr = (uintptr_t)(tb->tc.ptr + tb->jmp_reset_offset[n]);
    tb_set_jmp_target(tb, n, addr);
}

/* remove any jumps to the TB */
static inline void tb_jmp_unlink(TranslationBlock *dest)
{
    TranslationBlock *tb;
    int n;

    qemu_spin_lock(&dest->jmp_lock);

    TB_FOR_EACH_JMP(dest, tb, n) {
        tb_reset_jump(tb, n);
        qatomic_and(&tb->jmp_dest[n], (uintptr_t)NULL | 1);
        /* No need to clear the list entry; setting the dest ptr is enough */
    }
    dest->jmp_list_head = (uintptr_t)NULL;

    qemu_spin_unlock(&dest->jmp_lock);
}

/*
 * In user-mode, call with mmap_lock held.
 * In !user-mode, if @rm_from_page_list is set, call with the TB's pages'
 * locks held.
 */
static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
{
    CPUState *cpu;
    PageDesc *p;
    uint32_t h;
    tb_page_addr_t phys_pc;
    uint32_t orig_cflags = tb_cflags(tb);

    assert_memory_lock();

    /* make sure no further incoming jumps will be chained to this TB */
    qemu_spin_lock(&tb->jmp_lock);
    qatomic_set(&tb->cflags, tb->cflags | CF_INVALID);
    qemu_spin_unlock(&tb->jmp_lock);

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0];
    h = tb_hash_func(phys_pc, tb_pc(tb), tb->flags, orig_cflags,
                     tb->trace_vcpu_dstate);
    if (!qht_remove(&tb_ctx.htable, tb, h)) {
        return;
    }

    /* remove the TB from the page list */
    if (rm_from_page_list) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(p, tb);
        if (tb->page_addr[1] != -1) {
            p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
            tb_page_remove(p, tb);
        }
    }

    /* remove the TB from the per-CPU jump caches */
    h = tb_jmp_cache_hash_func(tb->pc);
    CPU_FOREACH(cpu) {
        CPUJumpCache *jc = cpu->tb_jmp_cache;
        if (qatomic_read(&jc->array[h].tb) == tb) {
            qatomic_set(&jc->array[h].tb, NULL);
        }
    }

    /* suppress this TB from the two jump lists */
    tb_remove_from_jmp_list(tb, 0);
    tb_remove_from_jmp_list(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb_jmp_unlink(tb);

    qatomic_set(&tb_ctx.tb_phys_invalidate_count,
                tb_ctx.tb_phys_invalidate_count + 1);
}

static void tb_phys_invalidate__locked(TranslationBlock *tb)
{
    qemu_thread_jit_write();
    do_tb_phys_invalidate(tb, true);
    qemu_thread_jit_execute();
}

/*
 * Invalidate one TB.
 *
 * Called with mmap_lock held in user-mode.
 */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    if (page_addr == -1 && tb->page_addr[0] != -1) {
        page_lock_tb(tb);
        do_tb_phys_invalidate(tb, true);
        page_unlock_tb(tb);
    } else {
        do_tb_phys_invalidate(tb, false);
    }
}

/*
 * Add the tb in the target page and protect it if necessary.
 *
 * Called with mmap_lock held for user-mode emulation.
 * Called with @p->lock held in !user-mode.
 */
static inline void tb_page_add(PageDesc *p, TranslationBlock *tb,
                               unsigned int n, tb_page_addr_t page_addr)
{
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    assert_page_locked(p);

    tb->page_addr[n] = page_addr;
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != (uintptr_t)NULL;
#endif
    p->first_tb = (uintptr_t)tb | n;

#if defined(CONFIG_USER_ONLY)
    /* translator_loop() must have made all TB pages non-writable */
    assert(!(p->flags & PAGE_WRITE));
#else
    /*
     * If some code is already present, then the pages are already
     * protected. So we handle the case where only the first TB is
     * allocated in a physical page.
     */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif
}

/*
 * Add a new TB and link it to the physical page tables. phys_page2 is
 * (-1) to indicate that only one page contains the TB.
 *
 * Called with mmap_lock held for user-mode emulation.
 *
 * Returns @tb, or a pointer to an existing TB that matches @tb.
 * Note that in !user-mode, another thread might have already added a TB
 * for the same block of guest code that @tb corresponds to. In that case,
 * the caller should discard the original @tb, and use instead the returned TB.
 */
static TranslationBlock *
tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
             tb_page_addr_t phys_page2)
{
    PageDesc *p;
    PageDesc *p2 = NULL;
    void *existing_tb = NULL;
    uint32_t h;

    assert_memory_lock();
    tcg_debug_assert(!(tb->cflags & CF_INVALID));

    /*
     * Add the TB to the page list, acquiring first the pages' locks.
     * We keep the locks held until after inserting the TB in the hash table,
     * so that if the insertion fails we know for sure that the TBs are still
     * in the page descriptors.
     * Note that inserting into the hash table first isn't an option, since
     * we can only insert TBs that are fully initialized.
     */
    page_lock_pair(&p, phys_pc, &p2, phys_page2, true);
    tb_page_add(p, tb, 0, phys_pc);
    if (p2) {
        tb_page_add(p2, tb, 1, phys_page2);
    } else {
        tb->page_addr[1] = -1;
    }

    /* add in the hash table */
    h = tb_hash_func(phys_pc, tb_pc(tb), tb->flags, tb->cflags,
                     tb->trace_vcpu_dstate);
    qht_insert(&tb_ctx.htable, tb, h, &existing_tb);

    /* remove TB from the page(s) if we couldn't insert it */
    if (unlikely(existing_tb)) {
        tb_page_remove(p, tb);
        if (p2) {
            tb_page_remove(p2, tb);
        }
        tb = existing_tb;
    }

    if (p2 && p2 != p) {
        page_unlock(p2);
    }
    page_unlock(p);

#ifdef CONFIG_USER_ONLY
    if (DEBUG_TB_CHECK_GATE) {
        tb_page_check();
    }
#endif
    return tb;
}
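
/*
 * Illustrative caller pattern (assumed, matching the contract below): the
 * execution loop translates under the memory lock in user mode:
 *
 *     mmap_lock();
 *     tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
 *     mmap_unlock();
 *
 * In !user-mode, mmap_lock()/mmap_unlock() are no-ops and serialisation
 * falls to the page locks taken inside tb_link_page().
 */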
/* Called with mmap_lock held for user mode emulation. */
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base,
                              uint32_t flags, int cflags)
{
    CPUArchState *env = cpu->env_ptr;
    TranslationBlock *tb, *existing_tb;
    tb_page_addr_t phys_pc;
    tcg_insn_unit *gen_code_buf;
    int gen_code_size, search_size, max_insns;
#ifdef CONFIG_PROFILER
    TCGProfile *prof = &tcg_ctx->prof;
    int64_t ti;
#endif
    void *host_pc;

    assert_memory_lock();
    qemu_thread_jit_write();

    phys_pc = get_page_addr_code_hostp(env, pc, &host_pc);

    if (phys_pc == -1) {
        /* Generate a one-shot TB with 1 insn in it */
        cflags = (cflags & ~CF_COUNT_MASK) | CF_LAST_IO | 1;
    }

    max_insns = cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = TCG_MAX_INSNS;
    }
    QEMU_BUILD_BUG_ON(CF_COUNT_MASK + 1 != TCG_MAX_INSNS);

 buffer_overflow:
    tb = tcg_tb_alloc(tcg_ctx);
    if (unlikely(!tb)) {
        /* flush must be done */
        tb_flush(cpu);
        mmap_unlock();
        /* Make the execution loop process the flush as soon as possible. */
        cpu->exception_index = EXCP_INTERRUPT;
        cpu_loop_exit(cpu);
    }

    gen_code_buf = tcg_ctx->code_gen_ptr;
    tb->tc.ptr = tcg_splitwx_to_rx(gen_code_buf);
    tb->pc = pc;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    tb->trace_vcpu_dstate = *cpu->trace_dstate;
    tb->page_addr[0] = phys_pc;
    tb->page_addr[1] = -1;
    tcg_ctx->tb_cflags = cflags;
 tb_overflow:

#ifdef CONFIG_PROFILER
    /* includes aborted translations because of exceptions */
    qatomic_set(&prof->tb_count1, prof->tb_count1 + 1);
    ti = profile_getclock();
#endif

    gen_code_size = sigsetjmp(tcg_ctx->jmp_trans, 0);
    if (unlikely(gen_code_size != 0)) {
        goto error_return;
    }

    tcg_func_start(tcg_ctx);

    tcg_ctx->cpu = env_cpu(env);
    gen_intermediate_code(cpu, tb, max_insns, pc, host_pc);
    assert(tb->size != 0);
    tcg_ctx->cpu = NULL;
    max_insns = tb->icount;

    trace_translate_block(tb, pc, tb->tc.ptr);

    /* generate machine code */
    tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
    tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID;
    tcg_ctx->tb_jmp_reset_offset = tb->jmp_reset_offset;
    if (TCG_TARGET_HAS_direct_jump) {
        tcg_ctx->tb_jmp_insn_offset = tb->jmp_target_arg;
        tcg_ctx->tb_jmp_target_addr = NULL;
    } else {
        tcg_ctx->tb_jmp_insn_offset = NULL;
        tcg_ctx->tb_jmp_target_addr = tb->jmp_target_arg;
    }

#ifdef CONFIG_PROFILER
    qatomic_set(&prof->tb_count, prof->tb_count + 1);
    qatomic_set(&prof->interm_time,
                prof->interm_time + profile_getclock() - ti);
    ti = profile_getclock();
#endif

    gen_code_size = tcg_gen_code(tcg_ctx, tb, pc);
    if (unlikely(gen_code_size < 0)) {
 error_return:
        switch (gen_code_size) {
        case -1:
            /*
             * Overflow of code_gen_buffer, or the current slice of it.
             *
             * TODO: We don't need to re-do gen_intermediate_code, nor
             * should we re-do the tcg optimization currently hidden
             * inside tcg_gen_code. All that should be required is to
             * flush the TBs, allocate a new TB, re-initialize it per
             * above, and re-do the actual code generation.
             */
            qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT,
                          "Restarting code generation for "
                          "code_gen_buffer overflow\n");
            goto buffer_overflow;

        case -2:
            /*
             * The code generated for the TranslationBlock is too large.
             * The maximum size allowed by the unwind info is 64k.
             * There may be stricter constraints from relocations
             * in the tcg backend.
             *
             * Try again with half as many insns as we attempted this time.
             * If a single insn overflows, there's a bug somewhere...
             */
            assert(max_insns > 1);
            max_insns /= 2;
            qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT,
                          "Restarting code generation with "
                          "smaller translation block (max %d insns)\n",
                          max_insns);
            goto tb_overflow;

        default:
            g_assert_not_reached();
        }
    }
    search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
    if (unlikely(search_size < 0)) {
        goto buffer_overflow;
    }
    tb->tc.size = gen_code_size;

#ifdef CONFIG_PROFILER
    qatomic_set(&prof->code_time, prof->code_time + profile_getclock() - ti);
    qatomic_set(&prof->code_in_len, prof->code_in_len + tb->size);
    qatomic_set(&prof->code_out_len, prof->code_out_len + gen_code_size);
    qatomic_set(&prof->search_out_len, prof->search_out_len + search_size);
#endif

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
        qemu_log_in_addr_range(pc)) {
        FILE *logfile = qemu_log_trylock();
        if (logfile) {
            int code_size, data_size;
            const tcg_target_ulong *rx_data_gen_ptr;
            size_t chunk_start;
            int insn = 0;

            if (tcg_ctx->data_gen_ptr) {
                rx_data_gen_ptr = tcg_splitwx_to_rx(tcg_ctx->data_gen_ptr);
                code_size = (const void *)rx_data_gen_ptr - tb->tc.ptr;
                data_size = gen_code_size - code_size;
            } else {
                rx_data_gen_ptr = 0;
                code_size = gen_code_size;
                data_size = 0;
            }

            /* Dump header and the first instruction */
            fprintf(logfile, "OUT: [size=%d]\n", gen_code_size);
            fprintf(logfile,
                    "  -- guest addr 0x" TARGET_FMT_lx " + tb prologue\n",
                    tcg_ctx->gen_insn_data[insn][0]);
            chunk_start = tcg_ctx->gen_insn_end_off[insn];
            disas(logfile, tb->tc.ptr, chunk_start);

            /*
             * Dump each instruction chunk, wrapping up empty chunks into
             * the next instruction. The whole array is offset so the
             * first entry is the beginning of the 2nd instruction.
             */
            while (insn < tb->icount) {
                size_t chunk_end = tcg_ctx->gen_insn_end_off[insn];
                if (chunk_end > chunk_start) {
                    fprintf(logfile, "  -- guest addr 0x" TARGET_FMT_lx "\n",
                            tcg_ctx->gen_insn_data[insn][0]);
                    disas(logfile, tb->tc.ptr + chunk_start,
                          chunk_end - chunk_start);
                    chunk_start = chunk_end;
                }
                insn++;
            }

            if (chunk_start < code_size) {
                fprintf(logfile, "  -- tb slow paths + alignment\n");
                disas(logfile, tb->tc.ptr + chunk_start,
                      code_size - chunk_start);
            }

            /* Finally dump any data we may have after the block */
            if (data_size) {
                int i;
                fprintf(logfile, "  data: [size=%d]\n", data_size);
                for (i = 0; i < data_size / sizeof(tcg_target_ulong); i++) {
                    if (sizeof(tcg_target_ulong) == 8) {
                        fprintf(logfile,
                                "0x%08" PRIxPTR ": .quad 0x%016" TCG_PRIlx "\n",
                                (uintptr_t)&rx_data_gen_ptr[i],
                                rx_data_gen_ptr[i]);
                    } else if (sizeof(tcg_target_ulong) == 4) {
                        fprintf(logfile,
                                "0x%08" PRIxPTR ": .long 0x%08" TCG_PRIlx "\n",
                                (uintptr_t)&rx_data_gen_ptr[i],
                                rx_data_gen_ptr[i]);
                    } else {
                        qemu_build_not_reached();
                    }
                }
            }
            fprintf(logfile, "\n");
            qemu_log_unlock(logfile);
        }
    }
#endif

    qatomic_set(&tcg_ctx->code_gen_ptr, (void *)
        ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
                 CODE_GEN_ALIGN));

    /* init jump list */
    qemu_spin_init(&tb->jmp_lock);
    tb->jmp_list_head = (uintptr_t)NULL;
    tb->jmp_list_next[0] = (uintptr_t)NULL;
    tb->jmp_list_next[1] = (uintptr_t)NULL;
    tb->jmp_dest[0] = (uintptr_t)NULL;
    tb->jmp_dest[1] = (uintptr_t)NULL;

    /* init original jump addresses which have been set during tcg_gen_code() */
    if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
        tb_reset_jump(tb, 0);
    }
    if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
        tb_reset_jump(tb, 1);
    }

    /*
     * If the TB is not associated with a physical RAM page then it must be
     * a temporary one-insn TB, and we have nothing left to do. Return early
     * before attempting to link to other TBs or add to the lookup table.
     */
    if (tb->page_addr[0] == -1) {
        return tb;
    }

    /*
     * Insert TB into the corresponding region tree before publishing it
     * through QHT. Otherwise a rewind that happens inside the TB might
     * fail to look itself up using the host PC.
     */
    tcg_tb_insert(tb);

    /*
     * No explicit memory barrier is required -- tb_link_page() makes the
     * TB visible in a consistent state.
     */
    existing_tb = tb_link_page(tb, tb->page_addr[0], tb->page_addr[1]);
    /* if the TB already exists, discard what we just translated */
    if (unlikely(existing_tb != tb)) {
        uintptr_t orig_aligned = (uintptr_t)gen_code_buf;

        orig_aligned -= ROUND_UP(sizeof(*tb), qemu_icache_linesize);
        qatomic_set(&tcg_ctx->code_gen_ptr, (void *)orig_aligned);
        tcg_tb_remove(tb);
        return existing_tb;
    }
    return tb;
}

/*
 * @p must be non-NULL.
 * user-mode: call with mmap_lock held.
 * !user-mode: call with all @pages locked.
 */
static void
tb_invalidate_phys_page_range__locked(struct page_collection *pages,
                                      PageDesc *p, tb_page_addr_t start,
                                      tb_page_addr_t end,
                                      uintptr_t retaddr)
{
    TranslationBlock *tb;
    tb_page_addr_t tb_start, tb_end;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *cpu = current_cpu;
    CPUArchState *env = NULL;
    bool current_tb_not_found = retaddr != 0;
    bool current_tb_modified = false;
    TranslationBlock *current_tb = NULL;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    uint32_t current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    assert_page_locked(p);

#if defined(TARGET_HAS_PRECISE_SMC)
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif

    /*
     * We remove all the TBs in the range [start, end[.
     * XXX: see if in some cases it could be faster to invalidate all the code.
     */
    PAGE_FOR_EACH_TB(p, tb, n) {
        assert_page_locked(p);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /*
             * NOTE: tb_end may be after the end of the page, but
             * it is not a problem.
             */
            tb_start = tb->page_addr[0];
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->page_addr[0] + tb->size)
                                 & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = false;
                /* now we have a real cpu fault */
                current_tb = tcg_tb_lookup(retaddr);
            }
            if (current_tb == tb &&
                (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
                /*
                 * If we are modifying the current TB, we must stop
                 * its execution. We could be more precise by checking
                 * that the modification is after the current PC, but it
                 * would require a specialized function to partially
                 * restore the CPU state.
                 */
                current_tb_modified = true;
                cpu_restore_state_from_tb(cpu, current_tb, retaddr, true);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            tb_phys_invalidate__locked(tb);
        }
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        tlb_unprotect_code(start);
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        page_collection_unlock(pages);
        /* Force execution of one insn next time. */
        cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(cpu);
        mmap_unlock();
        cpu_loop_exit_noexc(cpu);
    }
#endif
}

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end must refer to the *same* physical page.
 * If called from a CPU write access, the virtual CPU will exit the current
 * TB if code is modified inside this TB.
 *
 * Called with mmap_lock held for user-mode emulation
 */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end)
{
    struct page_collection *pages;
    PageDesc *p;

    assert_memory_lock();

    p = page_find(start >> TARGET_PAGE_BITS);
    if (p == NULL) {
        return;
    }
    pages = page_collection_lock(start, end);
    tb_invalidate_phys_page_range__locked(pages, p, start, end, 0);
    page_collection_unlock(pages);
}

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end may refer to *different* physical pages.
 * If called from a CPU write access, the virtual CPU will exit the current
 * TB if code is modified inside this TB.
 *
 * Called with mmap_lock held for user-mode emulation.
 */
#ifdef CONFIG_SOFTMMU
void tb_invalidate_phys_range(ram_addr_t start, ram_addr_t end)
#else
void tb_invalidate_phys_range(target_ulong start, target_ulong end)
#endif
{
    struct page_collection *pages;
    tb_page_addr_t next;

    assert_memory_lock();

    pages = page_collection_lock(start, end);
    for (next = (start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
         start < end;
         start = next, next += TARGET_PAGE_SIZE) {
        PageDesc *pd = page_find(start >> TARGET_PAGE_BITS);
        tb_page_addr_t bound = MIN(next, end);

        if (pd == NULL) {
            continue;
        }
        tb_invalidate_phys_page_range__locked(pages, pd, start, bound, 0);
    }
    page_collection_unlock(pages);
}

#ifdef CONFIG_SOFTMMU
/*
 * len must be <= 8 and start must be a multiple of len.
 * Called via softmmu_template.h when code areas are written to with
 * iothread mutex not held.
 *
 * Call with all @pages in the range [@start, @start + len[ locked.
 */
void tb_invalidate_phys_page_fast(struct page_collection *pages,
                                  tb_page_addr_t start, int len,
                                  uintptr_t retaddr)
{
    PageDesc *p;

    assert_memory_lock();

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }

    assert_page_locked(p);
    tb_invalidate_phys_page_range__locked(pages, p, start, start + len,
                                          retaddr);
}
#else
/*
 * Called with mmap_lock held. If pc is not 0 then it indicates the
 * host PC of the faulting store instruction that caused this invalidate.
 * Returns true if the caller needs to abort execution of the current
 * TB (because it was modified by this store and the guest CPU has
 * precise-SMC semantics).
 */
static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *cpu = current_cpu;
    CPUArchState *env = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    uint32_t current_flags = 0;
#endif

    assert_memory_lock();

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        return false;
    }

#ifdef TARGET_HAS_PRECISE_SMC
    if (p->first_tb && pc != 0) {
        current_tb = tcg_tb_lookup(pc);
    }
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif
    assert_page_locked(p);
    PAGE_FOR_EACH_TB(p, tb, n) {
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
            /*
             * If we are modifying the current TB, we must stop its
             * execution. We could be more precise by checking that
             * the modification is after the current PC, but it would
             * require a specialized function to partially restore
             * the CPU state.
             */
            current_tb_modified = 1;
            cpu_restore_state_from_tb(cpu, current_tb, pc, true);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
    }
    p->first_tb = (uintptr_t)NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* Force execution of one insn next time. */
        cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(cpu);
        return true;
    }
#endif

    return false;
}
#endif

/* user-mode: call with mmap_lock held */
void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr)
{
    TranslationBlock *tb;

    assert_memory_lock();

    tb = tcg_tb_lookup(retaddr);
    if (tb) {
        /* We can use retranslation to find the PC. */
        cpu_restore_state_from_tb(cpu, tb, retaddr, true);
        tb_phys_invalidate(tb, -1);
    } else {
        /*
         * The exception probably happened in a helper. The CPU state should
         * have been saved before calling it. Fetch the PC from there.
         */
        CPUArchState *env = cpu->env_ptr;
        target_ulong pc, cs_base;
        tb_page_addr_t addr;
        uint32_t flags;

        cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
        addr = get_page_addr_code(env, pc);
        if (addr != -1) {
            tb_invalidate_phys_range(addr, addr + 1);
        }
    }
}

#ifndef CONFIG_USER_ONLY
/*
 * In deterministic execution mode, instructions doing device I/Os
 * must be at the end of the TB.
 *
 * Called by softmmu_template.h, with iothread mutex not held.
 */
void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
{
    TranslationBlock *tb;
    CPUClass *cc;
    uint32_t n;

    tb = tcg_tb_lookup(retaddr);
    if (!tb) {
        cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    cpu_restore_state_from_tb(cpu, tb, retaddr, true);

    /*
     * Some guests must re-execute the branch when re-executing a delay
     * slot instruction. When this is the case, adjust icount and N
     * to account for the re-execution of the branch.
     */
    n = 1;
    cc = CPU_GET_CLASS(cpu);
    if (cc->tcg_ops->io_recompile_replay_branch &&
        cc->tcg_ops->io_recompile_replay_branch(cpu, tb)) {
        cpu_neg(cpu)->icount_decr.u16.low++;
        n = 2;
    }

    /*
     * Exit the loop and potentially generate a new TB executing just
     * the I/O insns. We also limit instrumentation to memory operations
     * only (which execute after completion) so we don't double-instrument
     * the instruction.
     */
    cpu->cflags_next_tb = curr_cflags(cpu) | CF_MEMI_ONLY | CF_LAST_IO | n;

    if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
        target_ulong pc = log_pc(cpu, tb);
        if (qemu_log_in_addr_range(pc)) {
            qemu_log("cpu_io_recompile: rewound execution of TB to "
                     TARGET_FMT_lx "\n", pc);
        }
    }

    cpu_loop_exit_noexc(cpu);
}

static void print_qht_statistics(struct qht_stats hst, GString *buf)
{
    uint32_t hgram_opts;
    size_t hgram_bins;
    char *hgram;

    if (!hst.head_buckets) {
        return;
    }
    g_string_append_printf(buf, "TB hash buckets %zu/%zu "
                           "(%0.2f%% head buckets used)\n",
                           hst.used_head_buckets, hst.head_buckets,
                           (double)hst.used_head_buckets /
                           hst.head_buckets * 100);

    hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
    hgram_opts |= QDIST_PR_100X | QDIST_PR_PERCENT;
    if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) {
        hgram_opts |= QDIST_PR_NODECIMAL;
    }
    hgram = qdist_pr(&hst.occupancy, 10, hgram_opts);
    g_string_append_printf(buf, "TB hash occupancy %0.2f%% avg chain occ. "
                           "Histogram: %s\n",
                           qdist_avg(&hst.occupancy) * 100, hgram);
    g_free(hgram);

    hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
    hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain);
    if (hgram_bins > 10) {
        hgram_bins = 10;
    } else {
        hgram_bins = 0;
        hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE;
    }
    hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts);
    g_string_append_printf(buf, "TB hash avg chain %0.3f buckets. "
                           "Histogram: %s\n",
                           qdist_avg(&hst.chain), hgram);
    g_free(hgram);
}

struct tb_tree_stats {
    size_t nb_tbs;
    size_t host_size;
    size_t target_size;
    size_t max_target_size;
    size_t direct_jmp_count;
    size_t direct_jmp2_count;
    size_t cross_page;
};

static gboolean tb_tree_stats_iter(gpointer key, gpointer value, gpointer data)
{
    const TranslationBlock *tb = value;
    struct tb_tree_stats *tst = data;

    tst->nb_tbs++;
    tst->host_size += tb->tc.size;
    tst->target_size += tb->size;
    if (tb->size > tst->max_target_size) {
        tst->max_target_size = tb->size;
    }
    if (tb->page_addr[1] != -1) {
        tst->cross_page++;
    }
    if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
        tst->direct_jmp_count++;
        if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
            tst->direct_jmp2_count++;
        }
    }
    return false;
}

void dump_exec_info(GString *buf)
{
    struct tb_tree_stats tst = {};
    struct qht_stats hst;
    size_t nb_tbs, flush_full, flush_part, flush_elide;

    tcg_tb_foreach(tb_tree_stats_iter, &tst);
    nb_tbs = tst.nb_tbs;
    /* XXX: avoid using doubles ? */
void dump_exec_info(GString *buf)
{
    struct tb_tree_stats tst = {};
    struct qht_stats hst;
    size_t nb_tbs, flush_full, flush_part, flush_elide;

    tcg_tb_foreach(tb_tree_stats_iter, &tst);
    nb_tbs = tst.nb_tbs;
    /* XXX: avoid using doubles ? */
    g_string_append_printf(buf, "Translation buffer state:\n");
    /*
     * Report total code size including the padding and TB structs;
     * otherwise users might think "-accel tcg,tb-size" is not honoured.
     * For avg host size we use the precise numbers from tb_tree_stats though.
     */
    g_string_append_printf(buf, "gen code size %zu/%zu\n",
                           tcg_code_size(), tcg_code_capacity());
    g_string_append_printf(buf, "TB count %zu\n", nb_tbs);
    g_string_append_printf(buf, "TB avg target size %zu max=%zu bytes\n",
                           nb_tbs ? tst.target_size / nb_tbs : 0,
                           tst.max_target_size);
    g_string_append_printf(buf, "TB avg host size %zu bytes "
                           "(expansion ratio: %0.1f)\n",
                           nb_tbs ? tst.host_size / nb_tbs : 0,
                           tst.target_size ?
                           (double)tst.host_size / tst.target_size : 0);
    g_string_append_printf(buf, "cross page TB count %zu (%zu%%)\n",
                           tst.cross_page,
                           nb_tbs ? (tst.cross_page * 100) / nb_tbs : 0);
    g_string_append_printf(buf, "direct jump count %zu (%zu%%) "
                           "(2 jumps=%zu %zu%%)\n",
                           tst.direct_jmp_count,
                           nb_tbs ? (tst.direct_jmp_count * 100) / nb_tbs : 0,
                           tst.direct_jmp2_count,
                           nb_tbs ? (tst.direct_jmp2_count * 100) / nb_tbs : 0);

    qht_statistics_init(&tb_ctx.htable, &hst);
    print_qht_statistics(hst, buf);
    qht_statistics_destroy(&hst);

    g_string_append_printf(buf, "\nStatistics:\n");
    g_string_append_printf(buf, "TB flush count %u\n",
                           qatomic_read(&tb_ctx.tb_flush_count));
    g_string_append_printf(buf, "TB invalidate count %u\n",
                           qatomic_read(&tb_ctx.tb_phys_invalidate_count));

    tlb_flush_counts(&flush_full, &flush_part, &flush_elide);
    g_string_append_printf(buf, "TLB full flushes %zu\n", flush_full);
    g_string_append_printf(buf, "TLB partial flushes %zu\n", flush_part);
    g_string_append_printf(buf, "TLB elided flushes %zu\n", flush_elide);
    tcg_dump_info(buf);
}
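/*
 * dump_exec_info() only fills in a caller-provided GString; printing
 * is up to the consumer (the HMP "info jit" command, for instance).
 * A minimal usage sketch, with a hypothetical wrapper name:
 */
#if 0 /* illustrative sketch only, not compiled */
static void example_print_jit_stats(void)
{
    GString *buf = g_string_new("");

    dump_exec_info(buf);
    qemu_printf("%s", buf->str);
    g_string_free(buf, true);
}
#endif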
#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *cpu, int mask)
{
    g_assert(qemu_mutex_iothread_locked());
    cpu->interrupt_request |= mask;
    qatomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1);
}

/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */
struct walk_memory_regions_data {
    walk_memory_regions_fn fn;
    void *priv;
    target_ulong start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   target_ulong end, int new_prot)
{
    if (data->start != -1u) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1u);
    data->prot = new_prot;

    return 0;
}

static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 target_ulong base, int level, void **lp)
{
    target_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pa = base | ((target_ulong)i <<
                         (TARGET_PAGE_BITS + V_L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    uintptr_t i, l1_sz = v_l1_size;

    data.fn = fn;
    data.priv = priv;
    data.start = -1u;
    data.prot = 0;

    for (i = 0; i < l1_sz; i++) {
        target_ulong base = i << (v_l1_shift + TARGET_PAGE_BITS);
        int rc = walk_memory_regions_1(&data, base, v_l2_levels, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}
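/*
 * dump_region()/page_dump() below are the in-tree users of
 * walk_memory_regions(). A sketch of another hypothetical callback,
 * summing the bytes of all regions that carry a given set of PAGE_*
 * bits (the "prot_total"/"sum_prot_regions" names are made up):
 */
#if 0 /* illustrative sketch only, not compiled */
struct prot_total {
    int prot;          /* PAGE_* bits that must all be present */
    target_ulong sum;  /* accumulated region sizes, in bytes */
};

static int sum_prot_regions(void *priv, target_ulong start,
                            target_ulong end, unsigned long prot)
{
    struct prot_total *pt = priv;

    if ((prot & pt->prot) == pt->prot) {
        pt->sum += end - start;
    }
    return 0; /* a non-zero return would abort the walk */
}

/*
 * Usage:
 *   struct prot_total pt = { .prot = PAGE_EXEC };
 *   walk_memory_regions(&pt, sum_prot_regions);
 */
#endif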
static int dump_region(void *priv, target_ulong start,
                       target_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx
                   " "TARGET_FMT_lx" %c%c%c\n",
                   start, end, end - start,
                   ((prot & PAGE_READ) ? 'r' : '-'),
                   ((prot & PAGE_WRITE) ? 'w' : '-'),
                   ((prot & PAGE_EXEC) ? 'x' : '-'));

    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    const int length = sizeof(target_ulong) * 2;

    (void) fprintf(f, "%-*s %-*s %-*s %s\n",
                   length, "start", length, "end", length, "size", "prot");
    walk_memory_regions(f, dump_region);
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        return 0;
    }
    return p->flags;
}

/*
 * Allow the target to decide if PAGE_TARGET_[12] may be reset.
 * By default, they are not kept.
 */
#ifndef PAGE_TARGET_STICKY
#define PAGE_TARGET_STICKY 0
#endif
#define PAGE_STICKY (PAGE_ANON | PAGE_PASSTHROUGH | PAGE_TARGET_STICKY)

/*
 * Modify the flags of a page and invalidate the code if necessary.
 * The flag PAGE_WRITE_ORG is positioned automatically depending
 * on PAGE_WRITE. The mmap_lock should already be held.
 */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;
    bool reset_target_data;

    /*
     * This function should never be called with addresses outside the
     * guest address space. If this assert fires, it probably indicates
     * a missing call to h2g_valid.
     */
    assert(end - 1 <= GUEST_ADDR_MAX);
    assert(start < end);
    /* Only set PAGE_ANON with new mappings. */
    assert(!(flags & PAGE_ANON) || (flags & PAGE_RESET));
    assert_memory_lock();

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }
    reset_target_data = !(flags & PAGE_VALID) || (flags & PAGE_RESET);
    flags &= ~PAGE_RESET;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, true);

        /*
         * If the write protection bit is set, then we invalidate
         * the code inside.
         */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0);
        }
        if (reset_target_data) {
            g_free(p->target_data);
            p->target_data = NULL;
            p->flags = flags;
        } else {
            /* Using mprotect on a page does not change sticky bits. */
            p->flags = (p->flags & PAGE_STICKY) | flags;
        }
    }
}

void page_reset_target_data(target_ulong start, target_ulong end)
{
    target_ulong addr, len;

    /*
     * This function should never be called with addresses outside the
     * guest address space. If this assert fires, it probably indicates
     * a missing call to h2g_valid.
     */
    assert(end - 1 <= GUEST_ADDR_MAX);
    assert(start < end);
    assert_memory_lock();

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, true);

        g_free(p->target_data);
        p->target_data = NULL;
    }
}

void *page_get_target_data(target_ulong address)
{
    PageDesc *p = page_find(address >> TARGET_PAGE_BITS);
    return p ? p->target_data : NULL;
}

void *page_alloc_target_data(target_ulong address, size_t size)
{
    PageDesc *p = page_find(address >> TARGET_PAGE_BITS);
    void *ret = NULL;

    if (p->flags & PAGE_VALID) {
        ret = p->target_data;
        if (!ret) {
            p->target_data = ret = g_malloc0(size);
        }
    }
    return ret;
}
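/*
 * page_get_target_data()/page_alloc_target_data() let a target attach
 * lazily allocated per-page side data (Arm MTE uses this for
 * allocation tags). A sketch of the intended lookup-or-allocate
 * pattern, with a hypothetical "PageSideData" type:
 */
#if 0 /* illustrative sketch only, not compiled */
typedef struct PageSideData {
    uint8_t bytes[64];
} PageSideData;

static PageSideData *example_get_side_data(target_ulong addr)
{
    PageSideData *d = page_get_target_data(addr);

    if (d == NULL) {
        /*
         * First touch: allocate zeroed data for this page. Still
         * returns NULL if the page itself is not PAGE_VALID.
         */
        d = page_alloc_target_data(addr, sizeof(PageSideData));
    }
    return d;
}
#endif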
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /*
     * This function should never be called with addresses outside the
     * guest address space. If this assert fires, it probably indicates
     * a missing call to h2g_valid.
     */
    if (TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS) {
        assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
    }

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around. */
        return -1;
    }

    /* must do before we lose bits in the next step */
    end = TARGET_PAGE_ALIGN(start + len);
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p) {
            return -1;
        }
        if (!(p->flags & PAGE_VALID)) {
            return -1;
        }

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
            return -1;
        }
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                return -1;
            }
            /*
             * Unprotect the page if it was put read-only because it
             * contains translated code.
             */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0)) {
                    return -1;
                }
            }
        }
    }
    return 0;
}
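/*
 * page_check_range() is how user-mode code validates a guest buffer
 * before touching it directly; linux-user's access_ok() machinery is
 * built on it. A sketch of such a check, with a hypothetical helper
 * name:
 */
#if 0 /* illustrative sketch only, not compiled */
static bool example_guest_buffer_ok(target_ulong addr, target_ulong len)
{
    /* Require the whole range to be both readable and writable. */
    return page_check_range(addr, len, PAGE_READ | PAGE_WRITE) == 0;
}
#endif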
void page_protect(tb_page_addr_t page_addr)
{
    target_ulong addr;
    PageDesc *p;
    int prot;

    p = page_find(page_addr >> TARGET_PAGE_BITS);
    if (p && (p->flags & PAGE_WRITE)) {
        /*
         * Force the host page as non-writable (writes will trigger a page
         * fault + mprotect overhead).
         */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
             addr += TARGET_PAGE_SIZE) {

            p = page_find(addr >> TARGET_PAGE_BITS);
            if (!p) {
                continue;
            }
            prot |= p->flags;
            p->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h_untagged(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
        if (DEBUG_TB_INVALIDATE_GATE) {
            printf("protecting code page: 0x" TB_PAGE_ADDR_FMT "\n", page_addr);
        }
    }
}

/*
 * Called from the signal handler: invalidate the code and unprotect the
 * page. Return 0 if the fault was not handled, 1 if it was handled, and
 * 2 if it was handled but the caller must cause the TB to be immediately
 * exited. (We can only return 2 if the 'pc' argument is non-zero.)
 */
int page_unprotect(target_ulong address, uintptr_t pc)
{
    unsigned int prot;
    bool current_tb_invalidated;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /*
     * Technically this isn't safe inside a signal handler. However we
     * know this only ever happens in a synchronous SEGV handler, so in
     * practice it seems to be ok.
     */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /*
     * If the page was really writable, then we change its
     * protection back to writable.
     */
    if (p->flags & PAGE_WRITE_ORG) {
        current_tb_invalidated = false;
        if (p->flags & PAGE_WRITE) {
            /*
             * If the page is actually marked WRITE then assume this is because
             * this thread raced with another one which got here first and
             * set the page to PAGE_WRITE and did the TB invalidate for us.
             */
#ifdef TARGET_HAS_PRECISE_SMC
            TranslationBlock *current_tb = tcg_tb_lookup(pc);
            if (current_tb) {
                current_tb_invalidated = tb_cflags(current_tb) & CF_INVALID;
            }
#endif
        } else {
            host_start = address & qemu_host_page_mask;
            host_end = host_start + qemu_host_page_size;

            prot = 0;
            for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
                p = page_find(addr >> TARGET_PAGE_BITS);
                p->flags |= PAGE_WRITE;
                prot |= p->flags;

                /*
                 * Since the content will be modified, we must invalidate
                 * the corresponding translated code.
                 */
                current_tb_invalidated |= tb_invalidate_phys_page(addr, pc);
#ifdef CONFIG_USER_ONLY
                if (DEBUG_TB_CHECK_GATE) {
                    tb_invalidate_check(addr);
                }
#endif
            }
            mprotect((void *)g2h_untagged(host_start), qemu_host_page_size,
                     prot & PAGE_BITS);
        }
        mmap_unlock();
        /* If current TB was invalidated return to main loop */
        return current_tb_invalidated ? 2 : 1;
    }
    mmap_unlock();
    return 0;
}
#endif /* CONFIG_USER_ONLY */

/*
 * Called by generic code at e.g. cpu reset after cpu creation,
 * therefore we must be prepared to allocate the jump cache.
 */
void tcg_flush_jmp_cache(CPUState *cpu)
{
    CPUJumpCache *jc = cpu->tb_jmp_cache;

    if (likely(jc)) {
        for (int i = 0; i < TB_JMP_CACHE_SIZE; i++) {
            qatomic_set(&jc->array[i].tb, NULL);
        }
    } else {
        /* This should happen once during realize, and thus never race. */
        jc = g_new0(CPUJumpCache, 1);
        jc = qatomic_xchg(&cpu->tb_jmp_cache, jc);
        assert(jc == NULL);
    }
}

/* This is a wrapper for common code that cannot use CONFIG_SOFTMMU */
void tcg_flush_softmmu_tlb(CPUState *cs)
{
#ifdef CONFIG_SOFTMMU
    tlb_flush(cs);
#endif
}