1 /* 2 * Host code generation 3 * 4 * Copyright (c) 2003 Fabrice Bellard 5 * 6 * This library is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU Lesser General Public 8 * License as published by the Free Software Foundation; either 9 * version 2 of the License, or (at your option) any later version. 10 * 11 * This library is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 * Lesser General Public License for more details. 15 * 16 * You should have received a copy of the GNU Lesser General Public 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 18 */ 19 #ifdef _WIN32 20 #include <windows.h> 21 #endif 22 #include "qemu/osdep.h" 23 24 25 #include "qemu-common.h" 26 #define NO_CPU_IO_DEFS 27 #include "cpu.h" 28 #include "trace.h" 29 #include "disas/disas.h" 30 #include "exec/exec-all.h" 31 #include "tcg.h" 32 #if defined(CONFIG_USER_ONLY) 33 #include "qemu.h" 34 #include "exec/exec-all.h" 35 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) 36 #include <sys/param.h> 37 #if __FreeBSD_version >= 700104 38 #define HAVE_KINFO_GETVMMAP 39 #define sigqueue sigqueue_freebsd /* avoid redefinition */ 40 #include <sys/proc.h> 41 #include <machine/profile.h> 42 #define _KERNEL 43 #include <sys/user.h> 44 #undef _KERNEL 45 #undef sigqueue 46 #include <libutil.h> 47 #endif 48 #endif 49 #else 50 #include "exec/address-spaces.h" 51 #endif 52 53 #include "exec/cputlb.h" 54 #include "exec/tb-hash.h" 55 #include "translate-all.h" 56 #include "qemu/bitmap.h" 57 #include "qemu/error-report.h" 58 #include "qemu/timer.h" 59 #include "qemu/main-loop.h" 60 #include "exec/log.h" 61 #include "sysemu/cpus.h" 62 63 /* #define DEBUG_TB_INVALIDATE */ 64 /* #define DEBUG_TB_FLUSH */ 65 /* make various TB consistency checks */ 66 /* #define DEBUG_TB_CHECK */ 67 68 #if !defined(CONFIG_USER_ONLY) 69 /* TB consistency checks only implemented for usermode emulation. */ 70 #undef DEBUG_TB_CHECK 71 #endif 72 73 /* Access to the various translations structures need to be serialised via locks 74 * for consistency. This is automatic for SoftMMU based system 75 * emulation due to its single threaded nature. In user-mode emulation 76 * access to the memory related structures are protected with the 77 * mmap_lock. 78 */ 79 #ifdef CONFIG_SOFTMMU 80 #define assert_memory_lock() tcg_debug_assert(have_tb_lock) 81 #else 82 #define assert_memory_lock() tcg_debug_assert(have_mmap_lock()) 83 #endif 84 85 #define SMC_BITMAP_USE_THRESHOLD 10 86 87 typedef struct PageDesc { 88 /* list of TBs intersecting this ram page */ 89 TranslationBlock *first_tb; 90 #ifdef CONFIG_SOFTMMU 91 /* in order to optimize self modifying code, we count the number 92 of lookups we do to a given page to use a bitmap */ 93 unsigned int code_write_count; 94 unsigned long *code_bitmap; 95 #else 96 unsigned long flags; 97 #endif 98 } PageDesc; 99 100 /* In system mode we want L1_MAP to be based on ram offsets, 101 while in user mode we want it to be based on virtual addresses. */ 102 #if !defined(CONFIG_USER_ONLY) 103 #if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS 104 # define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS 105 #else 106 # define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS 107 #endif 108 #else 109 # define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS 110 #endif 111 112 /* Size of the L2 (and L3, etc) page tables. 
 */
#define V_L2_BITS 10
#define V_L2_SIZE (1 << V_L2_BITS)

/* Make sure all possible CPU event bits fit in tb->trace_vcpu_dstate */
QEMU_BUILD_BUG_ON(CPU_TRACE_DSTATE_MAX_EVENTS >
                  sizeof(((TranslationBlock *)0)->trace_vcpu_dstate)
                  * BITS_PER_BYTE);

/*
 * L1 Mapping properties
 */
static int v_l1_size;
static int v_l1_shift;
static int v_l2_levels;

/* The bottom level has pointers to PageDesc, and is indexed by
 * anything from 4 to (V_L2_BITS + 3) bits, depending on target page size.
 */
#define V_L1_MIN_BITS 4
#define V_L1_MAX_BITS (V_L2_BITS + 3)
#define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS)

static void *l1_map[V_L1_MAX_SIZE];

/* code generation context */
TCGContext tcg_ctx;
bool parallel_cpus;

/* translation block context */
static __thread int have_tb_lock;

static void page_table_config_init(void)
{
    uint32_t v_l1_bits;

    assert(TARGET_PAGE_BITS);
    /* The bits remaining after N lower levels of page tables. */
    v_l1_bits = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS;
    if (v_l1_bits < V_L1_MIN_BITS) {
        v_l1_bits += V_L2_BITS;
    }

    v_l1_size = 1 << v_l1_bits;
    v_l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits;
    v_l2_levels = v_l1_shift / V_L2_BITS - 1;

    assert(v_l1_bits <= V_L1_MAX_BITS);
    assert(v_l1_shift % V_L2_BITS == 0);
    assert(v_l2_levels >= 0);
}

#define assert_tb_locked() tcg_debug_assert(have_tb_lock)
#define assert_tb_unlocked() tcg_debug_assert(!have_tb_lock)

void tb_lock(void)
{
    assert_tb_unlocked();
    qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
    have_tb_lock++;
}

void tb_unlock(void)
{
    assert_tb_locked();
    have_tb_lock--;
    qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
}

void tb_lock_reset(void)
{
    if (have_tb_lock) {
        qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
        have_tb_lock = 0;
    }
}

static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);

void cpu_gen_init(void)
{
    tcg_context_init(&tcg_ctx);
}

/* Encode VAL as a signed leb128 sequence at P.
   Return P incremented past the encoded value.  */
static uint8_t *encode_sleb128(uint8_t *p, target_long val)
{
    int more, byte;

    do {
        byte = val & 0x7f;
        val >>= 7;
        more = !((val == 0 && (byte & 0x40) == 0)
                 || (val == -1 && (byte & 0x40) != 0));
        if (more) {
            byte |= 0x80;
        }
        *p++ = byte;
    } while (more);

    return p;
}

/* Decode a signed leb128 sequence at *PP; increment *PP past the
   decoded value.  Return the decoded value.  */
static target_long decode_sleb128(uint8_t **pp)
{
    uint8_t *p = *pp;
    target_long val = 0;
    int byte, shift = 0;

    do {
        byte = *p++;
        val |= (target_ulong)(byte & 0x7f) << shift;
        shift += 7;
    } while (byte & 0x80);
    if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
        val |= -(target_ulong)1 << shift;
    }

    *pp = p;
    return val;
}

/* Encode the data collected about the instructions while compiling TB.
   Place the data at BLOCK, and return the number of bytes consumed.

   The logical table consists of TARGET_INSN_START_WORDS target_ulong's,
   which come from the target's insn_start data, followed by a uintptr_t
   which comes from the host pc of the end of the code implementing the insn.
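
   For illustration (an assumed example, not data from a real trace): with
   TARGET_INSN_START_WORDS == 1 and a TB of three guest instructions at
   0x1000, 0x1004 and 0x1008, whose host code ends 0x20, 0x48 and 0x60
   bytes into the generated code, the logical table would be

       { 0x1000, tc_ptr + 0x20 }
       { 0x1004, tc_ptr + 0x48 }
       { 0x1008, tc_ptr + 0x60 }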
243 244 Each line of the table is encoded as sleb128 deltas from the previous 245 line. The seed for the first line is { tb->pc, 0..., tb->tc_ptr }. 246 That is, the first column is seeded with the guest pc, the last column 247 with the host pc, and the middle columns with zeros. */ 248 249 static int encode_search(TranslationBlock *tb, uint8_t *block) 250 { 251 uint8_t *highwater = tcg_ctx.code_gen_highwater; 252 uint8_t *p = block; 253 int i, j, n; 254 255 tb->tc_search = block; 256 257 for (i = 0, n = tb->icount; i < n; ++i) { 258 target_ulong prev; 259 260 for (j = 0; j < TARGET_INSN_START_WORDS; ++j) { 261 if (i == 0) { 262 prev = (j == 0 ? tb->pc : 0); 263 } else { 264 prev = tcg_ctx.gen_insn_data[i - 1][j]; 265 } 266 p = encode_sleb128(p, tcg_ctx.gen_insn_data[i][j] - prev); 267 } 268 prev = (i == 0 ? 0 : tcg_ctx.gen_insn_end_off[i - 1]); 269 p = encode_sleb128(p, tcg_ctx.gen_insn_end_off[i] - prev); 270 271 /* Test for (pending) buffer overflow. The assumption is that any 272 one row beginning below the high water mark cannot overrun 273 the buffer completely. Thus we can test for overflow after 274 encoding a row without having to check during encoding. */ 275 if (unlikely(p > highwater)) { 276 return -1; 277 } 278 } 279 280 return p - block; 281 } 282 283 /* The cpu state corresponding to 'searched_pc' is restored. 284 * Called with tb_lock held. 285 */ 286 static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb, 287 uintptr_t searched_pc) 288 { 289 target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc }; 290 uintptr_t host_pc = (uintptr_t)tb->tc_ptr; 291 CPUArchState *env = cpu->env_ptr; 292 uint8_t *p = tb->tc_search; 293 int i, j, num_insns = tb->icount; 294 #ifdef CONFIG_PROFILER 295 int64_t ti = profile_getclock(); 296 #endif 297 298 searched_pc -= GETPC_ADJ; 299 300 if (searched_pc < host_pc) { 301 return -1; 302 } 303 304 /* Reconstruct the stored insn data while looking for the point at 305 which the end of the insn exceeds the searched_pc. */ 306 for (i = 0; i < num_insns; ++i) { 307 for (j = 0; j < TARGET_INSN_START_WORDS; ++j) { 308 data[j] += decode_sleb128(&p); 309 } 310 host_pc += decode_sleb128(&p); 311 if (host_pc > searched_pc) { 312 goto found; 313 } 314 } 315 return -1; 316 317 found: 318 if (tb->cflags & CF_USE_ICOUNT) { 319 assert(use_icount); 320 /* Reset the cycle counter to the start of the block. */ 321 cpu->icount_decr.u16.low += num_insns; 322 /* Clear the IO flag. */ 323 cpu->can_do_io = 0; 324 } 325 cpu->icount_decr.u16.low -= i; 326 restore_state_to_opc(env, tb, data); 327 328 #ifdef CONFIG_PROFILER 329 tcg_ctx.restore_time += profile_getclock() - ti; 330 tcg_ctx.restore_count++; 331 #endif 332 return 0; 333 } 334 335 bool cpu_restore_state(CPUState *cpu, uintptr_t retaddr) 336 { 337 TranslationBlock *tb; 338 bool r = false; 339 340 /* A retaddr of zero is invalid so we really shouldn't have ended 341 * up here. The target code has likely forgotten to check retaddr 342 * != 0 before attempting to restore state. We return early to 343 * avoid blowing up on a recursive tb_lock(). The target must have 344 * previously survived a failed cpu_restore_state because 345 * tb_find_pc(0) would have failed anyway. It still should be 346 * fixed though. 
347 */ 348 349 if (!retaddr) { 350 return r; 351 } 352 353 tb_lock(); 354 tb = tb_find_pc(retaddr); 355 if (tb) { 356 cpu_restore_state_from_tb(cpu, tb, retaddr); 357 if (tb->cflags & CF_NOCACHE) { 358 /* one-shot translation, invalidate it immediately */ 359 tb_phys_invalidate(tb, -1); 360 tb_free(tb); 361 } 362 r = true; 363 } 364 tb_unlock(); 365 366 return r; 367 } 368 369 static void page_init(void) 370 { 371 page_size_init(); 372 page_table_config_init(); 373 374 #if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY) 375 { 376 #ifdef HAVE_KINFO_GETVMMAP 377 struct kinfo_vmentry *freep; 378 int i, cnt; 379 380 freep = kinfo_getvmmap(getpid(), &cnt); 381 if (freep) { 382 mmap_lock(); 383 for (i = 0; i < cnt; i++) { 384 unsigned long startaddr, endaddr; 385 386 startaddr = freep[i].kve_start; 387 endaddr = freep[i].kve_end; 388 if (h2g_valid(startaddr)) { 389 startaddr = h2g(startaddr) & TARGET_PAGE_MASK; 390 391 if (h2g_valid(endaddr)) { 392 endaddr = h2g(endaddr); 393 page_set_flags(startaddr, endaddr, PAGE_RESERVED); 394 } else { 395 #if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS 396 endaddr = ~0ul; 397 page_set_flags(startaddr, endaddr, PAGE_RESERVED); 398 #endif 399 } 400 } 401 } 402 free(freep); 403 mmap_unlock(); 404 } 405 #else 406 FILE *f; 407 408 last_brk = (unsigned long)sbrk(0); 409 410 f = fopen("/compat/linux/proc/self/maps", "r"); 411 if (f) { 412 mmap_lock(); 413 414 do { 415 unsigned long startaddr, endaddr; 416 int n; 417 418 n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr); 419 420 if (n == 2 && h2g_valid(startaddr)) { 421 startaddr = h2g(startaddr) & TARGET_PAGE_MASK; 422 423 if (h2g_valid(endaddr)) { 424 endaddr = h2g(endaddr); 425 } else { 426 endaddr = ~0ul; 427 } 428 page_set_flags(startaddr, endaddr, PAGE_RESERVED); 429 } 430 } while (!feof(f)); 431 432 fclose(f); 433 mmap_unlock(); 434 } 435 #endif 436 } 437 #endif 438 } 439 440 /* If alloc=1: 441 * Called with tb_lock held for system emulation. 442 * Called with mmap_lock held for user-mode emulation. 443 */ 444 static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc) 445 { 446 PageDesc *pd; 447 void **lp; 448 int i; 449 450 if (alloc) { 451 assert_memory_lock(); 452 } 453 454 /* Level 1. Always allocated. */ 455 lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1)); 456 457 /* Level 2..N-1. */ 458 for (i = v_l2_levels; i > 0; i--) { 459 void **p = atomic_rcu_read(lp); 460 461 if (p == NULL) { 462 if (!alloc) { 463 return NULL; 464 } 465 p = g_new0(void *, V_L2_SIZE); 466 atomic_rcu_set(lp, p); 467 } 468 469 lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1)); 470 } 471 472 pd = atomic_rcu_read(lp); 473 if (pd == NULL) { 474 if (!alloc) { 475 return NULL; 476 } 477 pd = g_new0(PageDesc, V_L2_SIZE); 478 atomic_rcu_set(lp, pd); 479 } 480 481 return pd + (index & (V_L2_SIZE - 1)); 482 } 483 484 static inline PageDesc *page_find(tb_page_addr_t index) 485 { 486 return page_find_alloc(index, 0); 487 } 488 489 #if defined(CONFIG_USER_ONLY) 490 /* Currently it is not recommended to allocate big chunks of data in 491 user mode. It will change when a dedicated libc will be used. */ 492 /* ??? 64-bit hosts ought to have no problem mmaping data outside the 493 region in which the guest needs to run. Revisit this. */ 494 #define USE_STATIC_CODE_GEN_BUFFER 495 #endif 496 497 /* Minimum size of the code gen buffer. This number is randomly chosen, 498 but not so small that we can't have a fair number of TB's live. 
*/ 499 #define MIN_CODE_GEN_BUFFER_SIZE (1024u * 1024) 500 501 /* Maximum size of the code gen buffer we'd like to use. Unless otherwise 502 indicated, this is constrained by the range of direct branches on the 503 host cpu, as used by the TCG implementation of goto_tb. */ 504 #if defined(__x86_64__) 505 # define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024) 506 #elif defined(__sparc__) 507 # define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024) 508 #elif defined(__powerpc64__) 509 # define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024) 510 #elif defined(__powerpc__) 511 # define MAX_CODE_GEN_BUFFER_SIZE (32u * 1024 * 1024) 512 #elif defined(__aarch64__) 513 # define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024) 514 #elif defined(__s390x__) 515 /* We have a +- 4GB range on the branches; leave some slop. */ 516 # define MAX_CODE_GEN_BUFFER_SIZE (3ul * 1024 * 1024 * 1024) 517 #elif defined(__mips__) 518 /* We have a 256MB branch region, but leave room to make sure the 519 main executable is also within that region. */ 520 # define MAX_CODE_GEN_BUFFER_SIZE (128ul * 1024 * 1024) 521 #else 522 # define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1) 523 #endif 524 525 #define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024) 526 527 #define DEFAULT_CODE_GEN_BUFFER_SIZE \ 528 (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \ 529 ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE) 530 531 static inline size_t size_code_gen_buffer(size_t tb_size) 532 { 533 /* Size the buffer. */ 534 if (tb_size == 0) { 535 #ifdef USE_STATIC_CODE_GEN_BUFFER 536 tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE; 537 #else 538 /* ??? Needs adjustments. */ 539 /* ??? If we relax the requirement that CONFIG_USER_ONLY use the 540 static buffer, we could size this on RESERVED_VA, on the text 541 segment size of the executable, or continue to use the default. */ 542 tb_size = (unsigned long)(ram_size / 4); 543 #endif 544 } 545 if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) { 546 tb_size = MIN_CODE_GEN_BUFFER_SIZE; 547 } 548 if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) { 549 tb_size = MAX_CODE_GEN_BUFFER_SIZE; 550 } 551 return tb_size; 552 } 553 554 #ifdef __mips__ 555 /* In order to use J and JAL within the code_gen_buffer, we require 556 that the buffer not cross a 256MB boundary. */ 557 static inline bool cross_256mb(void *addr, size_t size) 558 { 559 return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful; 560 } 561 562 /* We weren't able to allocate a buffer without crossing that boundary, 563 so make do with the larger portion of the buffer that doesn't cross. 564 Returns the new base of the buffer, and adjusts code_gen_buffer_size. 
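   For illustration (assumed, hypothetical addresses): a 32MB buffer at
   0x2fe00000 ends at 0x31e00000 and so crosses the 256MB boundary at
   0x30000000, splitting into a 2MB low part and a 30MB high part; the high
   part is larger, so the function returns 0x30000000 and shrinks
   code_gen_buffer_size to 30MB.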
*/ 565 static inline void *split_cross_256mb(void *buf1, size_t size1) 566 { 567 void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful); 568 size_t size2 = buf1 + size1 - buf2; 569 570 size1 = buf2 - buf1; 571 if (size1 < size2) { 572 size1 = size2; 573 buf1 = buf2; 574 } 575 576 tcg_ctx.code_gen_buffer_size = size1; 577 return buf1; 578 } 579 #endif 580 581 #ifdef USE_STATIC_CODE_GEN_BUFFER 582 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE] 583 __attribute__((aligned(CODE_GEN_ALIGN))); 584 585 # ifdef _WIN32 586 static inline void do_protect(void *addr, long size, int prot) 587 { 588 DWORD old_protect; 589 VirtualProtect(addr, size, prot, &old_protect); 590 } 591 592 static inline void map_exec(void *addr, long size) 593 { 594 do_protect(addr, size, PAGE_EXECUTE_READWRITE); 595 } 596 597 static inline void map_none(void *addr, long size) 598 { 599 do_protect(addr, size, PAGE_NOACCESS); 600 } 601 # else 602 static inline void do_protect(void *addr, long size, int prot) 603 { 604 uintptr_t start, end; 605 606 start = (uintptr_t)addr; 607 start &= qemu_real_host_page_mask; 608 609 end = (uintptr_t)addr + size; 610 end = ROUND_UP(end, qemu_real_host_page_size); 611 612 mprotect((void *)start, end - start, prot); 613 } 614 615 static inline void map_exec(void *addr, long size) 616 { 617 do_protect(addr, size, PROT_READ | PROT_WRITE | PROT_EXEC); 618 } 619 620 static inline void map_none(void *addr, long size) 621 { 622 do_protect(addr, size, PROT_NONE); 623 } 624 # endif /* WIN32 */ 625 626 static inline void *alloc_code_gen_buffer(void) 627 { 628 void *buf = static_code_gen_buffer; 629 size_t full_size, size; 630 631 /* The size of the buffer, rounded down to end on a page boundary. */ 632 full_size = (((uintptr_t)buf + sizeof(static_code_gen_buffer)) 633 & qemu_real_host_page_mask) - (uintptr_t)buf; 634 635 /* Reserve a guard page. */ 636 size = full_size - qemu_real_host_page_size; 637 638 /* Honor a command-line option limiting the size of the buffer. */ 639 if (size > tcg_ctx.code_gen_buffer_size) { 640 size = (((uintptr_t)buf + tcg_ctx.code_gen_buffer_size) 641 & qemu_real_host_page_mask) - (uintptr_t)buf; 642 } 643 tcg_ctx.code_gen_buffer_size = size; 644 645 #ifdef __mips__ 646 if (cross_256mb(buf, size)) { 647 buf = split_cross_256mb(buf, size); 648 size = tcg_ctx.code_gen_buffer_size; 649 } 650 #endif 651 652 map_exec(buf, size); 653 map_none(buf + size, qemu_real_host_page_size); 654 qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE); 655 656 return buf; 657 } 658 #elif defined(_WIN32) 659 static inline void *alloc_code_gen_buffer(void) 660 { 661 size_t size = tcg_ctx.code_gen_buffer_size; 662 void *buf1, *buf2; 663 664 /* Perform the allocation in two steps, so that the guard page 665 is reserved but uncommitted. */ 666 buf1 = VirtualAlloc(NULL, size + qemu_real_host_page_size, 667 MEM_RESERVE, PAGE_NOACCESS); 668 if (buf1 != NULL) { 669 buf2 = VirtualAlloc(buf1, size, MEM_COMMIT, PAGE_EXECUTE_READWRITE); 670 assert(buf1 == buf2); 671 } 672 673 return buf1; 674 } 675 #else 676 static inline void *alloc_code_gen_buffer(void) 677 { 678 int flags = MAP_PRIVATE | MAP_ANONYMOUS; 679 uintptr_t start = 0; 680 size_t size = tcg_ctx.code_gen_buffer_size; 681 void *buf; 682 683 /* Constrain the position of the buffer based on the host cpu. 684 Note that these addresses are chosen in concert with the 685 addresses assigned in the relevant linker script file. 
*/ 686 # if defined(__PIE__) || defined(__PIC__) 687 /* Don't bother setting a preferred location if we're building 688 a position-independent executable. We're more likely to get 689 an address near the main executable if we let the kernel 690 choose the address. */ 691 # elif defined(__x86_64__) && defined(MAP_32BIT) 692 /* Force the memory down into low memory with the executable. 693 Leave the choice of exact location with the kernel. */ 694 flags |= MAP_32BIT; 695 /* Cannot expect to map more than 800MB in low memory. */ 696 if (size > 800u * 1024 * 1024) { 697 tcg_ctx.code_gen_buffer_size = size = 800u * 1024 * 1024; 698 } 699 # elif defined(__sparc__) 700 start = 0x40000000ul; 701 # elif defined(__s390x__) 702 start = 0x90000000ul; 703 # elif defined(__mips__) 704 # if _MIPS_SIM == _ABI64 705 start = 0x128000000ul; 706 # else 707 start = 0x08000000ul; 708 # endif 709 # endif 710 711 buf = mmap((void *)start, size + qemu_real_host_page_size, 712 PROT_NONE, flags, -1, 0); 713 if (buf == MAP_FAILED) { 714 return NULL; 715 } 716 717 #ifdef __mips__ 718 if (cross_256mb(buf, size)) { 719 /* Try again, with the original still mapped, to avoid re-acquiring 720 that 256mb crossing. This time don't specify an address. */ 721 size_t size2; 722 void *buf2 = mmap(NULL, size + qemu_real_host_page_size, 723 PROT_NONE, flags, -1, 0); 724 switch ((int)(buf2 != MAP_FAILED)) { 725 case 1: 726 if (!cross_256mb(buf2, size)) { 727 /* Success! Use the new buffer. */ 728 munmap(buf, size + qemu_real_host_page_size); 729 break; 730 } 731 /* Failure. Work with what we had. */ 732 munmap(buf2, size + qemu_real_host_page_size); 733 /* fallthru */ 734 default: 735 /* Split the original buffer. Free the smaller half. */ 736 buf2 = split_cross_256mb(buf, size); 737 size2 = tcg_ctx.code_gen_buffer_size; 738 if (buf == buf2) { 739 munmap(buf + size2 + qemu_real_host_page_size, size - size2); 740 } else { 741 munmap(buf, size - size2); 742 } 743 size = size2; 744 break; 745 } 746 buf = buf2; 747 } 748 #endif 749 750 /* Make the final buffer accessible. The guard page at the end 751 will remain inaccessible with PROT_NONE. */ 752 mprotect(buf, size, PROT_WRITE | PROT_READ | PROT_EXEC); 753 754 /* Request large pages for the buffer. */ 755 qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE); 756 757 return buf; 758 } 759 #endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */ 760 761 static inline void code_gen_alloc(size_t tb_size) 762 { 763 tcg_ctx.code_gen_buffer_size = size_code_gen_buffer(tb_size); 764 tcg_ctx.code_gen_buffer = alloc_code_gen_buffer(); 765 if (tcg_ctx.code_gen_buffer == NULL) { 766 fprintf(stderr, "Could not allocate dynamic translator buffer\n"); 767 exit(1); 768 } 769 770 /* size this conservatively -- realloc later if needed */ 771 tcg_ctx.tb_ctx.tbs_size = 772 tcg_ctx.code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE / 8; 773 if (unlikely(!tcg_ctx.tb_ctx.tbs_size)) { 774 tcg_ctx.tb_ctx.tbs_size = 64 * 1024; 775 } 776 tcg_ctx.tb_ctx.tbs = g_new(TranslationBlock *, tcg_ctx.tb_ctx.tbs_size); 777 778 qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock); 779 } 780 781 static void tb_htable_init(void) 782 { 783 unsigned int mode = QHT_MODE_AUTO_RESIZE; 784 785 qht_init(&tcg_ctx.tb_ctx.htable, CODE_GEN_HTABLE_SIZE, mode); 786 } 787 788 /* Must be called before using the QEMU cpus. 'tb_size' is the size 789 (in bytes) allocated to the translation buffer. Zero means default 790 size. 
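   For illustration, passing 0 accepts the default; an explicit request
   such as tcg_exec_init(32 * 1024 * 1024) is still clamped by
   size_code_gen_buffer() to the MIN/MAX_CODE_GEN_BUFFER_SIZE limits above.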
 */
void tcg_exec_init(unsigned long tb_size)
{
    tcg_allowed = true;
    cpu_gen_init();
    page_init();
    tb_htable_init();
    code_gen_alloc(tb_size);
#if defined(CONFIG_SOFTMMU)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now. */
    tcg_prologue_init(&tcg_ctx);
#endif
}

/*
 * Allocate a new translation block. Flush the translation buffer if
 * too many translation blocks or too much generated code.
 *
 * Called with tb_lock held.
 */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;
    TBContext *ctx;

    assert_tb_locked();

    tb = tcg_tb_alloc(&tcg_ctx);
    if (unlikely(tb == NULL)) {
        return NULL;
    }
    ctx = &tcg_ctx.tb_ctx;
    if (unlikely(ctx->nb_tbs == ctx->tbs_size)) {
        ctx->tbs_size *= 2;
        ctx->tbs = g_renew(TranslationBlock *, ctx->tbs, ctx->tbs_size);
    }
    ctx->tbs[ctx->nb_tbs++] = tb;
    return tb;
}

/* Called with tb_lock held.  */
void tb_free(TranslationBlock *tb)
{
    assert_tb_locked();

    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (tcg_ctx.tb_ctx.nb_tbs > 0 &&
            tb == tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs - 1]) {
        size_t struct_size = ROUND_UP(sizeof(*tb), qemu_icache_linesize);

        tcg_ctx.code_gen_ptr = tb->tc_ptr - struct_size;
        tcg_ctx.tb_ctx.nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
#ifdef CONFIG_SOFTMMU
    g_free(p->code_bitmap);
    p->code_bitmap = NULL;
    p->code_write_count = 0;
#endif
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */
static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i, l1_sz = v_l1_size;

    for (i = 0; i < l1_sz; i++) {
        page_flush_tb_1(v_l2_levels, l1_map + i);
    }
}

/* flush all the translation blocks */
static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
{
    tb_lock();

    /* If it has already been done on request of another CPU,
     * just retry.
     */
    if (tcg_ctx.tb_ctx.tb_flush_count != tb_flush_count.host_int) {
        goto done;
    }

#if defined(DEBUG_TB_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer),
           tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.tb_ctx.nb_tbs > 0 ?
906 ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)) / 907 tcg_ctx.tb_ctx.nb_tbs : 0); 908 #endif 909 if ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer) 910 > tcg_ctx.code_gen_buffer_size) { 911 cpu_abort(cpu, "Internal error: code buffer overflow\n"); 912 } 913 914 CPU_FOREACH(cpu) { 915 cpu_tb_jmp_cache_clear(cpu); 916 } 917 918 tcg_ctx.tb_ctx.nb_tbs = 0; 919 qht_reset_size(&tcg_ctx.tb_ctx.htable, CODE_GEN_HTABLE_SIZE); 920 page_flush_tb(); 921 922 tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer; 923 /* XXX: flush processor icache at this point if cache flush is 924 expensive */ 925 atomic_mb_set(&tcg_ctx.tb_ctx.tb_flush_count, 926 tcg_ctx.tb_ctx.tb_flush_count + 1); 927 928 done: 929 tb_unlock(); 930 } 931 932 void tb_flush(CPUState *cpu) 933 { 934 if (tcg_enabled()) { 935 unsigned tb_flush_count = atomic_mb_read(&tcg_ctx.tb_ctx.tb_flush_count); 936 async_safe_run_on_cpu(cpu, do_tb_flush, 937 RUN_ON_CPU_HOST_INT(tb_flush_count)); 938 } 939 } 940 941 #ifdef DEBUG_TB_CHECK 942 943 static void 944 do_tb_invalidate_check(struct qht *ht, void *p, uint32_t hash, void *userp) 945 { 946 TranslationBlock *tb = p; 947 target_ulong addr = *(target_ulong *)userp; 948 949 if (!(addr + TARGET_PAGE_SIZE <= tb->pc || addr >= tb->pc + tb->size)) { 950 printf("ERROR invalidate: address=" TARGET_FMT_lx 951 " PC=%08lx size=%04x\n", addr, (long)tb->pc, tb->size); 952 } 953 } 954 955 /* verify that all the pages have correct rights for code 956 * 957 * Called with tb_lock held. 958 */ 959 static void tb_invalidate_check(target_ulong address) 960 { 961 address &= TARGET_PAGE_MASK; 962 qht_iter(&tcg_ctx.tb_ctx.htable, do_tb_invalidate_check, &address); 963 } 964 965 static void 966 do_tb_page_check(struct qht *ht, void *p, uint32_t hash, void *userp) 967 { 968 TranslationBlock *tb = p; 969 int flags1, flags2; 970 971 flags1 = page_get_flags(tb->pc); 972 flags2 = page_get_flags(tb->pc + tb->size - 1); 973 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) { 974 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n", 975 (long)tb->pc, tb->size, flags1, flags2); 976 } 977 } 978 979 /* verify that all the pages have correct rights for code */ 980 static void tb_page_check(void) 981 { 982 qht_iter(&tcg_ctx.tb_ctx.htable, do_tb_page_check, NULL); 983 } 984 985 #endif 986 987 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb) 988 { 989 TranslationBlock *tb1; 990 unsigned int n1; 991 992 for (;;) { 993 tb1 = *ptb; 994 n1 = (uintptr_t)tb1 & 3; 995 tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3); 996 if (tb1 == tb) { 997 *ptb = tb1->page_next[n1]; 998 break; 999 } 1000 ptb = &tb1->page_next[n1]; 1001 } 1002 } 1003 1004 /* remove the TB from a list of TBs jumping to the n-th jump target of the TB */ 1005 static inline void tb_remove_from_jmp_list(TranslationBlock *tb, int n) 1006 { 1007 TranslationBlock *tb1; 1008 uintptr_t *ptb, ntb; 1009 unsigned int n1; 1010 1011 ptb = &tb->jmp_list_next[n]; 1012 if (*ptb) { 1013 /* find tb(n) in circular list */ 1014 for (;;) { 1015 ntb = *ptb; 1016 n1 = ntb & 3; 1017 tb1 = (TranslationBlock *)(ntb & ~3); 1018 if (n1 == n && tb1 == tb) { 1019 break; 1020 } 1021 if (n1 == 2) { 1022 ptb = &tb1->jmp_list_first; 1023 } else { 1024 ptb = &tb1->jmp_list_next[n1]; 1025 } 1026 } 1027 /* now we can suppress tb(n) from the list */ 1028 *ptb = tb->jmp_list_next[n]; 1029 1030 tb->jmp_list_next[n] = (uintptr_t)NULL; 1031 } 1032 } 1033 1034 /* reset the jump entry 'n' of a TB so that it is not chained to 1035 another TB */ 1036 
static inline void tb_reset_jump(TranslationBlock *tb, int n) 1037 { 1038 uintptr_t addr = (uintptr_t)(tb->tc_ptr + tb->jmp_reset_offset[n]); 1039 tb_set_jmp_target(tb, n, addr); 1040 } 1041 1042 /* remove any jumps to the TB */ 1043 static inline void tb_jmp_unlink(TranslationBlock *tb) 1044 { 1045 TranslationBlock *tb1; 1046 uintptr_t *ptb, ntb; 1047 unsigned int n1; 1048 1049 ptb = &tb->jmp_list_first; 1050 for (;;) { 1051 ntb = *ptb; 1052 n1 = ntb & 3; 1053 tb1 = (TranslationBlock *)(ntb & ~3); 1054 if (n1 == 2) { 1055 break; 1056 } 1057 tb_reset_jump(tb1, n1); 1058 *ptb = tb1->jmp_list_next[n1]; 1059 tb1->jmp_list_next[n1] = (uintptr_t)NULL; 1060 } 1061 } 1062 1063 /* invalidate one TB 1064 * 1065 * Called with tb_lock held. 1066 */ 1067 void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr) 1068 { 1069 CPUState *cpu; 1070 PageDesc *p; 1071 uint32_t h; 1072 tb_page_addr_t phys_pc; 1073 1074 assert_tb_locked(); 1075 1076 atomic_set(&tb->cflags, tb->cflags | CF_INVALID); 1077 1078 /* remove the TB from the hash list */ 1079 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK); 1080 h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->trace_vcpu_dstate); 1081 qht_remove(&tcg_ctx.tb_ctx.htable, tb, h); 1082 1083 /* remove the TB from the page list */ 1084 if (tb->page_addr[0] != page_addr) { 1085 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS); 1086 tb_page_remove(&p->first_tb, tb); 1087 invalidate_page_bitmap(p); 1088 } 1089 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) { 1090 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS); 1091 tb_page_remove(&p->first_tb, tb); 1092 invalidate_page_bitmap(p); 1093 } 1094 1095 /* remove the TB from the hash list */ 1096 h = tb_jmp_cache_hash_func(tb->pc); 1097 CPU_FOREACH(cpu) { 1098 if (atomic_read(&cpu->tb_jmp_cache[h]) == tb) { 1099 atomic_set(&cpu->tb_jmp_cache[h], NULL); 1100 } 1101 } 1102 1103 /* suppress this TB from the two jump lists */ 1104 tb_remove_from_jmp_list(tb, 0); 1105 tb_remove_from_jmp_list(tb, 1); 1106 1107 /* suppress any remaining jumps to this TB */ 1108 tb_jmp_unlink(tb); 1109 1110 tcg_ctx.tb_ctx.tb_phys_invalidate_count++; 1111 } 1112 1113 #ifdef CONFIG_SOFTMMU 1114 static void build_page_bitmap(PageDesc *p) 1115 { 1116 int n, tb_start, tb_end; 1117 TranslationBlock *tb; 1118 1119 p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE); 1120 1121 tb = p->first_tb; 1122 while (tb != NULL) { 1123 n = (uintptr_t)tb & 3; 1124 tb = (TranslationBlock *)((uintptr_t)tb & ~3); 1125 /* NOTE: this is subtle as a TB may span two physical pages */ 1126 if (n == 0) { 1127 /* NOTE: tb_end may be after the end of the page, but 1128 it is not a problem */ 1129 tb_start = tb->pc & ~TARGET_PAGE_MASK; 1130 tb_end = tb_start + tb->size; 1131 if (tb_end > TARGET_PAGE_SIZE) { 1132 tb_end = TARGET_PAGE_SIZE; 1133 } 1134 } else { 1135 tb_start = 0; 1136 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK); 1137 } 1138 bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start); 1139 tb = tb->page_next[n]; 1140 } 1141 } 1142 #endif 1143 1144 /* add the tb in the target page and protect it if necessary 1145 * 1146 * Called with mmap_lock held for user-mode emulation. 
1147 */ 1148 static inline void tb_alloc_page(TranslationBlock *tb, 1149 unsigned int n, tb_page_addr_t page_addr) 1150 { 1151 PageDesc *p; 1152 #ifndef CONFIG_USER_ONLY 1153 bool page_already_protected; 1154 #endif 1155 1156 assert_memory_lock(); 1157 1158 tb->page_addr[n] = page_addr; 1159 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1); 1160 tb->page_next[n] = p->first_tb; 1161 #ifndef CONFIG_USER_ONLY 1162 page_already_protected = p->first_tb != NULL; 1163 #endif 1164 p->first_tb = (TranslationBlock *)((uintptr_t)tb | n); 1165 invalidate_page_bitmap(p); 1166 1167 #if defined(CONFIG_USER_ONLY) 1168 if (p->flags & PAGE_WRITE) { 1169 target_ulong addr; 1170 PageDesc *p2; 1171 int prot; 1172 1173 /* force the host page as non writable (writes will have a 1174 page fault + mprotect overhead) */ 1175 page_addr &= qemu_host_page_mask; 1176 prot = 0; 1177 for (addr = page_addr; addr < page_addr + qemu_host_page_size; 1178 addr += TARGET_PAGE_SIZE) { 1179 1180 p2 = page_find(addr >> TARGET_PAGE_BITS); 1181 if (!p2) { 1182 continue; 1183 } 1184 prot |= p2->flags; 1185 p2->flags &= ~PAGE_WRITE; 1186 } 1187 mprotect(g2h(page_addr), qemu_host_page_size, 1188 (prot & PAGE_BITS) & ~PAGE_WRITE); 1189 #ifdef DEBUG_TB_INVALIDATE 1190 printf("protecting code page: 0x" TARGET_FMT_lx "\n", 1191 page_addr); 1192 #endif 1193 } 1194 #else 1195 /* if some code is already present, then the pages are already 1196 protected. So we handle the case where only the first TB is 1197 allocated in a physical page */ 1198 if (!page_already_protected) { 1199 tlb_protect_code(page_addr); 1200 } 1201 #endif 1202 } 1203 1204 /* add a new TB and link it to the physical page tables. phys_page2 is 1205 * (-1) to indicate that only one page contains the TB. 1206 * 1207 * Called with mmap_lock held for user-mode emulation. 1208 */ 1209 static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc, 1210 tb_page_addr_t phys_page2) 1211 { 1212 uint32_t h; 1213 1214 assert_memory_lock(); 1215 1216 /* add in the page list */ 1217 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK); 1218 if (phys_page2 != -1) { 1219 tb_alloc_page(tb, 1, phys_page2); 1220 } else { 1221 tb->page_addr[1] = -1; 1222 } 1223 1224 /* add in the hash table */ 1225 h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->trace_vcpu_dstate); 1226 qht_insert(&tcg_ctx.tb_ctx.htable, tb, h); 1227 1228 #ifdef DEBUG_TB_CHECK 1229 tb_page_check(); 1230 #endif 1231 } 1232 1233 /* Called with mmap_lock held for user mode emulation. */ 1234 TranslationBlock *tb_gen_code(CPUState *cpu, 1235 target_ulong pc, target_ulong cs_base, 1236 uint32_t flags, int cflags) 1237 { 1238 CPUArchState *env = cpu->env_ptr; 1239 TranslationBlock *tb; 1240 tb_page_addr_t phys_pc, phys_page2; 1241 target_ulong virt_page2; 1242 tcg_insn_unit *gen_code_buf; 1243 int gen_code_size, search_size; 1244 #ifdef CONFIG_PROFILER 1245 int64_t ti; 1246 #endif 1247 assert_memory_lock(); 1248 1249 phys_pc = get_page_addr_code(env, pc); 1250 if (use_icount && !(cflags & CF_IGNORE_ICOUNT)) { 1251 cflags |= CF_USE_ICOUNT; 1252 } 1253 1254 tb = tb_alloc(pc); 1255 if (unlikely(!tb)) { 1256 buffer_overflow: 1257 /* flush must be done */ 1258 tb_flush(cpu); 1259 mmap_unlock(); 1260 /* Make the execution loop process the flush as soon as possible. 
         */
        cpu->exception_index = EXCP_INTERRUPT;
        cpu_loop_exit(cpu);
    }

    gen_code_buf = tcg_ctx.code_gen_ptr;
    tb->tc_ptr = gen_code_buf;
    tb->pc = pc;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    tb->trace_vcpu_dstate = *cpu->trace_dstate;

#ifdef CONFIG_PROFILER
    tcg_ctx.tb_count1++; /* includes aborted translations because of
                            exceptions */
    ti = profile_getclock();
#endif

    tcg_func_start(&tcg_ctx);

    tcg_ctx.cpu = ENV_GET_CPU(env);
    gen_intermediate_code(cpu, tb);
    tcg_ctx.cpu = NULL;

    trace_translate_block(tb, tb->pc, tb->tc_ptr);

    /* generate machine code */
    tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
    tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID;
    tcg_ctx.tb_jmp_reset_offset = tb->jmp_reset_offset;
    if (TCG_TARGET_HAS_direct_jump) {
        tcg_ctx.tb_jmp_insn_offset = tb->jmp_target_arg;
        tcg_ctx.tb_jmp_target_addr = NULL;
    } else {
        tcg_ctx.tb_jmp_insn_offset = NULL;
        tcg_ctx.tb_jmp_target_addr = tb->jmp_target_arg;
    }

#ifdef CONFIG_PROFILER
    tcg_ctx.tb_count++;
    tcg_ctx.interm_time += profile_getclock() - ti;
    ti = profile_getclock();
#endif

    /* ??? Overflow could be handled better here.  In particular, we
       don't need to re-do gen_intermediate_code, nor should we re-do
       the tcg optimization currently hidden inside tcg_gen_code.  All
       that should be required is to flush the TBs, allocate a new TB,
       re-initialize it per above, and re-do the actual code generation.  */
    gen_code_size = tcg_gen_code(&tcg_ctx, tb);
    if (unlikely(gen_code_size < 0)) {
        goto buffer_overflow;
    }
    search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
    if (unlikely(search_size < 0)) {
        goto buffer_overflow;
    }

#ifdef CONFIG_PROFILER
    tcg_ctx.code_time += profile_getclock() - ti;
    tcg_ctx.code_in_len += tb->size;
    tcg_ctx.code_out_len += gen_code_size;
    tcg_ctx.search_out_len += search_size;
#endif

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
        qemu_log_in_addr_range(tb->pc)) {
        qemu_log_lock();
        qemu_log("OUT: [size=%d]\n", gen_code_size);
        if (tcg_ctx.data_gen_ptr) {
            size_t code_size = tcg_ctx.data_gen_ptr - tb->tc_ptr;
            size_t data_size = gen_code_size - code_size;
            size_t i;

            log_disas(tb->tc_ptr, code_size);

            for (i = 0; i < data_size; i += sizeof(tcg_target_ulong)) {
                if (sizeof(tcg_target_ulong) == 8) {
                    qemu_log("0x%08" PRIxPTR ": .quad 0x%016" PRIx64 "\n",
                             (uintptr_t)tcg_ctx.data_gen_ptr + i,
                             *(uint64_t *)(tcg_ctx.data_gen_ptr + i));
                } else {
                    qemu_log("0x%08" PRIxPTR ": .long 0x%08x\n",
                             (uintptr_t)tcg_ctx.data_gen_ptr + i,
                             *(uint32_t *)(tcg_ctx.data_gen_ptr + i));
                }
            }
        } else {
            log_disas(tb->tc_ptr, gen_code_size);
        }
        qemu_log("\n");
        qemu_log_flush();
        qemu_log_unlock();
    }
#endif

    tcg_ctx.code_gen_ptr = (void *)
        ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
                 CODE_GEN_ALIGN);

    /* init jump list */
    assert(((uintptr_t)tb & 3) == 0);
    tb->jmp_list_first = (uintptr_t)tb | 2;
    tb->jmp_list_next[0] = (uintptr_t)NULL;
    tb->jmp_list_next[1] = (uintptr_t)NULL;

    /* init original jump addresses which have been set during tcg_gen_code() */
    if (tb->jmp_reset_offset[0]
!= TB_JMP_RESET_OFFSET_INVALID) { 1370 tb_reset_jump(tb, 0); 1371 } 1372 if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) { 1373 tb_reset_jump(tb, 1); 1374 } 1375 1376 /* check next page if needed */ 1377 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK; 1378 phys_page2 = -1; 1379 if ((pc & TARGET_PAGE_MASK) != virt_page2) { 1380 phys_page2 = get_page_addr_code(env, virt_page2); 1381 } 1382 /* As long as consistency of the TB stuff is provided by tb_lock in user 1383 * mode and is implicit in single-threaded softmmu emulation, no explicit 1384 * memory barrier is required before tb_link_page() makes the TB visible 1385 * through the physical hash table and physical page list. 1386 */ 1387 tb_link_page(tb, phys_pc, phys_page2); 1388 return tb; 1389 } 1390 1391 /* 1392 * Invalidate all TBs which intersect with the target physical address range 1393 * [start;end[. NOTE: start and end may refer to *different* physical pages. 1394 * 'is_cpu_write_access' should be true if called from a real cpu write 1395 * access: the virtual CPU will exit the current TB if code is modified inside 1396 * this TB. 1397 * 1398 * Called with mmap_lock held for user-mode emulation, grabs tb_lock 1399 * Called with tb_lock held for system-mode emulation 1400 */ 1401 static void tb_invalidate_phys_range_1(tb_page_addr_t start, tb_page_addr_t end) 1402 { 1403 while (start < end) { 1404 tb_invalidate_phys_page_range(start, end, 0); 1405 start &= TARGET_PAGE_MASK; 1406 start += TARGET_PAGE_SIZE; 1407 } 1408 } 1409 1410 #ifdef CONFIG_SOFTMMU 1411 void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end) 1412 { 1413 assert_tb_locked(); 1414 tb_invalidate_phys_range_1(start, end); 1415 } 1416 #else 1417 void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end) 1418 { 1419 assert_memory_lock(); 1420 tb_lock(); 1421 tb_invalidate_phys_range_1(start, end); 1422 tb_unlock(); 1423 } 1424 #endif 1425 /* 1426 * Invalidate all TBs which intersect with the target physical address range 1427 * [start;end[. NOTE: start and end must refer to the *same* physical page. 1428 * 'is_cpu_write_access' should be true if called from a real cpu write 1429 * access: the virtual CPU will exit the current TB if code is modified inside 1430 * this TB. 
 *
 * Called with tb_lock/mmap_lock held for user-mode emulation
 * Called with tb_lock held for system-mode emulation
 */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next;
#if defined(TARGET_HAS_PRECISE_SMC)
    CPUState *cpu = current_cpu;
    CPUArchState *env = NULL;
#endif
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    uint32_t current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    assert_memory_lock();
    assert_tb_locked();

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
#if defined(TARGET_HAS_PRECISE_SMC)
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all
       the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (cpu->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(cpu->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state_from_tb(cpu, current_tb, cpu->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            tb_phys_invalidate(tb, -1);
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        tlb_unprotect_code(start);
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
        cpu_loop_exit_noexc(cpu);
    }
#endif
}
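
/* A minimal illustration (not part of the build): a system-mode caller that
 * has just rewritten 'len' bytes of guest RAM at physical address 'start'
 * -- both assumed, hypothetical values -- could drop every translation
 * overlapping that range with tb_lock held, much as tb_invalidate_phys_addr()
 * does below for a single byte:
 *
 *     tb_lock();
 *     tb_invalidate_phys_range(start, start + len);
 *     tb_unlock();
 *
 * tb_invalidate_phys_range() walks the range one page at a time via
 * tb_invalidate_phys_page_range() above.
 */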

#ifdef CONFIG_SOFTMMU
/* len must be <= 8 and start must be a multiple of len.
 * Called via softmmu_template.h when code areas are written to with
 * iothread mutex not held.
 */
void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;

#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip +
                 (intptr_t)cpu_single_env->segs[R_CS].base);
    }
#endif
    assert_memory_lock();

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
        /* build code bitmap.  FIXME: writes should be protected by
         * tb_lock, reads by tb_lock or RCU.
         */
        build_page_bitmap(p);
    }
    if (p->code_bitmap) {
        unsigned int nr;
        unsigned long b;

        nr = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
        if (b & ((1 << len) - 1)) {
            goto do_invalidate;
        }
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
#else
/* Called with mmap_lock held. If pc is not 0 then it indicates the
 * host PC of the faulting store instruction that caused this invalidate.
 * Returns true if the caller needs to abort execution of the current
 * TB (because it was modified by this store and the guest CPU has
 * precise-SMC semantics).
 */
static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *cpu = current_cpu;
    CPUArchState *env = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    uint32_t current_flags = 0;
#endif

    assert_memory_lock();

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        return false;
    }

    tb_lock();
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state_from_tb(cpu, current_tb, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
        /* tb_lock will be reset after cpu_loop_exit_noexc longjmps
         * back into the cpu_exec loop. */
        return true;
    }
#endif
    tb_unlock();

    return false;
}
#endif

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr.
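   (For illustration, cpu_restore_state() above does exactly this: it maps a
   host return address, e.g. from a helper or a faulting memory access, back
   to the TB whose generated code contains it.)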
Return NULL if not found */ 1658 static TranslationBlock *tb_find_pc(uintptr_t tc_ptr) 1659 { 1660 int m_min, m_max, m; 1661 uintptr_t v; 1662 TranslationBlock *tb; 1663 1664 if (tcg_ctx.tb_ctx.nb_tbs <= 0) { 1665 return NULL; 1666 } 1667 if (tc_ptr < (uintptr_t)tcg_ctx.code_gen_buffer || 1668 tc_ptr >= (uintptr_t)tcg_ctx.code_gen_ptr) { 1669 return NULL; 1670 } 1671 /* binary search (cf Knuth) */ 1672 m_min = 0; 1673 m_max = tcg_ctx.tb_ctx.nb_tbs - 1; 1674 while (m_min <= m_max) { 1675 m = (m_min + m_max) >> 1; 1676 tb = tcg_ctx.tb_ctx.tbs[m]; 1677 v = (uintptr_t)tb->tc_ptr; 1678 if (v == tc_ptr) { 1679 return tb; 1680 } else if (tc_ptr < v) { 1681 m_max = m - 1; 1682 } else { 1683 m_min = m + 1; 1684 } 1685 } 1686 return tcg_ctx.tb_ctx.tbs[m_max]; 1687 } 1688 1689 #if !defined(CONFIG_USER_ONLY) 1690 void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr) 1691 { 1692 ram_addr_t ram_addr; 1693 MemoryRegion *mr; 1694 hwaddr l = 1; 1695 1696 rcu_read_lock(); 1697 mr = address_space_translate(as, addr, &addr, &l, false); 1698 if (!(memory_region_is_ram(mr) 1699 || memory_region_is_romd(mr))) { 1700 rcu_read_unlock(); 1701 return; 1702 } 1703 ram_addr = memory_region_get_ram_addr(mr) + addr; 1704 tb_lock(); 1705 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0); 1706 tb_unlock(); 1707 rcu_read_unlock(); 1708 } 1709 #endif /* !defined(CONFIG_USER_ONLY) */ 1710 1711 /* Called with tb_lock held. */ 1712 void tb_check_watchpoint(CPUState *cpu) 1713 { 1714 TranslationBlock *tb; 1715 1716 tb = tb_find_pc(cpu->mem_io_pc); 1717 if (tb) { 1718 /* We can use retranslation to find the PC. */ 1719 cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc); 1720 tb_phys_invalidate(tb, -1); 1721 } else { 1722 /* The exception probably happened in a helper. The CPU state should 1723 have been saved before calling it. Fetch the PC from there. */ 1724 CPUArchState *env = cpu->env_ptr; 1725 target_ulong pc, cs_base; 1726 tb_page_addr_t addr; 1727 uint32_t flags; 1728 1729 cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags); 1730 addr = get_page_addr_code(env, pc); 1731 tb_invalidate_phys_range(addr, addr + 1); 1732 } 1733 } 1734 1735 #ifndef CONFIG_USER_ONLY 1736 /* in deterministic execution mode, instructions doing device I/Os 1737 * must be at the end of the TB. 1738 * 1739 * Called by softmmu_template.h, with iothread mutex not held. 1740 */ 1741 void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr) 1742 { 1743 #if defined(TARGET_MIPS) || defined(TARGET_SH4) 1744 CPUArchState *env = cpu->env_ptr; 1745 #endif 1746 TranslationBlock *tb; 1747 uint32_t n, cflags; 1748 target_ulong pc, cs_base; 1749 uint32_t flags; 1750 1751 tb_lock(); 1752 tb = tb_find_pc(retaddr); 1753 if (!tb) { 1754 cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p", 1755 (void *)retaddr); 1756 } 1757 n = cpu->icount_decr.u16.low + tb->icount; 1758 cpu_restore_state_from_tb(cpu, tb, retaddr); 1759 /* Calculate how many instructions had been executed before the fault 1760 occurred. */ 1761 n = n - cpu->icount_decr.u16.low; 1762 /* Generate a new TB ending on the I/O insn. */ 1763 n++; 1764 /* On MIPS and SH, delay slot instructions can only be restarted if 1765 they were already the first instruction in the TB. If this is not 1766 the first instruction in a TB then re-execute the preceding 1767 branch. */ 1768 #if defined(TARGET_MIPS) 1769 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) { 1770 env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 
2 : 4); 1771 cpu->icount_decr.u16.low++; 1772 env->hflags &= ~MIPS_HFLAG_BMASK; 1773 } 1774 #elif defined(TARGET_SH4) 1775 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0 1776 && n > 1) { 1777 env->pc -= 2; 1778 cpu->icount_decr.u16.low++; 1779 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL); 1780 } 1781 #endif 1782 /* This should never happen. */ 1783 if (n > CF_COUNT_MASK) { 1784 cpu_abort(cpu, "TB too big during recompile"); 1785 } 1786 1787 cflags = n | CF_LAST_IO; 1788 pc = tb->pc; 1789 cs_base = tb->cs_base; 1790 flags = tb->flags; 1791 tb_phys_invalidate(tb, -1); 1792 if (tb->cflags & CF_NOCACHE) { 1793 if (tb->orig_tb) { 1794 /* Invalidate original TB if this TB was generated in 1795 * cpu_exec_nocache() */ 1796 tb_phys_invalidate(tb->orig_tb, -1); 1797 } 1798 tb_free(tb); 1799 } 1800 /* FIXME: In theory this could raise an exception. In practice 1801 we have already translated the block once so it's probably ok. */ 1802 tb_gen_code(cpu, pc, cs_base, flags, cflags); 1803 1804 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not 1805 * the first in the TB) then we end up generating a whole new TB and 1806 * repeating the fault, which is horribly inefficient. 1807 * Better would be to execute just this insn uncached, or generate a 1808 * second new TB. 1809 * 1810 * cpu_loop_exit_noexc will longjmp back to cpu_exec where the 1811 * tb_lock gets reset. 1812 */ 1813 cpu_loop_exit_noexc(cpu); 1814 } 1815 1816 static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr) 1817 { 1818 unsigned int i, i0 = tb_jmp_cache_hash_page(page_addr); 1819 1820 for (i = 0; i < TB_JMP_PAGE_SIZE; i++) { 1821 atomic_set(&cpu->tb_jmp_cache[i0 + i], NULL); 1822 } 1823 } 1824 1825 void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr) 1826 { 1827 /* Discard jump cache entries for any tb which might potentially 1828 overlap the flushed page. */ 1829 tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE); 1830 tb_jmp_cache_clear_page(cpu, addr); 1831 } 1832 1833 static void print_qht_statistics(FILE *f, fprintf_function cpu_fprintf, 1834 struct qht_stats hst) 1835 { 1836 uint32_t hgram_opts; 1837 size_t hgram_bins; 1838 char *hgram; 1839 1840 if (!hst.head_buckets) { 1841 return; 1842 } 1843 cpu_fprintf(f, "TB hash buckets %zu/%zu (%0.2f%% head buckets used)\n", 1844 hst.used_head_buckets, hst.head_buckets, 1845 (double)hst.used_head_buckets / hst.head_buckets * 100); 1846 1847 hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS; 1848 hgram_opts |= QDIST_PR_100X | QDIST_PR_PERCENT; 1849 if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) { 1850 hgram_opts |= QDIST_PR_NODECIMAL; 1851 } 1852 hgram = qdist_pr(&hst.occupancy, 10, hgram_opts); 1853 cpu_fprintf(f, "TB hash occupancy %0.2f%% avg chain occ. Histogram: %s\n", 1854 qdist_avg(&hst.occupancy) * 100, hgram); 1855 g_free(hgram); 1856 1857 hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS; 1858 hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain); 1859 if (hgram_bins > 10) { 1860 hgram_bins = 10; 1861 } else { 1862 hgram_bins = 0; 1863 hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE; 1864 } 1865 hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts); 1866 cpu_fprintf(f, "TB hash avg chain %0.3f buckets. 
Histogram: %s\n", 1867 qdist_avg(&hst.chain), hgram); 1868 g_free(hgram); 1869 } 1870 1871 void dump_exec_info(FILE *f, fprintf_function cpu_fprintf) 1872 { 1873 int i, target_code_size, max_target_code_size; 1874 int direct_jmp_count, direct_jmp2_count, cross_page; 1875 TranslationBlock *tb; 1876 struct qht_stats hst; 1877 1878 tb_lock(); 1879 1880 target_code_size = 0; 1881 max_target_code_size = 0; 1882 cross_page = 0; 1883 direct_jmp_count = 0; 1884 direct_jmp2_count = 0; 1885 for (i = 0; i < tcg_ctx.tb_ctx.nb_tbs; i++) { 1886 tb = tcg_ctx.tb_ctx.tbs[i]; 1887 target_code_size += tb->size; 1888 if (tb->size > max_target_code_size) { 1889 max_target_code_size = tb->size; 1890 } 1891 if (tb->page_addr[1] != -1) { 1892 cross_page++; 1893 } 1894 if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) { 1895 direct_jmp_count++; 1896 if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) { 1897 direct_jmp2_count++; 1898 } 1899 } 1900 } 1901 /* XXX: avoid using doubles ? */ 1902 cpu_fprintf(f, "Translation buffer state:\n"); 1903 cpu_fprintf(f, "gen code size %td/%zd\n", 1904 tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer, 1905 tcg_ctx.code_gen_highwater - tcg_ctx.code_gen_buffer); 1906 cpu_fprintf(f, "TB count %d\n", tcg_ctx.tb_ctx.nb_tbs); 1907 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n", 1908 tcg_ctx.tb_ctx.nb_tbs ? target_code_size / 1909 tcg_ctx.tb_ctx.nb_tbs : 0, 1910 max_target_code_size); 1911 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n", 1912 tcg_ctx.tb_ctx.nb_tbs ? (tcg_ctx.code_gen_ptr - 1913 tcg_ctx.code_gen_buffer) / 1914 tcg_ctx.tb_ctx.nb_tbs : 0, 1915 target_code_size ? (double) (tcg_ctx.code_gen_ptr - 1916 tcg_ctx.code_gen_buffer) / 1917 target_code_size : 0); 1918 cpu_fprintf(f, "cross page TB count %d (%d%%)\n", cross_page, 1919 tcg_ctx.tb_ctx.nb_tbs ? (cross_page * 100) / 1920 tcg_ctx.tb_ctx.nb_tbs : 0); 1921 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n", 1922 direct_jmp_count, 1923 tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp_count * 100) / 1924 tcg_ctx.tb_ctx.nb_tbs : 0, 1925 direct_jmp2_count, 1926 tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp2_count * 100) / 1927 tcg_ctx.tb_ctx.nb_tbs : 0); 1928 1929 qht_statistics_init(&tcg_ctx.tb_ctx.htable, &hst); 1930 print_qht_statistics(f, cpu_fprintf, hst); 1931 qht_statistics_destroy(&hst); 1932 1933 cpu_fprintf(f, "\nStatistics:\n"); 1934 cpu_fprintf(f, "TB flush count %u\n", 1935 atomic_read(&tcg_ctx.tb_ctx.tb_flush_count)); 1936 cpu_fprintf(f, "TB invalidate count %d\n", 1937 tcg_ctx.tb_ctx.tb_phys_invalidate_count); 1938 cpu_fprintf(f, "TLB flush count %zu\n", tlb_flush_count()); 1939 tcg_dump_info(f, cpu_fprintf); 1940 1941 tb_unlock(); 1942 } 1943 1944 void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf) 1945 { 1946 tcg_dump_op_count(f, cpu_fprintf); 1947 } 1948 1949 #else /* CONFIG_USER_ONLY */ 1950 1951 void cpu_interrupt(CPUState *cpu, int mask) 1952 { 1953 g_assert(qemu_mutex_iothread_locked()); 1954 cpu->interrupt_request |= mask; 1955 cpu->icount_decr.u16.high = -1; 1956 } 1957 1958 /* 1959 * Walks guest process memory "regions" one by one 1960 * and calls callback function 'fn' for each region. 
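 *
 * For illustration, page_dump() below uses it as
 *     walk_memory_regions(f, dump_region);
 * so dump_region(f, start, end, prot) runs once for each maximal run of
 * pages that share the same protection bits.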
/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */
struct walk_memory_regions_data {
    walk_memory_regions_fn fn;
    void *priv;
    target_ulong start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   target_ulong end, int new_prot)
{
    if (data->start != -1u) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1u);
    data->prot = new_prot;

    return 0;
}

static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 target_ulong base, int level, void **lp)
{
    target_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pa = base | ((target_ulong)i <<
                         (TARGET_PAGE_BITS + V_L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    uintptr_t i, l1_sz = v_l1_size;

    data.fn = fn;
    data.priv = priv;
    data.start = -1u;
    data.prot = 0;

    for (i = 0; i < l1_sz; i++) {
        target_ulong base = i << (v_l1_shift + TARGET_PAGE_BITS);
        int rc = walk_memory_regions_1(&data, base, v_l2_levels, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}

static int dump_region(void *priv, target_ulong start,
                       target_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx
                   " "TARGET_FMT_lx" %c%c%c\n",
                   start, end, end - start,
                   ((prot & PAGE_READ) ? 'r' : '-'),
                   ((prot & PAGE_WRITE) ? 'w' : '-'),
                   ((prot & PAGE_EXEC) ? 'x' : '-'));

    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    const int length = sizeof(target_ulong) * 2;
    (void) fprintf(f, "%-*s %-*s %-*s %s\n",
                   length, "start", length, "end", length, "size", "prot");
    walk_memory_regions(f, dump_region);
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        return 0;
    }
    return p->flags;
}
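/*
 * Illustrative sketch (not built): a minimal walk_memory_regions() client in
 * the style of dump_region() above. The callback is handed each contiguous
 * range of guest pages that share the same (non-empty) protection flags and
 * may abort the walk early by returning a non-zero value, which
 * walk_memory_regions() propagates to its caller. The names
 * count_exec_region/count_exec_bytes are hypothetical, used only here.
 */
#if 0
static int count_exec_region(void *priv, target_ulong start,
                             target_ulong end, unsigned long prot)
{
    target_ulong *total = priv;

    if (prot & PAGE_EXEC) {
        *total += end - start;
    }
    return 0;   /* a non-zero return value would stop the walk */
}

static target_ulong count_exec_bytes(void)
{
    target_ulong total = 0;

    walk_memory_regions(&total, count_exec_region);
    return total;
}
#endif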
/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE. The mmap_lock should already be held. */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space. If this assert fires, it probably indicates
       a missing call to h2g_valid.
     */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);
    assert_memory_lock();

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside. */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0);
        }
        p->flags = flags;
    }
}

int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space. If this assert fires, it probably indicates
       a missing call to h2g_valid. */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around. */
        return -1;
    }

    /* must do before we lose bits in the next step */
    end = TARGET_PAGE_ALIGN(start + len);
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p) {
            return -1;
        }
        if (!(p->flags & PAGE_VALID)) {
            return -1;
        }

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
            return -1;
        }
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                return -1;
            }
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0)) {
                    return -1;
                }
            }
        }
    }
    return 0;
}
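/*
 * Illustrative sketch (not built): typical use of page_check_range(), roughly
 * how a linux-user style access check validates a guest buffer before
 * touching it. A return value of 0 means every page of the range carries the
 * requested flags. The name guest_buffer_is_readable is hypothetical, used
 * only for this example.
 */
#if 0
static bool guest_buffer_is_readable(target_ulong guest_addr, target_ulong size)
{
    return page_check_range(guest_addr, size, PAGE_READ) == 0;
}
#endif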
/* called from signal handler: invalidate the code and unprotect the
 * page. Return 0 if the fault was not handled, 1 if it was handled,
 * and 2 if it was handled but the caller must cause the TB to be
 * immediately exited. (We can only return 2 if the 'pc' argument is
 * non-zero.)
 */
int page_unprotect(target_ulong address, uintptr_t pc)
{
    unsigned int prot;
    bool current_tb_invalidated;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler. However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok. */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        current_tb_invalidated = false;
        for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            current_tb_invalidated |= tb_invalidate_phys_page(addr, pc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        /* If current TB was invalidated return to main loop */
        return current_tb_invalidated ? 2 : 1;
    }
    mmap_unlock();
    return 0;
}
#endif /* CONFIG_USER_ONLY */

/* This is a wrapper for common code that cannot use CONFIG_SOFTMMU */
void tcg_flush_softmmu_tlb(CPUState *cs)
{
#ifdef CONFIG_SOFTMMU
    tlb_flush(cs);
#endif
}
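/*
 * Illustrative sketch (not built): how a user-mode SEGV handler might consume
 * page_unprotect()'s three-way return value; the real logic of this kind
 * lives in the host signal handling code (handle_cpu_signal). The name
 * handle_write_fault is hypothetical, used only for this example.
 */
#if 0
static void handle_write_fault(CPUState *cpu, target_ulong guest_addr,
                               uintptr_t host_pc)
{
    switch (page_unprotect(guest_addr, host_pc)) {
    case 2:
        /* The faulting TB itself was invalidated: do not return into it. */
        cpu_loop_exit_noexc(cpu);
        /* cpu_loop_exit_noexc() does not return */
    case 1:
        /* Write protection removed; simply retry the faulting access. */
        return;
    default:
        /* Not a page we write-protected: deliver the fault to the guest. */
        break;
    }
}
#endif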