/*
 *  Host code generation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#ifdef _WIN32
#include <windows.h>
#endif
#include "qemu/osdep.h"

#include "qemu-common.h"
#define NO_CPU_IO_DEFS
#include "cpu.h"
#include "trace.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else
#include "exec/address-spaces.h"
#endif

#include "exec/cputlb.h"
#include "exec/tb-hash.h"
#include "translate-all.h"
#include "qemu/bitmap.h"
#include "qemu/error-report.h"
#include "qemu/timer.h"
#include "qemu/main-loop.h"
#include "exec/log.h"
#include "sysemu/cpus.h"

/* #define DEBUG_TB_INVALIDATE */
/* #define DEBUG_TB_FLUSH */
/* make various TB consistency checks */
/* #define DEBUG_TB_CHECK */

#ifdef DEBUG_TB_INVALIDATE
#define DEBUG_TB_INVALIDATE_GATE 1
#else
#define DEBUG_TB_INVALIDATE_GATE 0
#endif

#ifdef DEBUG_TB_FLUSH
#define DEBUG_TB_FLUSH_GATE 1
#else
#define DEBUG_TB_FLUSH_GATE 0
#endif

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#ifdef DEBUG_TB_CHECK
#define DEBUG_TB_CHECK_GATE 1
#else
#define DEBUG_TB_CHECK_GATE 0
#endif

/* Access to the various translation structures needs to be serialised
 * via locks for consistency.  This is automatic for SoftMMU-based system
 * emulation due to its single-threaded nature.  In user-mode emulation,
 * access to the memory-related structures is protected by the mmap_lock.
 */
#ifdef CONFIG_SOFTMMU
#define assert_memory_lock() tcg_debug_assert(have_tb_lock)
#else
#define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
#ifdef CONFIG_SOFTMMU
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    unsigned long *code_bitmap;
#else
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
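
/* For illustration only (hypothetical values, not tied to any particular
 * target): with a 48-bit address space to cover, 4 KiB target pages
 * (TARGET_PAGE_BITS == 12) and V_L2_BITS == 10, page_table_config_init()
 * below computes 48 - 12 = 36 index bits, v_l1_bits = 36 % 10 = 6,
 * v_l1_size = 64, v_l1_shift = 30 and v_l2_levels = 2: a 64-entry l1_map
 * root, two intermediate levels of 1024 pointers each, and a bottom level
 * of 1024 PageDesc entries (6 + 10 + 10 + 10 = 36 bits).
 */
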
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define V_L2_BITS 10
#define V_L2_SIZE (1 << V_L2_BITS)

/* Make sure all possible CPU event bits fit in tb->trace_vcpu_dstate */
QEMU_BUILD_BUG_ON(CPU_TRACE_DSTATE_MAX_EVENTS >
                  sizeof(((TranslationBlock *)0)->trace_vcpu_dstate)
                  * BITS_PER_BYTE);

/*
 * L1 Mapping properties
 */
static int v_l1_size;
static int v_l1_shift;
static int v_l2_levels;

/* The bottom level has pointers to PageDesc, and is indexed by
 * anything from 4 to (V_L2_BITS + 3) bits, depending on target page size.
 */
#define V_L1_MIN_BITS 4
#define V_L1_MAX_BITS (V_L2_BITS + 3)
#define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS)

static void *l1_map[V_L1_MAX_SIZE];

/* code generation context */
TCGContext tcg_init_ctx;
__thread TCGContext *tcg_ctx;
TBContext tb_ctx;
bool parallel_cpus;

/* translation block context */
static __thread int have_tb_lock;

static void page_table_config_init(void)
{
    uint32_t v_l1_bits;

    assert(TARGET_PAGE_BITS);
    /* The bits remaining after N lower levels of page tables.  */
    v_l1_bits = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS;
    if (v_l1_bits < V_L1_MIN_BITS) {
        v_l1_bits += V_L2_BITS;
    }

    v_l1_size = 1 << v_l1_bits;
    v_l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits;
    v_l2_levels = v_l1_shift / V_L2_BITS - 1;

    assert(v_l1_bits <= V_L1_MAX_BITS);
    assert(v_l1_shift % V_L2_BITS == 0);
    assert(v_l2_levels >= 0);
}

#define assert_tb_locked() tcg_debug_assert(have_tb_lock)
#define assert_tb_unlocked() tcg_debug_assert(!have_tb_lock)

void tb_lock(void)
{
    assert_tb_unlocked();
    qemu_mutex_lock(&tb_ctx.tb_lock);
    have_tb_lock++;
}

void tb_unlock(void)
{
    assert_tb_locked();
    have_tb_lock--;
    qemu_mutex_unlock(&tb_ctx.tb_lock);
}

void tb_lock_reset(void)
{
    if (have_tb_lock) {
        qemu_mutex_unlock(&tb_ctx.tb_lock);
        have_tb_lock = 0;
    }
}

static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);

void cpu_gen_init(void)
{
    tcg_context_init(&tcg_init_ctx);
}

/* Encode VAL as a signed leb128 sequence at P.
   Return P incremented past the encoded value.  */
static uint8_t *encode_sleb128(uint8_t *p, target_long val)
{
    int more, byte;

    do {
        byte = val & 0x7f;
        val >>= 7;
        more = !((val == 0 && (byte & 0x40) == 0)
                 || (val == -1 && (byte & 0x40) != 0));
        if (more) {
            byte |= 0x80;
        }
        *p++ = byte;
    } while (more);

    return p;
}
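
/* Worked example of the sleb128 encoding above (values chosen purely for
 * illustration): 200 encodes as the two bytes 0xc8 0x01 (low seven bits
 * 0x48 plus a continuation bit, then 0x01), while -5 encodes as the single
 * byte 0x7b, whose sign bit (0x40) tells the decoder to sign-extend the
 * result.
 */
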
/* Decode a signed leb128 sequence at *PP; increment *PP past the
   decoded value.  Return the decoded value.  */
static target_long decode_sleb128(uint8_t **pp)
{
    uint8_t *p = *pp;
    target_long val = 0;
    int byte, shift = 0;

    do {
        byte = *p++;
        val |= (target_ulong)(byte & 0x7f) << shift;
        shift += 7;
    } while (byte & 0x80);
    if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
        val |= -(target_ulong)1 << shift;
    }

    *pp = p;
    return val;
}

/* Encode the data collected about the instructions while compiling TB.
   Place the data at BLOCK, and return the number of bytes consumed.

   The logical table consists of TARGET_INSN_START_WORDS target_ulong's,
   which come from the target's insn_start data, followed by a uintptr_t
   which comes from the host pc of the end of the code implementing the insn.

   Each line of the table is encoded as sleb128 deltas from the previous
   line.  The seed for the first line is { tb->pc, 0..., tb->tc.ptr }.
   That is, the first column is seeded with the guest pc, the last column
   with the host pc, and the middle columns with zeros.  */

static int encode_search(TranslationBlock *tb, uint8_t *block)
{
    uint8_t *highwater = tcg_ctx->code_gen_highwater;
    uint8_t *p = block;
    int i, j, n;

    for (i = 0, n = tb->icount; i < n; ++i) {
        target_ulong prev;

        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            if (i == 0) {
                prev = (j == 0 ? tb->pc : 0);
            } else {
                prev = tcg_ctx->gen_insn_data[i - 1][j];
            }
            p = encode_sleb128(p, tcg_ctx->gen_insn_data[i][j] - prev);
        }
        prev = (i == 0 ? 0 : tcg_ctx->gen_insn_end_off[i - 1]);
        p = encode_sleb128(p, tcg_ctx->gen_insn_end_off[i] - prev);

        /* Test for (pending) buffer overflow.  The assumption is that any
           one row beginning below the high water mark cannot overrun
           the buffer completely.  Thus we can test for overflow after
           encoding a row without having to check during encoding.  */
        if (unlikely(p > highwater)) {
            return -1;
        }
    }

    return p - block;
}
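
/* Illustrative layout of the search data emitted above, assuming a TB of
 * two guest instructions and TARGET_INSN_START_WORDS == 1: the logical rows
 * are { insn0_pc, end_off0 } and { insn1_pc, end_off1 }, and the stream
 * stored after the generated code is the sleb128 encoding of
 * (insn0_pc - tb->pc), end_off0, (insn1_pc - insn0_pc) and
 * (end_off1 - end_off0).  Because the deltas are small, most entries fit
 * in a single byte.
 */
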
/* The cpu state corresponding to 'searched_pc' is restored.
 * Called with tb_lock held.
 */
static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                                     uintptr_t searched_pc)
{
    target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc };
    uintptr_t host_pc = (uintptr_t)tb->tc.ptr;
    CPUArchState *env = cpu->env_ptr;
    uint8_t *p = tb->tc.ptr + tb->tc.size;
    int i, j, num_insns = tb->icount;
#ifdef CONFIG_PROFILER
    TCGProfile *prof = &tcg_ctx->prof;
    int64_t ti = profile_getclock();
#endif

    searched_pc -= GETPC_ADJ;

    if (searched_pc < host_pc) {
        return -1;
    }

    /* Reconstruct the stored insn data while looking for the point at
       which the end of the insn exceeds the searched_pc.  */
    for (i = 0; i < num_insns; ++i) {
        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            data[j] += decode_sleb128(&p);
        }
        host_pc += decode_sleb128(&p);
        if (host_pc > searched_pc) {
            goto found;
        }
    }
    return -1;

 found:
    if (tb->cflags & CF_USE_ICOUNT) {
        assert(use_icount);
        /* Reset the cycle counter to the start of the block.  */
        cpu->icount_decr.u16.low += num_insns;
        /* Clear the IO flag.  */
        cpu->can_do_io = 0;
    }
    cpu->icount_decr.u16.low -= i;
    restore_state_to_opc(env, tb, data);

#ifdef CONFIG_PROFILER
    atomic_set(&prof->restore_time,
               prof->restore_time + profile_getclock() - ti);
    atomic_set(&prof->restore_count, prof->restore_count + 1);
#endif
    return 0;
}

bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc)
{
    TranslationBlock *tb;
    bool r = false;
    uintptr_t check_offset;

    /* The host_pc has to be in the region of the current code buffer.  If
     * it is not, we will not be able to resolve it here.  The two cases
     * where host_pc will not be correct are:
     *
     *  - fault during translation (instruction fetch)
     *  - fault from helper (not using GETPC() macro)
     *
     * Either way we need to return early to avoid blowing up on a
     * recursive tb_lock() as we can't resolve it here.
     *
     * We are using unsigned arithmetic, so if host_pc <
     * tcg_init_ctx.code_gen_buffer, check_offset will wrap to a value
     * well above code_gen_buffer_size.
     */
    check_offset = host_pc - (uintptr_t) tcg_init_ctx.code_gen_buffer;

    if (check_offset < tcg_init_ctx.code_gen_buffer_size) {
        tb_lock();
        tb = tb_find_pc(host_pc);
        if (tb) {
            cpu_restore_state_from_tb(cpu, tb, host_pc);
            if (tb->cflags & CF_NOCACHE) {
                /* one-shot translation, invalidate it immediately */
                tb_phys_invalidate(tb, -1);
                tb_remove(tb);
            }
            r = true;
        }
        tb_unlock();
    }

    return r;
}

static void page_init(void)
{
    page_size_init();
    page_table_config_init();

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

/* If alloc=1:
 * Called with tb_lock held for system emulation.
 * Called with mmap_lock held for user-mode emulation.
 */
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

    if (alloc) {
        assert_memory_lock();
    }

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));

    /* Level 2..N-1.  */
    for (i = v_l2_levels; i > 0; i--) {
        void **p = atomic_rcu_read(lp);

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            p = g_new0(void *, V_L2_SIZE);
            atomic_rcu_set(lp, p);
        }

        lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
    }

    pd = atomic_rcu_read(lp);
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        pd = g_new0(PageDesc, V_L2_SIZE);
        atomic_rcu_set(lp, pd);
    }

    return pd + (index & (V_L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}
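
/* Example walk (using the hypothetical sizing from the comment near
 * L1_MAP_ADDR_SPACE_BITS: v_l1_shift == 30, v_l2_levels == 2): for a given
 * page index, bits [35:30] select the l1_map slot, bits [29:20] and [19:10]
 * select the two intermediate tables, and bits [9:0] select the PageDesc
 * within the bottom-level array returned by page_find_alloc().
 */
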
#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode.  It will change when a dedicated libc is used.  */
/* ??? 64-bit hosts ought to have no problem mmaping data outside the
   region in which the guest needs to run.  Revisit this.  */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

/* Minimum size of the code gen buffer.  This number is randomly chosen,
   but not so small that we can't have a fair number of TB's live.  */
#define MIN_CODE_GEN_BUFFER_SIZE (1024u * 1024)

/* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
   indicated, this is constrained by the range of direct branches on the
   host cpu, as used by the TCG implementation of goto_tb.  */
#if defined(__x86_64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__sparc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__powerpc64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__powerpc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (32u * 1024 * 1024)
#elif defined(__aarch64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__s390x__)
  /* We have a +- 4GB range on the branches; leave some slop.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (3ul * 1024 * 1024 * 1024)
#elif defined(__mips__)
  /* We have a 256MB branch region, but leave room to make sure the
     main executable is also within that region.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (128ul * 1024 * 1024)
#else
# define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)

#define DEFAULT_CODE_GEN_BUFFER_SIZE \
  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)

static inline size_t size_code_gen_buffer(size_t tb_size)
{
    /* Size the buffer.  */
    if (tb_size == 0) {
#ifdef USE_STATIC_CODE_GEN_BUFFER
        tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* ??? Needs adjustments.  */
        /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
           static buffer, we could size this on RESERVED_VA, on the text
           segment size of the executable, or continue to use the default.  */
        tb_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
    }
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
    }
    return tb_size;
}

#ifdef __mips__
/* In order to use J and JAL within the code_gen_buffer, we require
   that the buffer not cross a 256MB boundary.  */
static inline bool cross_256mb(void *addr, size_t size)
{
    return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful;
}

/* We weren't able to allocate a buffer without crossing that boundary,
   so make do with the larger portion of the buffer that doesn't cross.
   Returns the new base of the buffer, and adjusts code_gen_buffer_size.  */
static inline void *split_cross_256mb(void *buf1, size_t size1)
{
    void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful);
    size_t size2 = buf1 + size1 - buf2;

    size1 = buf2 - buf1;
    if (size1 < size2) {
        size1 = size2;
        buf1 = buf2;
    }

    tcg_ctx->code_gen_buffer_size = size1;
    return buf1;
}
#endif
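
/* cross_256mb() example (illustrative addresses): a buffer at 0x0ff00000
 * of size 0x02000000 ends at 0x11f00000; the XOR of start and end has bits
 * set above bit 27, so the buffer straddles a 256MB boundary and would be
 * split.  The same buffer placed at 0x10000000 stays within one 256MB
 * region and is usable as-is.
 */
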
#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));

static inline void *alloc_code_gen_buffer(void)
{
    void *buf = static_code_gen_buffer;
    void *end = static_code_gen_buffer + sizeof(static_code_gen_buffer);
    size_t size;

    /* page-align the beginning and end of the buffer */
    buf = QEMU_ALIGN_PTR_UP(buf, qemu_real_host_page_size);
    end = QEMU_ALIGN_PTR_DOWN(end, qemu_real_host_page_size);

    size = end - buf;

    /* Honor a command-line option limiting the size of the buffer.  */
    if (size > tcg_ctx->code_gen_buffer_size) {
        size = QEMU_ALIGN_DOWN(tcg_ctx->code_gen_buffer_size,
                               qemu_real_host_page_size);
    }
    tcg_ctx->code_gen_buffer_size = size;

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        buf = split_cross_256mb(buf, size);
        size = tcg_ctx->code_gen_buffer_size;
    }
#endif

    if (qemu_mprotect_rwx(buf, size)) {
        abort();
    }
    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);

    return buf;
}
#elif defined(_WIN32)
static inline void *alloc_code_gen_buffer(void)
{
    size_t size = tcg_ctx->code_gen_buffer_size;
    void *buf;

    buf = VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT,
                       PAGE_EXECUTE_READWRITE);
    return buf;
}
#else
static inline void *alloc_code_gen_buffer(void)
{
    int prot = PROT_WRITE | PROT_READ | PROT_EXEC;
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
    uintptr_t start = 0;
    size_t size = tcg_ctx->code_gen_buffer_size;
    void *buf;

    /* Constrain the position of the buffer based on the host cpu.
       Note that these addresses are chosen in concert with the
       addresses assigned in the relevant linker script file.  */
# if defined(__PIE__) || defined(__PIC__)
    /* Don't bother setting a preferred location if we're building
       a position-independent executable.  We're more likely to get
       an address near the main executable if we let the kernel
       choose the address.  */
# elif defined(__x86_64__) && defined(MAP_32BIT)
    /* Force the memory down into low memory with the executable.
       Leave the choice of exact location with the kernel.  */
    flags |= MAP_32BIT;
    /* Cannot expect to map more than 800MB in low memory.  */
    if (size > 800u * 1024 * 1024) {
        tcg_ctx->code_gen_buffer_size = size = 800u * 1024 * 1024;
    }
# elif defined(__sparc__)
    start = 0x40000000ul;
# elif defined(__s390x__)
    start = 0x90000000ul;
# elif defined(__mips__)
#  if _MIPS_SIM == _ABI64
    start = 0x128000000ul;
#  else
    start = 0x08000000ul;
#  endif
# endif

    buf = mmap((void *)start, size, prot, flags, -1, 0);
    if (buf == MAP_FAILED) {
        return NULL;
    }

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        /* Try again, with the original still mapped, to avoid re-acquiring
           that 256mb crossing.  This time don't specify an address.  */
        size_t size2;
        void *buf2 = mmap(NULL, size, prot, flags, -1, 0);
        switch ((int)(buf2 != MAP_FAILED)) {
        case 1:
            if (!cross_256mb(buf2, size)) {
                /* Success!  Use the new buffer.  */
                munmap(buf, size);
                break;
            }
            /* Failure.  Work with what we had.  */
            munmap(buf2, size);
            /* fallthru */
        default:
            /* Split the original buffer.  Free the smaller half.  */
            buf2 = split_cross_256mb(buf, size);
            size2 = tcg_ctx->code_gen_buffer_size;
            if (buf == buf2) {
                munmap(buf + size2, size - size2);
            } else {
                munmap(buf, size - size2);
            }
            size = size2;
            break;
        }
        buf = buf2;
    }
#endif

    /* Request large pages for the buffer.  */
    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);

    return buf;
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */

/* compare a pointer @ptr and a tb_tc @s */
static int ptr_cmp_tb_tc(const void *ptr, const struct tb_tc *s)
{
    if (ptr >= s->ptr + s->size) {
        return 1;
    } else if (ptr < s->ptr) {
        return -1;
    }
    return 0;
}

static gint tb_tc_cmp(gconstpointer ap, gconstpointer bp)
{
    const struct tb_tc *a = ap;
    const struct tb_tc *b = bp;

    /*
     * When both sizes are set, we know this isn't a lookup.
     * This is the most likely case: every TB must be inserted; lookups
     * are a lot less frequent.
     */
    if (likely(a->size && b->size)) {
        if (a->ptr > b->ptr) {
            return 1;
        } else if (a->ptr < b->ptr) {
            return -1;
        }
        /* a->ptr == b->ptr should happen only on deletions */
        g_assert(a->size == b->size);
        return 0;
    }
    /*
     * All lookups have the .size field set to 0.
     * From the glib sources we see that @ap is always the lookup key.  However
     * the docs provide no guarantee, so we just mark this case as likely.
     */
    if (likely(a->size == 0)) {
        return ptr_cmp_tb_tc(a->ptr, b);
    }
    return ptr_cmp_tb_tc(b->ptr, a);
}

static inline void code_gen_alloc(size_t tb_size)
{
    tcg_ctx->code_gen_buffer_size = size_code_gen_buffer(tb_size);
    tcg_ctx->code_gen_buffer = alloc_code_gen_buffer();
    if (tcg_ctx->code_gen_buffer == NULL) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }
    tb_ctx.tb_tree = g_tree_new(tb_tc_cmp);
    qemu_mutex_init(&tb_ctx.tb_lock);
}

static void tb_htable_init(void)
{
    unsigned int mode = QHT_MODE_AUTO_RESIZE;

    qht_init(&tb_ctx.htable, CODE_GEN_HTABLE_SIZE, mode);
}
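
/* Note on the tb_tc_cmp() convention above: entries inserted into
 * tb_ctx.tb_tree always have a non-zero .size, while a lookup key (as built
 * by tb_find_pc() below) has .size == 0 and .ptr set to a host PC.
 * ptr_cmp_tb_tc() then orders that raw pointer against the half-open range
 * [tc.ptr, tc.ptr + tc.size), so g_tree_lookup() returns the TB whose
 * generated code contains the searched host PC.
 */
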
/* Must be called before using the QEMU cpus.  'tb_size' is the size
   (in bytes) allocated to the translation buffer.  Zero means default
   size.  */
void tcg_exec_init(unsigned long tb_size)
{
    tcg_allowed = true;
    cpu_gen_init();
    page_init();
    tb_htable_init();
    code_gen_alloc(tb_size);
#if defined(CONFIG_SOFTMMU)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(tcg_ctx);
#endif
}

/*
 * Allocate a new translation block.  Flush the translation buffer if
 * too many translation blocks or too much generated code.
 *
 * Called with tb_lock held.
 */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    assert_tb_locked();

    tb = tcg_tb_alloc(tcg_ctx);
    if (unlikely(tb == NULL)) {
        return NULL;
    }
    return tb;
}

/* Called with tb_lock held.  */
void tb_remove(TranslationBlock *tb)
{
    assert_tb_locked();

    g_tree_remove(tb_ctx.tb_tree, &tb->tc);
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
#ifdef CONFIG_SOFTMMU
    g_free(p->code_bitmap);
    p->code_bitmap = NULL;
    p->code_write_count = 0;
#endif
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */
static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i, l1_sz = v_l1_size;

    for (i = 0; i < l1_sz; i++) {
        page_flush_tb_1(v_l2_levels, l1_map + i);
    }
}

static gboolean tb_host_size_iter(gpointer key, gpointer value, gpointer data)
{
    const TranslationBlock *tb = value;
    size_t *size = data;

    *size += tb->tc.size;
    return false;
}

/* flush all the translation blocks */
static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
{
    tb_lock();

    /* If it has already been done on request of another CPU,
     * just retry.
     */
    if (tb_ctx.tb_flush_count != tb_flush_count.host_int) {
        goto done;
    }

    if (DEBUG_TB_FLUSH_GATE) {
        size_t nb_tbs = g_tree_nnodes(tb_ctx.tb_tree);
        size_t host_size = 0;

        g_tree_foreach(tb_ctx.tb_tree, tb_host_size_iter, &host_size);
        printf("qemu: flush code_size=%zu nb_tbs=%zu avg_tb_size=%zu\n",
               tcg_code_size(), nb_tbs, nb_tbs > 0 ? host_size / nb_tbs : 0);
    }

    CPU_FOREACH(cpu) {
        cpu_tb_jmp_cache_clear(cpu);
    }

    /* Increment the refcount first so that destroy acts as a reset */
    g_tree_ref(tb_ctx.tb_tree);
    g_tree_destroy(tb_ctx.tb_tree);

    qht_reset_size(&tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
    page_flush_tb();

    tcg_region_reset_all();
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    atomic_mb_set(&tb_ctx.tb_flush_count, tb_ctx.tb_flush_count + 1);

done:
    tb_unlock();
}

void tb_flush(CPUState *cpu)
{
    if (tcg_enabled()) {
        unsigned tb_flush_count = atomic_mb_read(&tb_ctx.tb_flush_count);
        async_safe_run_on_cpu(cpu, do_tb_flush,
                              RUN_ON_CPU_HOST_INT(tb_flush_count));
    }
}
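
/* tb_flush() only snapshots tb_flush_count and defers the real work to
 * do_tb_flush() via async_safe_run_on_cpu(), which runs once all vCPUs are
 * quiescent.  Passing the snapshot lets do_tb_flush() detect that another
 * vCPU already performed the flush in the meantime (the count no longer
 * matches) and turn the duplicate request into a no-op.
 */
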
/*
 * Formerly ifdef DEBUG_TB_CHECK.  These debug functions are user-mode-only,
 * so in order to prevent bit rot we compile them unconditionally in user-mode,
 * and let the optimizer get rid of them by wrapping their user-only callers
 * with if (DEBUG_TB_CHECK_GATE).
 */
#ifdef CONFIG_USER_ONLY

static void
do_tb_invalidate_check(struct qht *ht, void *p, uint32_t hash, void *userp)
{
    TranslationBlock *tb = p;
    target_ulong addr = *(target_ulong *)userp;

    if (!(addr + TARGET_PAGE_SIZE <= tb->pc || addr >= tb->pc + tb->size)) {
        printf("ERROR invalidate: address=" TARGET_FMT_lx
               " PC=%08lx size=%04x\n", addr, (long)tb->pc, tb->size);
    }
}

/* verify that all the pages have correct rights for code
 *
 * Called with tb_lock held.
 */
static void tb_invalidate_check(target_ulong address)
{
    address &= TARGET_PAGE_MASK;
    qht_iter(&tb_ctx.htable, do_tb_invalidate_check, &address);
}

static void
do_tb_page_check(struct qht *ht, void *p, uint32_t hash, void *userp)
{
    TranslationBlock *tb = p;
    int flags1, flags2;

    flags1 = page_get_flags(tb->pc);
    flags2 = page_get_flags(tb->pc + tb->size - 1);
    if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
        printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
               (long)tb->pc, tb->size, flags1, flags2);
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    qht_iter(&tb_ctx.htable, do_tb_page_check, NULL);
}

#endif /* CONFIG_USER_ONLY */

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for (;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

/* remove the TB from a list of TBs jumping to the n-th jump target of the TB */
static inline void tb_remove_from_jmp_list(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1;
    uintptr_t *ptb, ntb;
    unsigned int n1;

    ptb = &tb->jmp_list_next[n];
    if (*ptb) {
        /* find tb(n) in circular list */
        for (;;) {
            ntb = *ptb;
            n1 = ntb & 3;
            tb1 = (TranslationBlock *)(ntb & ~3);
            if (n1 == n && tb1 == tb) {
                break;
            }
            if (n1 == 2) {
                ptb = &tb1->jmp_list_first;
            } else {
                ptb = &tb1->jmp_list_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_list_next[n];

        tb->jmp_list_next[n] = (uintptr_t)NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    uintptr_t addr = (uintptr_t)(tb->tc.ptr + tb->jmp_reset_offset[n]);
    tb_set_jmp_target(tb, n, addr);
}

/* remove any jumps to the TB */
static inline void tb_jmp_unlink(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    uintptr_t *ptb, ntb;
    unsigned int n1;

    ptb = &tb->jmp_list_first;
    for (;;) {
        ntb = *ptb;
        n1 = ntb & 3;
        tb1 = (TranslationBlock *)(ntb & ~3);
        if (n1 == 2) {
            break;
        }
        tb_reset_jump(tb1, n1);
        *ptb = tb1->jmp_list_next[n1];
        tb1->jmp_list_next[n1] = (uintptr_t)NULL;
    }
}
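
/* The jump lists walked above store tagged pointers: the low two bits of
 * each jmp_list_next[]/jmp_list_first entry select how to continue the walk.
 * A tag of 0 or 1 means "follow jump slot n of that TB", while a tag of 2
 * marks the list head (tb_gen_code() initialises tb->jmp_list_first to
 * (uintptr_t)tb | 2), which is why the loops above terminate when n1 == 2.
 */
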
/* invalidate one TB
 *
 * Called with tb_lock held.
 */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *cpu;
    PageDesc *p;
    uint32_t h;
    tb_page_addr_t phys_pc;

    assert_tb_locked();

    atomic_set(&tb->cflags, tb->cflags | CF_INVALID);

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->cflags & CF_HASH_MASK,
                     tb->trace_vcpu_dstate);
    if (!qht_remove(&tb_ctx.htable, tb, h)) {
        return;
    }

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    /* remove the TB from the per-CPU jump caches */
    h = tb_jmp_cache_hash_func(tb->pc);
    CPU_FOREACH(cpu) {
        if (atomic_read(&cpu->tb_jmp_cache[h]) == tb) {
            atomic_set(&cpu->tb_jmp_cache[h], NULL);
        }
    }

    /* suppress this TB from the two jump lists */
    tb_remove_from_jmp_list(tb, 0);
    tb_remove_from_jmp_list(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb_jmp_unlink(tb);

    tb_ctx.tb_phys_invalidate_count++;
}

#ifdef CONFIG_SOFTMMU
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE) {
                tb_end = TARGET_PAGE_SIZE;
            }
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
#endif
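
/* Example of the clamping done in build_page_bitmap(), assuming 4 KiB
 * target pages: a TB whose guest code starts at page offset 0xff0 and is
 * 0x30 bytes long appears on two pages; on its first page (n == 0) the
 * bits for [0xff0, 0x1000) are set, and on its second page (n == 1) the
 * bits for [0, 0x20) are set.
 */
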
/* add the tb in the target page and protect it if necessary
 *
 * Called with mmap_lock held for user-mode emulation.
 */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    assert_memory_lock();

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
    invalidate_page_bitmap(p);

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
             addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2) {
                continue;
            }
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
        if (DEBUG_TB_INVALIDATE_GATE) {
            printf("protecting code page: 0x" TB_PAGE_ADDR_FMT "\n", page_addr);
        }
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif
}

/* add a new TB and link it to the physical page tables. phys_page2 is
 * (-1) to indicate that only one page contains the TB.
 *
 * Called with mmap_lock held for user-mode emulation.
 */
static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2)
{
    uint32_t h;

    assert_memory_lock();

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1) {
        tb_alloc_page(tb, 1, phys_page2);
    } else {
        tb->page_addr[1] = -1;
    }

    /* add in the hash table */
    h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->cflags & CF_HASH_MASK,
                     tb->trace_vcpu_dstate);
    qht_insert(&tb_ctx.htable, tb, h);

#ifdef CONFIG_USER_ONLY
    if (DEBUG_TB_CHECK_GATE) {
        tb_page_check();
    }
#endif
}
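
/* For example, a TB whose guest code crosses a page boundary is passed to
 * tb_link_page() with phys_page2 != -1, so it is inserted into the first_tb
 * list of both pages, tagged with n == 0 on the page containing its start
 * and n == 1 on the page containing its tail.  Invalidating either page
 * will then find and remove it.
 */
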
/* Called with mmap_lock held for user mode emulation.  */
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base,
                              uint32_t flags, int cflags)
{
    CPUArchState *env = cpu->env_ptr;
    TranslationBlock *tb;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    tcg_insn_unit *gen_code_buf;
    int gen_code_size, search_size;
#ifdef CONFIG_PROFILER
    TCGProfile *prof = &tcg_ctx->prof;
    int64_t ti;
#endif
    assert_memory_lock();

    phys_pc = get_page_addr_code(env, pc);

 buffer_overflow:
    tb = tb_alloc(pc);
    if (unlikely(!tb)) {
        /* flush must be done */
        tb_flush(cpu);
        mmap_unlock();
        /* Make the execution loop process the flush as soon as possible.  */
        cpu->exception_index = EXCP_INTERRUPT;
        cpu_loop_exit(cpu);
    }

    gen_code_buf = tcg_ctx->code_gen_ptr;
    tb->tc.ptr = gen_code_buf;
    tb->pc = pc;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    tb->trace_vcpu_dstate = *cpu->trace_dstate;
    tcg_ctx->tb_cflags = cflags;

#ifdef CONFIG_PROFILER
    /* includes aborted translations because of exceptions */
    atomic_set(&prof->tb_count1, prof->tb_count1 + 1);
    ti = profile_getclock();
#endif

    tcg_func_start(tcg_ctx);

    tcg_ctx->cpu = ENV_GET_CPU(env);
    gen_intermediate_code(cpu, tb);
    tcg_ctx->cpu = NULL;

    trace_translate_block(tb, tb->pc, tb->tc.ptr);

    /* generate machine code */
    tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
    tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID;
    tcg_ctx->tb_jmp_reset_offset = tb->jmp_reset_offset;
    if (TCG_TARGET_HAS_direct_jump) {
        tcg_ctx->tb_jmp_insn_offset = tb->jmp_target_arg;
        tcg_ctx->tb_jmp_target_addr = NULL;
    } else {
        tcg_ctx->tb_jmp_insn_offset = NULL;
        tcg_ctx->tb_jmp_target_addr = tb->jmp_target_arg;
    }

#ifdef CONFIG_PROFILER
    atomic_set(&prof->tb_count, prof->tb_count + 1);
    atomic_set(&prof->interm_time, prof->interm_time + profile_getclock() - ti);
    ti = profile_getclock();
#endif

    /* ??? Overflow could be handled better here.  In particular, we
       don't need to re-do gen_intermediate_code, nor should we re-do
       the tcg optimization currently hidden inside tcg_gen_code.  All
       that should be required is to flush the TBs, allocate a new TB,
       re-initialize it per above, and re-do the actual code generation.  */
    gen_code_size = tcg_gen_code(tcg_ctx, tb);
    if (unlikely(gen_code_size < 0)) {
        goto buffer_overflow;
    }
    search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
    if (unlikely(search_size < 0)) {
        goto buffer_overflow;
    }
    tb->tc.size = gen_code_size;

#ifdef CONFIG_PROFILER
    atomic_set(&prof->code_time, prof->code_time + profile_getclock() - ti);
    atomic_set(&prof->code_in_len, prof->code_in_len + tb->size);
    atomic_set(&prof->code_out_len, prof->code_out_len + gen_code_size);
    atomic_set(&prof->search_out_len, prof->search_out_len + search_size);
#endif

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
        qemu_log_in_addr_range(tb->pc)) {
        qemu_log_lock();
        qemu_log("OUT: [size=%d]\n", gen_code_size);
        if (tcg_ctx->data_gen_ptr) {
            size_t code_size = tcg_ctx->data_gen_ptr - tb->tc.ptr;
            size_t data_size = gen_code_size - code_size;
            size_t i;

            log_disas(tb->tc.ptr, code_size);

            for (i = 0; i < data_size; i += sizeof(tcg_target_ulong)) {
                if (sizeof(tcg_target_ulong) == 8) {
                    qemu_log("0x%08" PRIxPTR ": .quad 0x%016" PRIx64 "\n",
                             (uintptr_t)tcg_ctx->data_gen_ptr + i,
                             *(uint64_t *)(tcg_ctx->data_gen_ptr + i));
                } else {
                    qemu_log("0x%08" PRIxPTR ": .long 0x%08x\n",
                             (uintptr_t)tcg_ctx->data_gen_ptr + i,
                             *(uint32_t *)(tcg_ctx->data_gen_ptr + i));
                }
            }
        } else {
            log_disas(tb->tc.ptr, gen_code_size);
        }
        qemu_log("\n");
        qemu_log_flush();
        qemu_log_unlock();
    }
#endif

    atomic_set(&tcg_ctx->code_gen_ptr, (void *)
        ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
                 CODE_GEN_ALIGN));

    /* init jump list */
    assert(((uintptr_t)tb & 3) == 0);
    tb->jmp_list_first = (uintptr_t)tb | 2;
    tb->jmp_list_next[0] = (uintptr_t)NULL;
    tb->jmp_list_next[1] = (uintptr_t)NULL;

    /* init original jump addresses which have been set during tcg_gen_code() */
    if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
        tb_reset_jump(tb, 0);
    }
    if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
        tb_reset_jump(tb, 1);
    }

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    /* As long as consistency of the TB stuff is provided by tb_lock in user
     * mode and is implicit in single-threaded softmmu emulation, no explicit
     * memory barrier is required before tb_link_page() makes the TB visible
     * through the physical hash table and physical page list.
     */
    tb_link_page(tb, phys_pc, phys_page2);
    g_tree_insert(tb_ctx.tb_tree, &tb->tc, tb);
    return tb;
}

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[.  NOTE: start and end may refer to *different* physical pages.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 *
 * Called with mmap_lock held for user-mode emulation, grabs tb_lock
 * Called with tb_lock held for system-mode emulation
 */
static void tb_invalidate_phys_range_1(tb_page_addr_t start, tb_page_addr_t end)
{
    while (start < end) {
        tb_invalidate_phys_page_range(start, end, 0);
        start &= TARGET_PAGE_MASK;
        start += TARGET_PAGE_SIZE;
    }
}

#ifdef CONFIG_SOFTMMU
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
{
    assert_tb_locked();
    tb_invalidate_phys_range_1(start, end);
}
#else
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
{
    assert_memory_lock();
    tb_lock();
    tb_invalidate_phys_range_1(start, end);
    tb_unlock();
}
#endif
/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[.  NOTE: start and end must refer to the *same* physical page.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 *
 * Called with tb_lock/mmap_lock held for user-mode emulation
 * Called with tb_lock held for system-mode emulation
 */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *cpu = current_cpu;
    CPUArchState *env = NULL;
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    uint32_t current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    assert_memory_lock();
    assert_tb_locked();

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
#if defined(TARGET_HAS_PRECISE_SMC)
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all
       the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (cpu->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(cpu->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state_from_tb(cpu, current_tb, cpu->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            tb_phys_invalidate(tb, -1);
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        tlb_unprotect_code(start);
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* Force execution of one insn next time.  */
        cpu->cflags_next_tb = 1 | curr_cflags();
        cpu_loop_exit_noexc(cpu);
    }
#endif
}

#ifdef CONFIG_SOFTMMU
/* len must be <= 8 and start must be a multiple of len.
 * Called via softmmu_template.h when code areas are written to with
 * iothread mutex not held.
 */
1543 */ 1544 void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len) 1545 { 1546 PageDesc *p; 1547 1548 #if 0 1549 if (1) { 1550 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n", 1551 cpu_single_env->mem_io_vaddr, len, 1552 cpu_single_env->eip, 1553 cpu_single_env->eip + 1554 (intptr_t)cpu_single_env->segs[R_CS].base); 1555 } 1556 #endif 1557 assert_memory_lock(); 1558 1559 p = page_find(start >> TARGET_PAGE_BITS); 1560 if (!p) { 1561 return; 1562 } 1563 if (!p->code_bitmap && 1564 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) { 1565 /* build code bitmap. FIXME: writes should be protected by 1566 * tb_lock, reads by tb_lock or RCU. 1567 */ 1568 build_page_bitmap(p); 1569 } 1570 if (p->code_bitmap) { 1571 unsigned int nr; 1572 unsigned long b; 1573 1574 nr = start & ~TARGET_PAGE_MASK; 1575 b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1)); 1576 if (b & ((1 << len) - 1)) { 1577 goto do_invalidate; 1578 } 1579 } else { 1580 do_invalidate: 1581 tb_invalidate_phys_page_range(start, start + len, 1); 1582 } 1583 } 1584 #else 1585 /* Called with mmap_lock held. If pc is not 0 then it indicates the 1586 * host PC of the faulting store instruction that caused this invalidate. 1587 * Returns true if the caller needs to abort execution of the current 1588 * TB (because it was modified by this store and the guest CPU has 1589 * precise-SMC semantics). 1590 */ 1591 static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc) 1592 { 1593 TranslationBlock *tb; 1594 PageDesc *p; 1595 int n; 1596 #ifdef TARGET_HAS_PRECISE_SMC 1597 TranslationBlock *current_tb = NULL; 1598 CPUState *cpu = current_cpu; 1599 CPUArchState *env = NULL; 1600 int current_tb_modified = 0; 1601 target_ulong current_pc = 0; 1602 target_ulong current_cs_base = 0; 1603 uint32_t current_flags = 0; 1604 #endif 1605 1606 assert_memory_lock(); 1607 1608 addr &= TARGET_PAGE_MASK; 1609 p = page_find(addr >> TARGET_PAGE_BITS); 1610 if (!p) { 1611 return false; 1612 } 1613 1614 tb_lock(); 1615 tb = p->first_tb; 1616 #ifdef TARGET_HAS_PRECISE_SMC 1617 if (tb && pc != 0) { 1618 current_tb = tb_find_pc(pc); 1619 } 1620 if (cpu != NULL) { 1621 env = cpu->env_ptr; 1622 } 1623 #endif 1624 while (tb != NULL) { 1625 n = (uintptr_t)tb & 3; 1626 tb = (TranslationBlock *)((uintptr_t)tb & ~3); 1627 #ifdef TARGET_HAS_PRECISE_SMC 1628 if (current_tb == tb && 1629 (current_tb->cflags & CF_COUNT_MASK) != 1) { 1630 /* If we are modifying the current TB, we must stop 1631 its execution. We could be more precise by checking 1632 that the modification is after the current PC, but it 1633 would require a specialized function to partially 1634 restore the CPU state */ 1635 1636 current_tb_modified = 1; 1637 cpu_restore_state_from_tb(cpu, current_tb, pc); 1638 cpu_get_tb_cpu_state(env, ¤t_pc, ¤t_cs_base, 1639 ¤t_flags); 1640 } 1641 #endif /* TARGET_HAS_PRECISE_SMC */ 1642 tb_phys_invalidate(tb, addr); 1643 tb = tb->page_next[n]; 1644 } 1645 p->first_tb = NULL; 1646 #ifdef TARGET_HAS_PRECISE_SMC 1647 if (current_tb_modified) { 1648 /* Force execution of one insn next time. */ 1649 cpu->cflags_next_tb = 1 | curr_cflags(); 1650 /* tb_lock will be reset after cpu_loop_exit_noexc longjmps 1651 * back into the cpu_exec loop. */ 1652 return true; 1653 } 1654 #endif 1655 tb_unlock(); 1656 1657 return false; 1658 } 1659 #endif 1660 1661 /* 1662 * Find the TB 'tb' such that 1663 * tb->tc.ptr <= tc_ptr < tb->tc.ptr + tb->tc.size 1664 * Return NULL if not found. 
1665 */ 1666 static TranslationBlock *tb_find_pc(uintptr_t tc_ptr) 1667 { 1668 struct tb_tc s = { .ptr = (void *)tc_ptr }; 1669 1670 return g_tree_lookup(tb_ctx.tb_tree, &s); 1671 } 1672 1673 #if !defined(CONFIG_USER_ONLY) 1674 void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr) 1675 { 1676 ram_addr_t ram_addr; 1677 MemoryRegion *mr; 1678 hwaddr l = 1; 1679 1680 rcu_read_lock(); 1681 mr = address_space_translate(as, addr, &addr, &l, false); 1682 if (!(memory_region_is_ram(mr) 1683 || memory_region_is_romd(mr))) { 1684 rcu_read_unlock(); 1685 return; 1686 } 1687 ram_addr = memory_region_get_ram_addr(mr) + addr; 1688 tb_lock(); 1689 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0); 1690 tb_unlock(); 1691 rcu_read_unlock(); 1692 } 1693 #endif /* !defined(CONFIG_USER_ONLY) */ 1694 1695 /* Called with tb_lock held. */ 1696 void tb_check_watchpoint(CPUState *cpu) 1697 { 1698 TranslationBlock *tb; 1699 1700 tb = tb_find_pc(cpu->mem_io_pc); 1701 if (tb) { 1702 /* We can use retranslation to find the PC. */ 1703 cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc); 1704 tb_phys_invalidate(tb, -1); 1705 } else { 1706 /* The exception probably happened in a helper. The CPU state should 1707 have been saved before calling it. Fetch the PC from there. */ 1708 CPUArchState *env = cpu->env_ptr; 1709 target_ulong pc, cs_base; 1710 tb_page_addr_t addr; 1711 uint32_t flags; 1712 1713 cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags); 1714 addr = get_page_addr_code(env, pc); 1715 tb_invalidate_phys_range(addr, addr + 1); 1716 } 1717 } 1718 1719 #ifndef CONFIG_USER_ONLY 1720 /* in deterministic execution mode, instructions doing device I/Os 1721 * must be at the end of the TB. 1722 * 1723 * Called by softmmu_template.h, with iothread mutex not held. 1724 */ 1725 void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr) 1726 { 1727 #if defined(TARGET_MIPS) || defined(TARGET_SH4) 1728 CPUArchState *env = cpu->env_ptr; 1729 #endif 1730 TranslationBlock *tb; 1731 uint32_t n; 1732 1733 tb_lock(); 1734 tb = tb_find_pc(retaddr); 1735 if (!tb) { 1736 cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p", 1737 (void *)retaddr); 1738 } 1739 n = cpu->icount_decr.u16.low + tb->icount; 1740 cpu_restore_state_from_tb(cpu, tb, retaddr); 1741 /* Calculate how many instructions had been executed before the fault 1742 occurred. */ 1743 n = n - cpu->icount_decr.u16.low; 1744 /* Generate a new TB ending on the I/O insn. */ 1745 n++; 1746 /* On MIPS and SH, delay slot instructions can only be restarted if 1747 they were already the first instruction in the TB. If this is not 1748 the first instruction in a TB then re-execute the preceding 1749 branch. */ 1750 #if defined(TARGET_MIPS) 1751 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) { 1752 env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4); 1753 cpu->icount_decr.u16.low++; 1754 env->hflags &= ~MIPS_HFLAG_BMASK; 1755 } 1756 #elif defined(TARGET_SH4) 1757 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0 1758 && n > 1) { 1759 env->pc -= 2; 1760 cpu->icount_decr.u16.low++; 1761 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL); 1762 } 1763 #endif 1764 /* This should never happen. */ 1765 if (n > CF_COUNT_MASK) { 1766 cpu_abort(cpu, "TB too big during recompile"); 1767 } 1768 1769 /* Adjust the execution state of the next TB. 
static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
{
    unsigned int i, i0 = tb_jmp_cache_hash_page(page_addr);

    for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
        atomic_set(&cpu->tb_jmp_cache[i0 + i], NULL);
    }
}

void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
{
    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
    tb_jmp_cache_clear_page(cpu, addr);
}

static void print_qht_statistics(FILE *f, fprintf_function cpu_fprintf,
                                 struct qht_stats hst)
{
    uint32_t hgram_opts;
    size_t hgram_bins;
    char *hgram;

    if (!hst.head_buckets) {
        return;
    }
    cpu_fprintf(f, "TB hash buckets %zu/%zu (%0.2f%% head buckets used)\n",
                hst.used_head_buckets, hst.head_buckets,
                (double)hst.used_head_buckets / hst.head_buckets * 100);

    hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
    hgram_opts |= QDIST_PR_100X | QDIST_PR_PERCENT;
    if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) {
        hgram_opts |= QDIST_PR_NODECIMAL;
    }
    hgram = qdist_pr(&hst.occupancy, 10, hgram_opts);
    cpu_fprintf(f, "TB hash occupancy %0.2f%% avg chain occ. Histogram: %s\n",
                qdist_avg(&hst.occupancy) * 100, hgram);
    g_free(hgram);

    hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
    hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain);
    if (hgram_bins > 10) {
        hgram_bins = 10;
    } else {
        hgram_bins = 0;
        hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE;
    }
    hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts);
    cpu_fprintf(f, "TB hash avg chain %0.3f buckets. Histogram: %s\n",
                qdist_avg(&hst.chain), hgram);
    g_free(hgram);
}

struct tb_tree_stats {
    size_t host_size;
    size_t target_size;
    size_t max_target_size;
    size_t direct_jmp_count;
    size_t direct_jmp2_count;
    size_t cross_page;
};

static gboolean tb_tree_stats_iter(gpointer key, gpointer value, gpointer data)
{
    const TranslationBlock *tb = value;
    struct tb_tree_stats *tst = data;

    tst->host_size += tb->tc.size;
    tst->target_size += tb->size;
    if (tb->size > tst->max_target_size) {
        tst->max_target_size = tb->size;
    }
    if (tb->page_addr[1] != -1) {
        tst->cross_page++;
    }
    if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
        tst->direct_jmp_count++;
        if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
            tst->direct_jmp2_count++;
        }
    }
    return false;
}

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    struct tb_tree_stats tst = {};
    struct qht_stats hst;
    size_t nb_tbs;

    tb_lock();

    nb_tbs = g_tree_nnodes(tb_ctx.tb_tree);
    g_tree_foreach(tb_ctx.tb_tree, tb_tree_stats_iter, &tst);
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    /*
     * Report total code size including the padding and TB structs;
     * otherwise users might think "-tb-size" is not honoured.
     * For avg host size we use the precise numbers from tb_tree_stats though.
     */
    cpu_fprintf(f, "gen code size %zu/%zu\n",
                tcg_code_size(), tcg_code_capacity());
    cpu_fprintf(f, "TB count %zu\n", nb_tbs);
    cpu_fprintf(f, "TB avg target size %zu max=%zu bytes\n",
                nb_tbs ? tst.target_size / nb_tbs : 0,
                tst.max_target_size);
    cpu_fprintf(f, "TB avg host size %zu bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? tst.host_size / nb_tbs : 0,
                tst.target_size ? (double)tst.host_size / tst.target_size : 0);
    cpu_fprintf(f, "cross page TB count %zu (%zu%%)\n", tst.cross_page,
                nb_tbs ? (tst.cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %zu (%zu%%) (2 jumps=%zu %zu%%)\n",
                tst.direct_jmp_count,
                nb_tbs ? (tst.direct_jmp_count * 100) / nb_tbs : 0,
                tst.direct_jmp2_count,
                nb_tbs ? (tst.direct_jmp2_count * 100) / nb_tbs : 0);

    qht_statistics_init(&tb_ctx.htable, &hst);
    print_qht_statistics(f, cpu_fprintf, hst);
    qht_statistics_destroy(&hst);

    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count %u\n",
                atomic_read(&tb_ctx.tb_flush_count));
    cpu_fprintf(f, "TB invalidate count %d\n", tb_ctx.tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %zu\n", tlb_flush_count());
    tcg_dump_info(f, cpu_fprintf);

    tb_unlock();
}

void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf)
{
    tcg_dump_op_count(f, cpu_fprintf);
}

#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *cpu, int mask)
{
    g_assert(qemu_mutex_iothread_locked());
    cpu->interrupt_request |= mask;
    cpu->icount_decr.u16.high = -1;
}

/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
1944 */ 1945 struct walk_memory_regions_data { 1946 walk_memory_regions_fn fn; 1947 void *priv; 1948 target_ulong start; 1949 int prot; 1950 }; 1951 1952 static int walk_memory_regions_end(struct walk_memory_regions_data *data, 1953 target_ulong end, int new_prot) 1954 { 1955 if (data->start != -1u) { 1956 int rc = data->fn(data->priv, data->start, end, data->prot); 1957 if (rc != 0) { 1958 return rc; 1959 } 1960 } 1961 1962 data->start = (new_prot ? end : -1u); 1963 data->prot = new_prot; 1964 1965 return 0; 1966 } 1967 1968 static int walk_memory_regions_1(struct walk_memory_regions_data *data, 1969 target_ulong base, int level, void **lp) 1970 { 1971 target_ulong pa; 1972 int i, rc; 1973 1974 if (*lp == NULL) { 1975 return walk_memory_regions_end(data, base, 0); 1976 } 1977 1978 if (level == 0) { 1979 PageDesc *pd = *lp; 1980 1981 for (i = 0; i < V_L2_SIZE; ++i) { 1982 int prot = pd[i].flags; 1983 1984 pa = base | (i << TARGET_PAGE_BITS); 1985 if (prot != data->prot) { 1986 rc = walk_memory_regions_end(data, pa, prot); 1987 if (rc != 0) { 1988 return rc; 1989 } 1990 } 1991 } 1992 } else { 1993 void **pp = *lp; 1994 1995 for (i = 0; i < V_L2_SIZE; ++i) { 1996 pa = base | ((target_ulong)i << 1997 (TARGET_PAGE_BITS + V_L2_BITS * level)); 1998 rc = walk_memory_regions_1(data, pa, level - 1, pp + i); 1999 if (rc != 0) { 2000 return rc; 2001 } 2002 } 2003 } 2004 2005 return 0; 2006 } 2007 2008 int walk_memory_regions(void *priv, walk_memory_regions_fn fn) 2009 { 2010 struct walk_memory_regions_data data; 2011 uintptr_t i, l1_sz = v_l1_size; 2012 2013 data.fn = fn; 2014 data.priv = priv; 2015 data.start = -1u; 2016 data.prot = 0; 2017 2018 for (i = 0; i < l1_sz; i++) { 2019 target_ulong base = i << (v_l1_shift + TARGET_PAGE_BITS); 2020 int rc = walk_memory_regions_1(&data, base, v_l2_levels, l1_map + i); 2021 if (rc != 0) { 2022 return rc; 2023 } 2024 } 2025 2026 return walk_memory_regions_end(&data, 0, 0); 2027 } 2028 2029 static int dump_region(void *priv, target_ulong start, 2030 target_ulong end, unsigned long prot) 2031 { 2032 FILE *f = (FILE *)priv; 2033 2034 (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx 2035 " "TARGET_FMT_lx" %c%c%c\n", 2036 start, end, end - start, 2037 ((prot & PAGE_READ) ? 'r' : '-'), 2038 ((prot & PAGE_WRITE) ? 'w' : '-'), 2039 ((prot & PAGE_EXEC) ? 'x' : '-')); 2040 2041 return 0; 2042 } 2043 2044 /* dump memory mappings */ 2045 void page_dump(FILE *f) 2046 { 2047 const int length = sizeof(target_ulong) * 2; 2048 (void) fprintf(f, "%-*s %-*s %-*s %s\n", 2049 length, "start", length, "end", length, "size", "prot"); 2050 walk_memory_regions(f, dump_region); 2051 } 2052 2053 int page_get_flags(target_ulong address) 2054 { 2055 PageDesc *p; 2056 2057 p = page_find(address >> TARGET_PAGE_BITS); 2058 if (!p) { 2059 return 0; 2060 } 2061 return p->flags; 2062 } 2063 2064 /* Modify the flags of a page and invalidate the code if necessary. 2065 The flag PAGE_WRITE_ORG is positioned automatically depending 2066 on PAGE_WRITE. The mmap_lock should already be held. */ 2067 void page_set_flags(target_ulong start, target_ulong end, int flags) 2068 { 2069 target_ulong addr, len; 2070 2071 /* This function should never be called with addresses outside the 2072 guest address space. If this assert fires, it probably indicates 2073 a missing call to h2g_valid. 
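 * (Illustration only, not code from this file: a caller such as the
 *  user-mode mmap path typically records a freshly mapped range as
 *
 *      page_set_flags(start, start + len, prot | PAGE_VALID);
 *
 *  with the mmap_lock already held, having verified beforehand that the
 *  range lies inside the guest address space, e.g. via h2g_valid().)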
*/ 2074 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS 2075 assert(end <= ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS)); 2076 #endif 2077 assert(start < end); 2078 assert_memory_lock(); 2079 2080 start = start & TARGET_PAGE_MASK; 2081 end = TARGET_PAGE_ALIGN(end); 2082 2083 if (flags & PAGE_WRITE) { 2084 flags |= PAGE_WRITE_ORG; 2085 } 2086 2087 for (addr = start, len = end - start; 2088 len != 0; 2089 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) { 2090 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1); 2091 2092 /* If the write protection bit is set, then we invalidate 2093 the code inside. */ 2094 if (!(p->flags & PAGE_WRITE) && 2095 (flags & PAGE_WRITE) && 2096 p->first_tb) { 2097 tb_invalidate_phys_page(addr, 0); 2098 } 2099 p->flags = flags; 2100 } 2101 } 2102 2103 int page_check_range(target_ulong start, target_ulong len, int flags) 2104 { 2105 PageDesc *p; 2106 target_ulong end; 2107 target_ulong addr; 2108 2109 /* This function should never be called with addresses outside the 2110 guest address space. If this assert fires, it probably indicates 2111 a missing call to h2g_valid. */ 2112 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS 2113 assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS)); 2114 #endif 2115 2116 if (len == 0) { 2117 return 0; 2118 } 2119 if (start + len - 1 < start) { 2120 /* We've wrapped around. */ 2121 return -1; 2122 } 2123 2124 /* must do before we lose bits in the next step */ 2125 end = TARGET_PAGE_ALIGN(start + len); 2126 start = start & TARGET_PAGE_MASK; 2127 2128 for (addr = start, len = end - start; 2129 len != 0; 2130 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) { 2131 p = page_find(addr >> TARGET_PAGE_BITS); 2132 if (!p) { 2133 return -1; 2134 } 2135 if (!(p->flags & PAGE_VALID)) { 2136 return -1; 2137 } 2138 2139 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) { 2140 return -1; 2141 } 2142 if (flags & PAGE_WRITE) { 2143 if (!(p->flags & PAGE_WRITE_ORG)) { 2144 return -1; 2145 } 2146 /* unprotect the page if it was put read-only because it 2147 contains translated code */ 2148 if (!(p->flags & PAGE_WRITE)) { 2149 if (!page_unprotect(addr, 0)) { 2150 return -1; 2151 } 2152 } 2153 } 2154 } 2155 return 0; 2156 } 2157 2158 /* called from signal handler: invalidate the code and unprotect the 2159 * page. Return 0 if the fault was not handled, 1 if it was handled, 2160 * and 2 if it was handled but the caller must cause the TB to be 2161 * immediately exited. (We can only return 2 if the 'pc' argument is 2162 * non-zero.) 2163 */ 2164 int page_unprotect(target_ulong address, uintptr_t pc) 2165 { 2166 unsigned int prot; 2167 bool current_tb_invalidated; 2168 PageDesc *p; 2169 target_ulong host_start, host_end, addr; 2170 2171 /* Technically this isn't safe inside a signal handler. However we 2172 know this only ever happens in a synchronous SEGV handler, so in 2173 practice it seems to be ok. */ 2174 mmap_lock(); 2175 2176 p = page_find(address >> TARGET_PAGE_BITS); 2177 if (!p) { 2178 mmap_unlock(); 2179 return 0; 2180 } 2181 2182 /* if the page was really writable, then we change its 2183 protection back to writable */ 2184 if (p->flags & PAGE_WRITE_ORG) { 2185 current_tb_invalidated = false; 2186 if (p->flags & PAGE_WRITE) { 2187 /* If the page is actually marked WRITE then assume this is because 2188 * this thread raced with another one which got here first and 2189 * set the page to PAGE_WRITE and did the TB invalidate for us. 
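 * In that case the only thing left to do here is check (under
 * TARGET_HAS_PRECISE_SMC below) whether the invalidation hit the very
 * TB we are currently executing, so that the caller can be told to
 * exit it (return value 2).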
2190 */ 2191 #ifdef TARGET_HAS_PRECISE_SMC 2192 TranslationBlock *current_tb = tb_find_pc(pc); 2193 if (current_tb) { 2194 current_tb_invalidated = tb_cflags(current_tb) & CF_INVALID; 2195 } 2196 #endif 2197 } else { 2198 host_start = address & qemu_host_page_mask; 2199 host_end = host_start + qemu_host_page_size; 2200 2201 prot = 0; 2202 for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) { 2203 p = page_find(addr >> TARGET_PAGE_BITS); 2204 p->flags |= PAGE_WRITE; 2205 prot |= p->flags; 2206 2207 /* and since the content will be modified, we must invalidate 2208 the corresponding translated code. */ 2209 current_tb_invalidated |= tb_invalidate_phys_page(addr, pc); 2210 #ifdef CONFIG_USER_ONLY 2211 if (DEBUG_TB_CHECK_GATE) { 2212 tb_invalidate_check(addr); 2213 } 2214 #endif 2215 } 2216 mprotect((void *)g2h(host_start), qemu_host_page_size, 2217 prot & PAGE_BITS); 2218 } 2219 mmap_unlock(); 2220 /* If current TB was invalidated return to main loop */ 2221 return current_tb_invalidated ? 2 : 1; 2222 } 2223 mmap_unlock(); 2224 return 0; 2225 } 2226 #endif /* CONFIG_USER_ONLY */ 2227 2228 /* This is a wrapper for common code that can not use CONFIG_SOFTMMU */ 2229 void tcg_flush_softmmu_tlb(CPUState *cs) 2230 { 2231 #ifdef CONFIG_SOFTMMU 2232 tlb_flush(cs); 2233 #endif 2234 } 2235
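/*
 * Usage note (sketch, not taken from this file): target code that is
 * built for both system and user-mode emulation can call the wrapper
 * above instead of tlb_flush() directly whenever the softmmu TLB must
 * be discarded, e.g.
 *
 *     tcg_flush_softmmu_tlb(cs);
 *
 * where 'cs' is whatever CPUState pointer the caller already has. In a
 * user-only build the call compiles to an empty function, so the shared
 * code needs no CONFIG_SOFTMMU #ifdef of its own.
 */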