/*
 * Memory region management for Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/madvise.h"
#include "qemu/mprotect.h"
#include "qemu/memalign.h"
#include "qemu/cacheinfo.h"
#include "qemu/qtree.h"
#include "qapi/error.h"
#include "tcg/tcg.h"
#include "exec/translation-block.h"
#include "tcg-internal.h"
#include "host/cpuinfo.h"


/*
 * Local source-level compatibility with Unix.
 * Used by tcg_region_init below.
 */
#if defined(_WIN32)
#define PROT_READ 1
#define PROT_WRITE 2
#define PROT_EXEC 4
#endif

struct tcg_region_tree {
    QemuMutex lock;
    QTree *tree;
    /* padding to avoid false sharing is computed at run-time */
};

/*
 * We divide code_gen_buffer into equally-sized "regions" that TCG threads
 * dynamically allocate from as demand dictates. Given appropriate region
 * sizing, this minimizes flushes even when some TCG threads generate a lot
 * more code than others.
 */
struct tcg_region_state {
    QemuMutex lock;

    /* fields set at init time */
    void *start_aligned;
    void *after_prologue;
    size_t n;
    size_t size;        /* size of one region */
    size_t stride;      /* .size + guard size */
    size_t total_size;  /* size of entire buffer, >= n * stride */

    /* fields protected by the lock */
    size_t current;       /* current region index */
    size_t agg_size_full; /* aggregate size of full regions */
};

static struct tcg_region_state region;

/*
 * This is an array of struct tcg_region_tree's, with padding.
 * We use void * to simplify the computation of region_trees[i]; each
 * struct is found every tree_size bytes.
 */
static void *region_trees;
static size_t tree_size;

bool in_code_gen_buffer(const void *p)
{
    /*
     * Much like it is valid to have a pointer to the byte past the
     * end of an array (so long as you don't dereference it), allow
     * a pointer to the byte past the end of the code gen buffer.
     */
    return (size_t)(p - region.start_aligned) <= region.total_size;
}
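/*
 * The single unsigned comparison above covers both out-of-range cases:
 * if p is below start_aligned, the pointer difference wraps around to a
 * huge size_t value and fails the <= test, so no separate "p < start"
 * check is needed.
 */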
#ifndef CONFIG_TCG_INTERPRETER
static int host_prot_read_exec(void)
{
#if defined(CONFIG_LINUX) && defined(HOST_AARCH64) && defined(PROT_BTI)
    if (cpuinfo & CPUINFO_BTI) {
        return PROT_READ | PROT_EXEC | PROT_BTI;
    }
#endif
    return PROT_READ | PROT_EXEC;
}
#endif

#ifdef CONFIG_DEBUG_TCG
const void *tcg_splitwx_to_rx(void *rw)
{
    /* Pass NULL pointers unchanged. */
    if (rw) {
        g_assert(in_code_gen_buffer(rw));
        rw += tcg_splitwx_diff;
    }
    return rw;
}

void *tcg_splitwx_to_rw(const void *rx)
{
    /* Pass NULL pointers unchanged. */
    if (rx) {
        rx -= tcg_splitwx_diff;
        /* Assert that we end with a pointer in the rw region. */
        g_assert(in_code_gen_buffer(rx));
    }
    return (void *)rx;
}
#endif /* CONFIG_DEBUG_TCG */

/* compare a pointer @ptr and a tb_tc @s */
static int ptr_cmp_tb_tc(const void *ptr, const struct tb_tc *s)
{
    if (ptr >= s->ptr + s->size) {
        return 1;
    } else if (ptr < s->ptr) {
        return -1;
    }
    return 0;
}

static gint tb_tc_cmp(gconstpointer ap, gconstpointer bp, gpointer userdata)
{
    const struct tb_tc *a = ap;
    const struct tb_tc *b = bp;

    /*
     * When both sizes are set, we know this isn't a lookup.
     * This is the most likely case: every TB must be inserted; lookups
     * are a lot less frequent.
     */
    if (likely(a->size && b->size)) {
        if (a->ptr > b->ptr) {
            return 1;
        } else if (a->ptr < b->ptr) {
            return -1;
        }
        /* a->ptr == b->ptr should happen only on deletions */
        g_assert(a->size == b->size);
        return 0;
    }
    /*
     * For lookups, exactly one of the two operands has its .size field
     * set to 0; that operand is the lookup key.
     * From the glib sources we see that @ap is always the lookup key. However
     * the docs provide no guarantee, so we just mark this case as likely.
     */
    if (likely(a->size == 0)) {
        return ptr_cmp_tb_tc(a->ptr, b);
    }
    return ptr_cmp_tb_tc(b->ptr, a);
}

static void tb_destroy(gpointer value)
{
    TranslationBlock *tb = value;
    qemu_spin_destroy(&tb->jmp_lock);
}

static void tcg_region_trees_init(void)
{
    size_t i;

    tree_size = ROUND_UP(sizeof(struct tcg_region_tree), qemu_dcache_linesize);
    region_trees = qemu_memalign(qemu_dcache_linesize, region.n * tree_size);
    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        qemu_mutex_init(&rt->lock);
        rt->tree = q_tree_new_full(tb_tc_cmp, NULL, NULL, tb_destroy);
    }
}
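/*
 * Example of the resulting layout, assuming a 64-byte cache line and a
 * struct tcg_region_tree of 48 bytes: tree_size is rounded up to 64, so
 * the tree for region i lives at region_trees + i * 64 and no two trees
 * share a cache line, avoiding false sharing between TCG threads that
 * operate on different regions.
 */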
static struct tcg_region_tree *tc_ptr_to_region_tree(const void *p)
{
    size_t region_idx;

    /*
     * Like tcg_splitwx_to_rw, with no assert. The pc may come from
     * a signal handler over which the caller has no control.
     */
    if (!in_code_gen_buffer(p)) {
        p -= tcg_splitwx_diff;
        if (!in_code_gen_buffer(p)) {
            return NULL;
        }
    }

    if (p < region.start_aligned) {
        region_idx = 0;
    } else {
        ptrdiff_t offset = p - region.start_aligned;

        if (offset > region.stride * (region.n - 1)) {
            region_idx = region.n - 1;
        } else {
            region_idx = offset / region.stride;
        }
    }
    return region_trees + region_idx * tree_size;
}

void tcg_tb_insert(TranslationBlock *tb)
{
    struct tcg_region_tree *rt = tc_ptr_to_region_tree(tb->tc.ptr);

    g_assert(rt != NULL);
    qemu_mutex_lock(&rt->lock);
    q_tree_insert(rt->tree, &tb->tc, tb);
    qemu_mutex_unlock(&rt->lock);
}

void tcg_tb_remove(TranslationBlock *tb)
{
    struct tcg_region_tree *rt = tc_ptr_to_region_tree(tb->tc.ptr);

    g_assert(rt != NULL);
    qemu_mutex_lock(&rt->lock);
    q_tree_remove(rt->tree, &tb->tc);
    qemu_mutex_unlock(&rt->lock);
}

/*
 * Find the TB 'tb' such that
 * tb->tc.ptr <= tc_ptr < tb->tc.ptr + tb->tc.size
 * Return NULL if not found.
 */
TranslationBlock *tcg_tb_lookup(uintptr_t tc_ptr)
{
    struct tcg_region_tree *rt = tc_ptr_to_region_tree((void *)tc_ptr);
    TranslationBlock *tb;
    struct tb_tc s = { .ptr = (void *)tc_ptr };

    if (rt == NULL) {
        return NULL;
    }

    qemu_mutex_lock(&rt->lock);
    tb = q_tree_lookup(rt->tree, &s);
    qemu_mutex_unlock(&rt->lock);
    return tb;
}

static void tcg_region_tree_lock_all(void)
{
    size_t i;

    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        qemu_mutex_lock(&rt->lock);
    }
}

static void tcg_region_tree_unlock_all(void)
{
    size_t i;

    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        qemu_mutex_unlock(&rt->lock);
    }
}

void tcg_tb_foreach(GTraverseFunc func, gpointer user_data)
{
    size_t i;

    tcg_region_tree_lock_all();
    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        q_tree_foreach(rt->tree, func, user_data);
    }
    tcg_region_tree_unlock_all();
}

size_t tcg_nb_tbs(void)
{
    size_t nb_tbs = 0;
    size_t i;

    tcg_region_tree_lock_all();
    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        nb_tbs += q_tree_nnodes(rt->tree);
    }
    tcg_region_tree_unlock_all();
    return nb_tbs;
}

static void tcg_region_tree_reset_all(void)
{
    size_t i;

    tcg_region_tree_lock_all();
    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        /* Increment the refcount first so that destroy acts as a reset */
        q_tree_ref(rt->tree);
        q_tree_destroy(rt->tree);
    }
    tcg_region_tree_unlock_all();
}

static void tcg_region_bounds(size_t curr_region, void **pstart, void **pend)
{
    void *start, *end;

    start = region.start_aligned + curr_region * region.stride;
    end = start + region.size;

    if (curr_region == 0) {
        start = region.after_prologue;
    }
    /* The final region may have a few extra pages due to earlier rounding. */
    if (curr_region == region.n - 1) {
        end = region.start_aligned + region.total_size;
    }

    *pstart = start;
    *pend = end;
}
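/*
 * Worked example: with a 16 MiB buffer split into 4 regions on a host
 * with 4 KiB pages, stride is 4 MiB and size is 4 MiB - 4 KiB.  Region 2
 * then spans [start_aligned + 8 MiB, start_aligned + 12 MiB - 4 KiB);
 * the missing page at the top of each stride is its guard page.  Region 0
 * instead begins at after_prologue, and region 3 ends at total_size so
 * that pages left over from rounding are not wasted.
 */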
static void tcg_region_assign(TCGContext *s, size_t curr_region)
{
    void *start, *end;

    tcg_region_bounds(curr_region, &start, &end);

    s->code_gen_buffer = start;
    s->code_gen_ptr = start;
    s->code_gen_buffer_size = end - start;
    s->code_gen_highwater = end - TCG_HIGHWATER;
}

static bool tcg_region_alloc__locked(TCGContext *s)
{
    if (region.current == region.n) {
        return true;
    }
    tcg_region_assign(s, region.current);
    region.current++;
    return false;
}

/*
 * Request a new region once the one in use has filled up.
 * Returns true on error.
 */
bool tcg_region_alloc(TCGContext *s)
{
    bool err;
    /* read the region size now; alloc__locked will overwrite it on success */
    size_t size_full = s->code_gen_buffer_size;

    qemu_mutex_lock(&region.lock);
    err = tcg_region_alloc__locked(s);
    if (!err) {
        region.agg_size_full += size_full - TCG_HIGHWATER;
    }
    qemu_mutex_unlock(&region.lock);
    return err;
}

/*
 * Perform a context's first region allocation.
 * This function does _not_ increment region.agg_size_full.
 */
static void tcg_region_initial_alloc__locked(TCGContext *s)
{
    bool err = tcg_region_alloc__locked(s);
    g_assert(!err);
}

void tcg_region_initial_alloc(TCGContext *s)
{
    qemu_mutex_lock(&region.lock);
    tcg_region_initial_alloc__locked(s);
    qemu_mutex_unlock(&region.lock);
}

/* Call from a safe-work context */
void tcg_region_reset_all(void)
{
    unsigned int n_ctxs = qatomic_read(&tcg_cur_ctxs);
    unsigned int i;

    qemu_mutex_lock(&region.lock);
    region.current = 0;
    region.agg_size_full = 0;

    for (i = 0; i < n_ctxs; i++) {
        TCGContext *s = qatomic_read(&tcg_ctxs[i]);
        tcg_region_initial_alloc__locked(s);
    }
    qemu_mutex_unlock(&region.lock);

    tcg_region_tree_reset_all();
}

static size_t tcg_n_regions(size_t tb_size, unsigned max_cpus)
{
#ifdef CONFIG_USER_ONLY
    return 1;
#else
    size_t n_regions;

    /*
     * It is likely that some vCPUs will translate more code than others,
     * so we first try to set more regions than max_cpus, with those regions
     * being of reasonable size. If that's not possible we make do by evenly
     * dividing the code_gen_buffer among the vCPUs.
     */
    /* Use a single region if all we have is one vCPU thread */
    if (max_cpus == 1 || !qemu_tcg_mttcg_enabled()) {
        return 1;
    }

    /*
     * Try to have more regions than max_cpus, with each region being >= 2 MB.
     * If we can't, then just allocate one region per vCPU thread.
     */
    n_regions = tb_size / (2 * MiB);
    if (n_regions <= max_cpus) {
        return max_cpus;
    }
    return MIN(n_regions, max_cpus * 8);
#endif
}
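/*
 * For example, an 8-vCPU MTTCG guest with a 1 GiB buffer yields
 * 1 GiB / 2 MiB = 512 candidate regions, clamped to 8 * 8 = 64.
 * The same guest with a 16 MiB buffer yields only 8 candidates,
 * which is not more than max_cpus, so it falls back to exactly
 * 8 regions of 2 MiB each.
 */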
/*
 * Minimum size of the code gen buffer. This number is randomly chosen,
 * but not so small that we can't have a fair number of TB's live.
 *
 * Maximum size, MAX_CODE_GEN_BUFFER_SIZE, is defined in tcg-target.h.
 * Unless otherwise indicated, this is constrained by the range of
 * direct branches on the host cpu, as used by the TCG implementation
 * of goto_tb.
 */
#define MIN_CODE_GEN_BUFFER_SIZE     (1 * MiB)

#if TCG_TARGET_REG_BITS == 32
#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32 * MiB)
#ifdef CONFIG_USER_ONLY
/*
 * For user mode on smaller 32 bit systems we may run into trouble
 * allocating big chunks of data in the right place. On these systems
 * we utilise a static code generation buffer directly in the binary.
 */
#define USE_STATIC_CODE_GEN_BUFFER
#endif
#else /* TCG_TARGET_REG_BITS == 64 */
#ifdef CONFIG_USER_ONLY
/*
 * As user-mode emulation typically means running multiple instances
 * of the translator, don't go too nuts with our default code gen
 * buffer lest we make things too hard for the OS.
 */
#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (128 * MiB)
#else
/*
 * We expect most system emulation to run one or two guests per host.
 * Users running large scale system emulation may want to tweak their
 * runtime setup via the tb-size control on the command line.
 */
#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (1 * GiB)
#endif
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE \
  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));

static int alloc_code_gen_buffer(size_t tb_size, int splitwx, Error **errp)
{
    void *buf, *end;
    size_t size;

    if (splitwx > 0) {
        error_setg(errp, "jit split-wx not supported");
        return -1;
    }

    /* page-align the beginning and end of the buffer */
    buf = static_code_gen_buffer;
    end = static_code_gen_buffer + sizeof(static_code_gen_buffer);
    buf = QEMU_ALIGN_PTR_UP(buf, qemu_real_host_page_size());
    end = QEMU_ALIGN_PTR_DOWN(end, qemu_real_host_page_size());

    size = end - buf;

    /* Honor a command-line option limiting the size of the buffer. */
    if (size > tb_size) {
        size = QEMU_ALIGN_DOWN(tb_size, qemu_real_host_page_size());
    }

    region.start_aligned = buf;
    region.total_size = size;

    return PROT_READ | PROT_WRITE;
}
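/*
 * All alloc_code_gen_buffer variants (this one and those below) return
 * the PROT_* mask that the initial mapping was given, or -1 with @errp
 * set on failure.  tcg_region_init compares that value against the
 * protections it needs, to decide whether an mprotect pass over the
 * regions is required.
 */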
#elif defined(_WIN32)
static int alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
{
    void *buf;

    if (splitwx > 0) {
        error_setg(errp, "jit split-wx not supported");
        return -1;
    }

    buf = VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT,
                       PAGE_EXECUTE_READWRITE);
    if (buf == NULL) {
        error_setg_win32(errp, GetLastError(),
                         "allocate %zu bytes for jit buffer", size);
        return -1;
    }

    region.start_aligned = buf;
    region.total_size = size;

    return PROT_READ | PROT_WRITE | PROT_EXEC;
}
#else
static int alloc_code_gen_buffer_anon(size_t size, int prot,
                                      int flags, Error **errp)
{
    void *buf;

    buf = mmap(NULL, size, prot, flags, -1, 0);
    if (buf == MAP_FAILED) {
        error_setg_errno(errp, errno,
                         "allocate %zu bytes for jit buffer", size);
        return -1;
    }

    region.start_aligned = buf;
    region.total_size = size;
    return prot;
}

#ifndef CONFIG_TCG_INTERPRETER
#ifdef CONFIG_POSIX
#include "qemu/memfd.h"

static int alloc_code_gen_buffer_splitwx_memfd(size_t size, Error **errp)
{
    void *buf_rw = NULL, *buf_rx = MAP_FAILED;
    int fd = -1;

    buf_rw = qemu_memfd_alloc("tcg-jit", size, 0, &fd, errp);
    if (buf_rw == NULL) {
        goto fail;
    }

    buf_rx = mmap(NULL, size, host_prot_read_exec(), MAP_SHARED, fd, 0);
    if (buf_rx == MAP_FAILED) {
        goto fail_rx;
    }

    close(fd);
    region.start_aligned = buf_rw;
    region.total_size = size;
    tcg_splitwx_diff = buf_rx - buf_rw;

    return PROT_READ | PROT_WRITE;

 fail_rx:
    error_setg_errno(errp, errno, "failed to map shared memory for execute");
 fail:
    if (buf_rx != MAP_FAILED) {
        munmap(buf_rx, size);
    }
    if (buf_rw) {
        munmap(buf_rw, size);
    }
    if (fd >= 0) {
        close(fd);
    }
    return -1;
}
#endif /* CONFIG_POSIX */
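/*
 * In the split-wx scheme above, the same physical pages are mapped twice:
 * buf_rw is where the translator writes code, buf_rx is where the host
 * executes it.  tcg_splitwx_diff records the constant offset between the
 * two views, so converting a pointer is a single addition:
 * rx = rw + tcg_splitwx_diff (see tcg_splitwx_to_rx above).
 */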
#ifdef CONFIG_DARWIN
#include <mach/mach.h>

extern kern_return_t mach_vm_remap(vm_map_t target_task,
                                   mach_vm_address_t *target_address,
                                   mach_vm_size_t size,
                                   mach_vm_offset_t mask,
                                   int flags,
                                   vm_map_t src_task,
                                   mach_vm_address_t src_address,
                                   boolean_t copy,
                                   vm_prot_t *cur_protection,
                                   vm_prot_t *max_protection,
                                   vm_inherit_t inheritance);

static int alloc_code_gen_buffer_splitwx_vmremap(size_t size, Error **errp)
{
    kern_return_t ret;
    mach_vm_address_t buf_rw, buf_rx;
    vm_prot_t cur_prot, max_prot;

    /* Map the read-write portion via normal anon memory. */
    if (alloc_code_gen_buffer_anon(size, PROT_READ | PROT_WRITE,
                                   MAP_PRIVATE | MAP_ANONYMOUS, errp) < 0) {
        return -1;
    }

    buf_rw = (mach_vm_address_t)region.start_aligned;
    buf_rx = 0;
    ret = mach_vm_remap(mach_task_self(),
                        &buf_rx,
                        size,
                        0,
                        VM_FLAGS_ANYWHERE,
                        mach_task_self(),
                        buf_rw,
                        false,
                        &cur_prot,
                        &max_prot,
                        VM_INHERIT_NONE);
    if (ret != KERN_SUCCESS) {
        /* TODO: Convert "ret" to a human readable error message. */
        error_setg(errp, "vm_remap for jit splitwx failed");
        munmap((void *)buf_rw, size);
        return -1;
    }

    if (mprotect((void *)buf_rx, size, host_prot_read_exec()) != 0) {
        error_setg_errno(errp, errno, "mprotect for jit splitwx");
        munmap((void *)buf_rx, size);
        munmap((void *)buf_rw, size);
        return -1;
    }

    tcg_splitwx_diff = buf_rx - buf_rw;
    return PROT_READ | PROT_WRITE;
}
#endif /* CONFIG_DARWIN */
#endif /* CONFIG_TCG_INTERPRETER */

static int alloc_code_gen_buffer_splitwx(size_t size, Error **errp)
{
#ifndef CONFIG_TCG_INTERPRETER
# ifdef CONFIG_DARWIN
    return alloc_code_gen_buffer_splitwx_vmremap(size, errp);
# endif
# ifdef CONFIG_POSIX
    return alloc_code_gen_buffer_splitwx_memfd(size, errp);
# endif
#endif
    error_setg(errp, "jit split-wx not supported");
    return -1;
}

static int alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
{
    ERRP_GUARD();
    int prot, flags;

    if (splitwx) {
        prot = alloc_code_gen_buffer_splitwx(size, errp);
        if (prot >= 0) {
            return prot;
        }
        /*
         * If splitwx force-on (1), fail;
         * if splitwx default-on (-1), fall through to splitwx off.
         */
        if (splitwx > 0) {
            return -1;
        }
        error_free_or_abort(errp);
    }

    /*
     * macOS 11.2 has a bug (Apple Feedback FB8994773) in which mprotect
     * rejects a permission change from RWX -> NONE when reserving the
     * guard pages later. We can go the other way with the same number
     * of syscalls, so always begin with PROT_NONE.
     */
    prot = PROT_NONE;
    flags = MAP_PRIVATE | MAP_ANONYMOUS;
#ifdef CONFIG_DARWIN
    /* Applicable to both iOS and macOS (Apple Silicon). */
    if (!splitwx) {
        flags |= MAP_JIT;
    }
#endif

    return alloc_code_gen_buffer_anon(size, prot, flags, errp);
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */
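/*
 * Note that @splitwx is a tri-state: 1 means split-wx was explicitly
 * requested (fail if unavailable), -1 means try split-wx but silently
 * fall back to a single writable and executable mapping, and 0 disables
 * it entirely.
 */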
753 */ 754 void tcg_region_init(size_t tb_size, int splitwx, unsigned max_cpus) 755 { 756 const size_t page_size = qemu_real_host_page_size(); 757 size_t region_size; 758 int have_prot, need_prot; 759 760 /* Size the buffer. */ 761 if (tb_size == 0) { 762 size_t phys_mem = qemu_get_host_physmem(); 763 if (phys_mem == 0) { 764 tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE; 765 } else { 766 tb_size = QEMU_ALIGN_DOWN(phys_mem / 8, page_size); 767 tb_size = MIN(DEFAULT_CODE_GEN_BUFFER_SIZE, tb_size); 768 } 769 } 770 if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) { 771 tb_size = MIN_CODE_GEN_BUFFER_SIZE; 772 } 773 if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) { 774 tb_size = MAX_CODE_GEN_BUFFER_SIZE; 775 } 776 777 have_prot = alloc_code_gen_buffer(tb_size, splitwx, &error_fatal); 778 assert(have_prot >= 0); 779 780 /* Request large pages for the buffer and the splitwx. */ 781 qemu_madvise(region.start_aligned, region.total_size, QEMU_MADV_HUGEPAGE); 782 if (tcg_splitwx_diff) { 783 qemu_madvise(region.start_aligned + tcg_splitwx_diff, 784 region.total_size, QEMU_MADV_HUGEPAGE); 785 } 786 787 /* 788 * Make region_size a multiple of page_size, using aligned as the start. 789 * As a result of this we might end up with a few extra pages at the end of 790 * the buffer; we will assign those to the last region. 791 */ 792 region.n = tcg_n_regions(tb_size, max_cpus); 793 region_size = tb_size / region.n; 794 region_size = QEMU_ALIGN_DOWN(region_size, page_size); 795 796 /* A region must have at least 2 pages; one code, one guard */ 797 g_assert(region_size >= 2 * page_size); 798 region.stride = region_size; 799 800 /* Reserve space for guard pages. */ 801 region.size = region_size - page_size; 802 region.total_size -= page_size; 803 804 /* 805 * The first region will be smaller than the others, via the prologue, 806 * which has yet to be allocated. For now, the first region begins at 807 * the page boundary. 808 */ 809 region.after_prologue = region.start_aligned; 810 811 /* init the region struct */ 812 qemu_mutex_init(®ion.lock); 813 814 /* 815 * Set guard pages in the rw buffer, as that's the one into which 816 * buffer overruns could occur. Do not set guard pages in the rx 817 * buffer -- let that one use hugepages throughout. 818 * Work with the page protections set up with the initial mapping. 819 */ 820 need_prot = PROT_READ | PROT_WRITE; 821 #ifndef CONFIG_TCG_INTERPRETER 822 if (tcg_splitwx_diff == 0) { 823 need_prot |= host_prot_read_exec(); 824 } 825 #endif 826 for (size_t i = 0, n = region.n; i < n; i++) { 827 void *start, *end; 828 829 tcg_region_bounds(i, &start, &end); 830 if (have_prot != need_prot) { 831 int rc; 832 833 if (need_prot == (PROT_READ | PROT_WRITE | PROT_EXEC)) { 834 rc = qemu_mprotect_rwx(start, end - start); 835 } else if (need_prot == (PROT_READ | PROT_WRITE)) { 836 rc = qemu_mprotect_rw(start, end - start); 837 } else { 838 #ifdef CONFIG_POSIX 839 rc = mprotect(start, end - start, need_prot); 840 #else 841 g_assert_not_reached(); 842 #endif 843 } 844 if (rc) { 845 error_setg_errno(&error_fatal, errno, 846 "mprotect of jit buffer"); 847 } 848 } 849 if (have_prot != 0) { 850 /* Guard pages are nice for bug detection but are not essential. */ 851 (void)qemu_mprotect_none(end, page_size); 852 } 853 } 854 855 tcg_region_trees_init(); 856 857 /* 858 * Leave the initial context initialized to the first region. 859 * This will be the context into which we generate the prologue. 860 * It is also the only context for CONFIG_USER_ONLY. 
861 */ 862 tcg_region_initial_alloc__locked(&tcg_init_ctx); 863 } 864 865 void tcg_region_prologue_set(TCGContext *s) 866 { 867 /* Deduct the prologue from the first region. */ 868 g_assert(region.start_aligned == s->code_gen_buffer); 869 region.after_prologue = s->code_ptr; 870 871 /* Recompute boundaries of the first region. */ 872 tcg_region_assign(s, 0); 873 874 /* Register the balance of the buffer with gdb. */ 875 tcg_register_jit(tcg_splitwx_to_rx(region.after_prologue), 876 region.start_aligned + region.total_size - 877 region.after_prologue); 878 } 879 880 /* 881 * Returns the size (in bytes) of all translated code (i.e. from all regions) 882 * currently in the cache. 883 * See also: tcg_code_capacity() 884 * Do not confuse with tcg_current_code_size(); that one applies to a single 885 * TCG context. 886 */ 887 size_t tcg_code_size(void) 888 { 889 unsigned int n_ctxs = qatomic_read(&tcg_cur_ctxs); 890 unsigned int i; 891 size_t total; 892 893 qemu_mutex_lock(®ion.lock); 894 total = region.agg_size_full; 895 for (i = 0; i < n_ctxs; i++) { 896 const TCGContext *s = qatomic_read(&tcg_ctxs[i]); 897 size_t size; 898 899 size = qatomic_read(&s->code_gen_ptr) - s->code_gen_buffer; 900 g_assert(size <= s->code_gen_buffer_size); 901 total += size; 902 } 903 qemu_mutex_unlock(®ion.lock); 904 return total; 905 } 906 907 /* 908 * Returns the code capacity (in bytes) of the entire cache, i.e. including all 909 * regions. 910 * See also: tcg_code_size() 911 */ 912 size_t tcg_code_capacity(void) 913 { 914 size_t guard_size, capacity; 915 916 /* no need for synchronization; these variables are set at init time */ 917 guard_size = region.stride - region.size; 918 capacity = region.total_size; 919 capacity -= (region.n - 1) * guard_size; 920 capacity -= region.n * TCG_HIGHWATER; 921 922 return capacity; 923 } 924