/*
 * os-posix-lib.c
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (c) 2010 Red Hat, Inc.
 *
 * QEMU library functions on POSIX which are shared between QEMU and
 * the QEMU tools.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
27 */ 28 29 #include "qemu/osdep.h" 30 #include <termios.h> 31 32 #include <glib/gprintf.h> 33 34 #include "sysemu/sysemu.h" 35 #include "trace.h" 36 #include "qapi/error.h" 37 #include "qemu/error-report.h" 38 #include "qemu/madvise.h" 39 #include "qemu/sockets.h" 40 #include "qemu/thread.h" 41 #include <libgen.h> 42 #include "qemu/cutils.h" 43 #include "qemu/units.h" 44 #include "qemu/thread-context.h" 45 46 #ifdef CONFIG_LINUX 47 #include <sys/syscall.h> 48 #endif 49 50 #ifdef __FreeBSD__ 51 #include <sys/thr.h> 52 #include <sys/user.h> 53 #include <libutil.h> 54 #endif 55 56 #ifdef __NetBSD__ 57 #include <lwp.h> 58 #endif 59 60 #include "qemu/mmap-alloc.h" 61 62 #define MAX_MEM_PREALLOC_THREAD_COUNT 16 63 64 struct MemsetThread; 65 66 typedef struct MemsetContext { 67 bool all_threads_created; 68 bool any_thread_failed; 69 struct MemsetThread *threads; 70 int num_threads; 71 } MemsetContext; 72 73 struct MemsetThread { 74 char *addr; 75 size_t numpages; 76 size_t hpagesize; 77 QemuThread pgthread; 78 sigjmp_buf env; 79 MemsetContext *context; 80 }; 81 typedef struct MemsetThread MemsetThread; 82 83 /* used by sigbus_handler() */ 84 static MemsetContext *sigbus_memset_context; 85 struct sigaction sigbus_oldact; 86 static QemuMutex sigbus_mutex; 87 88 static QemuMutex page_mutex; 89 static QemuCond page_cond; 90 91 int qemu_get_thread_id(void) 92 { 93 #if defined(__linux__) 94 return syscall(SYS_gettid); 95 #elif defined(__FreeBSD__) 96 /* thread id is up to INT_MAX */ 97 long tid; 98 thr_self(&tid); 99 return (int)tid; 100 #elif defined(__NetBSD__) 101 return _lwp_self(); 102 #elif defined(__OpenBSD__) 103 return getthrid(); 104 #else 105 return getpid(); 106 #endif 107 } 108 109 int qemu_daemon(int nochdir, int noclose) 110 { 111 return daemon(nochdir, noclose); 112 } 113 114 bool qemu_write_pidfile(const char *path, Error **errp) 115 { 116 int fd; 117 char pidstr[32]; 118 119 while (1) { 120 struct stat a, b; 121 struct flock lock = { 122 .l_type = F_WRLCK, 
123 .l_whence = SEEK_SET, 124 .l_len = 0, 125 }; 126 127 fd = qemu_create(path, O_WRONLY, S_IRUSR | S_IWUSR, errp); 128 if (fd == -1) { 129 return false; 130 } 131 132 if (fstat(fd, &b) < 0) { 133 error_setg_errno(errp, errno, "Cannot stat file"); 134 goto fail_close; 135 } 136 137 if (fcntl(fd, F_SETLK, &lock)) { 138 error_setg_errno(errp, errno, "Cannot lock pid file"); 139 goto fail_close; 140 } 141 142 /* 143 * Now make sure the path we locked is the same one that now 144 * exists on the filesystem. 145 */ 146 if (stat(path, &a) < 0) { 147 /* 148 * PID file disappeared, someone else must be racing with 149 * us, so try again. 150 */ 151 close(fd); 152 continue; 153 } 154 155 if (a.st_ino == b.st_ino) { 156 break; 157 } 158 159 /* 160 * PID file was recreated, someone else must be racing with 161 * us, so try again. 162 */ 163 close(fd); 164 } 165 166 if (ftruncate(fd, 0) < 0) { 167 error_setg_errno(errp, errno, "Failed to truncate pid file"); 168 goto fail_unlink; 169 } 170 171 snprintf(pidstr, sizeof(pidstr), FMT_pid "\n", getpid()); 172 if (qemu_write_full(fd, pidstr, strlen(pidstr)) != strlen(pidstr)) { 173 error_setg(errp, "Failed to write pid file"); 174 goto fail_unlink; 175 } 176 177 return true; 178 179 fail_unlink: 180 unlink(path); 181 fail_close: 182 close(fd); 183 return false; 184 } 185 186 /* alloc shared memory pages */ 187 void *qemu_anon_ram_alloc(size_t size, uint64_t *alignment, bool shared, 188 bool noreserve) 189 { 190 const uint32_t qemu_map_flags = (shared ? QEMU_MAP_SHARED : 0) | 191 (noreserve ? 
QEMU_MAP_NORESERVE : 0); 192 size_t align = QEMU_VMALLOC_ALIGN; 193 void *ptr = qemu_ram_mmap(-1, size, align, qemu_map_flags, 0); 194 195 if (ptr == MAP_FAILED) { 196 return NULL; 197 } 198 199 if (alignment) { 200 *alignment = align; 201 } 202 203 trace_qemu_anon_ram_alloc(size, ptr); 204 return ptr; 205 } 206 207 void qemu_anon_ram_free(void *ptr, size_t size) 208 { 209 trace_qemu_anon_ram_free(ptr, size); 210 qemu_ram_munmap(-1, ptr, size); 211 } 212 213 void qemu_socket_set_block(int fd) 214 { 215 g_unix_set_fd_nonblocking(fd, false, NULL); 216 } 217 218 int qemu_socket_try_set_nonblock(int fd) 219 { 220 return g_unix_set_fd_nonblocking(fd, true, NULL) ? 0 : -errno; 221 } 222 223 void qemu_socket_set_nonblock(int fd) 224 { 225 int f; 226 f = qemu_socket_try_set_nonblock(fd); 227 assert(f == 0); 228 } 229 230 int socket_set_fast_reuse(int fd) 231 { 232 int val = 1, ret; 233 234 ret = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, 235 (const char *)&val, sizeof(val)); 236 237 assert(ret == 0); 238 239 return ret; 240 } 241 242 void qemu_set_cloexec(int fd) 243 { 244 int f; 245 f = fcntl(fd, F_GETFD); 246 assert(f != -1); 247 f = fcntl(fd, F_SETFD, f | FD_CLOEXEC); 248 assert(f != -1); 249 } 250 251 int qemu_socketpair(int domain, int type, int protocol, int sv[2]) 252 { 253 int ret; 254 255 #ifdef SOCK_CLOEXEC 256 ret = socketpair(domain, type | SOCK_CLOEXEC, protocol, sv); 257 if (ret != -1 || errno != EINVAL) { 258 return ret; 259 } 260 #endif 261 ret = socketpair(domain, type, protocol, sv);; 262 if (ret == 0) { 263 qemu_set_cloexec(sv[0]); 264 qemu_set_cloexec(sv[1]); 265 } 266 267 return ret; 268 } 269 270 char * 271 qemu_get_local_state_dir(void) 272 { 273 return get_relocated_path(CONFIG_QEMU_LOCALSTATEDIR); 274 } 275 276 void qemu_set_tty_echo(int fd, bool echo) 277 { 278 struct termios tty; 279 280 tcgetattr(fd, &tty); 281 282 if (echo) { 283 tty.c_lflag |= ECHO | ECHONL | ICANON | IEXTEN; 284 } else { 285 tty.c_lflag &= ~(ECHO | ECHONL | ICANON | IEXTEN); 
286 } 287 288 tcsetattr(fd, TCSANOW, &tty); 289 } 290 291 #ifdef CONFIG_LINUX 292 static void sigbus_handler(int signal, siginfo_t *siginfo, void *ctx) 293 #else /* CONFIG_LINUX */ 294 static void sigbus_handler(int signal) 295 #endif /* CONFIG_LINUX */ 296 { 297 int i; 298 299 if (sigbus_memset_context) { 300 for (i = 0; i < sigbus_memset_context->num_threads; i++) { 301 MemsetThread *thread = &sigbus_memset_context->threads[i]; 302 303 if (qemu_thread_is_self(&thread->pgthread)) { 304 siglongjmp(thread->env, 1); 305 } 306 } 307 } 308 309 #ifdef CONFIG_LINUX 310 /* 311 * We assume that the MCE SIGBUS handler could have been registered. We 312 * should never receive BUS_MCEERR_AO on any of our threads, but only on 313 * the main thread registered for PR_MCE_KILL_EARLY. Further, we should not 314 * receive BUS_MCEERR_AR triggered by action of other threads on one of 315 * our threads. So, no need to check for unrelated SIGBUS when seeing one 316 * for our threads. 317 * 318 * We will forward to the MCE handler, which will either handle the SIGBUS 319 * or reinstall the default SIGBUS handler and reraise the SIGBUS. The 320 * default SIGBUS handler will crash the process, so we don't care. 321 */ 322 if (sigbus_oldact.sa_flags & SA_SIGINFO) { 323 sigbus_oldact.sa_sigaction(signal, siginfo, ctx); 324 return; 325 } 326 #endif /* CONFIG_LINUX */ 327 warn_report("qemu_prealloc_mem: unrelated SIGBUS detected and ignored"); 328 } 329 330 static void *do_touch_pages(void *arg) 331 { 332 MemsetThread *memset_args = (MemsetThread *)arg; 333 sigset_t set, oldset; 334 int ret = 0; 335 336 /* 337 * On Linux, the page faults from the loop below can cause mmap_sem 338 * contention with allocation of the thread stacks. Do not start 339 * clearing until all threads have been created. 
340 */ 341 qemu_mutex_lock(&page_mutex); 342 while (!memset_args->context->all_threads_created) { 343 qemu_cond_wait(&page_cond, &page_mutex); 344 } 345 qemu_mutex_unlock(&page_mutex); 346 347 /* unblock SIGBUS */ 348 sigemptyset(&set); 349 sigaddset(&set, SIGBUS); 350 pthread_sigmask(SIG_UNBLOCK, &set, &oldset); 351 352 if (sigsetjmp(memset_args->env, 1)) { 353 ret = -EFAULT; 354 } else { 355 char *addr = memset_args->addr; 356 size_t numpages = memset_args->numpages; 357 size_t hpagesize = memset_args->hpagesize; 358 size_t i; 359 for (i = 0; i < numpages; i++) { 360 /* 361 * Read & write back the same value, so we don't 362 * corrupt existing user/app data that might be 363 * stored. 364 * 365 * 'volatile' to stop compiler optimizing this away 366 * to a no-op 367 */ 368 *(volatile char *)addr = *addr; 369 addr += hpagesize; 370 } 371 } 372 pthread_sigmask(SIG_SETMASK, &oldset, NULL); 373 return (void *)(uintptr_t)ret; 374 } 375 376 static void *do_madv_populate_write_pages(void *arg) 377 { 378 MemsetThread *memset_args = (MemsetThread *)arg; 379 const size_t size = memset_args->numpages * memset_args->hpagesize; 380 char * const addr = memset_args->addr; 381 int ret = 0; 382 383 /* See do_touch_pages(). */ 384 qemu_mutex_lock(&page_mutex); 385 while (!memset_args->context->all_threads_created) { 386 qemu_cond_wait(&page_cond, &page_mutex); 387 } 388 qemu_mutex_unlock(&page_mutex); 389 390 if (size && qemu_madvise(addr, size, QEMU_MADV_POPULATE_WRITE)) { 391 ret = -errno; 392 } 393 return (void *)(uintptr_t)ret; 394 } 395 396 static inline int get_memset_num_threads(size_t hpagesize, size_t numpages, 397 int max_threads) 398 { 399 long host_procs = sysconf(_SC_NPROCESSORS_ONLN); 400 int ret = 1; 401 402 if (host_procs > 0) { 403 ret = MIN(MIN(host_procs, MAX_MEM_PREALLOC_THREAD_COUNT), max_threads); 404 } 405 406 /* Especially with gigantic pages, don't create more threads than pages. 
*/ 407 ret = MIN(ret, numpages); 408 /* Don't start threads to prealloc comparatively little memory. */ 409 ret = MIN(ret, MAX(1, hpagesize * numpages / (64 * MiB))); 410 411 /* In case sysconf() fails, we fall back to single threaded */ 412 return ret; 413 } 414 415 static int touch_all_pages(char *area, size_t hpagesize, size_t numpages, 416 int max_threads, ThreadContext *tc, 417 bool use_madv_populate_write) 418 { 419 static gsize initialized = 0; 420 MemsetContext context = { 421 .num_threads = get_memset_num_threads(hpagesize, numpages, max_threads), 422 }; 423 size_t numpages_per_thread, leftover; 424 void *(*touch_fn)(void *); 425 int ret = 0, i = 0; 426 char *addr = area; 427 428 if (g_once_init_enter(&initialized)) { 429 qemu_mutex_init(&page_mutex); 430 qemu_cond_init(&page_cond); 431 g_once_init_leave(&initialized, 1); 432 } 433 434 if (use_madv_populate_write) { 435 /* Avoid creating a single thread for MADV_POPULATE_WRITE */ 436 if (context.num_threads == 1) { 437 if (qemu_madvise(area, hpagesize * numpages, 438 QEMU_MADV_POPULATE_WRITE)) { 439 return -errno; 440 } 441 return 0; 442 } 443 touch_fn = do_madv_populate_write_pages; 444 } else { 445 touch_fn = do_touch_pages; 446 } 447 448 context.threads = g_new0(MemsetThread, context.num_threads); 449 numpages_per_thread = numpages / context.num_threads; 450 leftover = numpages % context.num_threads; 451 for (i = 0; i < context.num_threads; i++) { 452 context.threads[i].addr = addr; 453 context.threads[i].numpages = numpages_per_thread + (i < leftover); 454 context.threads[i].hpagesize = hpagesize; 455 context.threads[i].context = &context; 456 if (tc) { 457 thread_context_create_thread(tc, &context.threads[i].pgthread, 458 "touch_pages", 459 touch_fn, &context.threads[i], 460 QEMU_THREAD_JOINABLE); 461 } else { 462 qemu_thread_create(&context.threads[i].pgthread, "touch_pages", 463 touch_fn, &context.threads[i], 464 QEMU_THREAD_JOINABLE); 465 } 466 addr += context.threads[i].numpages * hpagesize; 467 } 
468 469 if (!use_madv_populate_write) { 470 sigbus_memset_context = &context; 471 } 472 473 qemu_mutex_lock(&page_mutex); 474 context.all_threads_created = true; 475 qemu_cond_broadcast(&page_cond); 476 qemu_mutex_unlock(&page_mutex); 477 478 for (i = 0; i < context.num_threads; i++) { 479 int tmp = (uintptr_t)qemu_thread_join(&context.threads[i].pgthread); 480 481 if (tmp) { 482 ret = tmp; 483 } 484 } 485 486 if (!use_madv_populate_write) { 487 sigbus_memset_context = NULL; 488 } 489 g_free(context.threads); 490 491 return ret; 492 } 493 494 static bool madv_populate_write_possible(char *area, size_t pagesize) 495 { 496 return !qemu_madvise(area, pagesize, QEMU_MADV_POPULATE_WRITE) || 497 errno != EINVAL; 498 } 499 500 bool qemu_prealloc_mem(int fd, char *area, size_t sz, int max_threads, 501 ThreadContext *tc, Error **errp) 502 { 503 static gsize initialized; 504 int ret; 505 size_t hpagesize = qemu_fd_getpagesize(fd); 506 size_t numpages = DIV_ROUND_UP(sz, hpagesize); 507 bool use_madv_populate_write; 508 struct sigaction act; 509 bool rv = true; 510 511 /* 512 * Sense on every invocation, as MADV_POPULATE_WRITE cannot be used for 513 * some special mappings, such as mapping /dev/mem. 
514 */ 515 use_madv_populate_write = madv_populate_write_possible(area, hpagesize); 516 517 if (!use_madv_populate_write) { 518 if (g_once_init_enter(&initialized)) { 519 qemu_mutex_init(&sigbus_mutex); 520 g_once_init_leave(&initialized, 1); 521 } 522 523 qemu_mutex_lock(&sigbus_mutex); 524 memset(&act, 0, sizeof(act)); 525 #ifdef CONFIG_LINUX 526 act.sa_sigaction = &sigbus_handler; 527 act.sa_flags = SA_SIGINFO; 528 #else /* CONFIG_LINUX */ 529 act.sa_handler = &sigbus_handler; 530 act.sa_flags = 0; 531 #endif /* CONFIG_LINUX */ 532 533 ret = sigaction(SIGBUS, &act, &sigbus_oldact); 534 if (ret) { 535 qemu_mutex_unlock(&sigbus_mutex); 536 error_setg_errno(errp, errno, 537 "qemu_prealloc_mem: failed to install signal handler"); 538 return false; 539 } 540 } 541 542 /* touch pages simultaneously */ 543 ret = touch_all_pages(area, hpagesize, numpages, max_threads, tc, 544 use_madv_populate_write); 545 if (ret) { 546 error_setg_errno(errp, -ret, 547 "qemu_prealloc_mem: preallocating memory failed"); 548 rv = false; 549 } 550 551 if (!use_madv_populate_write) { 552 ret = sigaction(SIGBUS, &sigbus_oldact, NULL); 553 if (ret) { 554 /* Terminate QEMU since it can't recover from error */ 555 perror("qemu_prealloc_mem: failed to reinstall signal handler"); 556 exit(1); 557 } 558 qemu_mutex_unlock(&sigbus_mutex); 559 } 560 return rv; 561 } 562 563 char *qemu_get_pid_name(pid_t pid) 564 { 565 char *name = NULL; 566 567 #if defined(__FreeBSD__) 568 /* BSDs don't have /proc, but they provide a nice substitute */ 569 struct kinfo_proc *proc = kinfo_getproc(pid); 570 571 if (proc) { 572 name = g_strdup(proc->ki_comm); 573 free(proc); 574 } 575 #else 576 /* Assume a system with reasonable procfs */ 577 char *pid_path; 578 size_t len; 579 580 pid_path = g_strdup_printf("/proc/%d/cmdline", pid); 581 g_file_get_contents(pid_path, &name, &len, NULL); 582 g_free(pid_path); 583 #endif 584 585 return name; 586 } 587 588 589 void *qemu_alloc_stack(size_t *sz) 590 { 591 void *ptr; 592 int 
flags; 593 #ifdef CONFIG_DEBUG_STACK_USAGE 594 void *ptr2; 595 #endif 596 size_t pagesz = qemu_real_host_page_size(); 597 #ifdef _SC_THREAD_STACK_MIN 598 /* avoid stacks smaller than _SC_THREAD_STACK_MIN */ 599 long min_stack_sz = sysconf(_SC_THREAD_STACK_MIN); 600 *sz = MAX(MAX(min_stack_sz, 0), *sz); 601 #endif 602 /* adjust stack size to a multiple of the page size */ 603 *sz = ROUND_UP(*sz, pagesz); 604 /* allocate one extra page for the guard page */ 605 *sz += pagesz; 606 607 flags = MAP_PRIVATE | MAP_ANONYMOUS; 608 #if defined(MAP_STACK) && defined(__OpenBSD__) 609 /* Only enable MAP_STACK on OpenBSD. Other OS's such as 610 * Linux/FreeBSD/NetBSD have a flag with the same name 611 * but have differing functionality. OpenBSD will SEGV 612 * if it spots execution with a stack pointer pointing 613 * at memory that was not allocated with MAP_STACK. 614 */ 615 flags |= MAP_STACK; 616 #endif 617 618 ptr = mmap(NULL, *sz, PROT_READ | PROT_WRITE, flags, -1, 0); 619 if (ptr == MAP_FAILED) { 620 perror("failed to allocate memory for stack"); 621 abort(); 622 } 623 624 /* Stack grows down -- guard page at the bottom. 
*/ 625 if (mprotect(ptr, pagesz, PROT_NONE) != 0) { 626 perror("failed to set up stack guard page"); 627 abort(); 628 } 629 630 #ifdef CONFIG_DEBUG_STACK_USAGE 631 for (ptr2 = ptr + pagesz; ptr2 < ptr + *sz; ptr2 += sizeof(uint32_t)) { 632 *(uint32_t *)ptr2 = 0xdeadbeaf; 633 } 634 #endif 635 636 return ptr; 637 } 638 639 #ifdef CONFIG_DEBUG_STACK_USAGE 640 static __thread unsigned int max_stack_usage; 641 #endif 642 643 void qemu_free_stack(void *stack, size_t sz) 644 { 645 #ifdef CONFIG_DEBUG_STACK_USAGE 646 unsigned int usage; 647 void *ptr; 648 649 for (ptr = stack + qemu_real_host_page_size(); ptr < stack + sz; 650 ptr += sizeof(uint32_t)) { 651 if (*(uint32_t *)ptr != 0xdeadbeaf) { 652 break; 653 } 654 } 655 usage = sz - (uintptr_t) (ptr - stack); 656 if (usage > max_stack_usage) { 657 error_report("thread %d max stack usage increased from %u to %u", 658 qemu_get_thread_id(), max_stack_usage, usage); 659 max_stack_usage = usage; 660 } 661 #endif 662 663 munmap(stack, sz); 664 } 665 666 /* 667 * Disable CFI checks. 668 * We are going to call a signal handler directly. Such handler may or may not 669 * have been defined in our binary, so there's no guarantee that the pointer 670 * used to set the handler is a cfi-valid pointer. Since the handlers are 671 * stored in kernel memory, changing the handler to an attacker-defined 672 * function requires being able to call a sigaction() syscall, 673 * which is not as easy as overwriting a pointer in memory. 674 */ 675 QEMU_DISABLE_CFI 676 void sigaction_invoke(struct sigaction *action, 677 struct qemu_signalfd_siginfo *info) 678 { 679 siginfo_t si = {}; 680 si.si_signo = info->ssi_signo; 681 si.si_errno = info->ssi_errno; 682 si.si_code = info->ssi_code; 683 684 /* Convert the minimal set of fields defined by POSIX. 685 * Positive si_code values are reserved for kernel-generated 686 * signals, where the valid siginfo fields are determined by 687 * the signal number. 
But according to POSIX, it is unspecified 688 * whether SI_USER and SI_QUEUE have values less than or equal to 689 * zero. 690 */ 691 if (info->ssi_code == SI_USER || info->ssi_code == SI_QUEUE || 692 info->ssi_code <= 0) { 693 /* SIGTERM, etc. */ 694 si.si_pid = info->ssi_pid; 695 si.si_uid = info->ssi_uid; 696 } else if (info->ssi_signo == SIGILL || info->ssi_signo == SIGFPE || 697 info->ssi_signo == SIGSEGV || info->ssi_signo == SIGBUS) { 698 si.si_addr = (void *)(uintptr_t)info->ssi_addr; 699 } else if (info->ssi_signo == SIGCHLD) { 700 si.si_pid = info->ssi_pid; 701 si.si_status = info->ssi_status; 702 si.si_uid = info->ssi_uid; 703 } 704 action->sa_sigaction(info->ssi_signo, &si, NULL); 705 } 706 707 size_t qemu_get_host_physmem(void) 708 { 709 #ifdef _SC_PHYS_PAGES 710 long pages = sysconf(_SC_PHYS_PAGES); 711 if (pages > 0) { 712 if (pages > SIZE_MAX / qemu_real_host_page_size()) { 713 return SIZE_MAX; 714 } else { 715 return pages * qemu_real_host_page_size(); 716 } 717 } 718 #endif 719 return 0; 720 } 721 722 int qemu_msync(void *addr, size_t length, int fd) 723 { 724 size_t align_mask = ~(qemu_real_host_page_size() - 1); 725 726 /** 727 * There are no strict reqs as per the length of mapping 728 * to be synced. Still the length needs to follow the address 729 * alignment changes. Additionally - round the size to the multiple 730 * of PAGE_SIZE 731 */ 732 length += ((uintptr_t)addr & (qemu_real_host_page_size() - 1)); 733 length = (length + ~align_mask) & align_mask; 734 735 addr = (void *)((uintptr_t)addr & align_mask); 736 737 return msync(addr, length, MS_SYNC); 738 } 739