/*
 * mmap support for qemu
 *
 * Copyright (c) 2003 - 2008 Fabrice Bellard
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "exec/page-protection.h"

#include "qemu.h"

static pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER;
static __thread int mmap_lock_count;

void mmap_lock(void)
{
    if (mmap_lock_count++ == 0) {
        pthread_mutex_lock(&mmap_mutex);
    }
}

void mmap_unlock(void)
{
    assert(mmap_lock_count > 0);
    if (--mmap_lock_count == 0) {
        pthread_mutex_unlock(&mmap_mutex);
    }
}

bool have_mmap_lock(void)
{
    return mmap_lock_count > 0 ? true : false;
}

/* Grab lock to make sure things are in a consistent state after fork().  */
void mmap_fork_start(void)
{
    if (mmap_lock_count)
        abort();
    pthread_mutex_lock(&mmap_mutex);
}

void mmap_fork_end(int child)
{
    if (child)
        pthread_mutex_init(&mmap_mutex, NULL);
    else
        pthread_mutex_unlock(&mmap_mutex);
}

/* NOTE: all the constants are the HOST ones, but addresses are target. */
int target_mprotect(abi_ulong start, abi_ulong len, int prot)
{
    abi_ulong end, host_start, host_end, addr;
    int prot1, ret;

    qemu_log_mask(CPU_LOG_PAGE, "mprotect: start=0x" TARGET_ABI_FMT_lx
                  " len=0x" TARGET_ABI_FMT_lx " prot=%c%c%c\n", start, len,
                  prot & PROT_READ ? 'r' : '-',
                  prot & PROT_WRITE ? 'w' : '-',
                  prot & PROT_EXEC ? 'x' : '-');
    if ((start & ~TARGET_PAGE_MASK) != 0)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (end < start)
        return -EINVAL;
    prot &= PROT_READ | PROT_WRITE | PROT_EXEC;
    if (len == 0)
        return 0;

    mmap_lock();
    host_start = start & qemu_host_page_mask;
    host_end = HOST_PAGE_ALIGN(end);
    if (start > host_start) {
        /* handle host page containing start */
        prot1 = prot;
        for (addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        if (host_end == host_start + qemu_host_page_size) {
            for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(addr);
            }
            end = host_end;
        }
        ret = mprotect(g2h_untagged(host_start),
                       qemu_host_page_size, prot1 & PAGE_RWX);
        if (ret != 0)
            goto error;
        host_start += qemu_host_page_size;
    }
    if (end < host_end) {
        prot1 = prot;
        for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        ret = mprotect(g2h_untagged(host_end - qemu_host_page_size),
                       qemu_host_page_size, prot1 & PAGE_RWX);
        if (ret != 0)
            goto error;
        host_end -= qemu_host_page_size;
    }

    /* handle the pages in the middle */
    if (host_start < host_end) {
        ret = mprotect(g2h_untagged(host_start), host_end - host_start, prot);
        if (ret != 0)
            goto error;
    }
    page_set_flags(start, start + len - 1, prot | PAGE_VALID);
    mmap_unlock();
    return 0;
error:
    mmap_unlock();
    return ret;
}

/*
 * map an incomplete host page
 *
 * mmap_frag can be called with a valid fd, if flags doesn't contain one of
 * MAP_ANON, MAP_STACK, MAP_GUARD. If we need to map a page in those cases, we
 * pass fd == -1. However, if flags contains MAP_GUARD then MAP_ANON cannot be
 * added.
 *
 * * If fd is valid (not -1) we want to map the pages with MAP_ANON.
 * * If flags contains MAP_GUARD we don't want to add MAP_ANON because it
 *   will be rejected.  See kern_mmap's enforcing of constraints for MAP_GUARD
 *   in sys/vm/vm_mmap.c.
 * * If flags contains MAP_ANON it doesn't matter if we add it or not.
 * * If flags contains MAP_STACK, mmap adds MAP_ANON when called so it doesn't
 *   matter if we add it or not either.  See enforcing of constraints for
 *   MAP_STACK in kern_mmap.
 *
 * Don't add MAP_ANON for the flags that use fd == -1 without specifying the
 * flags directly, with the assumption that future flags that require fd == -1
 * will also not require MAP_ANON.
 */
static int mmap_frag(abi_ulong real_start,
                     abi_ulong start, abi_ulong end,
                     int prot, int flags, int fd, abi_ulong offset)
{
    abi_ulong real_end, addr;
    void *host_start;
    int prot1, prot_new;

    real_end = real_start + qemu_host_page_size;
    host_start = g2h_untagged(real_start);

    /* get the protection of the target pages outside the mapping */
    prot1 = 0;
    for (addr = real_start; addr < real_end; addr++) {
        if (addr < start || addr >= end)
            prot1 |= page_get_flags(addr);
    }

    if (prot1 == 0) {
        /* no page was there, so we allocate one. See also above. */
        void *p = mmap(host_start, qemu_host_page_size, prot,
                       flags | ((fd != -1) ? MAP_ANON : 0), -1, 0);
        if (p == MAP_FAILED)
            return -1;
        prot1 = prot;
    }
    prot1 &= PAGE_RWX;

    prot_new = prot | prot1;
    if (fd != -1) {
        /* msync() won't work here, so we return an error if write is
           possible while it is a shared mapping */
        if ((flags & TARGET_BSD_MAP_FLAGMASK) == MAP_SHARED &&
            (prot & PROT_WRITE))
            return -1;

        /* adjust protection to be able to read */
        if (!(prot1 & PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE);

        /* read the corresponding file data */
        if (pread(fd, g2h_untagged(start), end - start, offset) == -1) {
            return -1;
        }

        /* put final protection */
        if (prot_new != (prot1 | PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot_new);
    } else {
        if (prot_new != prot1) {
            mprotect(host_start, qemu_host_page_size, prot_new);
        }
        if (prot_new & PROT_WRITE) {
            memset(g2h_untagged(start), 0, end - start);
        }
    }
    return 0;
}

#if HOST_LONG_BITS == 64 && TARGET_ABI_BITS == 64
# define TASK_UNMAPPED_BASE  (1ul << 38)
#else
# define TASK_UNMAPPED_BASE  0x40000000
#endif
abi_ulong mmap_next_start = TASK_UNMAPPED_BASE;

/*
 * Subroutine of mmap_find_vma, used when we have pre-allocated a chunk of guest
 * address space.
 */
static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
                                        abi_ulong alignment)
{
    abi_ulong ret;

    ret = page_find_range_empty(start, reserved_va, size, alignment);
    if (ret == -1 && start > TARGET_PAGE_SIZE) {
        /* Restart at the beginning of the address space. */
        ret = page_find_range_empty(TARGET_PAGE_SIZE, start - 1,
                                    size, alignment);
    }

    return ret;
}

/*
 * Find and reserve a free memory area of size 'size'. The search
 * starts at 'start'.
 * It must be called with mmap_lock() held.
 * Return -1 if error.
 */
static abi_ulong mmap_find_vma_aligned(abi_ulong start, abi_ulong size,
                                       abi_ulong alignment)
{
    void *ptr, *prev;
    abi_ulong addr;
    int flags;
    int wrapped, repeat;

    /* If 'start' == 0, then a default start address is used. */
    if (start == 0) {
        start = mmap_next_start;
    } else {
        start &= qemu_host_page_mask;
    }

    size = HOST_PAGE_ALIGN(size);

    if (reserved_va) {
        return mmap_find_vma_reserved(start, size,
            (alignment != 0 ? 1 << alignment :
             MAX(qemu_host_page_size, TARGET_PAGE_SIZE)));
    }

    addr = start;
    wrapped = repeat = 0;
    prev = 0;
    flags = MAP_ANON | MAP_PRIVATE;
    if (alignment != 0) {
        flags |= MAP_ALIGNED(alignment);
    }

    for (;; prev = ptr) {
        /*
         * Reserve needed memory area to avoid a race.
         * It should be discarded using:
         *  - mmap() with MAP_FIXED flag
         *  - mremap() with MREMAP_FIXED flag
         *  - shmat() with SHM_REMAP flag
         */
        ptr = mmap(g2h_untagged(addr), size, PROT_NONE,
                   flags, -1, 0);

        /* ENOMEM, if host address space has no memory */
        if (ptr == MAP_FAILED) {
            return (abi_ulong)-1;
        }

        /*
         * Count the number of sequential returns of the same address.
         * This is used to modify the search algorithm below.
         */
        repeat = (ptr == prev ? repeat + 1 : 0);

        if (h2g_valid(ptr + size - 1)) {
            addr = h2g(ptr);

            if ((addr & ~TARGET_PAGE_MASK) == 0) {
                /* Success.  */
                if (start == mmap_next_start && addr >= TASK_UNMAPPED_BASE) {
                    mmap_next_start = addr + size;
                }
                return addr;
            }

            /* The address is not properly aligned for the target. */
            switch (repeat) {
            case 0:
                /*
                 * Assume the result that the kernel gave us is the
                 * first with enough free space, so start again at the
                 * next higher target page.
                 */
                addr = TARGET_PAGE_ALIGN(addr);
                break;
            case 1:
                /*
                 * Sometimes the kernel decides to perform the allocation
                 * at the top end of memory instead.
                 */
                addr &= TARGET_PAGE_MASK;
                break;
            case 2:
                /* Start over at low memory. */
                addr = 0;
                break;
            default:
                /* Fail.  This unaligned block must be the last. */
                addr = -1;
                break;
            }
        } else {
            /*
             * Since the result the kernel gave didn't fit, start
             * again at low memory.  If any repetition, fail.
             */
            addr = (repeat ? -1 : 0);
        }

        /* Unmap and try again. */
        munmap(ptr, size);

        /* ENOMEM if we checked the whole of the target address space. */
        if (addr == (abi_ulong)-1) {
            return (abi_ulong)-1;
        } else if (addr == 0) {
            if (wrapped) {
                return (abi_ulong)-1;
            }
            wrapped = 1;
            /*
             * Don't actually use 0 when wrapping, instead indicate
             * that we'd truly like an allocation in low memory.
             */
            addr = TARGET_PAGE_SIZE;
        } else if (wrapped && addr >= start) {
            return (abi_ulong)-1;
        }
    }
}

abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size)
{
    return mmap_find_vma_aligned(start, size, 0);
}

/* NOTE: all the constants are the HOST ones */
abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
                     int flags, int fd, off_t offset)
{
    abi_ulong ret, end, real_start, real_end, retaddr, host_offset, host_len;

    mmap_lock();
    if (qemu_loglevel_mask(CPU_LOG_PAGE)) {
        qemu_log("mmap: start=0x" TARGET_ABI_FMT_lx
                 " len=0x" TARGET_ABI_FMT_lx " prot=%c%c%c flags=",
                 start, len,
                 prot & PROT_READ ? 'r' : '-',
                 prot & PROT_WRITE ? 'w' : '-',
                 prot & PROT_EXEC ? 'x' : '-');
        if (flags & MAP_ALIGNMENT_MASK) {
            qemu_log("MAP_ALIGNED(%u) ",
                     (flags & MAP_ALIGNMENT_MASK) >> MAP_ALIGNMENT_SHIFT);
        }
        if (flags & MAP_GUARD) {
            qemu_log("MAP_GUARD ");
        }
        if (flags & MAP_FIXED) {
            qemu_log("MAP_FIXED ");
        }
        if (flags & MAP_ANON) {
            qemu_log("MAP_ANON ");
        }
        if (flags & MAP_EXCL) {
            qemu_log("MAP_EXCL ");
        }
        if (flags & MAP_PRIVATE) {
            qemu_log("MAP_PRIVATE ");
        }
        if (flags & MAP_SHARED) {
            qemu_log("MAP_SHARED ");
        }
        if (flags & MAP_NOCORE) {
            qemu_log("MAP_NOCORE ");
        }
        if (flags & MAP_STACK) {
            qemu_log("MAP_STACK ");
        }
        qemu_log("fd=%d offset=0x%lx\n", fd, offset);
    }

    if ((flags & MAP_ANON) && fd != -1) {
        errno = EINVAL;
        goto fail;
    }
    if (flags & MAP_STACK) {
        if ((fd != -1) || ((prot & (PROT_READ | PROT_WRITE)) !=
                           (PROT_READ | PROT_WRITE))) {
            errno = EINVAL;
            goto fail;
        }
    }
    if ((flags & MAP_GUARD) && (prot != PROT_NONE || fd != -1 ||
        offset != 0 || (flags & (MAP_SHARED | MAP_PRIVATE |
        /* MAP_PREFAULT | */ /* MAP_PREFAULT not in mman.h */
        MAP_PREFAULT_READ | MAP_ANON | MAP_STACK)) != 0)) {
        errno = EINVAL;
        goto fail;
    }

    if (offset & ~TARGET_PAGE_MASK) {
        errno = EINVAL;
        goto fail;
    }

    if (len == 0) {
        errno = EINVAL;
        goto fail;
    }

    /* Check for overflows */
    len = TARGET_PAGE_ALIGN(len);
    if (len == 0) {
        errno = ENOMEM;
        goto fail;
    }

    real_start = start & qemu_host_page_mask;
    host_offset = offset & qemu_host_page_mask;

    /*
     * If the user is asking for the kernel to find a location, do that
     * before we truncate the length for mapping files below.
     */
    if (!(flags & MAP_FIXED)) {
        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);
        if ((flags & MAP_ALIGNMENT_MASK) != 0)
            start = mmap_find_vma_aligned(real_start, host_len,
                (flags & MAP_ALIGNMENT_MASK) >> MAP_ALIGNMENT_SHIFT);
        else
            start = mmap_find_vma(real_start, host_len);
        if (start == (abi_ulong)-1) {
            errno = ENOMEM;
            goto fail;
        }
    }

    /*
     * When mapping files into a memory area larger than the file, accesses
     * to pages beyond the file size will cause a SIGBUS.
     *
     * For example, if mmapping a file of 100 bytes on a host with 4K pages
     * emulating a target with 8K pages, the target expects to be able to
     * access the first 8K. But the host will trap us on any access beyond
     * 4K.
     *
     * When emulating a target with a larger page-size than the host's, we
     * may need to truncate file maps at EOF and add extra anonymous pages
     * up to the target's page boundary.
     */

    if ((qemu_real_host_page_size() < qemu_host_page_size) && fd != -1) {
        struct stat sb;

        if (fstat(fd, &sb) == -1) {
            goto fail;
        }

        /* Are we trying to create a map beyond EOF? */
        if (offset + len > sb.st_size) {
            /*
             * If so, truncate the file map at EOF aligned with
             * the host's real page size. Additional anonymous maps
             * will be created beyond EOF.
             */
            len = REAL_HOST_PAGE_ALIGN(sb.st_size - offset);
        }
    }

    if (!(flags & MAP_FIXED)) {
        unsigned long host_start;
        void *p;

        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);

        /*
         * Note: we prefer to control the mapping address.  It is
         * especially important if qemu_host_page_size >
         * qemu_real_host_page_size.
         */
        p = mmap(g2h_untagged(start), host_len, prot,
                 flags | MAP_FIXED | ((fd != -1) ? MAP_ANON : 0), -1, 0);
        if (p == MAP_FAILED)
            goto fail;
        /* update start so that it points to the file position at 'offset' */
        host_start = (unsigned long)p;
        if (fd != -1) {
            p = mmap(g2h_untagged(start), len, prot,
                     flags | MAP_FIXED, fd, host_offset);
            if (p == MAP_FAILED) {
                munmap(g2h_untagged(start), host_len);
                goto fail;
            }
            host_start += offset - host_offset;
        }
        start = h2g(host_start);
    } else {
        if (start & ~TARGET_PAGE_MASK) {
            errno = EINVAL;
            goto fail;
        }
        end = start + len;
        real_end = HOST_PAGE_ALIGN(end);

        /*
         * Test if requested memory area fits target address space.
         * It can fail only on 64-bit host with 32-bit target.
         * On any other target/host combination the host mmap() handles
         * this error correctly.
         */
        if (!guest_range_valid_untagged(start, len)) {
            errno = EINVAL;
            goto fail;
        }

        /*
         * worst case: we cannot map the file because the offset is not
         * aligned, so we read it
         */
        if (fd != -1 &&
            (offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) {
            /*
             * msync() won't work here, so we return an error if write is
             * possible while it is a shared mapping
             */
            if ((flags & TARGET_BSD_MAP_FLAGMASK) == MAP_SHARED &&
                (prot & PROT_WRITE)) {
                errno = EINVAL;
                goto fail;
            }
            retaddr = target_mmap(start, len, prot | PROT_WRITE,
                                  MAP_FIXED | MAP_PRIVATE | MAP_ANON,
                                  -1, 0);
            if (retaddr == -1)
                goto fail;
            if (pread(fd, g2h_untagged(start), len, offset) == -1) {
                goto fail;
            }
            if (!(prot & PROT_WRITE)) {
                ret = target_mprotect(start, len, prot);
                assert(ret == 0);
            }
            goto the_end;
        }

        /* Reject the mapping if any page within the range is mapped */
        if ((flags & MAP_EXCL) && !page_check_range_empty(start, end - 1)) {
            errno = EINVAL;
            goto fail;
        }

        /* handle the start of the mapping */
        if (start > real_start) {
            if (real_end == real_start + qemu_host_page_size) {
                /* one single host page */
                ret = mmap_frag(real_start, start, end,
                                prot, flags, fd, offset);
                if (ret == -1)
                    goto fail;
                goto the_end1;
            }
            ret = mmap_frag(real_start, start, real_start + qemu_host_page_size,
                            prot, flags, fd, offset);
            if (ret == -1)
                goto fail;
            real_start += qemu_host_page_size;
        }
        /* handle the end of the mapping */
        if (end < real_end) {
            ret = mmap_frag(real_end - qemu_host_page_size,
                            real_end - qemu_host_page_size, end,
                            prot, flags, fd,
                            offset + real_end - qemu_host_page_size - start);
            if (ret == -1)
                goto fail;
            real_end -= qemu_host_page_size;
        }

        /* map the middle (easier) */
        if (real_start < real_end) {
            void *p;
            unsigned long offset1;
            if (flags & MAP_ANON)
                offset1 = 0;
            else
                offset1 = offset + real_start - start;
            p = mmap(g2h_untagged(real_start), real_end - real_start,
                     prot, flags, fd, offset1);
            if (p == MAP_FAILED)
                goto fail;
        }
    }
the_end1:
    page_set_flags(start, start + len - 1, prot | PAGE_VALID);
the_end:
#ifdef DEBUG_MMAP
    printf("ret=0x" TARGET_ABI_FMT_lx "\n", start);
    page_dump(stdout);
    printf("\n");
#endif
    mmap_unlock();
    return start;
fail:
    mmap_unlock();
    return -1;
}

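/*
 * Re-cover the host pages backing [start, start + size) with a PROT_NONE
 * anonymous mapping so the reserved guest address range is not reused by
 * the host.  Host pages at either edge that still carry other guest
 * mappings are left untouched.
 */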
void mmap_reserve(abi_ulong start, abi_ulong size)
{
    abi_ulong real_start;
    abi_ulong real_end;
    abi_ulong addr;
    abi_ulong end;
    int prot;

    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(start + size);
    end = start + size;
    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0) {
            real_start += qemu_host_page_size;
        }
    }
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0) {
            real_end -= qemu_host_page_size;
        }
    }
    if (real_start != real_end) {
        mmap(g2h_untagged(real_start), real_end - real_start, PROT_NONE,
             MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0);
    }
}

int target_munmap(abi_ulong start, abi_ulong len)
{
    abi_ulong end, real_start, real_end, addr;
    int prot, ret;

#ifdef DEBUG_MMAP
    printf("munmap: start=0x" TARGET_ABI_FMT_lx " len=0x"
           TARGET_ABI_FMT_lx "\n",
           start, len);
#endif
    if (start & ~TARGET_PAGE_MASK)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    if (len == 0)
        return -EINVAL;
    mmap_lock();
    end = start + len;
    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(end);

    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0)
            real_start += qemu_host_page_size;
    }
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0)
            real_end -= qemu_host_page_size;
    }

    ret = 0;
    /* unmap what we can */
    if (real_start < real_end) {
        if (reserved_va) {
            mmap_reserve(real_start, real_end - real_start);
        } else {
            ret = munmap(g2h_untagged(real_start), real_end - real_start);
        }
    }

    if (ret == 0) {
        page_set_flags(start, start + len - 1, 0);
    }
    mmap_unlock();
    return ret;
}

int target_msync(abi_ulong start, abi_ulong len, int flags)
{
    abi_ulong end;

    if (start & ~TARGET_PAGE_MASK)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (end < start)
        return -EINVAL;
    if (end == start)
        return 0;

    start &= qemu_host_page_mask;
    return msync(g2h_untagged(start), end - start, flags);
}