/*
 *  mmap support for qemu
 *
 *  Copyright (c) 2003 - 2008 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "exec/page-protection.h"

#include "qemu.h"

static pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER;
static __thread int mmap_lock_count;
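
/*
 * The mmap lock nests: a per-thread counter tracks the recursion depth so
 * that only the outermost mmap_lock()/mmap_unlock() pair touches the mutex.
 */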
void mmap_lock(void)
{
    if (mmap_lock_count++ == 0) {
        pthread_mutex_lock(&mmap_mutex);
    }
}

void mmap_unlock(void)
{
    assert(mmap_lock_count > 0);
    if (--mmap_lock_count == 0) {
        pthread_mutex_unlock(&mmap_mutex);
    }
}

bool have_mmap_lock(void)
{
    return mmap_lock_count > 0;
}

/* Grab lock to make sure things are in a consistent state after fork().  */
void mmap_fork_start(void)
{
    if (mmap_lock_count) {
        abort();
    }
    pthread_mutex_lock(&mmap_mutex);
}
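
/*
 * After fork() the mutex state is only meaningful in the parent, so the
 * child reinitialises it while the parent simply drops the lock taken in
 * mmap_fork_start().
 */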
void mmap_fork_end(int child)
{
    if (child) {
        pthread_mutex_init(&mmap_mutex, NULL);
    } else {
        pthread_mutex_unlock(&mmap_mutex);
    }
}

/* NOTE: all the constants are the HOST ones, but addresses are target. */
int target_mprotect(abi_ulong start, abi_ulong len, int prot)
{
    abi_ulong end, host_start, host_end, addr;
    int prot1, ret;

    qemu_log_mask(CPU_LOG_PAGE, "mprotect: start=0x" TARGET_ABI_FMT_lx
                  " len=0x" TARGET_ABI_FMT_lx " prot=%c%c%c\n", start, len,
                  prot & PROT_READ ? 'r' : '-',
                  prot & PROT_WRITE ? 'w' : '-',
                  prot & PROT_EXEC ? 'x' : '-');
    if ((start & ~TARGET_PAGE_MASK) != 0) {
        return -EINVAL;
    }
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (end < start) {
        return -EINVAL;
    }
    prot &= PROT_READ | PROT_WRITE | PROT_EXEC;
    if (len == 0) {
        return 0;
    }

    mmap_lock();
    host_start = start & qemu_host_page_mask;
    host_end = HOST_PAGE_ALIGN(end);
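
    /*
     * When the host page size is larger than the target page size, the
     * first and last host pages of the range may also hold target pages
     * outside [start, end).  Their existing protections are OR-ed into the
     * new protection so those neighbouring pages remain accessible.
     */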
    if (start > host_start) {
        /* handle host page containing start */
        prot1 = prot;
        for (addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        if (host_end == host_start + qemu_host_page_size) {
            for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(addr);
            }
            end = host_end;
        }
        ret = mprotect(g2h_untagged(host_start),
                       qemu_host_page_size, prot1 & PAGE_RWX);
        if (ret != 0) {
            goto error;
        }
        host_start += qemu_host_page_size;
    }
    if (end < host_end) {
        prot1 = prot;
        for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        ret = mprotect(g2h_untagged(host_end - qemu_host_page_size),
                       qemu_host_page_size, prot1 & PAGE_RWX);
        if (ret != 0) {
            goto error;
        }
        host_end -= qemu_host_page_size;
    }

    /* handle the pages in the middle */
    if (host_start < host_end) {
        ret = mprotect(g2h_untagged(host_start), host_end - host_start, prot);
        if (ret != 0) {
            goto error;
        }
    }
    page_set_flags(start, start + len - 1, prot | PAGE_VALID);
    mmap_unlock();
    return 0;
error:
    mmap_unlock();
    return ret;
}

/*
 * Perform a pread on behalf of target_mmap.  We can reach EOF, we can be
 * interrupted by signals, and in general there's no good error return path.
 * If @zero, zero the rest of the block at EOF.
 * Return true on success.
 */
static bool mmap_pread(int fd, void *p, size_t len, off_t offset, bool zero)
{
    while (1) {
        ssize_t r = pread(fd, p, len, offset);

        if (likely(r == len)) {
            /* Complete */
            return true;
        }
        if (r == 0) {
            /* EOF */
            if (zero) {
                memset(p, 0, len);
            }
            return true;
        }
        if (r > 0) {
            /* Short read */
            p += r;
            len -= r;
            offset += r;
        } else if (errno != EINTR) {
            /* Error */
            return false;
        }
    }
}

/*
 * Map an incomplete host page.
 *
 * mmap_frag can be called with a valid fd only if flags doesn't contain one
 * of MAP_ANON, MAP_STACK or MAP_GUARD. If we need to map a page in those
 * cases, we pass fd == -1. However, if flags contains MAP_GUARD then MAP_ANON
 * cannot be added.
 *
 * * If fd is valid (not -1) we want to map the pages with MAP_ANON.
 * * If flags contains MAP_GUARD we don't want to add MAP_ANON because it
 *   will be rejected.  See kern_mmap's enforcing of constraints for MAP_GUARD
 *   in sys/vm/vm_mmap.c.
 * * If flags contains MAP_ANON it doesn't matter whether we add it or not.
 * * If flags contains MAP_STACK, mmap adds MAP_ANON when called, so it
 *   doesn't matter whether we add it or not either. See the enforcing of
 *   constraints for MAP_STACK in kern_mmap.
 *
 * Don't add MAP_ANON for the flags that use fd == -1 without specifying the
 * flags directly, on the assumption that future flags that require fd == -1
 * will also not require MAP_ANON.
 */
static int mmap_frag(abi_ulong real_start,
                     abi_ulong start, abi_ulong end,
                     int prot, int flags, int fd, abi_ulong offset)
{
    abi_ulong real_end, addr;
    void *host_start;
    int prot1, prot_new;

    real_end = real_start + qemu_host_page_size;
    host_start = g2h_untagged(real_start);

    /* get the protection of the target pages outside the mapping */
    prot1 = 0;
    for (addr = real_start; addr < real_end; addr++) {
        if (addr < start || addr >= end) {
            prot1 |= page_get_flags(addr);
        }
    }

    if (prot1 == 0) {
        /* no page was there, so we allocate one. See also above. */
        void *p = mmap(host_start, qemu_host_page_size, prot,
                       flags | ((fd != -1) ? MAP_ANON : 0), -1, 0);
        if (p == MAP_FAILED) {
            return -1;
        }
        prot1 = prot;
    }
    prot1 &= PAGE_RWX;

    prot_new = prot | prot1;
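
    /*
     * For a file mapping we cannot mmap() just this fragment without
     * disturbing the rest of the host page, so make the page temporarily
     * writable and read the file contents into place instead.
     */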
    if (fd != -1) {
        /* msync() won't work here, so we return an error if write is
           possible while it is a shared mapping */
        if ((flags & TARGET_BSD_MAP_FLAGMASK) == MAP_SHARED &&
            (prot & PROT_WRITE)) {
            return -1;
        }

        /* adjust protection to be able to read */
        if (!(prot1 & PROT_WRITE)) {
            mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE);
        }

        /* read the corresponding file data */
        if (!mmap_pread(fd, g2h_untagged(start), end - start, offset, true)) {
            return -1;
        }

        /* put final protection */
        if (prot_new != (prot1 | PROT_WRITE)) {
            mprotect(host_start, qemu_host_page_size, prot_new);
        }
    } else {
        if (prot_new != prot1) {
            mprotect(host_start, qemu_host_page_size, prot_new);
        }
        if (prot_new & PROT_WRITE) {
            memset(g2h_untagged(start), 0, end - start);
        }
    }
    return 0;
}
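
/*
 * TASK_UNMAPPED_BASE is the initial value of mmap_next_start, the lowest
 * guest address at which mmap_find_vma() starts searching when the caller
 * does not pass a start hint.
 */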
#if HOST_LONG_BITS == 64 && TARGET_ABI_BITS == 64
# define TASK_UNMAPPED_BASE  (1ul << 38)
#else
# define TASK_UNMAPPED_BASE  0x40000000
#endif
abi_ulong mmap_next_start = TASK_UNMAPPED_BASE;

/*
 * Subroutine of mmap_find_vma, used when we have pre-allocated a chunk of
 * guest address space.
 */
static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
                                        abi_ulong alignment)
{
    abi_ulong ret;

    ret = page_find_range_empty(start, reserved_va, size, alignment);
    if (ret == -1 && start > TARGET_PAGE_SIZE) {
        /* Restart at the beginning of the address space. */
        ret = page_find_range_empty(TARGET_PAGE_SIZE, start - 1,
                                    size, alignment);
    }

    return ret;
}

/*
 * Find and reserve a free memory area of size 'size'. The search
 * starts at 'start'.
 * It must be called with mmap_lock() held.
 * Return -1 if error.
 */
static abi_ulong mmap_find_vma_aligned(abi_ulong start, abi_ulong size,
                                       abi_ulong alignment)
{
    void *ptr, *prev;
    abi_ulong addr;
    int flags;
    int wrapped, repeat;

    /* If 'start' == 0, then a default start address is used. */
    if (start == 0) {
        start = mmap_next_start;
    } else {
        start &= qemu_host_page_mask;
    }

    size = HOST_PAGE_ALIGN(size);

    if (reserved_va) {
        return mmap_find_vma_reserved(start, size,
            (alignment != 0 ? 1 << alignment :
             MAX(qemu_host_page_size, TARGET_PAGE_SIZE)));
    }

    addr = start;
    wrapped = repeat = 0;
    prev = 0;
    flags = MAP_ANON | MAP_PRIVATE;
    if (alignment != 0) {
        flags |= MAP_ALIGNED(alignment);
    }
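
    /*
     * Probe the host address space with PROT_NONE anonymous mappings until
     * one comes back that fits the guest address space and is aligned to
     * the target page size; every unsuitable probe is unmapped again below.
     */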
    for (;; prev = ptr) {
        /*
         * Reserve needed memory area to avoid a race.
         * It should be discarded using:
         *  - mmap() with MAP_FIXED flag
         *  - mremap() with MREMAP_FIXED flag
         *  - shmat() with SHM_REMAP flag
         */
        ptr = mmap(g2h_untagged(addr), size, PROT_NONE,
                   flags, -1, 0);

        /* ENOMEM, if host address space has no memory */
        if (ptr == MAP_FAILED) {
            return (abi_ulong)-1;
        }

        /*
         * Count the number of sequential returns of the same address.
         * This is used to modify the search algorithm below.
         */
        repeat = (ptr == prev ? repeat + 1 : 0);

        if (h2g_valid(ptr + size - 1)) {
            addr = h2g(ptr);

            if ((addr & ~TARGET_PAGE_MASK) == 0) {
                /* Success.  */
                if (start == mmap_next_start && addr >= TASK_UNMAPPED_BASE) {
                    mmap_next_start = addr + size;
                }
                return addr;
            }

            /* The address is not properly aligned for the target.  */
            switch (repeat) {
            case 0:
                /*
                 * Assume the result that the kernel gave us is the
                 * first with enough free space, so start again at the
                 * next higher target page.
                 */
                addr = TARGET_PAGE_ALIGN(addr);
                break;
            case 1:
                /*
                 * Sometimes the kernel decides to perform the allocation
                 * at the top end of memory instead.
                 */
                addr &= TARGET_PAGE_MASK;
                break;
            case 2:
                /* Start over at low memory.  */
                addr = 0;
                break;
            default:
                /* Fail.  This unaligned block must be the last.  */
                addr = -1;
                break;
            }
        } else {
            /*
             * Since the result the kernel gave didn't fit, start
             * again at low memory.  If any repetition, fail.
             */
            addr = (repeat ? -1 : 0);
        }

        /* Unmap and try again.  */
        munmap(ptr, size);

        /* ENOMEM if we checked the whole of the target address space.  */
        if (addr == (abi_ulong)-1) {
            return (abi_ulong)-1;
        } else if (addr == 0) {
            if (wrapped) {
                return (abi_ulong)-1;
            }
            wrapped = 1;
            /*
             * Don't actually use 0 when wrapping, instead indicate
             * that we'd truly like an allocation in low memory.
             */
            addr = TARGET_PAGE_SIZE;
        } else if (wrapped && addr >= start) {
            return (abi_ulong)-1;
        }
    }
}

abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size)
{
    return mmap_find_vma_aligned(start, size, 0);
}

/* NOTE: all the constants are the HOST ones */
abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
                     int flags, int fd, off_t offset)
{
    abi_ulong ret, end, real_start, real_end, retaddr, host_offset, host_len;

    mmap_lock();
    if (qemu_loglevel_mask(CPU_LOG_PAGE)) {
        qemu_log("mmap: start=0x" TARGET_ABI_FMT_lx
                 " len=0x" TARGET_ABI_FMT_lx " prot=%c%c%c flags=",
                 start, len,
                 prot & PROT_READ ? 'r' : '-',
                 prot & PROT_WRITE ? 'w' : '-',
                 prot & PROT_EXEC ? 'x' : '-');
        if (flags & MAP_ALIGNMENT_MASK) {
            qemu_log("MAP_ALIGNED(%u) ",
                     (flags & MAP_ALIGNMENT_MASK) >> MAP_ALIGNMENT_SHIFT);
        }
        if (flags & MAP_GUARD) {
            qemu_log("MAP_GUARD ");
        }
        if (flags & MAP_FIXED) {
            qemu_log("MAP_FIXED ");
        }
        if (flags & MAP_ANON) {
            qemu_log("MAP_ANON ");
        }
        if (flags & MAP_EXCL) {
            qemu_log("MAP_EXCL ");
        }
        if (flags & MAP_PRIVATE) {
            qemu_log("MAP_PRIVATE ");
        }
        if (flags & MAP_SHARED) {
            qemu_log("MAP_SHARED ");
        }
        if (flags & MAP_NOCORE) {
            qemu_log("MAP_NOCORE ");
        }
        if (flags & MAP_STACK) {
            qemu_log("MAP_STACK ");
        }
        qemu_log("fd=%d offset=0x%lx\n", fd, offset);
    }

    if ((flags & MAP_ANON) && fd != -1) {
        errno = EINVAL;
        goto fail;
    }
    if (flags & MAP_STACK) {
        if ((fd != -1) || ((prot & (PROT_READ | PROT_WRITE)) !=
                    (PROT_READ | PROT_WRITE))) {
            errno = EINVAL;
            goto fail;
        }
    }
    if ((flags & MAP_GUARD) && (prot != PROT_NONE || fd != -1 ||
        offset != 0 || (flags & (MAP_SHARED | MAP_PRIVATE |
        /* MAP_PREFAULT | */ /* MAP_PREFAULT not in mman.h */
        MAP_PREFAULT_READ | MAP_ANON | MAP_STACK)) != 0)) {
        errno = EINVAL;
        goto fail;
    }

    if (offset & ~TARGET_PAGE_MASK) {
        errno = EINVAL;
        goto fail;
    }

    if (len == 0) {
        errno = EINVAL;
        goto fail;
    }

    /* Check for overflows */
    len = TARGET_PAGE_ALIGN(len);
    if (len == 0) {
        errno = ENOMEM;
        goto fail;
    }

    real_start = start & qemu_host_page_mask;
    host_offset = offset & qemu_host_page_mask;

    /*
     * If the user is asking for the kernel to find a location, do that
     * before we truncate the length for mapping files below.
     */
    if (!(flags & MAP_FIXED)) {
        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);
        if ((flags & MAP_ALIGNMENT_MASK) != 0) {
            start = mmap_find_vma_aligned(real_start, host_len,
                (flags & MAP_ALIGNMENT_MASK) >> MAP_ALIGNMENT_SHIFT);
        } else {
            start = mmap_find_vma(real_start, host_len);
        }
        if (start == (abi_ulong)-1) {
            errno = ENOMEM;
            goto fail;
        }
    }

    /*
     * When mapping files into a memory area larger than the file, accesses
     * to pages beyond the file size will cause a SIGBUS.
     *
     * For example, if mmapping a file of 100 bytes on a host with 4K pages
     * emulating a target with 8K pages, the target expects to be able to
     * access the first 8K. But the host will trap us on any access beyond
     * 4K.
     *
     * When emulating a target with a larger page size than the host's, we
     * may need to truncate file maps at EOF and add extra anonymous pages
     * up to the target's page boundary.
     */

    if ((qemu_real_host_page_size() < qemu_host_page_size) && fd != -1) {
        struct stat sb;

        if (fstat(fd, &sb) == -1) {
            goto fail;
        }

        /* Are we trying to create a map beyond EOF? */
        if (offset + len > sb.st_size) {
            /*
             * If so, truncate the file map at EOF, aligned to the host's
             * real page size. Additional anonymous maps will be created
             * beyond EOF.
             */
            len = REAL_HOST_PAGE_ALIGN(sb.st_size - offset);
        }
    }
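
    /*
     * For non-fixed requests, first reserve the whole host-page-aligned
     * region with an anonymous MAP_FIXED mapping at the address found
     * above, then (for file mappings) map the file over it.  Any tail left
     * by a file map truncated at EOF above thus stays anonymous.
     */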
    if (!(flags & MAP_FIXED)) {
        unsigned long host_start;
        void *p;

        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);

        /*
         * Note: we prefer to control the mapping address. It is
         * especially important if qemu_host_page_size >
         * qemu_real_host_page_size
         */
        p = mmap(g2h_untagged(start), host_len, prot,
                 flags | MAP_FIXED | ((fd != -1) ? MAP_ANON : 0), -1, 0);
        if (p == MAP_FAILED) {
            goto fail;
        }
        /* update start so that it points to the file position at 'offset' */
        host_start = (unsigned long)p;
        if (fd != -1) {
            p = mmap(g2h_untagged(start), len, prot,
                     flags | MAP_FIXED, fd, host_offset);
            if (p == MAP_FAILED) {
                munmap(g2h_untagged(start), host_len);
                goto fail;
            }
            host_start += offset - host_offset;
        }
        start = h2g(host_start);
    } else {
        if (start & ~TARGET_PAGE_MASK) {
            errno = EINVAL;
            goto fail;
        }
        end = start + len;
        real_end = HOST_PAGE_ALIGN(end);

        /*
         * Test if the requested memory area fits the target address space.
         * It can fail only on a 64-bit host with a 32-bit target.
         * On any other target/host combination, the host mmap() handles
         * this error correctly.
         */
        if (!guest_range_valid_untagged(start, len)) {
            errno = EINVAL;
            goto fail;
        }

        /*
         * worst case: we cannot map the file because the offset is not
         * aligned, so we read it
         */
        if (fd != -1 &&
            (offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) {
            /*
             * msync() won't work here, so we return an error if write is
             * possible while it is a shared mapping
             */
            if ((flags & TARGET_BSD_MAP_FLAGMASK) == MAP_SHARED &&
                (prot & PROT_WRITE)) {
                errno = EINVAL;
                goto fail;
            }
            retaddr = target_mmap(start, len, prot | PROT_WRITE,
                                  MAP_FIXED | MAP_PRIVATE | MAP_ANON,
                                  -1, 0);
            if (retaddr == -1) {
                goto fail;
            }
            if (!mmap_pread(fd, g2h_untagged(start), len, offset, false)) {
                goto fail;
            }
            if (!(prot & PROT_WRITE)) {
                ret = target_mprotect(start, len, prot);
                assert(ret == 0);
            }
            goto the_end;
        }

        /* Reject the mapping if any page within the range is mapped */
        if ((flags & MAP_EXCL) && !page_check_range_empty(start, end - 1)) {
            errno = EINVAL;
            goto fail;
        }

        /* handle the start of the mapping */
        if (start > real_start) {
            if (real_end == real_start + qemu_host_page_size) {
                /* one single host page */
                ret = mmap_frag(real_start, start, end,
                                prot, flags, fd, offset);
                if (ret == -1) {
                    goto fail;
                }
                goto the_end1;
            }
            ret = mmap_frag(real_start, start, real_start + qemu_host_page_size,
                            prot, flags, fd, offset);
            if (ret == -1) {
                goto fail;
            }
            real_start += qemu_host_page_size;
        }
        /* handle the end of the mapping */
        if (end < real_end) {
            ret = mmap_frag(real_end - qemu_host_page_size,
                            real_end - qemu_host_page_size, end,
                            prot, flags, fd,
                            offset + real_end - qemu_host_page_size - start);
            if (ret == -1) {
                goto fail;
            }
            real_end -= qemu_host_page_size;
        }

        /* map the middle (easier) */
        if (real_start < real_end) {
            void *p;
            unsigned long offset1;
            if (flags & MAP_ANON) {
                offset1 = 0;
            } else {
                offset1 = offset + real_start - start;
            }
            p = mmap(g2h_untagged(real_start), real_end - real_start,
                     prot, flags, fd, offset1);
            if (p == MAP_FAILED) {
                goto fail;
            }
        }
    }
 the_end1:
    page_set_flags(start, start + len - 1, prot | PAGE_VALID);
 the_end:
#ifdef DEBUG_MMAP
    printf("ret=0x" TARGET_ABI_FMT_lx "\n", start);
    page_dump(stdout);
    printf("\n");
#endif
    mmap_unlock();
    return start;
fail:
    mmap_unlock();
    return -1;
}
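
/*
 * Replace a guest range with an inaccessible anonymous mapping rather than
 * unmapping it, so that pre-reserved guest address space (reserved_va)
 * stays reserved on the host.  Host pages at either edge that still hold
 * live guest pages are left untouched.
 */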
void mmap_reserve(abi_ulong start, abi_ulong size)
{
    abi_ulong real_start;
    abi_ulong real_end;
    abi_ulong addr;
    abi_ulong end;
    int prot;

    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(start + size);
    end = start + size;
    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0) {
            real_start += qemu_host_page_size;
        }
    }
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0) {
            real_end -= qemu_host_page_size;
        }
    }
    if (real_start != real_end) {
        mmap(g2h_untagged(real_start), real_end - real_start, PROT_NONE,
             MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0);
    }
}

int target_munmap(abi_ulong start, abi_ulong len)
{
    abi_ulong end, real_start, real_end, addr;
    int prot, ret;

#ifdef DEBUG_MMAP
    printf("munmap: start=0x" TARGET_ABI_FMT_lx " len=0x"
           TARGET_ABI_FMT_lx "\n",
           start, len);
#endif
    if (start & ~TARGET_PAGE_MASK) {
        return -EINVAL;
    }
    len = TARGET_PAGE_ALIGN(len);
    if (len == 0) {
        return -EINVAL;
    }
    mmap_lock();
    end = start + len;
    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(end);
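
    /*
     * Host pages straddling the start or end of the range may still hold
     * guest pages outside [start, end); keep those host pages mapped and
     * shrink the range actually released below.
     */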
    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0) {
            real_start += qemu_host_page_size;
        }
    }
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0) {
            real_end -= qemu_host_page_size;
        }
    }

    ret = 0;
    /* unmap what we can */
    if (real_start < real_end) {
        if (reserved_va) {
            mmap_reserve(real_start, real_end - real_start);
        } else {
            ret = munmap(g2h_untagged(real_start), real_end - real_start);
        }
    }

    if (ret == 0) {
        page_set_flags(start, start + len - 1, 0);
    }
    mmap_unlock();
    return ret;
}

int target_msync(abi_ulong start, abi_ulong len, int flags)
{
    abi_ulong end;

    if (start & ~TARGET_PAGE_MASK) {
        return -EINVAL;
    }
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (end < start) {
        return -EINVAL;
    }
    if (end == start) {
        return 0;
    }

    start &= qemu_host_page_mask;
    return msync(g2h_untagged(start), end - start, flags);
}