xref: /openbmc/qemu/bsd-user/mmap.c (revision 948516a3fac0bdd47eb127fe1a86148ed86d5c65)
/*
 *  mmap support for qemu
 *
 *  Copyright (c) 2003 - 2008 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "qemu.h"
#include "qemu-common.h"

//#define DEBUG_MMAP

static pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER;
static __thread int mmap_lock_count;

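/*
 * Recursive lock serialising updates to the guest page tables and to the
 * host mappings below. Each thread keeps its own nesting count, so the
 * mutex is only taken on the outermost mmap_lock() and released when the
 * count drops back to zero.
 */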
void mmap_lock(void)
{
    if (mmap_lock_count++ == 0) {
        pthread_mutex_lock(&mmap_mutex);
    }
}

void mmap_unlock(void)
{
    if (--mmap_lock_count == 0) {
        pthread_mutex_unlock(&mmap_mutex);
    }
}

bool have_mmap_lock(void)
{
    return mmap_lock_count > 0;
}

/* Grab lock to make sure things are in a consistent state after fork().  */
void mmap_fork_start(void)
{
    if (mmap_lock_count) {
        abort();
    }
    pthread_mutex_lock(&mmap_mutex);
}

void mmap_fork_end(int child)
{
    if (child) {
        pthread_mutex_init(&mmap_mutex, NULL);
    } else {
        pthread_mutex_unlock(&mmap_mutex);
    }
}

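/*
 * Change the protection of a guest address range. When the host page size is
 * larger than the target page size, a host page may contain target pages that
 * lie outside the requested range; such partial host pages receive the union
 * of the protections of every target page they contain, so neighbouring guest
 * mappings do not lose access. Illustrative example (not from the source):
 * with 16K host pages and 4K target pages, mprotect'ing the two middle target
 * pages of a host page keeps the two outer pages accessible on the host if
 * they were accessible before.
 */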
/* NOTE: all the constants are the HOST ones, but addresses are target. */
int target_mprotect(abi_ulong start, abi_ulong len, int prot)
{
    abi_ulong end, host_start, host_end, addr;
    int prot1, ret;

#ifdef DEBUG_MMAP
    printf("mprotect: start=0x" TARGET_ABI_FMT_lx
           " len=0x" TARGET_ABI_FMT_lx " prot=%c%c%c\n", start, len,
           prot & PROT_READ ? 'r' : '-',
           prot & PROT_WRITE ? 'w' : '-',
           prot & PROT_EXEC ? 'x' : '-');
#endif

    if ((start & ~TARGET_PAGE_MASK) != 0) {
        return -EINVAL;
    }
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (end < start) {
        return -EINVAL;
    }
    prot &= PROT_READ | PROT_WRITE | PROT_EXEC;
    if (len == 0) {
        return 0;
    }

    mmap_lock();
    host_start = start & qemu_host_page_mask;
    host_end = HOST_PAGE_ALIGN(end);
    if (start > host_start) {
        /* handle host page containing start */
        prot1 = prot;
        for (addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        if (host_end == host_start + qemu_host_page_size) {
            for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(addr);
            }
            end = host_end;
        }
        ret = mprotect(g2h_untagged(host_start),
                       qemu_host_page_size, prot1 & PAGE_BITS);
        if (ret != 0) {
            goto error;
        }
        host_start += qemu_host_page_size;
    }
    if (end < host_end) {
        prot1 = prot;
        for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        ret = mprotect(g2h_untagged(host_end - qemu_host_page_size),
                       qemu_host_page_size, prot1 & PAGE_BITS);
        if (ret != 0) {
            goto error;
        }
        host_end -= qemu_host_page_size;
    }

    /* handle the pages in the middle */
    if (host_start < host_end) {
        ret = mprotect(g2h_untagged(host_start), host_end - host_start, prot);
        if (ret != 0) {
            goto error;
        }
    }
    page_set_flags(start, start + len, prot | PAGE_VALID);
    mmap_unlock();
    return 0;
error:
    mmap_unlock();
    return ret;
}

/* map an incomplete host page */
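/*
 * A host page that is only partially covered by the new guest mapping cannot
 * simply be mmap'd: the target pages outside the request must keep their
 * current permissions. The host page is therefore allocated (or re-protected)
 * as a whole, and for file-backed mappings the covered bytes are read in with
 * pread(), since only part of the host page belongs to the file.
 */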
static int mmap_frag(abi_ulong real_start,
                     abi_ulong start, abi_ulong end,
                     int prot, int flags, int fd, abi_ulong offset)
{
    abi_ulong real_end, addr;
    void *host_start;
    int prot1, prot_new;

    real_end = real_start + qemu_host_page_size;
    host_start = g2h_untagged(real_start);

    /* get the protection of the target pages outside the mapping */
    prot1 = 0;
    for (addr = real_start; addr < real_end; addr += TARGET_PAGE_SIZE) {
        if (addr < start || addr >= end) {
            prot1 |= page_get_flags(addr);
        }
    }

    if (prot1 == 0) {
        /* no page was there, so we allocate one */
        void *p = mmap(host_start, qemu_host_page_size, prot,
                       flags | MAP_ANON, -1, 0);
        if (p == MAP_FAILED) {
            return -1;
        }
        prot1 = prot;
    }
    prot1 &= PAGE_BITS;

    prot_new = prot | prot1;
    if (!(flags & MAP_ANON)) {
        /*
         * msync() won't work here, so we return an error if write is
         * possible while it is a shared mapping
         */
        if ((flags & TARGET_BSD_MAP_FLAGMASK) == MAP_SHARED &&
            (prot & PROT_WRITE)) {
            return -1;
        }

        /* adjust protection to be able to read */
        if (!(prot1 & PROT_WRITE)) {
            mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE);
        }

        /* read the corresponding file data */
        pread(fd, g2h_untagged(start), end - start, offset);

        /* put final protection */
        if (prot_new != (prot1 | PROT_WRITE)) {
            mprotect(host_start, qemu_host_page_size, prot_new);
        }
    } else {
        if (prot_new != prot1) {
            mprotect(host_start, qemu_host_page_size, prot_new);
        }
        if (prot_new & PROT_WRITE) {
            memset(g2h_untagged(start), 0, end - start);
        }
    }
    return 0;
}

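/*
 * Default base address for guest mappings when the caller lets QEMU pick the
 * location; mmap_next_start records where the next default search should
 * begin.
 */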
#if HOST_LONG_BITS == 64 && TARGET_ABI_BITS == 64
# define TASK_UNMAPPED_BASE  (1ul << 38)
#else
# define TASK_UNMAPPED_BASE  0x40000000
#endif
abi_ulong mmap_next_start = TASK_UNMAPPED_BASE;

unsigned long last_brk;

/*
 * Subroutine of mmap_find_vma, used when we have pre-allocated a chunk of guest
 * address space.
 */
static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
                                        abi_ulong alignment)
{
    abi_ulong addr;
    abi_ulong end_addr;
    int prot;
    int looped = 0;

    if (size > reserved_va) {
        return (abi_ulong)-1;
    }

    size = HOST_PAGE_ALIGN(size) + alignment;
    end_addr = start + size;
    if (end_addr > reserved_va) {
        end_addr = reserved_va;
    }
    addr = end_addr - qemu_host_page_size;

    while (1) {
        if (addr > end_addr) {
            if (looped) {
                return (abi_ulong)-1;
            }
            end_addr = reserved_va;
            addr = end_addr - qemu_host_page_size;
            looped = 1;
            continue;
        }
        prot = page_get_flags(addr);
        if (prot) {
            end_addr = addr;
        }
        if (end_addr - addr >= size) {
            break;
        }
        addr -= qemu_host_page_size;
    }

    if (start == mmap_next_start) {
        mmap_next_start = addr;
    }
    /* addr is sufficiently low to align it up */
    if (alignment != 0) {
        addr = (addr + alignment) & ~(alignment - 1);
    }
    return addr;
}

/*
 * Find and reserve a free memory area of size 'size'. The search
 * starts at 'start'.
 * It must be called with mmap_lock() held.
 * Returns -1 on error.
 */
static abi_ulong mmap_find_vma_aligned(abi_ulong start, abi_ulong size,
                                       abi_ulong alignment)
{
    void *ptr, *prev;
    abi_ulong addr;
    int flags;
    int wrapped, repeat;

    /* If 'start' == 0, then a default start address is used. */
    if (start == 0) {
        start = mmap_next_start;
    } else {
        start &= qemu_host_page_mask;
    }

    size = HOST_PAGE_ALIGN(size);

    if (reserved_va) {
        return mmap_find_vma_reserved(start, size,
            (alignment != 0 ? 1 << alignment : 0));
    }

    addr = start;
    wrapped = repeat = 0;
    prev = NULL;
    flags = MAP_ANONYMOUS | MAP_PRIVATE;
#ifdef MAP_ALIGNED
    if (alignment != 0) {
        flags |= MAP_ALIGNED(alignment);
    }
#else
    /* XXX TODO */
#endif

    for (;; prev = ptr) {
        /*
         * Reserve needed memory area to avoid a race.
         * It should be discarded using:
         *  - mmap() with MAP_FIXED flag
         *  - mremap() with MREMAP_FIXED flag
         *  - shmat() with SHM_REMAP flag
         */
        ptr = mmap(g2h_untagged(addr), size, PROT_NONE,
                   flags, -1, 0);

        /* ENOMEM, if host address space has no memory */
        if (ptr == MAP_FAILED) {
            return (abi_ulong)-1;
        }

        /*
         * Count the number of sequential returns of the same address.
         * This is used to modify the search algorithm below.
         */
        repeat = (ptr == prev ? repeat + 1 : 0);

        if (h2g_valid(ptr + size - 1)) {
            addr = h2g(ptr);

            if ((addr & ~TARGET_PAGE_MASK) == 0) {
                /* Success.  */
                if (start == mmap_next_start && addr >= TASK_UNMAPPED_BASE) {
                    mmap_next_start = addr + size;
                }
                return addr;
            }

            /* The address is not properly aligned for the target.  */
            switch (repeat) {
            case 0:
                /*
                 * Assume the result that the kernel gave us is the
                 * first with enough free space, so start again at the
                 * next higher target page.
                 */
                addr = TARGET_PAGE_ALIGN(addr);
                break;
            case 1:
                /*
                 * Sometimes the kernel decides to perform the allocation
                 * at the top end of memory instead.
                 */
                addr &= TARGET_PAGE_MASK;
                break;
            case 2:
                /* Start over at low memory.  */
                addr = 0;
                break;
            default:
                /* Fail.  This unaligned block must be the last.  */
                addr = -1;
                break;
            }
        } else {
            /*
             * Since the result the kernel gave didn't fit, start
             * again at low memory.  If any repetition, fail.
             */
            addr = (repeat ? -1 : 0);
        }

        /* Unmap and try again.  */
        munmap(ptr, size);

        /* ENOMEM if we checked the whole of the target address space.  */
        if (addr == (abi_ulong)-1) {
            return (abi_ulong)-1;
        } else if (addr == 0) {
            if (wrapped) {
                return (abi_ulong)-1;
            }
            wrapped = 1;
            /*
             * Don't actually use 0 when wrapping, instead indicate
             * that we'd truly like an allocation in low memory.
             */
            addr = TARGET_PAGE_SIZE;
        } else if (wrapped && addr >= start) {
            return (abi_ulong)-1;
        }
    }
}

abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size)
{
    return mmap_find_vma_aligned(start, size, 0);
}

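/*
 * Guest-side mmap(). Three cases are handled below:
 *  - without MAP_FIXED, a suitable guest range is found with
 *    mmap_find_vma() and the host mapping is then forced there;
 *  - with MAP_FIXED and a file offset that is misaligned with respect to
 *    the host page size, the file cannot be mapped directly, so an
 *    anonymous mapping is created and the contents are read in with
 *    pread();
 *  - with MAP_FIXED otherwise, partial host pages at either end are fixed
 *    up with mmap_frag() and the host-page-aligned middle is mapped
 *    directly.
 */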
/* NOTE: all the constants are the HOST ones */
abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
                     int flags, int fd, off_t offset)
{
    abi_ulong ret, end, real_start, real_end, retaddr, host_offset, host_len;

    mmap_lock();
#ifdef DEBUG_MMAP
    {
        printf("mmap: start=0x" TARGET_ABI_FMT_lx
               " len=0x" TARGET_ABI_FMT_lx " prot=%c%c%c flags=",
               start, len,
               prot & PROT_READ ? 'r' : '-',
               prot & PROT_WRITE ? 'w' : '-',
               prot & PROT_EXEC ? 'x' : '-');
        if (flags & MAP_ALIGNMENT_MASK) {
            printf("MAP_ALIGNED(%u) ", (flags & MAP_ALIGNMENT_MASK)
                    >> MAP_ALIGNMENT_SHIFT);
        }
#ifdef MAP_GUARD
        if (flags & MAP_GUARD) {
            printf("MAP_GUARD ");
        }
#endif
        if (flags & MAP_FIXED) {
            printf("MAP_FIXED ");
        }
        if (flags & MAP_ANONYMOUS) {
            printf("MAP_ANON ");
        }
#ifdef MAP_EXCL
        if (flags & MAP_EXCL) {
            printf("MAP_EXCL ");
        }
#endif
        if (flags & MAP_PRIVATE) {
            printf("MAP_PRIVATE ");
        }
        if (flags & MAP_SHARED) {
            printf("MAP_SHARED ");
        }
        if (flags & MAP_NOCORE) {
            printf("MAP_NOCORE ");
        }
#ifdef MAP_STACK
        if (flags & MAP_STACK) {
            printf("MAP_STACK ");
        }
#endif
        printf("fd=%d offset=0x%llx\n", fd, (unsigned long long)offset);
    }
#endif

    if ((flags & MAP_ANONYMOUS) && fd != -1) {
        errno = EINVAL;
        goto fail;
    }
#ifdef MAP_STACK
    if (flags & MAP_STACK) {
        if ((fd != -1) || ((prot & (PROT_READ | PROT_WRITE)) !=
                    (PROT_READ | PROT_WRITE))) {
            errno = EINVAL;
            goto fail;
        }
    }
#endif /* MAP_STACK */
#ifdef MAP_GUARD
    if ((flags & MAP_GUARD) && (prot != PROT_NONE || fd != -1 ||
        offset != 0 || (flags & (MAP_SHARED | MAP_PRIVATE |
        /* MAP_PREFAULT | */ /* MAP_PREFAULT not in mman.h */
        MAP_PREFAULT_READ | MAP_ANON | MAP_STACK)) != 0)) {
        errno = EINVAL;
        goto fail;
    }
#endif

    if (offset & ~TARGET_PAGE_MASK) {
        errno = EINVAL;
        goto fail;
    }

    len = TARGET_PAGE_ALIGN(len);
    if (len == 0) {
        errno = EINVAL;
        goto fail;
    }
    real_start = start & qemu_host_page_mask;
    host_offset = offset & qemu_host_page_mask;

    /*
     * If the user is asking for the kernel to find a location, do that
     * before we truncate the length for mapping files below.
     */
    if (!(flags & MAP_FIXED)) {
        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);
        if ((flags & MAP_ALIGNMENT_MASK) != 0) {
            start = mmap_find_vma_aligned(real_start, host_len,
                (flags & MAP_ALIGNMENT_MASK) >> MAP_ALIGNMENT_SHIFT);
        } else {
            start = mmap_find_vma(real_start, host_len);
        }
        if (start == (abi_ulong)-1) {
            errno = ENOMEM;
            goto fail;
        }
    }

    /*
     * When mapping files into a memory area larger than the file, accesses
     * to pages beyond the file size will cause a SIGBUS.
     *
     * For example, if mmap-ing a file of 100 bytes on a host with 4K pages
     * emulating a target with 8K pages, the target expects to be able to
     * access the first 8K. But the host will trap us on any access beyond
     * 4K.
     *
     * When emulating a target with a larger page size than the host's, we
     * may need to truncate file maps at EOF and add extra anonymous pages
     * up to the target's page boundary.
     */

    if ((qemu_real_host_page_size < qemu_host_page_size) && fd != -1) {
        struct stat sb;

        if (fstat(fd, &sb) == -1) {
            goto fail;
        }

        /* Are we trying to create a map beyond EOF? */
        if (offset + len > sb.st_size) {
            /*
             * If so, truncate the file map at EOF, aligned to the host's
             * real page size. Additional anonymous maps will be created
             * beyond EOF.
             */
            len = REAL_HOST_PAGE_ALIGN(sb.st_size - offset);
        }
    }

    if (!(flags & MAP_FIXED)) {
        unsigned long host_start;
        void *p;

        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);

        /*
         * Note: we prefer to control the mapping address. It is
         * especially important if qemu_host_page_size >
         * qemu_real_host_page_size
         */
        p = mmap(g2h_untagged(start), host_len, prot,
                 flags | MAP_FIXED | ((fd != -1) ? MAP_ANONYMOUS : 0), -1, 0);
        if (p == MAP_FAILED) {
            goto fail;
        }
        /* update start so that it points to the file position at 'offset' */
        host_start = (unsigned long)p;
        if (fd != -1) {
            p = mmap(g2h_untagged(start), len, prot,
                     flags | MAP_FIXED, fd, host_offset);
            if (p == MAP_FAILED) {
                munmap(g2h_untagged(start), host_len);
                goto fail;
            }
            host_start += offset - host_offset;
        }
        start = h2g(host_start);
    } else {
        if (start & ~TARGET_PAGE_MASK) {
            errno = EINVAL;
            goto fail;
        }
        end = start + len;
        real_end = HOST_PAGE_ALIGN(end);

        /*
         * Test if the requested memory area fits the target address space.
         * It can fail only on a 64-bit host with a 32-bit target.
         * On any other target/host combination the host mmap() handles
         * this error correctly.
         */
#if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
        if ((unsigned long)start + len - 1 > (abi_ulong) -1) {
            errno = EINVAL;
            goto fail;
        }
#endif

        /*
         * worst case: we cannot map the file because the offset is not
         * aligned, so we read it
         */
        if (!(flags & MAP_ANON) &&
            (offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) {
            /*
             * msync() won't work here, so we return an error if write is
             * possible while it is a shared mapping
             */
            if ((flags & TARGET_BSD_MAP_FLAGMASK) == MAP_SHARED &&
                (prot & PROT_WRITE)) {
                errno = EINVAL;
                goto fail;
            }
            retaddr = target_mmap(start, len, prot | PROT_WRITE,
                                  MAP_FIXED | MAP_PRIVATE | MAP_ANON,
                                  -1, 0);
            if (retaddr == -1) {
                goto fail;
            }
            pread(fd, g2h_untagged(start), len, offset);
            if (!(prot & PROT_WRITE)) {
                ret = target_mprotect(start, len, prot);
                if (ret != 0) {
                    start = ret;
                    goto the_end;
                }
            }
            goto the_end;
        }

        /* handle the start of the mapping */
        if (start > real_start) {
            if (real_end == real_start + qemu_host_page_size) {
                /* one single host page */
                ret = mmap_frag(real_start, start, end,
                                prot, flags, fd, offset);
                if (ret == -1) {
                    goto fail;
                }
                goto the_end1;
            }
            ret = mmap_frag(real_start, start, real_start + qemu_host_page_size,
                            prot, flags, fd, offset);
            if (ret == -1) {
                goto fail;
            }
            real_start += qemu_host_page_size;
        }
        /* handle the end of the mapping */
        if (end < real_end) {
            ret = mmap_frag(real_end - qemu_host_page_size,
                            real_end - qemu_host_page_size, end,
                            prot, flags, fd,
                            offset + real_end - qemu_host_page_size - start);
            if (ret == -1) {
                goto fail;
            }
            real_end -= qemu_host_page_size;
        }

        /* map the middle (easier) */
        if (real_start < real_end) {
            void *p;
            unsigned long offset1;
            if (flags & MAP_ANON) {
                offset1 = 0;
            } else {
                offset1 = offset + real_start - start;
            }
            p = mmap(g2h_untagged(real_start), real_end - real_start,
                     prot, flags, fd, offset1);
            if (p == MAP_FAILED) {
                goto fail;
            }
        }
    }
 the_end1:
    page_set_flags(start, start + len, prot | PAGE_VALID);
 the_end:
#ifdef DEBUG_MMAP
    printf("ret=0x" TARGET_ABI_FMT_lx "\n", start);
    page_dump(stdout);
    printf("\n");
#endif
    tb_invalidate_phys_range(start, start + len);
    mmap_unlock();
    return start;
fail:
    mmap_unlock();
    return -1;
}

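/*
 * With a reserved guest address space (reserved_va), freed regions are never
 * returned to the host; they are remapped PROT_NONE instead, so the
 * reservation stays in place for future guest mappings.
 */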
static void mmap_reserve(abi_ulong start, abi_ulong size)
{
    abi_ulong real_start;
    abi_ulong real_end;
    abi_ulong addr;
    abi_ulong end;
    int prot;

    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(start + size);
    end = start + size;
    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0) {
            real_start += qemu_host_page_size;
        }
    }
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0) {
            real_end -= qemu_host_page_size;
        }
    }
    if (real_start != real_end) {
        mmap(g2h_untagged(real_start), real_end - real_start, PROT_NONE,
             MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE,
             -1, 0);
    }
}

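/*
 * Guest-side munmap(). Host pages that are only partially covered by the
 * range are kept, since their remaining target pages may still be in use;
 * fully covered host pages are unmapped, or re-reserved with mmap_reserve()
 * when reserved_va is in effect.
 */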
int target_munmap(abi_ulong start, abi_ulong len)
{
    abi_ulong end, real_start, real_end, addr;
    int prot, ret;

#ifdef DEBUG_MMAP
    printf("munmap: start=0x" TARGET_ABI_FMT_lx " len=0x"
           TARGET_ABI_FMT_lx "\n",
           start, len);
#endif
    if (start & ~TARGET_PAGE_MASK) {
        return -EINVAL;
    }
    len = TARGET_PAGE_ALIGN(len);
    if (len == 0) {
        return -EINVAL;
    }
    mmap_lock();
    end = start + len;
    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(end);

    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0) {
            real_start += qemu_host_page_size;
        }
    }
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0) {
            real_end -= qemu_host_page_size;
        }
    }

    ret = 0;
    /* unmap what we can */
    if (real_start < real_end) {
        if (reserved_va) {
            mmap_reserve(real_start, real_end - real_start);
        } else {
            ret = munmap(g2h_untagged(real_start), real_end - real_start);
        }
    }

    if (ret == 0) {
        page_set_flags(start, start + len, 0);
        tb_invalidate_phys_range(start, start + len);
    }
    mmap_unlock();
    return ret;
}

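/*
 * Guest-side msync(): round the start down to a host page boundary and
 * forward the request to the host.
 */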
int target_msync(abi_ulong start, abi_ulong len, int flags)
{
    abi_ulong end;

    if (start & ~TARGET_PAGE_MASK) {
        return -EINVAL;
    }
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (end < start) {
        return -EINVAL;
    }
    if (end == start) {
        return 0;
    }

    start &= qemu_host_page_mask;
    return msync(g2h_untagged(start), end - start, flags);
}
785