/*
 *  mmap support for qemu
 *
 *  Copyright (c) 2003 - 2008 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "qemu.h"
#include "qemu-common.h"

//#define DEBUG_MMAP

static pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER;
static __thread int mmap_lock_count;

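/*
 * mmap_lock/mmap_unlock form a recursive lock: a per-thread nesting
 * counter lets the same thread re-enter freely, and only the outermost
 * lock/unlock pair touches the pthread mutex.
 */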
void mmap_lock(void)
{
    if (mmap_lock_count++ == 0) {
        pthread_mutex_lock(&mmap_mutex);
    }
}

void mmap_unlock(void)
{
    if (--mmap_lock_count == 0) {
        pthread_mutex_unlock(&mmap_mutex);
    }
}

bool have_mmap_lock(void)
{
    return mmap_lock_count > 0;
}

/* Grab lock to make sure things are in a consistent state after fork().  */
void mmap_fork_start(void)
{
    if (mmap_lock_count) {
        abort();
    }
    pthread_mutex_lock(&mmap_mutex);
}

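/*
 * In the child, the mutex is reset to a fresh unlocked state rather
 * than unlocked, since only the forking thread survives fork(); the
 * parent simply releases the lock taken in mmap_fork_start().
 */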
void mmap_fork_end(int child)
{
    if (child) {
        pthread_mutex_init(&mmap_mutex, NULL);
    } else {
        pthread_mutex_unlock(&mmap_mutex);
    }
}

/* NOTE: all the constants are the HOST ones, but addresses are target. */
int target_mprotect(abi_ulong start, abi_ulong len, int prot)
{
    abi_ulong end, host_start, host_end, addr;
    int prot1, ret;

#ifdef DEBUG_MMAP
    printf("mprotect: start=0x" TARGET_ABI_FMT_lx
           " len=0x" TARGET_ABI_FMT_lx " prot=%c%c%c\n", start, len,
           prot & PROT_READ ? 'r' : '-',
           prot & PROT_WRITE ? 'w' : '-',
           prot & PROT_EXEC ? 'x' : '-');
#endif

    if ((start & ~TARGET_PAGE_MASK) != 0) {
        return -EINVAL;
    }
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (end < start) {
        return -EINVAL;
    }
    prot &= PROT_READ | PROT_WRITE | PROT_EXEC;
    if (len == 0) {
        return 0;
    }

    mmap_lock();
    host_start = start & qemu_host_page_mask;
    host_end = HOST_PAGE_ALIGN(end);
    if (start > host_start) {
        /* handle host page containing start */
        prot1 = prot;
        for (addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        if (host_end == host_start + qemu_host_page_size) {
            for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(addr);
            }
            end = host_end;
        }
        ret = mprotect(g2h_untagged(host_start),
                       qemu_host_page_size, prot1 & PAGE_BITS);
        if (ret != 0) {
            goto error;
        }
        host_start += qemu_host_page_size;
    }
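    /* handle host page containing the end of the range */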
    if (end < host_end) {
        prot1 = prot;
        for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        ret = mprotect(g2h_untagged(host_end - qemu_host_page_size),
                       qemu_host_page_size, prot1 & PAGE_BITS);
        if (ret != 0) {
            goto error;
        }
        host_end -= qemu_host_page_size;
    }

    /* handle the pages in the middle */
    if (host_start < host_end) {
        ret = mprotect(g2h_untagged(host_start), host_end - host_start, prot);
        if (ret != 0) {
            goto error;
        }
    }
    page_set_flags(start, start + len, prot | PAGE_VALID);
    mmap_unlock();
    return 0;
error:
    mmap_unlock();
    return ret;
}

/*
 * Map an incomplete host page: the target range 'start'..'end' lies
 * inside the single host page that begins at 'real_start'. Protections
 * of target pages already present in that host page are merged in, and
 * for non-anonymous mappings the file contents are read in with pread().
 */
static int mmap_frag(abi_ulong real_start,
                     abi_ulong start, abi_ulong end,
                     int prot, int flags, int fd, abi_ulong offset)
{
    abi_ulong real_end, addr;
    void *host_start;
    int prot1, prot_new;

    real_end = real_start + qemu_host_page_size;
    host_start = g2h_untagged(real_start);

    /* get the protection of the target pages outside the mapping */
    prot1 = 0;
    for (addr = real_start; addr < real_end; addr++) {
        if (addr < start || addr >= end) {
            prot1 |= page_get_flags(addr);
        }
    }

    if (prot1 == 0) {
        /* no page was there, so we allocate one */
        void *p = mmap(host_start, qemu_host_page_size, prot,
                       flags | MAP_ANON, -1, 0);
        if (p == MAP_FAILED) {
            return -1;
        }
        prot1 = prot;
    }
    prot1 &= PAGE_BITS;

    prot_new = prot | prot1;
    if (!(flags & MAP_ANON)) {
        /*
         * msync() won't work here, so we return an error if write is
         * possible while it is a shared mapping.
         */
        if ((flags & TARGET_BSD_MAP_FLAGMASK) == MAP_SHARED &&
            (prot & PROT_WRITE)) {
            return -1;
        }

        /* adjust protection to be able to read */
        if (!(prot1 & PROT_WRITE)) {
            mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE);
        }

        /* read the corresponding file data */
        if (pread(fd, g2h_untagged(start), end - start, offset) == -1) {
            return -1;
        }

        /* put final protection */
        if (prot_new != (prot1 | PROT_WRITE)) {
            mprotect(host_start, qemu_host_page_size, prot_new);
        }
    } else {
        if (prot_new != prot1) {
            mprotect(host_start, qemu_host_page_size, prot_new);
        }
        if (prot_new & PROT_WRITE) {
            memset(g2h_untagged(start), 0, end - start);
        }
    }
    return 0;
}

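/*
 * Default base address at which mmap_find_vma() starts searching when
 * the guest does not request a fixed mapping address.
 */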
#if HOST_LONG_BITS == 64 && TARGET_ABI_BITS == 64
# define TASK_UNMAPPED_BASE  (1ul << 38)
#else
# define TASK_UNMAPPED_BASE  0x40000000
#endif
abi_ulong mmap_next_start = TASK_UNMAPPED_BASE;

unsigned long last_brk;

/*
 * Subroutine of mmap_find_vma, used when we have pre-allocated a chunk of guest
 * address space.
 */
static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
                                        abi_ulong alignment)
{
    abi_ulong addr;
    abi_ulong end_addr;
    int prot;
    int looped = 0;

    if (size > reserved_va) {
        return (abi_ulong)-1;
    }

    size = HOST_PAGE_ALIGN(size) + alignment;
    end_addr = start + size;
    if (end_addr > reserved_va) {
        end_addr = reserved_va;
    }
    addr = end_addr - qemu_host_page_size;

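    /*
     * Scan downward from end_addr, one host page at a time, until a run
     * of at least 'size' bytes with no valid target pages is found;
     * wrap around to the top of the reserved area at most once.
     */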
    while (1) {
        if (addr > end_addr) {
            if (looped) {
                return (abi_ulong)-1;
            }
            end_addr = reserved_va;
            addr = end_addr - qemu_host_page_size;
            looped = 1;
            continue;
        }
        prot = page_get_flags(addr);
        if (prot) {
            end_addr = addr;
        }
        if (end_addr - addr >= size) {
            break;
        }
        addr -= qemu_host_page_size;
    }

    if (start == mmap_next_start) {
        mmap_next_start = addr;
    }
    /* addr is sufficiently low to align it up */
    if (alignment != 0) {
        addr = (addr + alignment) & ~(alignment - 1);
    }
    return addr;
}

/*
 * Find and reserve a free memory area of size 'size'. The search
 * starts at 'start'.
 * It must be called with mmap_lock() held.
 * Returns -1 on error.
 */
static abi_ulong mmap_find_vma_aligned(abi_ulong start, abi_ulong size,
                                       abi_ulong alignment)
{
    void *ptr, *prev;
    abi_ulong addr;
    int flags;
    int wrapped, repeat;

    /* If 'start' == 0, then a default start address is used. */
    if (start == 0) {
        start = mmap_next_start;
    } else {
        start &= qemu_host_page_mask;
    }

    size = HOST_PAGE_ALIGN(size);

    if (reserved_va) {
        return mmap_find_vma_reserved(start, size,
            (alignment != 0 ? 1 << alignment : 0));
    }

    addr = start;
    wrapped = repeat = 0;
    prev = NULL;
    flags = MAP_ANONYMOUS | MAP_PRIVATE;
#ifdef MAP_ALIGNED
    if (alignment != 0) {
        flags |= MAP_ALIGNED(alignment);
    }
#else
    /* XXX TODO */
#endif

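    /*
     * Probe the host with PROT_NONE anonymous mappings: keep the first
     * result that both fits in the guest address space and is aligned
     * to a target page; otherwise unmap it and retry at an adjusted
     * address.
     */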
    for (;; prev = ptr) {
        /*
         * Reserve needed memory area to avoid a race.
         * It should be discarded using:
         *  - mmap() with MAP_FIXED flag
         *  - mremap() with MREMAP_FIXED flag
         *  - shmat() with SHM_REMAP flag
         */
        ptr = mmap(g2h_untagged(addr), size, PROT_NONE,
                   flags, -1, 0);

        /* ENOMEM if the host address space is exhausted */
        if (ptr == MAP_FAILED) {
            return (abi_ulong)-1;
        }

        /*
         * Count the number of sequential returns of the same address.
         * This is used to modify the search algorithm below.
         */
        repeat = (ptr == prev ? repeat + 1 : 0);

        if (h2g_valid(ptr + size - 1)) {
            addr = h2g(ptr);

            if ((addr & ~TARGET_PAGE_MASK) == 0) {
                /* Success.  */
                if (start == mmap_next_start && addr >= TASK_UNMAPPED_BASE) {
                    mmap_next_start = addr + size;
                }
                return addr;
            }

            /* The address is not properly aligned for the target.  */
            switch (repeat) {
            case 0:
                /*
                 * Assume the result that the kernel gave us is the
                 * first with enough free space, so start again at the
                 * next higher target page.
                 */
                addr = TARGET_PAGE_ALIGN(addr);
                break;
            case 1:
                /*
                 * Sometimes the kernel decides to perform the allocation
                 * at the top end of memory instead.
                 */
                addr &= TARGET_PAGE_MASK;
                break;
            case 2:
                /* Start over at low memory.  */
                addr = 0;
                break;
            default:
                /* Fail.  This unaligned block must be the last.  */
                addr = -1;
                break;
            }
        } else {
            /*
             * Since the result the kernel gave didn't fit, start
             * again at low memory.  If any repetition, fail.
             */
            addr = (repeat ? -1 : 0);
        }

        /* Unmap and try again.  */
        munmap(ptr, size);

        /* ENOMEM if we checked the whole of the target address space.  */
        if (addr == (abi_ulong)-1) {
            return (abi_ulong)-1;
        } else if (addr == 0) {
            if (wrapped) {
                return (abi_ulong)-1;
            }
            wrapped = 1;
            /*
             * Don't actually use 0 when wrapping, instead indicate
             * that we'd truly like an allocation in low memory.
             */
            addr = TARGET_PAGE_SIZE;
        } else if (wrapped && addr >= start) {
            return (abi_ulong)-1;
        }
    }
}

abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size)
{
    return mmap_find_vma_aligned(start, size, 0);
}

/* NOTE: all the constants are the HOST ones */
abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
                     int flags, int fd, off_t offset)
{
    abi_ulong ret, end, real_start, real_end, retaddr, host_offset, host_len;

    mmap_lock();
#ifdef DEBUG_MMAP
    {
        printf("mmap: start=0x" TARGET_ABI_FMT_lx
               " len=0x" TARGET_ABI_FMT_lx " prot=%c%c%c flags=",
               start, len,
               prot & PROT_READ ? 'r' : '-',
               prot & PROT_WRITE ? 'w' : '-',
               prot & PROT_EXEC ? 'x' : '-');
        if (flags & MAP_ALIGNMENT_MASK) {
            printf("MAP_ALIGNED(%u) ", (flags & MAP_ALIGNMENT_MASK)
                    >> MAP_ALIGNMENT_SHIFT);
        }
#ifdef MAP_GUARD
        if (flags & MAP_GUARD) {
            printf("MAP_GUARD ");
        }
#endif
        if (flags & MAP_FIXED) {
            printf("MAP_FIXED ");
        }
        if (flags & MAP_ANONYMOUS) {
            printf("MAP_ANON ");
        }
#ifdef MAP_EXCL
        if (flags & MAP_EXCL) {
            printf("MAP_EXCL ");
        }
#endif
        if (flags & MAP_PRIVATE) {
            printf("MAP_PRIVATE ");
        }
        if (flags & MAP_SHARED) {
            printf("MAP_SHARED ");
        }
        if (flags & MAP_NOCORE) {
            printf("MAP_NOCORE ");
        }
#ifdef MAP_STACK
        if (flags & MAP_STACK) {
            printf("MAP_STACK ");
        }
#endif
        printf("fd=%d offset=0x%llx\n", fd, (unsigned long long)offset);
    }
#endif

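    /*
     * Up-front argument validation, mirroring the EINVAL checks that
     * FreeBSD's mmap(2) performs for MAP_ANON, MAP_STACK and MAP_GUARD.
     */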
    if ((flags & MAP_ANONYMOUS) && fd != -1) {
        errno = EINVAL;
        goto fail;
    }
#ifdef MAP_STACK
    if (flags & MAP_STACK) {
        if ((fd != -1) || ((prot & (PROT_READ | PROT_WRITE)) !=
                    (PROT_READ | PROT_WRITE))) {
            errno = EINVAL;
            goto fail;
        }
    }
#endif /* MAP_STACK */
#ifdef MAP_GUARD
    if ((flags & MAP_GUARD) && (prot != PROT_NONE || fd != -1 ||
        offset != 0 || (flags & (MAP_SHARED | MAP_PRIVATE |
        /* MAP_PREFAULT | */ /* MAP_PREFAULT not in mman.h */
        MAP_PREFAULT_READ | MAP_ANON | MAP_STACK)) != 0)) {
        errno = EINVAL;
        goto fail;
    }
#endif

    if (offset & ~TARGET_PAGE_MASK) {
        errno = EINVAL;
        goto fail;
    }

    len = TARGET_PAGE_ALIGN(len);
    if (len == 0) {
        errno = EINVAL;
        goto fail;
    }
    real_start = start & qemu_host_page_mask;
    host_offset = offset & qemu_host_page_mask;

    /*
     * If the user is asking for the kernel to find a location, do that
     * before we truncate the length for mapping files below.
     */
    if (!(flags & MAP_FIXED)) {
        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);
        if ((flags & MAP_ALIGNMENT_MASK) != 0) {
            start = mmap_find_vma_aligned(real_start, host_len,
                (flags & MAP_ALIGNMENT_MASK) >> MAP_ALIGNMENT_SHIFT);
        } else {
            start = mmap_find_vma(real_start, host_len);
        }
        if (start == (abi_ulong)-1) {
            errno = ENOMEM;
            goto fail;
        }
    }

    /*
     * When mapping files into a memory area larger than the file, accesses
     * to pages beyond the file size will cause a SIGBUS.
     *
     * For example, if mmapping a file of 100 bytes on a host with 4K pages
     * emulating a target with 8K pages, the target expects to be able to
     * access the first 8K. But the host will trap us on any access beyond
     * 4K.
     *
     * When emulating a target with a larger page size than the host's, we
     * may need to truncate file maps at EOF and add extra anonymous pages
     * up to the target's page boundary.
     */

    if ((qemu_real_host_page_size < qemu_host_page_size) && fd != -1) {
        struct stat sb;

        if (fstat(fd, &sb) == -1) {
            goto fail;
        }

        /* Are we trying to create a map beyond EOF? */
        if (offset + len > sb.st_size) {
            /*
             * If so, truncate the file map at EOF, aligned to the
             * host's real page size. Additional anonymous maps
             * will be created beyond EOF.
             */
            len = REAL_HOST_PAGE_ALIGN(sb.st_size - offset);
        }
    }

    if (!(flags & MAP_FIXED)) {
        unsigned long host_start;
        void *p;

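        /*
         * Reserve the whole host-page-aligned range with an anonymous
         * mapping first, then (for file maps) overlay the file mapping
         * at the same fixed address, so the placement stays under our
         * control rather than the host kernel's.
         */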
        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);

        /*
         * Note: we prefer to control the mapping address. It is
         * especially important if qemu_host_page_size >
         * qemu_real_host_page_size.
         */
        p = mmap(g2h_untagged(start), host_len, prot,
                 flags | MAP_FIXED | ((fd != -1) ? MAP_ANONYMOUS : 0), -1, 0);
        if (p == MAP_FAILED) {
            goto fail;
        }
        /* update start so that it points to the file position at 'offset' */
        host_start = (unsigned long)p;
        if (fd != -1) {
            p = mmap(g2h_untagged(start), len, prot,
                     flags | MAP_FIXED, fd, host_offset);
            if (p == MAP_FAILED) {
                munmap(g2h_untagged(start), host_len);
                goto fail;
            }
            host_start += offset - host_offset;
        }
        start = h2g(host_start);
    } else {
        if (start & ~TARGET_PAGE_MASK) {
            errno = EINVAL;
            goto fail;
        }
        end = start + len;
        real_end = HOST_PAGE_ALIGN(end);

        /*
         * Test if the requested memory area fits the target address space.
         * It can fail only on a 64-bit host with a 32-bit target.
         * On any other target/host combination, host mmap() handles this
         * error correctly.
         */
#if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
        if ((unsigned long)start + len - 1 > (abi_ulong) -1) {
            errno = EINVAL;
            goto fail;
        }
#endif

        /*
         * worst case: we cannot map the file because the offset is not
         * aligned, so we read it
         */
        if (!(flags & MAP_ANON) &&
            (offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) {
            /*
             * msync() won't work here, so we return an error if write is
             * possible while it is a shared mapping.
             */
            if ((flags & TARGET_BSD_MAP_FLAGMASK) == MAP_SHARED &&
                (prot & PROT_WRITE)) {
                errno = EINVAL;
                goto fail;
            }
            retaddr = target_mmap(start, len, prot | PROT_WRITE,
                                  MAP_FIXED | MAP_PRIVATE | MAP_ANON,
                                  -1, 0);
            if (retaddr == -1) {
                goto fail;
            }
            if (pread(fd, g2h_untagged(start), len, offset) == -1) {
                goto fail;
            }
            if (!(prot & PROT_WRITE)) {
                ret = target_mprotect(start, len, prot);
                if (ret != 0) {
                    start = ret;
                    goto the_end;
                }
            }
            goto the_end;
        }

        /* handle the start of the mapping */
        if (start > real_start) {
            if (real_end == real_start + qemu_host_page_size) {
                /* one single host page */
                ret = mmap_frag(real_start, start, end,
                                prot, flags, fd, offset);
                if (ret == -1) {
                    goto fail;
                }
                goto the_end1;
            }
            ret = mmap_frag(real_start, start, real_start + qemu_host_page_size,
                            prot, flags, fd, offset);
            if (ret == -1) {
                goto fail;
            }
            real_start += qemu_host_page_size;
        }
        /* handle the end of the mapping */
        if (end < real_end) {
            ret = mmap_frag(real_end - qemu_host_page_size,
                            real_end - qemu_host_page_size, end,
                            prot, flags, fd,
                            offset + real_end - qemu_host_page_size - start);
            if (ret == -1) {
                goto fail;
            }
            real_end -= qemu_host_page_size;
        }

        /* map the middle (easier) */
        if (real_start < real_end) {
            void *p;
            unsigned long offset1;
            if (flags & MAP_ANON) {
                offset1 = 0;
            } else {
                offset1 = offset + real_start - start;
            }
            p = mmap(g2h_untagged(real_start), real_end - real_start,
                     prot, flags, fd, offset1);
            if (p == MAP_FAILED) {
                goto fail;
            }
        }
    }
 the_end1:
    page_set_flags(start, start + len, prot | PAGE_VALID);
 the_end:
#ifdef DEBUG_MMAP
    printf("ret=0x" TARGET_ABI_FMT_lx "\n", start);
    page_dump(stdout);
    printf("\n");
#endif
    tb_invalidate_phys_range(start, start + len);
    mmap_unlock();
    return start;
fail:
    mmap_unlock();
    return -1;
}

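/*
 * Replace the given range with a PROT_NONE anonymous mapping instead of
 * returning it to the host. Used when reserved_va is in effect, so the
 * guest address range stays reserved for future guest mappings.
 */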
static void mmap_reserve(abi_ulong start, abi_ulong size)
{
    abi_ulong real_start;
    abi_ulong real_end;
    abi_ulong addr;
    abi_ulong end;
    int prot;

    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(start + size);
    end = start + size;
    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0) {
            real_start += qemu_host_page_size;
        }
    }
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0) {
            real_end -= qemu_host_page_size;
        }
    }
    if (real_start != real_end) {
        mmap(g2h_untagged(real_start), real_end - real_start, PROT_NONE,
                 MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE,
                 -1, 0);
    }
}

int target_munmap(abi_ulong start, abi_ulong len)
{
    abi_ulong end, real_start, real_end, addr;
    int prot, ret;

#ifdef DEBUG_MMAP
    printf("munmap: start=0x" TARGET_ABI_FMT_lx " len=0x"
           TARGET_ABI_FMT_lx "\n",
           start, len);
#endif
    if (start & ~TARGET_PAGE_MASK) {
        return -EINVAL;
    }
    len = TARGET_PAGE_ALIGN(len);
    if (len == 0) {
        return -EINVAL;
    }
    mmap_lock();
    end = start + len;
    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(end);

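    /*
     * Trim the range to whole host pages: a boundary host page can only
     * be unmapped if no other valid target pages remain inside it.
     */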
    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0) {
            real_start += qemu_host_page_size;
        }
    }
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0) {
            real_end -= qemu_host_page_size;
        }
    }

    ret = 0;
    /* unmap what we can */
    if (real_start < real_end) {
        if (reserved_va) {
            mmap_reserve(real_start, real_end - real_start);
        } else {
            ret = munmap(g2h_untagged(real_start), real_end - real_start);
        }
    }

    if (ret == 0) {
        page_set_flags(start, start + len, 0);
        tb_invalidate_phys_range(start, start + len);
    }
    mmap_unlock();
    return ret;
}

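/*
 * Flush the given target range to its backing store. Host msync()
 * requires a host-page-aligned address, hence the final mask on 'start'.
 */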
int target_msync(abi_ulong start, abi_ulong len, int flags)
{
    abi_ulong end;

    if (start & ~TARGET_PAGE_MASK) {
        return -EINVAL;
    }
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (end < start) {
        return -EINVAL;
    }
    if (end == start) {
        return 0;
    }

    start &= qemu_host_page_mask;
    return msync(g2h_untagged(start), end - start, flags);
}