/*
 *  mmap support for qemu
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <linux/mman.h>
#include <linux/unistd.h>

#include "qemu.h"
#include "qemu-common.h"
#include "translate-all.h"

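/* Define DEBUG_MMAP to trace guest mmap/mprotect/munmap activity
   on stdout. */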
//#define DEBUG_MMAP

static pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER;
static __thread int mmap_lock_count;

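/*
 * The mmap lock is recursive within a thread: mmap_lock_count tracks
 * the per-thread nesting depth, and the underlying mutex is only taken
 * on the outermost mmap_lock() and released on the matching outermost
 * mmap_unlock().  Typical usage (sketch):
 *
 *     mmap_lock();
 *     ... inspect or modify the guest page mappings ...
 *     mmap_unlock();
 */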
void mmap_lock(void)
{
    if (mmap_lock_count++ == 0) {
        pthread_mutex_lock(&mmap_mutex);
    }
}

void mmap_unlock(void)
{
    if (--mmap_lock_count == 0) {
        pthread_mutex_unlock(&mmap_mutex);
    }
}

/* Grab lock to make sure things are in a consistent state after fork().  */
void mmap_fork_start(void)
{
    if (mmap_lock_count) {
        abort();
    }
    pthread_mutex_lock(&mmap_mutex);
}

void mmap_fork_end(int child)
{
    if (child) {
        pthread_mutex_init(&mmap_mutex, NULL);
    } else {
        pthread_mutex_unlock(&mmap_mutex);
    }
}

/* NOTE: all the constants are the HOST ones, but addresses are target. */
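/*
 * Partial host pages are handled separately from the aligned middle:
 * a host page that is only partly covered by the request keeps the
 * union of the protections of every target page it contains.  For
 * example, with 4K target pages on an 8K-page host, a guest
 * mprotect(0x1000, 0x2000, PROT_READ) only partly covers the host page
 * at 0x0, so that host page gets PROT_READ combined with the existing
 * flags of the guest page at 0x0.
 */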
int target_mprotect(abi_ulong start, abi_ulong len, int prot)
{
    abi_ulong end, host_start, host_end, addr;
    int prot1, ret;

#ifdef DEBUG_MMAP
    printf("mprotect: start=0x" TARGET_ABI_FMT_lx
           " len=0x" TARGET_ABI_FMT_lx " prot=%c%c%c\n", start, len,
           prot & PROT_READ ? 'r' : '-',
           prot & PROT_WRITE ? 'w' : '-',
           prot & PROT_EXEC ? 'x' : '-');
#endif

    if ((start & ~TARGET_PAGE_MASK) != 0) {
        return -EINVAL;
    }
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (end < start) {
        return -EINVAL;
    }
    prot &= PROT_READ | PROT_WRITE | PROT_EXEC;
    if (len == 0) {
        return 0;
    }

    mmap_lock();
    host_start = start & qemu_host_page_mask;
    host_end = HOST_PAGE_ALIGN(end);
    if (start > host_start) {
        /* handle host page containing start */
        prot1 = prot;
        for (addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        if (host_end == host_start + qemu_host_page_size) {
            /* the whole range fits in one host page: fold in the target
               pages after 'end' as well */
            for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(addr);
            }
            end = host_end;
        }
        ret = mprotect(g2h(host_start), qemu_host_page_size, prot1 & PAGE_BITS);
        if (ret != 0) {
            goto error;
        }
        host_start += qemu_host_page_size;
    }
    if (end < host_end) {
        /* handle host page containing end */
        prot1 = prot;
        for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        ret = mprotect(g2h(host_end - qemu_host_page_size), qemu_host_page_size,
                       prot1 & PAGE_BITS);
        if (ret != 0) {
            goto error;
        }
        host_end -= qemu_host_page_size;
    }

    /* handle the pages in the middle */
    if (host_start < host_end) {
        ret = mprotect(g2h(host_start), host_end - host_start, prot);
        if (ret != 0) {
            goto error;
        }
    }
    page_set_flags(start, start + len, prot | PAGE_VALID);
    mmap_unlock();
    return 0;
error:
    mmap_unlock();
    return ret;
}

/* map an incomplete host page */
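/* mmap_frag() maps the target pages [start, end) that fall inside the
   single host page beginning at real_start, merging the new protection
   with whatever target pages already occupy the rest of that host
   page. */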
static int mmap_frag(abi_ulong real_start,
                     abi_ulong start, abi_ulong end,
                     int prot, int flags, int fd, abi_ulong offset)
{
    abi_ulong real_end, addr;
    void *host_start;
    int prot1, prot_new;

    real_end = real_start + qemu_host_page_size;
    host_start = g2h(real_start);

    /* get the protection of the target pages outside the mapping */
    prot1 = 0;
    for (addr = real_start; addr < real_end; addr++) {
        if (addr < start || addr >= end) {
            prot1 |= page_get_flags(addr);
        }
    }

    if (prot1 == 0) {
        /* no page was there, so we allocate one */
        void *p = mmap(host_start, qemu_host_page_size, prot,
                       flags | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
            return -1;
        }
        prot1 = prot;
    }
    prot1 &= PAGE_BITS;

    prot_new = prot | prot1;
    if (!(flags & MAP_ANONYMOUS)) {
        /* msync() won't work here, so we return an error if write is
           possible while it is a shared mapping */
        if ((flags & MAP_TYPE) == MAP_SHARED &&
            (prot & PROT_WRITE)) {
            return -1;
        }

        /* adjust protection to be able to read */
        if (!(prot1 & PROT_WRITE)) {
            mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE);
        }

        /* read the corresponding file data */
        if (pread(fd, g2h(start), end - start, offset) == -1) {
            return -1;
        }

        /* put final protection */
        if (prot_new != (prot1 | PROT_WRITE)) {
            mprotect(host_start, qemu_host_page_size, prot_new);
        }
    } else {
        if (prot_new != prot1) {
            mprotect(host_start, qemu_host_page_size, prot_new);
        }
        if (prot_new & PROT_WRITE) {
            memset(g2h(start), 0, end - start);
        }
    }
    return 0;
}

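/* Where mmap_find_vma() starts looking for free guest address space
   when the guest does not supply an address hint. */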
#if HOST_LONG_BITS == 64 && TARGET_ABI_BITS == 64
# define TASK_UNMAPPED_BASE  (1ul << 38)
#elif defined(__CYGWIN__)
/* Cygwin doesn't have a whole lot of address space.  */
# define TASK_UNMAPPED_BASE  0x18000000
#else
# define TASK_UNMAPPED_BASE  0x40000000
#endif
abi_ulong mmap_next_start = TASK_UNMAPPED_BASE;

unsigned long last_brk;

/* Subroutine of mmap_find_vma, used when we have pre-allocated a chunk
   of guest address space.  */
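/* The reserved_va region was mapped up front, so no host mmap() call
   is needed here: we just scan the guest page flags downwards from
   end_addr for a gap of 'size' bytes, wrapping around to the top of
   the reserved area at most once. */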
static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size)
{
    abi_ulong addr;
    abi_ulong end_addr;
    int prot;
    int looped = 0;

    if (size > reserved_va) {
        return (abi_ulong)-1;
    }

    size = HOST_PAGE_ALIGN(size);
    end_addr = start + size;
    if (end_addr > reserved_va) {
        end_addr = reserved_va;
    }
    addr = end_addr - qemu_host_page_size;

    while (1) {
        if (addr > end_addr) {
            /* 'addr' underflowed: wrap around to the top once, then fail */
            if (looped) {
                return (abi_ulong)-1;
            }
            end_addr = reserved_va;
            addr = end_addr - qemu_host_page_size;
            looped = 1;
            continue;
        }
        prot = page_get_flags(addr);
        if (prot) {
            /* page in use: any free run must end below it */
            end_addr = addr;
        }
        if (addr + size == end_addr) {
            break;
        }
        addr -= qemu_host_page_size;
    }

    if (start == mmap_next_start) {
        mmap_next_start = addr;
    }

    return addr;
}

/*
 * Find and reserve a free memory area of size 'size'. The search
 * starts at 'start'.
 * It must be called with mmap_lock() held.
 * Return -1 if error.
 */
abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size)
{
    void *ptr, *prev;
    abi_ulong addr;
    int wrapped, repeat;

    /* If 'start' == 0, then a default start address is used. */
    if (start == 0) {
        start = mmap_next_start;
    } else {
        start &= qemu_host_page_mask;
    }

    size = HOST_PAGE_ALIGN(size);

    if (reserved_va) {
        return mmap_find_vma_reserved(start, size);
    }

    addr = start;
    wrapped = repeat = 0;
    prev = NULL;

    for (;; prev = ptr) {
        /*
         * Reserve needed memory area to avoid a race.
         * It should be discarded using:
         *  - mmap() with MAP_FIXED flag
         *  - mremap() with MREMAP_FIXED flag
         *  - shmat() with SHM_REMAP flag
         */
        ptr = mmap(g2h(addr), size, PROT_NONE,
                   MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);

        /* ENOMEM, if host address space has no memory */
        if (ptr == MAP_FAILED) {
            return (abi_ulong)-1;
        }

        /* Count the number of sequential returns of the same address.
           This is used to modify the search algorithm below.  */
        repeat = (ptr == prev ? repeat + 1 : 0);

        if (h2g_valid(ptr + size - 1)) {
            addr = h2g(ptr);

            if ((addr & ~TARGET_PAGE_MASK) == 0) {
                /* Success.  */
                if (start == mmap_next_start && addr >= TASK_UNMAPPED_BASE) {
                    mmap_next_start = addr + size;
                }
                return addr;
            }

            /* The address is not properly aligned for the target.  */
            switch (repeat) {
            case 0:
                /* Assume the result that the kernel gave us is the
                   first with enough free space, so start again at the
                   next higher target page.  */
                addr = TARGET_PAGE_ALIGN(addr);
                break;
            case 1:
                /* Sometimes the kernel decides to perform the allocation
                   at the top end of memory instead.  */
                addr &= TARGET_PAGE_MASK;
                break;
            case 2:
                /* Start over at low memory.  */
                addr = 0;
                break;
            default:
                /* Fail.  This unaligned block must be the last.  */
                addr = -1;
                break;
            }
        } else {
            /* Since the result the kernel gave didn't fit, start
               again at low memory.  If any repetition, fail.  */
            addr = (repeat ? -1 : 0);
        }

        /* Unmap and try again.  */
        munmap(ptr, size);

        /* ENOMEM if we checked the whole of the target address space.  */
        if (addr == (abi_ulong)-1) {
            return (abi_ulong)-1;
        } else if (addr == 0) {
            if (wrapped) {
                return (abi_ulong)-1;
            }
            wrapped = 1;
            /* Don't actually use 0 when wrapping, instead indicate
               that we'd truly like an allocation in low memory.  */
            addr = (mmap_min_addr > TARGET_PAGE_SIZE
                     ? TARGET_PAGE_ALIGN(mmap_min_addr)
                     : TARGET_PAGE_SIZE);
        } else if (wrapped && addr >= start) {
            return (abi_ulong)-1;
        }
    }
}

/* NOTE: all the constants are the HOST ones */
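/*
 * Emulate the guest's mmap(2).  For non-MAP_FIXED requests the guest
 * address is chosen with mmap_find_vma() above; for MAP_FIXED requests
 * the range is split into an unaligned head fragment, an unaligned
 * tail fragment (both handled by mmap_frag()) and an aligned middle
 * that can be mapped with a single host mmap() call.
 */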
abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
                     int flags, int fd, abi_ulong offset)
{
    abi_ulong ret, end, real_start, real_end, retaddr, host_offset, host_len;

    mmap_lock();
#ifdef DEBUG_MMAP
    {
        printf("mmap: start=0x" TARGET_ABI_FMT_lx
               " len=0x" TARGET_ABI_FMT_lx " prot=%c%c%c flags=",
               start, len,
               prot & PROT_READ ? 'r' : '-',
               prot & PROT_WRITE ? 'w' : '-',
               prot & PROT_EXEC ? 'x' : '-');
        if (flags & MAP_FIXED) {
            printf("MAP_FIXED ");
        }
        if (flags & MAP_ANONYMOUS) {
            printf("MAP_ANON ");
        }
        switch (flags & MAP_TYPE) {
        case MAP_PRIVATE:
            printf("MAP_PRIVATE ");
            break;
        case MAP_SHARED:
            printf("MAP_SHARED ");
            break;
        default:
            printf("[MAP_TYPE=0x%x] ", flags & MAP_TYPE);
            break;
        }
        printf("fd=%d offset=" TARGET_ABI_FMT_lx "\n", fd, offset);
    }
#endif

    if (offset & ~TARGET_PAGE_MASK) {
        errno = EINVAL;
        goto fail;
    }

    len = TARGET_PAGE_ALIGN(len);
    if (len == 0) {
        goto the_end;
    }
    real_start = start & qemu_host_page_mask;
    host_offset = offset & qemu_host_page_mask;

    /* If the user is asking for the kernel to find a location, do that
       before we truncate the length for mapping files below.  */
    if (!(flags & MAP_FIXED)) {
        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);
        start = mmap_find_vma(real_start, host_len);
        if (start == (abi_ulong)-1) {
            errno = ENOMEM;
            goto fail;
        }
    }

424 
425     /* When mapping files into a memory area larger than the file, accesses
426        to pages beyond the file size will cause a SIGBUS.
427 
428        For example, if mmaping a file of 100 bytes on a host with 4K pages
429        emulating a target with 8K pages, the target expects to be able to
430        access the first 8K. But the host will trap us on any access beyond
431        4K.
432 
433        When emulating a target with a larger page-size than the hosts, we
434        may need to truncate file maps at EOF and add extra anonymous pages
435        up to the targets page boundary.  */
436 
437     if ((qemu_real_host_page_size < TARGET_PAGE_SIZE)
438         && !(flags & MAP_ANONYMOUS)) {
439        struct stat sb;
440 
441        if (fstat (fd, &sb) == -1)
442            goto fail;
443 
444        /* Are we trying to create a map beyond EOF?.  */
445        if (offset + len > sb.st_size) {
446            /* If so, truncate the file map at eof aligned with
447               the hosts real pagesize. Additional anonymous maps
448               will be created beyond EOF.  */
449            len = REAL_HOST_PAGE_ALIGN(sb.st_size - offset);
450        }
451     }
452 
    if (!(flags & MAP_FIXED)) {
        unsigned long host_start;
        void *p;

        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);

        /* Note: we prefer to control the mapping address. It is
           especially important if qemu_host_page_size >
           qemu_real_host_page_size */
        p = mmap(g2h(start), host_len, prot,
                 flags | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
            goto fail;
        }
        /* update start so that it points to the file position at 'offset' */
        host_start = (unsigned long)p;
        if (!(flags & MAP_ANONYMOUS)) {
            /* replace the anonymous pages with the file mapping itself */
            p = mmap(g2h(start), len, prot,
                     flags | MAP_FIXED, fd, host_offset);
            if (p == MAP_FAILED) {
                munmap(g2h(start), host_len);
                goto fail;
            }
            host_start += offset - host_offset;
        }
        start = h2g(host_start);
    } else {
        if (start & ~TARGET_PAGE_MASK) {
            errno = EINVAL;
            goto fail;
        }
        end = start + len;
        real_end = HOST_PAGE_ALIGN(end);

        /*
         * Test if the requested memory area fits the target address space.
         * It can fail only on a 64-bit host with a 32-bit target.
         * On any other target/host combination, the host mmap() handles
         * this error correctly.
         */
        if ((unsigned long)start + len - 1 > (abi_ulong) -1) {
            errno = EINVAL;
            goto fail;
        }

        /* worst case: we cannot map the file because the offset is not
           aligned, so we read it */
        if (!(flags & MAP_ANONYMOUS) &&
            (offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) {
            /* msync() won't work here, so we return an error if write is
               possible while it is a shared mapping */
            if ((flags & MAP_TYPE) == MAP_SHARED &&
                (prot & PROT_WRITE)) {
                errno = EINVAL;
                goto fail;
            }
            retaddr = target_mmap(start, len, prot | PROT_WRITE,
                                  MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
                                  -1, 0);
            if (retaddr == -1) {
                goto fail;
            }
            if (pread(fd, g2h(start), len, offset) == -1) {
                goto fail;
            }
            if (!(prot & PROT_WRITE)) {
                ret = target_mprotect(start, len, prot);
                assert(ret == 0);
            }
            goto the_end;
        }

        /* handle the start of the mapping */
        if (start > real_start) {
            if (real_end == real_start + qemu_host_page_size) {
                /* one single host page */
                ret = mmap_frag(real_start, start, end,
                                prot, flags, fd, offset);
                if (ret == -1) {
                    goto fail;
                }
                goto the_end1;
            }
            ret = mmap_frag(real_start, start, real_start + qemu_host_page_size,
                            prot, flags, fd, offset);
            if (ret == -1) {
                goto fail;
            }
            real_start += qemu_host_page_size;
        }
        /* handle the end of the mapping */
        if (end < real_end) {
            ret = mmap_frag(real_end - qemu_host_page_size,
                            real_end - qemu_host_page_size, end,
                            prot, flags, fd,
                            offset + real_end - qemu_host_page_size - start);
            if (ret == -1) {
                goto fail;
            }
            real_end -= qemu_host_page_size;
        }

        /* map the middle (easier) */
        if (real_start < real_end) {
            void *p;
            unsigned long offset1;
            if (flags & MAP_ANONYMOUS) {
                offset1 = 0;
            } else {
                offset1 = offset + real_start - start;
            }
            p = mmap(g2h(real_start), real_end - real_start,
                     prot, flags, fd, offset1);
            if (p == MAP_FAILED) {
                goto fail;
            }
        }
    }
 the_end1:
    page_set_flags(start, start + len, prot | PAGE_VALID);
 the_end:
#ifdef DEBUG_MMAP
    printf("ret=0x" TARGET_ABI_FMT_lx "\n", start);
    page_dump(stdout);
    printf("\n");
#endif
    tb_invalidate_phys_range(start, start + len);
    mmap_unlock();
    return start;
fail:
    mmap_unlock();
    return -1;
}

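/* Give a range back to the pre-reserved guest address space: instead
   of munmap()ing it (which would punch a hole that a host allocation
   could later fill), remap it as anonymous PROT_NONE memory, skipping
   any partial host pages that still contain live target pages. */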
static void mmap_reserve(abi_ulong start, abi_ulong size)
{
    abi_ulong real_start;
    abi_ulong real_end;
    abi_ulong addr;
    abi_ulong end;
    int prot;

    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(start + size);
    end = start + size;
    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0) {
            real_start += qemu_host_page_size;
        }
    }
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0) {
            real_end -= qemu_host_page_size;
        }
    }
    if (real_start != real_end) {
        mmap(g2h(real_start), real_end - real_start, PROT_NONE,
             MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE,
             -1, 0);
    }
}

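/* Emulate the guest's munmap(2).  Host pages that are only partially
   covered by the range are kept if other target pages still live in
   them; only fully dead host pages are actually unmapped (or handed
   back to the reserve when reserved_va is in use). */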
int target_munmap(abi_ulong start, abi_ulong len)
{
    abi_ulong end, real_start, real_end, addr;
    int prot, ret;

#ifdef DEBUG_MMAP
    printf("munmap: start=0x" TARGET_ABI_FMT_lx " len=0x"
           TARGET_ABI_FMT_lx "\n",
           start, len);
#endif
    if (start & ~TARGET_PAGE_MASK) {
        return -EINVAL;
    }
    len = TARGET_PAGE_ALIGN(len);
    if (len == 0) {
        return -EINVAL;
    }
    mmap_lock();
    end = start + len;
    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(end);

    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0) {
            real_start += qemu_host_page_size;
        }
    }
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0) {
            real_end -= qemu_host_page_size;
        }
    }

    ret = 0;
    /* unmap what we can */
    if (real_start < real_end) {
        if (reserved_va) {
            mmap_reserve(real_start, real_end - real_start);
        } else {
            ret = munmap(g2h(real_start), real_end - real_start);
        }
    }

    if (ret == 0) {
        page_set_flags(start, start + len, 0);
        tb_invalidate_phys_range(start, start + len);
    }
    mmap_unlock();
    return ret;
}

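/* Emulate the guest's mremap(2).  The three cases mirror the kernel's:
   MREMAP_FIXED moves to a caller-chosen address, MREMAP_MAYMOVE lets
   mmap_find_vma() pick a new guest address, and otherwise the mapping
   may only grow or shrink in place. */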
abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
                       abi_ulong new_size, unsigned long flags,
                       abi_ulong new_addr)
{
    int prot;
    void *host_addr;

    mmap_lock();

    if (flags & MREMAP_FIXED) {
        host_addr = (void *) syscall(__NR_mremap, g2h(old_addr),
                                     old_size, new_size,
                                     flags,
                                     g2h(new_addr));

        if (reserved_va && host_addr != MAP_FAILED) {
            /* If new and old addresses overlap then the above mremap will
               already have failed with EINVAL.  */
            mmap_reserve(old_addr, old_size);
        }
    } else if (flags & MREMAP_MAYMOVE) {
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, new_size);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        } else {
            host_addr = (void *) syscall(__NR_mremap, g2h(old_addr),
                                         old_size, new_size,
                                         flags | MREMAP_FIXED,
                                         g2h(mmap_start));
            if (reserved_va) {
                mmap_reserve(old_addr, old_size);
            }
        }
    } else {
        int prot = 0;
        if (reserved_va && old_size < new_size) {
            /* in-place growth is only possible if the pages beyond the
               old end are still unused */
            abi_ulong addr;
            for (addr = old_addr + old_size;
                 addr < old_addr + new_size;
                 addr++) {
                prot |= page_get_flags(addr);
            }
        }
        if (prot == 0) {
            host_addr = mremap(g2h(old_addr), old_size, new_size, flags);
            if (host_addr != MAP_FAILED && reserved_va && old_size > new_size) {
                mmap_reserve(old_addr + old_size, new_size - old_size);
            }
        } else {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        }
        /* Check if address fits target address space */
        if ((unsigned long)host_addr + new_size > (abi_ulong)-1) {
            /* Revert mremap() changes */
            host_addr = mremap(g2h(old_addr), new_size, old_size, flags);
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        }
    }

    if (host_addr == MAP_FAILED) {
        new_addr = -1;
    } else {
        new_addr = h2g(host_addr);
        prot = page_get_flags(old_addr);
        page_set_flags(old_addr, old_addr + old_size, 0);
        page_set_flags(new_addr, new_addr + new_size, prot | PAGE_VALID);
    }
    tb_invalidate_phys_range(new_addr, new_addr + new_size);
    mmap_unlock();
    return new_addr;
}

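/* Emulate the guest's msync(2) by widening the start down to a host
   page boundary before calling the host msync(). */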
int target_msync(abi_ulong start, abi_ulong len, int flags)
{
    abi_ulong end;

    if (start & ~TARGET_PAGE_MASK) {
        return -EINVAL;
    }
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (end < start) {
        return -EINVAL;
    }
    if (end == start) {
        return 0;
    }

    start &= qemu_host_page_mask;
    return msync(g2h(start), end - start, flags);
}