/*
 *  mmap support for qemu
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <linux/mman.h>
#include <linux/unistd.h>

#include "qemu.h"
#include "qemu-common.h"
#include "translate-all.h"

//#define DEBUG_MMAP

static pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER;
static __thread int mmap_lock_count;

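/*
 * The mmap lock is recursive: a thread may take it several times and
 * must release it the same number of times.  Only the outermost
 * acquire/release pair touches the underlying mutex; the per-thread
 * counter tracks the nesting depth.
 */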
void mmap_lock(void)
{
    if (mmap_lock_count++ == 0) {
        pthread_mutex_lock(&mmap_mutex);
    }
}

void mmap_unlock(void)
{
    if (--mmap_lock_count == 0) {
        pthread_mutex_unlock(&mmap_mutex);
    }
}

/* Grab lock to make sure things are in a consistent state after fork().  */
void mmap_fork_start(void)
{
    if (mmap_lock_count) {
        abort();
    }
    pthread_mutex_lock(&mmap_mutex);
}

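/*
 * In the child the inherited mutex is reinitialized to a clean state
 * rather than unlocked; in the parent it is simply released.
 */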
void mmap_fork_end(int child)
{
    if (child) {
        pthread_mutex_init(&mmap_mutex, NULL);
    } else {
        pthread_mutex_unlock(&mmap_mutex);
    }
}

/* NOTE: all the constants are the HOST ones, but addresses are target. */
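/*
 * When the target page size is smaller than the host page size, one
 * host page may hold target pages with differing protections.  The
 * partially covered host pages at either end of the range are given
 * the union of the requested protection and the flags of the target
 * pages outside the range; only fully covered host pages get exactly
 * 'prot'.
 */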
int target_mprotect(abi_ulong start, abi_ulong len, int prot)
{
    abi_ulong end, host_start, host_end, addr;
    int prot1, ret;

#ifdef DEBUG_MMAP
    printf("mprotect: start=0x" TARGET_ABI_FMT_lx
           " len=0x" TARGET_ABI_FMT_lx " prot=%c%c%c\n", start, len,
           prot & PROT_READ ? 'r' : '-',
           prot & PROT_WRITE ? 'w' : '-',
           prot & PROT_EXEC ? 'x' : '-');
#endif

    if ((start & ~TARGET_PAGE_MASK) != 0) {
        return -EINVAL;
    }
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (end < start) {
        return -EINVAL;
    }
    prot &= PROT_READ | PROT_WRITE | PROT_EXEC;
    if (len == 0) {
        return 0;
    }

    mmap_lock();
    host_start = start & qemu_host_page_mask;
    host_end = HOST_PAGE_ALIGN(end);
    if (start > host_start) {
        /* handle host page containing start */
        prot1 = prot;
        for (addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        if (host_end == host_start + qemu_host_page_size) {
            for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(addr);
            }
            end = host_end;
        }
        ret = mprotect(g2h(host_start), qemu_host_page_size,
                       prot1 & PAGE_BITS);
        if (ret != 0) {
            goto error;
        }
        host_start += qemu_host_page_size;
    }
    if (end < host_end) {
        prot1 = prot;
        for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        ret = mprotect(g2h(host_end - qemu_host_page_size),
                       qemu_host_page_size, prot1 & PAGE_BITS);
        if (ret != 0) {
            goto error;
        }
        host_end -= qemu_host_page_size;
    }

    /* handle the pages in the middle */
    if (host_start < host_end) {
        ret = mprotect(g2h(host_start), host_end - host_start, prot);
        if (ret != 0) {
            goto error;
        }
    }
    page_set_flags(start, start + len, prot | PAGE_VALID);
    mmap_unlock();
    return 0;
error:
    mmap_unlock();
    return ret;
}

/* map an incomplete host page */
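/*
 * 'real_start' is the host page containing [start, end).  The new
 * protection must also cover any target pages in that host page which
 * lie outside the requested range, so their flags are OR-ed in.
 */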
static int mmap_frag(abi_ulong real_start,
                     abi_ulong start, abi_ulong end,
                     int prot, int flags, int fd, abi_ulong offset)
{
    abi_ulong real_end, addr;
    void *host_start;
    int prot1, prot_new;

    real_end = real_start + qemu_host_page_size;
    host_start = g2h(real_start);

    /* get the protection of the target pages outside the mapping */
    prot1 = 0;
    for (addr = real_start; addr < real_end; addr += TARGET_PAGE_SIZE) {
        if (addr < start || addr >= end) {
            prot1 |= page_get_flags(addr);
        }
    }

    if (prot1 == 0) {
        /* no page was there, so we allocate one */
        void *p = mmap(host_start, qemu_host_page_size, prot,
                       flags | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
            return -1;
        }
        prot1 = prot;
    }
    prot1 &= PAGE_BITS;

    prot_new = prot | prot1;
    if (!(flags & MAP_ANONYMOUS)) {
        /* msync() won't work on a partial host page, so return an error
           if write is possible while it is a shared mapping */
        if ((flags & MAP_TYPE) == MAP_SHARED &&
            (prot & PROT_WRITE)) {
            return -1;
        }

        /* adjust protection so we can write the file data into the page */
        if (!(prot1 & PROT_WRITE)) {
            mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE);
        }

        /* read the corresponding file data */
        if (pread(fd, g2h(start), end - start, offset) == -1) {
            return -1;
        }

        /* put final protection */
        if (prot_new != (prot1 | PROT_WRITE)) {
            mprotect(host_start, qemu_host_page_size, prot_new);
        }
    } else {
        /* just update the protection */
        if (prot_new != prot1) {
            mprotect(host_start, qemu_host_page_size, prot_new);
        }
    }
    return 0;
}

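/* Default base for guest allocations when no address is requested. */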
#if HOST_LONG_BITS == 64 && TARGET_ABI_BITS == 64
# define TASK_UNMAPPED_BASE  (1ul << 38)
#elif defined(__CYGWIN__)
/* Cygwin doesn't have a whole lot of address space.  */
# define TASK_UNMAPPED_BASE  0x18000000
#else
# define TASK_UNMAPPED_BASE  0x40000000
#endif
abi_ulong mmap_next_start = TASK_UNMAPPED_BASE;

unsigned long last_brk;

#ifdef CONFIG_USE_GUEST_BASE
/* Subroutine of mmap_find_vma, used when we have pre-allocated a chunk
   of guest address space.  */
static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size)
{
    abi_ulong addr;
    abi_ulong end_addr;
    int prot;
    int looped = 0;

    if (size > RESERVED_VA) {
        return (abi_ulong)-1;
    }

    size = HOST_PAGE_ALIGN(size);
    end_addr = start + size;
    if (end_addr > RESERVED_VA) {
        end_addr = RESERVED_VA;
    }
    addr = end_addr - qemu_host_page_size;

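    /* Scan downwards one host page at a time; whenever a mapped page is
       found, restart the search immediately below it.  Wrap around to
       the top of the reserved area at most once.  */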
    while (1) {
        if (addr > end_addr) {
            if (looped) {
                return (abi_ulong)-1;
            }
            end_addr = RESERVED_VA;
            addr = end_addr - qemu_host_page_size;
            looped = 1;
            continue;
        }
        prot = page_get_flags(addr);
        if (prot) {
            end_addr = addr;
        }
        if (addr + size == end_addr) {
            break;
        }
        addr -= qemu_host_page_size;
    }

    if (start == mmap_next_start) {
        mmap_next_start = addr;
    }

    return addr;
}
#endif

/*
 * Find and reserve a free memory area of size 'size'. The search
 * starts at 'start'.
 * It must be called with mmap_lock() held.
 * Return -1 if error.
 */
abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size)
{
    void *ptr, *prev;
    abi_ulong addr;
    int wrapped, repeat;

    /* If 'start' == 0, then a default start address is used. */
    if (start == 0) {
        start = mmap_next_start;
    } else {
        start &= qemu_host_page_mask;
    }

    size = HOST_PAGE_ALIGN(size);

#ifdef CONFIG_USE_GUEST_BASE
    if (RESERVED_VA) {
        return mmap_find_vma_reserved(start, size);
    }
#endif

    addr = start;
    wrapped = repeat = 0;
    prev = NULL;

    for (;; prev = ptr) {
        /*
         * Reserve the needed memory area to avoid a race.
         * It should be discarded using:
         *  - mmap() with MAP_FIXED flag
         *  - mremap() with MREMAP_FIXED flag
         *  - shmat() with SHM_REMAP flag
         */
        ptr = mmap(g2h(addr), size, PROT_NONE,
                   MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);

        /* ENOMEM if the host address space is exhausted */
        if (ptr == MAP_FAILED) {
            return (abi_ulong)-1;
        }

        /* Count the number of sequential returns of the same address.
           This is used to modify the search algorithm below.  */
        repeat = (ptr == prev ? repeat + 1 : 0);

        if (h2g_valid(ptr + size - 1)) {
            addr = h2g(ptr);

            if ((addr & ~TARGET_PAGE_MASK) == 0) {
                /* Success.  */
                if (start == mmap_next_start && addr >= TASK_UNMAPPED_BASE) {
                    mmap_next_start = addr + size;
                }
                return addr;
            }

            /* The address is not properly aligned for the target.  */
            switch (repeat) {
            case 0:
                /* Assume the result that the kernel gave us is the
                   first with enough free space, so start again at the
                   next higher target page.  */
                addr = TARGET_PAGE_ALIGN(addr);
                break;
            case 1:
                /* Sometimes the kernel decides to perform the allocation
                   at the top end of memory instead.  */
                addr &= TARGET_PAGE_MASK;
                break;
            case 2:
                /* Start over at low memory.  */
                addr = 0;
                break;
            default:
                /* Fail.  This unaligned block must be the last.  */
                addr = -1;
                break;
            }
        } else {
            /* Since the result the kernel gave didn't fit, start
               again at low memory.  If any repetition, fail.  */
            addr = (repeat ? -1 : 0);
        }

        /* Unmap and try again.  */
        munmap(ptr, size);

        /* ENOMEM if we checked the whole of the target address space.  */
        if (addr == (abi_ulong)-1) {
            return (abi_ulong)-1;
        } else if (addr == 0) {
            if (wrapped) {
                return (abi_ulong)-1;
            }
            wrapped = 1;
            /* Don't actually use 0 when wrapping, instead indicate
               that we'd truly like an allocation in low memory.  */
            addr = (mmap_min_addr > TARGET_PAGE_SIZE
                    ? TARGET_PAGE_ALIGN(mmap_min_addr)
                    : TARGET_PAGE_SIZE);
        } else if (wrapped && addr >= start) {
            return (abi_ulong)-1;
        }
    }
}

/* NOTE: all the constants are the HOST ones */
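/*
 * A MAP_FIXED request is split into up to three pieces: head and tail
 * fragments where the target range only partially covers a host page
 * (handled by mmap_frag()) and a host-page-aligned middle that can be
 * mapped directly.  Without MAP_FIXED, a suitable host-page-aligned
 * area is found with mmap_find_vma() first.
 */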
abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
                     int flags, int fd, abi_ulong offset)
{
    abi_ulong ret, end, real_start, real_end, retaddr, host_offset, host_len;

    mmap_lock();
#ifdef DEBUG_MMAP
    {
        printf("mmap: start=0x" TARGET_ABI_FMT_lx
               " len=0x" TARGET_ABI_FMT_lx " prot=%c%c%c flags=",
               start, len,
               prot & PROT_READ ? 'r' : '-',
               prot & PROT_WRITE ? 'w' : '-',
               prot & PROT_EXEC ? 'x' : '-');
        if (flags & MAP_FIXED) {
            printf("MAP_FIXED ");
        }
        if (flags & MAP_ANONYMOUS) {
            printf("MAP_ANON ");
        }
        switch (flags & MAP_TYPE) {
        case MAP_PRIVATE:
            printf("MAP_PRIVATE ");
            break;
        case MAP_SHARED:
            printf("MAP_SHARED ");
            break;
        default:
            printf("[MAP_TYPE=0x%x] ", flags & MAP_TYPE);
            break;
        }
        printf("fd=%d offset=" TARGET_ABI_FMT_lx "\n", fd, offset);
    }
#endif

    if (offset & ~TARGET_PAGE_MASK) {
        errno = EINVAL;
        goto fail;
    }

    len = TARGET_PAGE_ALIGN(len);
    if (len == 0) {
        goto the_end;
    }
    real_start = start & qemu_host_page_mask;
    host_offset = offset & qemu_host_page_mask;

    /* If the user is asking for the kernel to find a location, do that
       before we truncate the length for mapping files below.  */
    if (!(flags & MAP_FIXED)) {
        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);
        start = mmap_find_vma(real_start, host_len);
        if (start == (abi_ulong)-1) {
            errno = ENOMEM;
            goto fail;
        }
    }

    /* When mapping files into a memory area larger than the file, accesses
       to pages beyond the file size will cause a SIGBUS.

       For example, if mapping a file of 100 bytes on a host with 4K pages
       emulating a target with 8K pages, the target expects to be able to
       access the first 8K. But the host will trap us on any access beyond
       4K.

       When emulating a target with a larger page size than the host's, we
       may need to truncate file maps at EOF and add extra anonymous pages
       up to the target's page boundary.  */

    if ((qemu_real_host_page_size < TARGET_PAGE_SIZE)
        && !(flags & MAP_ANONYMOUS)) {
        struct stat sb;

        if (fstat(fd, &sb) == -1) {
            goto fail;
        }

        /* Are we trying to create a map beyond EOF?  */
        if (offset + len > sb.st_size) {
            /* If so, truncate the file map at EOF aligned with
               the host's real page size. Additional anonymous maps
               will be created beyond EOF.  */
            len = sb.st_size - offset;
            len += qemu_real_host_page_size - 1;
            len &= ~(qemu_real_host_page_size - 1);
        }
    }

    if (!(flags & MAP_FIXED)) {
        unsigned long host_start;
        void *p;

        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);

        /* Note: we prefer to control the mapping address. It is
           especially important if qemu_host_page_size >
           qemu_real_host_page_size */
        p = mmap(g2h(start), host_len, prot,
                 flags | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
            goto fail;
        }
        /* update start so that it points to the file position at 'offset' */
        host_start = (unsigned long)p;
        if (!(flags & MAP_ANONYMOUS)) {
            p = mmap(g2h(start), len, prot,
                     flags | MAP_FIXED, fd, host_offset);
            if (p == MAP_FAILED) {
                munmap(g2h(start), host_len);
                goto fail;
            }
            host_start += offset - host_offset;
        }
        start = h2g(host_start);
    } else {
        if (start & ~TARGET_PAGE_MASK) {
            errno = EINVAL;
            goto fail;
        }
        end = start + len;
        real_end = HOST_PAGE_ALIGN(end);

        /*
         * Test if the requested memory area fits the target address space.
         * It can fail only on a 64-bit host with a 32-bit target.
         * On any other target/host combination, host mmap() handles
         * this error correctly.
         */
        if ((unsigned long)start + len - 1 > (abi_ulong)-1) {
            errno = EINVAL;
            goto fail;
        }

        /* worst case: we cannot map the file because the offset is not
           aligned, so we read it */
        if (!(flags & MAP_ANONYMOUS) &&
            (offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) {
            /* msync() won't work here, so we return an error if write is
               possible while it is a shared mapping */
            if ((flags & MAP_TYPE) == MAP_SHARED &&
                (prot & PROT_WRITE)) {
                errno = EINVAL;
                goto fail;
            }
            retaddr = target_mmap(start, len, prot | PROT_WRITE,
                                  MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
                                  -1, 0);
            if (retaddr == -1) {
                goto fail;
            }
            if (pread(fd, g2h(start), len, offset) == -1) {
                goto fail;
            }
            if (!(prot & PROT_WRITE)) {
                ret = target_mprotect(start, len, prot);
                if (ret != 0) {
                    start = ret;
                    goto the_end;
                }
            }
            goto the_end;
        }

        /* handle the start of the mapping */
        if (start > real_start) {
            if (real_end == real_start + qemu_host_page_size) {
                /* one single host page */
                ret = mmap_frag(real_start, start, end,
                                prot, flags, fd, offset);
                if (ret == -1) {
                    goto fail;
                }
                goto the_end1;
            }
            ret = mmap_frag(real_start, start, real_start + qemu_host_page_size,
                            prot, flags, fd, offset);
            if (ret == -1) {
                goto fail;
            }
            real_start += qemu_host_page_size;
        }
        /* handle the end of the mapping */
        if (end < real_end) {
            ret = mmap_frag(real_end - qemu_host_page_size,
                            real_end - qemu_host_page_size, real_end,
                            prot, flags, fd,
                            offset + real_end - qemu_host_page_size - start);
            if (ret == -1) {
                goto fail;
            }
            real_end -= qemu_host_page_size;
        }

        /* map the middle (easier) */
        if (real_start < real_end) {
            void *p;
            unsigned long offset1;
            if (flags & MAP_ANONYMOUS) {
                offset1 = 0;
            } else {
                offset1 = offset + real_start - start;
            }
            p = mmap(g2h(real_start), real_end - real_start,
                     prot, flags, fd, offset1);
            if (p == MAP_FAILED) {
                goto fail;
            }
        }
    }
 the_end1:
    page_set_flags(start, start + len, prot | PAGE_VALID);
 the_end:
#ifdef DEBUG_MMAP
    printf("ret=0x" TARGET_ABI_FMT_lx "\n", start);
    page_dump(stdout);
    printf("\n");
#endif
    tb_invalidate_phys_range(start, start + len);
    mmap_unlock();
    return start;
fail:
    mmap_unlock();
    return -1;
}

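/*
 * When a fixed guest address reservation (RESERVED_VA) is in use,
 * unmapped guest ranges must stay reserved on the host.  Replace the
 * range with a PROT_NONE anonymous mapping, but keep host pages that
 * are still partially used by neighbouring guest mappings.
 */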
static void mmap_reserve(abi_ulong start, abi_ulong size)
{
    abi_ulong real_start;
    abi_ulong real_end;
    abi_ulong addr;
    abi_ulong end;
    int prot;

    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(start + size);
    end = start + size;
    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0) {
            real_start += qemu_host_page_size;
        }
    }
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0) {
            real_end -= qemu_host_page_size;
        }
    }
    if (real_start != real_end) {
        mmap(g2h(real_start), real_end - real_start, PROT_NONE,
             MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE,
             -1, 0);
    }
}

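/*
 * Like munmap(), but host pages that are only partially covered by the
 * target range are kept mapped if any target page within them is still
 * in use.
 */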
int target_munmap(abi_ulong start, abi_ulong len)
{
    abi_ulong end, real_start, real_end, addr;
    int prot, ret;

#ifdef DEBUG_MMAP
    printf("munmap: start=0x" TARGET_ABI_FMT_lx " len=0x"
           TARGET_ABI_FMT_lx "\n",
           start, len);
#endif
    if (start & ~TARGET_PAGE_MASK) {
        return -EINVAL;
    }
    len = TARGET_PAGE_ALIGN(len);
    if (len == 0) {
        return -EINVAL;
    }
    mmap_lock();
    end = start + len;
    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(end);

    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0) {
            real_start += qemu_host_page_size;
        }
    }
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0) {
            real_end -= qemu_host_page_size;
        }
    }

    ret = 0;
    /* unmap what we can */
    if (real_start < real_end) {
        if (RESERVED_VA) {
            mmap_reserve(real_start, real_end - real_start);
        } else {
            ret = munmap(g2h(real_start), real_end - real_start);
        }
    }

    if (ret == 0) {
        page_set_flags(start, start + len, 0);
        tb_invalidate_phys_range(start, start + len);
    }
    mmap_unlock();
    return ret;
}

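/*
 * Three cases: with MREMAP_FIXED the host kernel is asked for exactly
 * g2h(new_addr); with MREMAP_MAYMOVE a candidate area is found with
 * mmap_find_vma() and the kernel is told to move the mapping there;
 * otherwise the mapping is grown or shrunk in place, which with
 * RESERVED_VA is only allowed when the pages being grown into are free.
 */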
abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
                       abi_ulong new_size, unsigned long flags,
                       abi_ulong new_addr)
{
    int prot;
    void *host_addr;

    mmap_lock();

    if (flags & MREMAP_FIXED) {
        host_addr = (void *)syscall(__NR_mremap, g2h(old_addr),
                                    old_size, new_size,
                                    flags,
                                    g2h(new_addr));

        if (RESERVED_VA && host_addr != MAP_FAILED) {
            /* If new and old addresses overlap then the above mremap will
               already have failed with EINVAL.  */
            mmap_reserve(old_addr, old_size);
        }
    } else if (flags & MREMAP_MAYMOVE) {
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, new_size);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        } else {
            host_addr = (void *)syscall(__NR_mremap, g2h(old_addr),
                                        old_size, new_size,
                                        flags | MREMAP_FIXED,
                                        g2h(mmap_start));
            if (RESERVED_VA) {
                mmap_reserve(old_addr, old_size);
            }
        }
    } else {
        int prot = 0;
        if (RESERVED_VA && old_size < new_size) {
            abi_ulong addr;
            for (addr = old_addr + old_size;
                 addr < old_addr + new_size;
                 addr++) {
                prot |= page_get_flags(addr);
            }
        }
        if (prot == 0) {
            host_addr = mremap(g2h(old_addr), old_size, new_size, flags);
            if (host_addr != MAP_FAILED && RESERVED_VA && old_size > new_size) {
                /* re-reserve the tail released by shrinking the mapping */
                mmap_reserve(old_addr + new_size, old_size - new_size);
            }
        } else {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        }
        /* Check if address fits target address space */
        if ((unsigned long)host_addr + new_size > (abi_ulong)-1) {
            /* Revert mremap() changes */
            host_addr = mremap(g2h(old_addr), new_size, old_size, flags);
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        }
    }

    if (host_addr == MAP_FAILED) {
        new_addr = -1;
    } else {
        new_addr = h2g(host_addr);
        prot = page_get_flags(old_addr);
        page_set_flags(old_addr, old_addr + old_size, 0);
        page_set_flags(new_addr, new_addr + new_size, prot | PAGE_VALID);
    }
    tb_invalidate_phys_range(new_addr, new_addr + new_size);
    mmap_unlock();
    return new_addr;
}

int target_msync(abi_ulong start, abi_ulong len, int flags)
{
    abi_ulong end;

    if (start & ~TARGET_PAGE_MASK) {
        return -EINVAL;
    }
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (end < start) {
        return -EINVAL;
    }
    if (end == start) {
        return 0;
    }

    start &= qemu_host_page_mask;
    return msync(g2h(start), end - start, flags);
}
783