/*
 *  mmap support for qemu
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <pthread.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <linux/mman.h>
#include <linux/unistd.h>

#include "qemu.h"
#include "qemu-common.h"

//#define DEBUG_MMAP

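/*
 * The mmap lock protects the guest page flags and the host mappings
 * backing them.  It is recursive: mmap_lock_count is per-thread, so a
 * thread may take the lock several times, and only the outermost
 * lock/unlock pair touches the underlying mutex.
 */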
static pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER;
static __thread int mmap_lock_count;

void mmap_lock(void)
{
    if (mmap_lock_count++ == 0) {
        pthread_mutex_lock(&mmap_mutex);
    }
}

void mmap_unlock(void)
{
    if (--mmap_lock_count == 0) {
        pthread_mutex_unlock(&mmap_mutex);
    }
}

/* Grab lock to make sure things are in a consistent state after fork().  */
void mmap_fork_start(void)
{
    if (mmap_lock_count)
        abort();
    pthread_mutex_lock(&mmap_mutex);
}

void mmap_fork_end(int child)
{
    if (child)
        pthread_mutex_init(&mmap_mutex, NULL);
    else
        pthread_mutex_unlock(&mmap_mutex);
}

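/*
 * When the target page size is smaller than the host page size,
 * several guest pages share one host page.  Changing the protection
 * of a partial host page must therefore OR in the flags of the
 * neighbouring guest pages so they keep working, which is why the
 * first and last host pages are handled separately below.
 */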
/* NOTE: all the constants are the HOST ones, but addresses are target. */
int target_mprotect(abi_ulong start, abi_ulong len, int prot)
{
    abi_ulong end, host_start, host_end, addr;
    int prot1, ret;

#ifdef DEBUG_MMAP
    printf("mprotect: start=0x" TARGET_ABI_FMT_lx
           " len=0x" TARGET_ABI_FMT_lx " prot=%c%c%c\n", start, len,
           prot & PROT_READ ? 'r' : '-',
           prot & PROT_WRITE ? 'w' : '-',
           prot & PROT_EXEC ? 'x' : '-');
#endif

    if ((start & ~TARGET_PAGE_MASK) != 0)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (end < start)
        return -EINVAL;
    prot &= PROT_READ | PROT_WRITE | PROT_EXEC;
    if (len == 0)
        return 0;

    mmap_lock();
    host_start = start & qemu_host_page_mask;
    host_end = HOST_PAGE_ALIGN(end);
    if (start > host_start) {
        /* handle host page containing start */
        prot1 = prot;
        for (addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        if (host_end == host_start + qemu_host_page_size) {
            for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(addr);
            }
            end = host_end;
        }
        ret = mprotect(g2h(host_start), qemu_host_page_size, prot1 & PAGE_BITS);
        if (ret != 0)
            goto error;
        host_start += qemu_host_page_size;
    }
    if (end < host_end) {
        prot1 = prot;
        for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        ret = mprotect(g2h(host_end - qemu_host_page_size), qemu_host_page_size,
                       prot1 & PAGE_BITS);
        if (ret != 0)
            goto error;
        host_end -= qemu_host_page_size;
    }

    /* handle the pages in the middle */
    if (host_start < host_end) {
        ret = mprotect(g2h(host_start), host_end - host_start, prot);
        if (ret != 0)
            goto error;
    }
    page_set_flags(start, start + len, prot | PAGE_VALID);
    mmap_unlock();
    return 0;
error:
    mmap_unlock();
    return ret;
}

/* Map an incomplete host page: the guest range [start, end) covers
   only part of the host page starting at real_start, so the flags of
   the surrounding guest pages must be preserved. */
static int mmap_frag(abi_ulong real_start,
                     abi_ulong start, abi_ulong end,
                     int prot, int flags, int fd, abi_ulong offset)
{
    abi_ulong real_end, addr;
    void *host_start;
    int prot1, prot_new;

    real_end = real_start + qemu_host_page_size;
    host_start = g2h(real_start);

    /* get the protection of the target pages outside the mapping */
    prot1 = 0;
    for (addr = real_start; addr < real_end; addr += TARGET_PAGE_SIZE) {
        if (addr < start || addr >= end)
            prot1 |= page_get_flags(addr);
    }

157 
158     if (prot1 == 0) {
159         /* no page was there, so we allocate one */
160         void *p = mmap(host_start, qemu_host_page_size, prot,
161                        flags | MAP_ANONYMOUS, -1, 0);
162         if (p == MAP_FAILED)
163             return -1;
164         prot1 = prot;
165     }
166     prot1 &= PAGE_BITS;
167 
168     prot_new = prot | prot1;
169     if (!(flags & MAP_ANONYMOUS)) {
170         /* msync() won't work here, so we return an error if write is
171            possible while it is a shared mapping */
172         if ((flags & MAP_TYPE) == MAP_SHARED &&
173             (prot & PROT_WRITE))
174             return -1;
175 
176         /* adjust protection to be able to read */
177         if (!(prot1 & PROT_WRITE))
178             mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE);
179 
180         /* read the corresponding file data */
181         if (pread(fd, g2h(start), end - start, offset) == -1)
182             return -1;
183 
184         /* put final protection */
185         if (prot_new != (prot1 | PROT_WRITE))
186             mprotect(host_start, qemu_host_page_size, prot_new);
187     } else {
188         /* just update the protection */
189         if (prot_new != prot1) {
190             mprotect(host_start, qemu_host_page_size, prot_new);
191         }
192     }
193     return 0;
194 }
195 
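/* Default start of the area searched for guest mappings when the
   guest does not specify an address itself.  */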
#if HOST_LONG_BITS == 64 && TARGET_ABI_BITS == 64
# define TASK_UNMAPPED_BASE  (1ul << 38)
#elif defined(__CYGWIN__)
/* Cygwin doesn't have a whole lot of address space.  */
# define TASK_UNMAPPED_BASE  0x18000000
#else
# define TASK_UNMAPPED_BASE  0x40000000
#endif
abi_ulong mmap_next_start = TASK_UNMAPPED_BASE;

unsigned long last_brk;

#ifdef CONFIG_USE_GUEST_BASE
/* Subroutine of mmap_find_vma, used when we have pre-allocated a chunk
   of guest address space.  */
static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size)
{
    abi_ulong addr;
    abi_ulong end_addr;
    int prot;
    int looped = 0;

    if (size > RESERVED_VA) {
        return (abi_ulong)-1;
    }

    size = HOST_PAGE_ALIGN(size);
    end_addr = start + size;
    if (end_addr > RESERVED_VA) {
        end_addr = RESERVED_VA;
    }
    addr = end_addr - qemu_host_page_size;

    while (1) {
        if (addr > end_addr) {
            if (looped) {
                return (abi_ulong)-1;
            }
            end_addr = RESERVED_VA;
            addr = end_addr - qemu_host_page_size;
            looped = 1;
            continue;
        }
        prot = page_get_flags(addr);
        if (prot) {
            end_addr = addr;
        }
        if (addr + size == end_addr) {
            break;
        }
        addr -= qemu_host_page_size;
    }

    if (start == mmap_next_start) {
        mmap_next_start = addr;
    }

    return addr;
}
#endif

/*
 * Find and reserve a free memory area of size 'size'. The search
 * starts at 'start'.
 * It must be called with mmap_lock() held.
 * Returns -1 on error.
 */
abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size)
{
    void *ptr, *prev;
    abi_ulong addr;
    int wrapped, repeat;

    /* If 'start' == 0, then a default start address is used. */
    if (start == 0) {
        start = mmap_next_start;
    } else {
        start &= qemu_host_page_mask;
    }

    size = HOST_PAGE_ALIGN(size);

#ifdef CONFIG_USE_GUEST_BASE
    if (RESERVED_VA) {
        return mmap_find_vma_reserved(start, size);
    }
#endif

    addr = start;
    wrapped = repeat = 0;
    prev = NULL;

    for (;; prev = ptr) {
        /*
         * Reserve needed memory area to avoid a race.
         * It should be discarded using:
         *  - mmap() with MAP_FIXED flag
         *  - mremap() with MREMAP_FIXED flag
         *  - shmat() with SHM_REMAP flag
         */
        ptr = mmap(g2h(addr), size, PROT_NONE,
                   MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);

        /* ENOMEM if the host address space has no free memory */
        if (ptr == MAP_FAILED) {
            return (abi_ulong)-1;
        }

        /* Count the number of sequential returns of the same address.
           This is used to modify the search algorithm below.  */
        repeat = (ptr == prev ? repeat + 1 : 0);

        if (h2g_valid(ptr + size - 1)) {
            addr = h2g(ptr);

            if ((addr & ~TARGET_PAGE_MASK) == 0) {
                /* Success.  */
                if (start == mmap_next_start && addr >= TASK_UNMAPPED_BASE) {
                    mmap_next_start = addr + size;
                }
                return addr;
            }

            /* The address is not properly aligned for the target.  */
            switch (repeat) {
            case 0:
                /* Assume the result that the kernel gave us is the
                   first with enough free space, so start again at the
                   next higher target page.  */
                addr = TARGET_PAGE_ALIGN(addr);
                break;
            case 1:
                /* Sometimes the kernel decides to perform the allocation
                   at the top end of memory instead.  */
                addr &= TARGET_PAGE_MASK;
                break;
            case 2:
                /* Start over at low memory.  */
                addr = 0;
                break;
            default:
                /* Fail.  This unaligned block must be the last.  */
                addr = -1;
                break;
            }
        } else {
            /* Since the result the kernel gave didn't fit, start
               again at low memory.  If any repetition, fail.  */
            addr = (repeat ? -1 : 0);
        }

        /* Unmap and try again.  */
        munmap(ptr, size);

        /* ENOMEM if we checked the whole of the target address space.  */
        if (addr == (abi_ulong)-1) {
            return (abi_ulong)-1;
        } else if (addr == 0) {
            if (wrapped) {
                return (abi_ulong)-1;
            }
            wrapped = 1;
            /* Don't actually use 0 when wrapping, instead indicate
               that we'd truly like an allocation in low memory.  */
            addr = (mmap_min_addr > TARGET_PAGE_SIZE
                     ? TARGET_PAGE_ALIGN(mmap_min_addr)
                     : TARGET_PAGE_SIZE);
        } else if (wrapped && addr >= start) {
            return (abi_ulong)-1;
        }
    }
}

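/*
 * Map a region of guest address space.  For MAP_FIXED requests the
 * region is split into up to three pieces: a head fragment sharing
 * its host page with an existing mapping, a middle section of whole
 * host pages mapped directly, and a tail fragment.  Partial host
 * pages are handled by mmap_frag() above.
 */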
/* NOTE: all the constants are the HOST ones */
abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
                     int flags, int fd, abi_ulong offset)
{
    abi_ulong ret, end, real_start, real_end, retaddr, host_offset, host_len;

    mmap_lock();
#ifdef DEBUG_MMAP
    {
        printf("mmap: start=0x" TARGET_ABI_FMT_lx
               " len=0x" TARGET_ABI_FMT_lx " prot=%c%c%c flags=",
               start, len,
               prot & PROT_READ ? 'r' : '-',
               prot & PROT_WRITE ? 'w' : '-',
               prot & PROT_EXEC ? 'x' : '-');
        if (flags & MAP_FIXED)
            printf("MAP_FIXED ");
        if (flags & MAP_ANONYMOUS)
            printf("MAP_ANON ");
        switch (flags & MAP_TYPE) {
        case MAP_PRIVATE:
            printf("MAP_PRIVATE ");
            break;
        case MAP_SHARED:
            printf("MAP_SHARED ");
            break;
        default:
            printf("[MAP_TYPE=0x%x] ", flags & MAP_TYPE);
            break;
        }
        printf("fd=%d offset=" TARGET_ABI_FMT_lx "\n", fd, offset);
    }
#endif

    if (offset & ~TARGET_PAGE_MASK) {
        errno = EINVAL;
        goto fail;
    }

    len = TARGET_PAGE_ALIGN(len);
    if (len == 0)
        goto the_end;
    real_start = start & qemu_host_page_mask;
    host_offset = offset & qemu_host_page_mask;

    /* If the user is asking for the kernel to find a location, do that
       before we truncate the length for mapping files below.  */
    if (!(flags & MAP_FIXED)) {
        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);
        start = mmap_find_vma(real_start, host_len);
        if (start == (abi_ulong)-1) {
            errno = ENOMEM;
            goto fail;
        }
    }

    /* When mapping files into a memory area larger than the file, accesses
       to pages beyond the file size will cause a SIGBUS.

       For example, if mmapping a file of 100 bytes on a host with 4K pages
       emulating a target with 8K pages, the target expects to be able to
       access the first 8K. But the host will trap us on any access beyond
       4K.

       When emulating a target with a larger page size than the host's, we
       may need to truncate file maps at EOF and add extra anonymous pages
       up to the target's page boundary.  */

    if ((qemu_real_host_page_size < TARGET_PAGE_SIZE)
        && !(flags & MAP_ANONYMOUS)) {
        struct stat sb;

        if (fstat(fd, &sb) == -1)
            goto fail;

        /* Are we trying to create a map beyond EOF?  */
        if (offset + len > sb.st_size) {
            /* If so, truncate the file map at EOF, aligned to the
               host's real page size.  Additional anonymous maps
               will be created beyond EOF.  */
            len = (sb.st_size - offset);
            len += qemu_real_host_page_size - 1;
            len &= ~(qemu_real_host_page_size - 1);
        }
    }

    if (!(flags & MAP_FIXED)) {
        unsigned long host_start;
        void *p;

        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);

        /* Note: we prefer to control the mapping address. It is
           especially important if qemu_host_page_size >
           qemu_real_host_page_size */
        p = mmap(g2h(start), host_len, prot,
                 flags | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            goto fail;
        /* update start so that it points to the file position at 'offset' */
        host_start = (unsigned long)p;
        if (!(flags & MAP_ANONYMOUS)) {
            p = mmap(g2h(start), len, prot,
                     flags | MAP_FIXED, fd, host_offset);
            if (p == MAP_FAILED) {
                munmap(g2h(start), host_len);
                goto fail;
            }
            host_start += offset - host_offset;
        }
        start = h2g(host_start);
    } else {
        if (start & ~TARGET_PAGE_MASK) {
            errno = EINVAL;
            goto fail;
        }
        end = start + len;
        real_end = HOST_PAGE_ALIGN(end);

        /*
         * Test if the requested memory area fits the target address space.
         * It can fail only on a 64-bit host with a 32-bit target.
         * On any other target/host combination, the host mmap() handles
         * this error correctly.
         */
        if ((unsigned long)start + len - 1 > (abi_ulong)-1) {
            errno = EINVAL;
            goto fail;
        }

        /* worst case: we cannot map the file because the offset is not
           aligned, so we read it */
        if (!(flags & MAP_ANONYMOUS) &&
            (offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) {
            /* msync() won't work here, so we return an error if writes are
               possible while it is a shared mapping */
            if ((flags & MAP_TYPE) == MAP_SHARED &&
                (prot & PROT_WRITE)) {
                errno = EINVAL;
                goto fail;
            }
            retaddr = target_mmap(start, len, prot | PROT_WRITE,
                                  MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
                                  -1, 0);
            if (retaddr == -1)
                goto fail;
            if (pread(fd, g2h(start), len, offset) == -1)
                goto fail;
            if (!(prot & PROT_WRITE)) {
                ret = target_mprotect(start, len, prot);
                if (ret != 0) {
                    start = ret;
                    goto the_end;
                }
            }
            goto the_end;
        }

        /* handle the start of the mapping */
        if (start > real_start) {
            if (real_end == real_start + qemu_host_page_size) {
                /* one single host page */
                ret = mmap_frag(real_start, start, end,
                                prot, flags, fd, offset);
                if (ret == -1)
                    goto fail;
                goto the_end1;
            }
            ret = mmap_frag(real_start, start, real_start + qemu_host_page_size,
                            prot, flags, fd, offset);
            if (ret == -1)
                goto fail;
            real_start += qemu_host_page_size;
        }
        /* handle the end of the mapping */
        if (end < real_end) {
            ret = mmap_frag(real_end - qemu_host_page_size,
                            real_end - qemu_host_page_size, real_end,
                            prot, flags, fd,
                            offset + real_end - qemu_host_page_size - start);
            if (ret == -1)
                goto fail;
            real_end -= qemu_host_page_size;
        }

        /* map the middle (easier) */
        if (real_start < real_end) {
            void *p;
            unsigned long offset1;
            if (flags & MAP_ANONYMOUS)
                offset1 = 0;
            else
                offset1 = offset + real_start - start;
            p = mmap(g2h(real_start), real_end - real_start,
                     prot, flags, fd, offset1);
            if (p == MAP_FAILED)
                goto fail;
        }
    }
 the_end1:
    page_set_flags(start, start + len, prot | PAGE_VALID);
 the_end:
#ifdef DEBUG_MMAP
    printf("ret=0x" TARGET_ABI_FMT_lx "\n", start);
    page_dump(stdout);
    printf("\n");
#endif
    tb_invalidate_phys_range(start, start + len, 0);
    mmap_unlock();
    return start;
fail:
    mmap_unlock();
    return -1;
}

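/*
 * Give a range back as PROT_NONE instead of unmapping it, so that the
 * pre-reserved block of guest address space stays allocated.  Host
 * pages still partially covered by other guest mappings are left
 * untouched.
 */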
static void mmap_reserve(abi_ulong start, abi_ulong size)
{
    abi_ulong real_start;
    abi_ulong real_end;
    abi_ulong addr;
    abi_ulong end;
    int prot;

    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(start + size);
    end = start + size;
    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0)
            real_start += qemu_host_page_size;
    }
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0)
            real_end -= qemu_host_page_size;
    }
    if (real_start != real_end) {
        mmap(g2h(real_start), real_end - real_start, PROT_NONE,
             MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE,
             -1, 0);
    }
}

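/* Unmap the guest range [start, start + len).  Host pages that are
   still partially covered by other guest mappings are kept mapped. */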
int target_munmap(abi_ulong start, abi_ulong len)
{
    abi_ulong end, real_start, real_end, addr;
    int prot, ret;

#ifdef DEBUG_MMAP
    printf("munmap: start=0x" TARGET_ABI_FMT_lx " len=0x"
           TARGET_ABI_FMT_lx "\n",
           start, len);
#endif
    if (start & ~TARGET_PAGE_MASK)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    if (len == 0)
        return -EINVAL;
    mmap_lock();
    end = start + len;
    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(end);

    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0)
            real_start += qemu_host_page_size;
    }
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0)
            real_end -= qemu_host_page_size;
    }

    ret = 0;
    /* unmap what we can */
    if (real_start < real_end) {
        if (RESERVED_VA) {
            mmap_reserve(real_start, real_end - real_start);
        } else {
            ret = munmap(g2h(real_start), real_end - real_start);
        }
    }

    if (ret == 0) {
        page_set_flags(start, start + len, 0);
        tb_invalidate_phys_range(start, start + len, 0);
    }
    mmap_unlock();
    return ret;
}

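/*
 * Resize and/or move a guest mapping.  When a pre-reserved block of
 * guest address space is in use (RESERVED_VA), any range given back
 * to the host is re-reserved via mmap_reserve() rather than freed.
 */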
abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
                       abi_ulong new_size, unsigned long flags,
                       abi_ulong new_addr)
{
    int prot;
    void *host_addr;

    mmap_lock();

    if (flags & MREMAP_FIXED) {
        host_addr = (void *) syscall(__NR_mremap, g2h(old_addr),
                                     old_size, new_size,
                                     flags,
                                     g2h(new_addr));

        if (RESERVED_VA && host_addr != MAP_FAILED) {
            /* If new and old addresses overlap then the above mremap will
               already have failed with EINVAL.  */
            mmap_reserve(old_addr, old_size);
        }
    } else if (flags & MREMAP_MAYMOVE) {
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, new_size);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        } else {
            host_addr = (void *) syscall(__NR_mremap, g2h(old_addr),
                                         old_size, new_size,
                                         flags | MREMAP_FIXED,
                                         g2h(mmap_start));
            if (RESERVED_VA) {
                mmap_reserve(old_addr, old_size);
            }
        }
    } else {
        int prot = 0;
        if (RESERVED_VA && old_size < new_size) {
            abi_ulong addr;
            for (addr = old_addr + old_size;
                 addr < old_addr + new_size;
                 addr++) {
                prot |= page_get_flags(addr);
            }
        }
        if (prot == 0) {
            host_addr = mremap(g2h(old_addr), old_size, new_size, flags);
            if (host_addr != MAP_FAILED && RESERVED_VA && old_size > new_size) {
                /* shrinking: re-reserve the tail given back to the host */
                mmap_reserve(old_addr + new_size, old_size - new_size);
            }
        } else {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        }
        /* Check if address fits target address space */
        if ((unsigned long)host_addr + new_size > (abi_ulong)-1) {
            /* Revert mremap() changes */
            host_addr = mremap(g2h(old_addr), new_size, old_size, flags);
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        }
    }

    if (host_addr == MAP_FAILED) {
        new_addr = -1;
    } else {
        new_addr = h2g(host_addr);
        prot = page_get_flags(old_addr);
        page_set_flags(old_addr, old_addr + old_size, 0);
        page_set_flags(new_addr, new_addr + new_size, prot | PAGE_VALID);
        /* nothing changed on failure, so only invalidate on success */
        tb_invalidate_phys_range(new_addr, new_addr + new_size, 0);
    }
    mmap_unlock();
    return new_addr;
}

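/* Flush a guest range to its backing file.  The start address is
   rounded down to a host page boundary before calling msync(). */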
int target_msync(abi_ulong start, abi_ulong len, int flags)
{
    abi_ulong end;

    if (start & ~TARGET_PAGE_MASK)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (end < start)
        return -EINVAL;
    if (end == start)
        return 0;

    start &= qemu_host_page_mask;
    return msync(g2h(start), end - start, flags);
}