/*
 *  mmap support for qemu
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <linux/mman.h>
#include <linux/unistd.h>

#include "qemu.h"
#include "qemu-common.h"

//#define DEBUG_MMAP

#if defined(CONFIG_USE_NPTL)
static pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER;
static __thread int mmap_lock_count;

void mmap_lock(void)
{
    if (mmap_lock_count++ == 0) {
        pthread_mutex_lock(&mmap_mutex);
    }
}

void mmap_unlock(void)
{
    if (--mmap_lock_count == 0) {
        pthread_mutex_unlock(&mmap_mutex);
    }
}

/* Grab lock to make sure things are in a consistent state after fork().  */
void mmap_fork_start(void)
{
    if (mmap_lock_count)
        abort();
    pthread_mutex_lock(&mmap_mutex);
}

void mmap_fork_end(int child)
{
    if (child)
        pthread_mutex_init(&mmap_mutex, NULL);
    else
        pthread_mutex_unlock(&mmap_mutex);
}
#else
/* We aren't thread-safe to start with, so no need to worry about locking.  */
void mmap_lock(void)
{
}

void mmap_unlock(void)
{
}
#endif
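
/*
 * Usage note (illustrative, not part of the original source): the NPTL
 * lock above is recursive per thread, thanks to the __thread counter,
 * so code that already holds the lock may safely call into helpers
 * that take it again. A minimal sketch:
 *
 *     mmap_lock();
 *     mmap_lock();      // nested call: only bumps mmap_lock_count to 2
 *     ...               // mutate mappings / page flags
 *     mmap_unlock();    // count drops to 1, mutex still held
 *     mmap_unlock();    // count reaches 0, mutex actually released
 *
 * Around fork(), mmap_fork_start()/mmap_fork_end() hold the mutex across
 * the syscall so the child never observes half-updated mapping state.
 */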

/* NOTE: all the constants are the HOST ones, but addresses are target. */
int target_mprotect(abi_ulong start, abi_ulong len, int prot)
{
    abi_ulong end, host_start, host_end, addr;
    int prot1, ret;

#ifdef DEBUG_MMAP
    printf("mprotect: start=0x" TARGET_ABI_FMT_lx
           " len=0x" TARGET_ABI_FMT_lx " prot=%c%c%c\n", start, len,
           prot & PROT_READ ? 'r' : '-',
           prot & PROT_WRITE ? 'w' : '-',
           prot & PROT_EXEC ? 'x' : '-');
#endif

    if ((start & ~TARGET_PAGE_MASK) != 0)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (end < start)
        return -EINVAL;
    prot &= PROT_READ | PROT_WRITE | PROT_EXEC;
    if (len == 0)
        return 0;

    mmap_lock();
    host_start = start & qemu_host_page_mask;
    host_end = HOST_PAGE_ALIGN(end);
    if (start > host_start) {
        /* handle host page containing start */
        prot1 = prot;
        for (addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        if (host_end == host_start + qemu_host_page_size) {
            for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(addr);
            }
            end = host_end;
        }
        ret = mprotect(g2h(host_start), qemu_host_page_size, prot1 & PAGE_BITS);
        if (ret != 0)
            goto error;
        host_start += qemu_host_page_size;
    }
    if (end < host_end) {
        prot1 = prot;
        for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        ret = mprotect(g2h(host_end - qemu_host_page_size), qemu_host_page_size,
                       prot1 & PAGE_BITS);
        if (ret != 0)
            goto error;
        host_end -= qemu_host_page_size;
    }

    /* handle the pages in the middle */
    if (host_start < host_end) {
        ret = mprotect(g2h(host_start), host_end - host_start, prot);
        if (ret != 0)
            goto error;
    }
    page_set_flags(start, start + len, prot | PAGE_VALID);
    mmap_unlock();
    return 0;
error:
    mmap_unlock();
    return ret;
}
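
/*
 * Worked example (illustrative only): with 4K target pages on an
 * 8K-page host, target_mprotect(0x1000, 0x1000, PROT_READ) covers only
 * the top half of host page [0x0000, 0x2000). Host mprotect() cannot
 * split that page, so the code ORs in page_get_flags(0x0000) for the
 * untouched neighbour and applies the union to the whole host page;
 * the stricter per-target-page view is then recorded via
 * page_set_flags(), so the guest still sees [0x1000, 0x2000) as
 * read-only.
 */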

/* map an incomplete host page */
static int mmap_frag(abi_ulong real_start,
                     abi_ulong start, abi_ulong end,
                     int prot, int flags, int fd, abi_ulong offset)
{
    abi_ulong real_end, addr;
    void *host_start;
    int prot1, prot_new;

    real_end = real_start + qemu_host_page_size;
    host_start = g2h(real_start);

    /* get the protection of the target pages outside the mapping */
    prot1 = 0;
    for (addr = real_start; addr < real_end; addr++) {
        if (addr < start || addr >= end)
            prot1 |= page_get_flags(addr);
    }

    if (prot1 == 0) {
        /* no page was there, so we allocate one */
        void *p = mmap(host_start, qemu_host_page_size, prot,
                       flags | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return -1;
        prot1 = prot;
    }
    prot1 &= PAGE_BITS;

    prot_new = prot | prot1;
    if (!(flags & MAP_ANONYMOUS)) {
        /* msync() won't work here, so we return an error if write is
           possible while it is a shared mapping */
        if ((flags & MAP_TYPE) == MAP_SHARED &&
            (prot & PROT_WRITE))
            return -1;

        /* adjust protection to be able to read */
        if (!(prot1 & PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE);

        /* read the corresponding file data */
        if (pread(fd, g2h(start), end - start, offset) == -1)
            return -1;

        /* put final protection */
        if (prot_new != (prot1 | PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot_new);
    } else {
        /* just update the protection */
        if (prot_new != prot1) {
            mprotect(host_start, qemu_host_page_size, prot_new);
        }
    }
    return 0;
}
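
/*
 * Sketch of what mmap_frag() achieves (illustrative): suppose a guest
 * file mapping ends partway through a host page. The file content for
 * [start, end) cannot simply be mmap'ed, because the rest of the host
 * page may already belong to other target pages. Instead the host page
 * is made anonymous (or kept as-is), temporarily made writable if
 * needed, and the file bytes are copied in with
 *
 *     pread(fd, g2h(start), end - start, offset);
 *
 * before the final protection is restored. Writable MAP_SHARED
 * fragments are rejected because such copied writes could never be
 * synced back to the file.
 */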

#if HOST_LONG_BITS == 64 && TARGET_ABI_BITS == 64
# define TASK_UNMAPPED_BASE  (1ul << 38)
#elif defined(__CYGWIN__)
/* Cygwin doesn't have a whole lot of address space.  */
# define TASK_UNMAPPED_BASE  0x18000000
#else
# define TASK_UNMAPPED_BASE  0x40000000
#endif
abi_ulong mmap_next_start = TASK_UNMAPPED_BASE;

unsigned long last_brk;

#ifdef CONFIG_USE_GUEST_BASE
/* Subroutine of mmap_find_vma, used when we have pre-allocated a chunk
   of guest address space.  */
static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size)
{
    abi_ulong addr;
    abi_ulong end_addr;
    int prot;
    int looped = 0;

    if (size > RESERVED_VA) {
        return (abi_ulong)-1;
    }

    size = HOST_PAGE_ALIGN(size);
    end_addr = start + size;
    if (end_addr > RESERVED_VA) {
        end_addr = RESERVED_VA;
    }
    addr = end_addr - qemu_host_page_size;

    while (1) {
        if (addr > end_addr) {
            if (looped) {
                return (abi_ulong)-1;
            }
            end_addr = RESERVED_VA;
            addr = end_addr - qemu_host_page_size;
            looped = 1;
            continue;
        }
        prot = page_get_flags(addr);
        if (prot) {
            end_addr = addr;
        }
        if (addr + size == end_addr) {
            break;
        }
        addr -= qemu_host_page_size;
    }

    if (start == mmap_next_start) {
        mmap_next_start = addr;
    }

    return addr;
}
#endif
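
/*
 * Illustrative walk-through: when the whole guest space is pre-reserved
 * (RESERVED_VA != 0) the host kernel is never consulted; the search
 * above scans downwards from just below end_addr, using the per-page
 * flags as the sole source of truth. Any page with non-zero flags
 * resets the candidate window (end_addr = addr); the loop succeeds as
 * soon as a run of unused pages of exactly 'size' bytes ends at
 * end_addr, and it wraps around once before giving up with
 * (abi_ulong)-1.
 */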

/*
 * Find and reserve a free memory area of size 'size'. The search
 * starts at 'start'.
 * It must be called with mmap_lock() held.
 * Return -1 if error.
 */
abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size)
{
    void *ptr, *prev;
    abi_ulong addr;
    int wrapped, repeat;

    /* If 'start' == 0, then a default start address is used. */
    if (start == 0) {
        start = mmap_next_start;
    } else {
        start &= qemu_host_page_mask;
    }

    size = HOST_PAGE_ALIGN(size);

#ifdef CONFIG_USE_GUEST_BASE
    if (RESERVED_VA) {
        return mmap_find_vma_reserved(start, size);
    }
#endif

    addr = start;
    wrapped = repeat = 0;
    prev = NULL;

    for (;; prev = ptr) {
        /*
         * Reserve needed memory area to avoid a race.
         * It should be discarded using:
         *  - mmap() with MAP_FIXED flag
         *  - mremap() with MREMAP_FIXED flag
         *  - shmat() with SHM_REMAP flag
         */
        ptr = mmap(g2h(addr), size, PROT_NONE,
                   MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);

        /* ENOMEM, if host address space has no memory */
        if (ptr == MAP_FAILED) {
            return (abi_ulong)-1;
        }

        /* Count the number of sequential returns of the same address.
           This is used to modify the search algorithm below.  */
        repeat = (ptr == prev ? repeat + 1 : 0);

        if (h2g_valid(ptr + size - 1)) {
            addr = h2g(ptr);

            if ((addr & ~TARGET_PAGE_MASK) == 0) {
                /* Success.  */
                if (start == mmap_next_start && addr >= TASK_UNMAPPED_BASE) {
                    mmap_next_start = addr + size;
                }
                return addr;
            }

            /* The address is not properly aligned for the target.  */
            switch (repeat) {
            case 0:
                /* Assume the result that the kernel gave us is the
                   first with enough free space, so start again at the
                   next higher target page.  */
                addr = TARGET_PAGE_ALIGN(addr);
                break;
            case 1:
                /* Sometimes the kernel decides to perform the allocation
                   at the top end of memory instead.  */
                addr &= TARGET_PAGE_MASK;
                break;
            case 2:
                /* Start over at low memory.  */
                addr = 0;
                break;
            default:
                /* Fail.  This unaligned block must be the last.  */
                addr = -1;
                break;
            }
        } else {
            /* Since the result the kernel gave didn't fit, start
               again at low memory.  If any repetition, fail.  */
            addr = (repeat ? -1 : 0);
        }

        /* Unmap and try again.  */
        munmap(ptr, size);

        /* ENOMEM if we checked the whole of the target address space.  */
        if (addr == (abi_ulong)-1) {
            return (abi_ulong)-1;
        } else if (addr == 0) {
            if (wrapped) {
                return (abi_ulong)-1;
            }
            wrapped = 1;
            /* Don't actually use 0 when wrapping, instead indicate
               that we'd truly like an allocation in low memory.  */
            addr = (mmap_min_addr > TARGET_PAGE_SIZE
                     ? TARGET_PAGE_ALIGN(mmap_min_addr)
                     : TARGET_PAGE_SIZE);
        } else if (wrapped && addr >= start) {
            return (abi_ulong)-1;
        }
    }
}
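
/*
 * Why the PROT_NONE probe above (illustrative): merely asking the
 * kernel where it would place a mapping and mapping there later is
 * racy in a multi-threaded process, so the probe itself reserves the
 * area:
 *
 *     ptr = mmap(g2h(addr), size, PROT_NONE,
 *                MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0);
 *
 * A successful, target-aligned result is returned still mapped; per
 * the comment in the loop, the caller is expected to replace the
 * reservation with MAP_FIXED (or MREMAP_FIXED / SHM_REMAP) rather than
 * munmap() it first.
 */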

/* NOTE: all the constants are the HOST ones */
abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
                     int flags, int fd, abi_ulong offset)
{
    abi_ulong ret, end, real_start, real_end, retaddr, host_offset, host_len;

    mmap_lock();
#ifdef DEBUG_MMAP
    {
        printf("mmap: start=0x" TARGET_ABI_FMT_lx
               " len=0x" TARGET_ABI_FMT_lx " prot=%c%c%c flags=",
               start, len,
               prot & PROT_READ ? 'r' : '-',
               prot & PROT_WRITE ? 'w' : '-',
               prot & PROT_EXEC ? 'x' : '-');
        if (flags & MAP_FIXED)
            printf("MAP_FIXED ");
        if (flags & MAP_ANONYMOUS)
            printf("MAP_ANON ");
        switch (flags & MAP_TYPE) {
        case MAP_PRIVATE:
            printf("MAP_PRIVATE ");
            break;
        case MAP_SHARED:
            printf("MAP_SHARED ");
            break;
        default:
            printf("[MAP_TYPE=0x%x] ", flags & MAP_TYPE);
            break;
        }
        printf("fd=%d offset=" TARGET_ABI_FMT_lx "\n", fd, offset);
    }
#endif

    if (offset & ~TARGET_PAGE_MASK) {
        errno = EINVAL;
        goto fail;
    }

    len = TARGET_PAGE_ALIGN(len);
    if (len == 0)
        goto the_end;
    real_start = start & qemu_host_page_mask;
    host_offset = offset & qemu_host_page_mask;

    /* If the user is asking for the kernel to find a location, do that
       before we truncate the length for mapping files below.  */
    if (!(flags & MAP_FIXED)) {
        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);
        start = mmap_find_vma(real_start, host_len);
        if (start == (abi_ulong)-1) {
            errno = ENOMEM;
            goto fail;
        }
    }

    /* When mapping files into a memory area larger than the file, accesses
       to pages beyond the file size will cause a SIGBUS.

       For example, if mmapping a file of 100 bytes on a host with 4K
       pages emulating a target with 8K pages, the target expects to be
       able to access the first 8K. But the host will trap us on any
       access beyond 4K.

       When emulating a target with a larger page size than the host's,
       we may need to truncate file maps at EOF and add extra anonymous
       pages up to the target's page boundary.  */

    if ((qemu_real_host_page_size < TARGET_PAGE_SIZE)
        && !(flags & MAP_ANONYMOUS)) {
        struct stat sb;

        if (fstat(fd, &sb) == -1)
            goto fail;

        /* Are we trying to create a map beyond EOF?  */
        if (offset + len > sb.st_size) {
            /* If so, truncate the file map at EOF aligned with
               the host's real page size. Additional anonymous maps
               will be created beyond EOF.  */
            len = (sb.st_size - offset);
            len += qemu_real_host_page_size - 1;
            len &= ~(qemu_real_host_page_size - 1);
        }
    }
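
    /* Worked numbers for the truncation above (illustrative): a 100-byte
       file, 4K host pages, 8K target pages, offset 0. len starts at 8K;
       offset + len (8K) > st_size (100), so len becomes 100 rounded up
       to 4K. The file then backs only the first 4K; the second 4K of
       the target page is supplied by the anonymous mapping created in
       the non-MAP_FIXED path below. */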

    if (!(flags & MAP_FIXED)) {
        unsigned long host_start;
        void *p;

        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);

        /* Note: we prefer to control the mapping address. It is
           especially important if qemu_host_page_size >
           qemu_real_host_page_size */
        p = mmap(g2h(start), host_len, prot,
                 flags | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            goto fail;
        /* update start so that it points to the file position at 'offset' */
        host_start = (unsigned long)p;
        if (!(flags & MAP_ANONYMOUS)) {
            p = mmap(g2h(start), len, prot,
                     flags | MAP_FIXED, fd, host_offset);
            if (p == MAP_FAILED) {
                munmap(g2h(start), host_len);
                goto fail;
            }
            host_start += offset - host_offset;
        }
        start = h2g(host_start);
    } else {
        if (start & ~TARGET_PAGE_MASK) {
            errno = EINVAL;
            goto fail;
        }
        end = start + len;
        real_end = HOST_PAGE_ALIGN(end);

        /*
         * Test if requested memory area fits target address space.
         * It can fail only on a 64-bit host with a 32-bit target.
         * On any other target/host combination, host mmap() handles
         * this error correctly.
         */
        if ((unsigned long)start + len - 1 > (abi_ulong)-1) {
            errno = EINVAL;
            goto fail;
        }

        /* worst case: we cannot map the file because the offset is not
           aligned, so we read it */
        if (!(flags & MAP_ANONYMOUS) &&
            (offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) {
            /* msync() won't work here, so we return an error if write is
               possible while it is a shared mapping */
            if ((flags & MAP_TYPE) == MAP_SHARED &&
                (prot & PROT_WRITE)) {
                errno = EINVAL;
                goto fail;
            }
            retaddr = target_mmap(start, len, prot | PROT_WRITE,
                                  MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
                                  -1, 0);
            if (retaddr == -1)
                goto fail;
            if (pread(fd, g2h(start), len, offset) == -1)
                goto fail;
            if (!(prot & PROT_WRITE)) {
                ret = target_mprotect(start, len, prot);
                if (ret != 0) {
                    start = ret;
                    goto the_end;
                }
            }
            goto the_end;
        }

        /* handle the start of the mapping */
        if (start > real_start) {
            if (real_end == real_start + qemu_host_page_size) {
                /* one single host page */
                ret = mmap_frag(real_start, start, end,
                                prot, flags, fd, offset);
                if (ret == -1)
                    goto fail;
                goto the_end1;
            }
            ret = mmap_frag(real_start, start, real_start + qemu_host_page_size,
                            prot, flags, fd, offset);
            if (ret == -1)
                goto fail;
            real_start += qemu_host_page_size;
        }
        /* handle the end of the mapping */
        if (end < real_end) {
            ret = mmap_frag(real_end - qemu_host_page_size,
                            real_end - qemu_host_page_size, real_end,
                            prot, flags, fd,
                            offset + real_end - qemu_host_page_size - start);
            if (ret == -1)
                goto fail;
            real_end -= qemu_host_page_size;
        }

        /* map the middle (easier) */
        if (real_start < real_end) {
            void *p;
            unsigned long offset1;
            if (flags & MAP_ANONYMOUS)
                offset1 = 0;
            else
                offset1 = offset + real_start - start;
            p = mmap(g2h(real_start), real_end - real_start,
                     prot, flags, fd, offset1);
            if (p == MAP_FAILED)
                goto fail;
        }
    }
 the_end1:
    page_set_flags(start, start + len, prot | PAGE_VALID);
 the_end:
#ifdef DEBUG_MMAP
    printf("ret=0x" TARGET_ABI_FMT_lx "\n", start);
    page_dump(stdout);
    printf("\n");
#endif
    tb_invalidate_phys_range(start, start + len, 0);
    mmap_unlock();
    return start;
fail:
    mmap_unlock();
    return -1;
}
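
/*
 * Call sketch (illustrative): a guest mmap(NULL, 8192, PROT_READ,
 * MAP_PRIVATE, fd, 0) arrives here as target_mmap(0, 8192, ...).
 * Without MAP_FIXED, mmap_find_vma() picks and reserves a
 * target-aligned spot, an anonymous MAP_FIXED mapping claims the host
 * range, and the file is then mapped over it. With MAP_FIXED, the
 * range is instead split into a leading fragment, an aligned middle
 * mapped directly, and a trailing fragment, the fragments being
 * handled by mmap_frag().
 */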

static void mmap_reserve(abi_ulong start, abi_ulong size)
{
    abi_ulong real_start;
    abi_ulong real_end;
    abi_ulong addr;
    abi_ulong end;
    int prot;

    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(start + size);
    end = start + size;
    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0)
            real_start += qemu_host_page_size;
    }
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0)
            real_end -= qemu_host_page_size;
    }
    if (real_start != real_end) {
        mmap(g2h(real_start), real_end - real_start, PROT_NONE,
                 MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE,
                 -1, 0);
    }
}
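
/*
 * Rationale (illustrative): when the guest address space was reserved
 * up front (RESERVED_VA), freed ranges must not be munmap()'ed, or the
 * host could hand the hole to an unrelated allocation outside guest
 * control. Re-mapping the range PROT_NONE with MAP_FIXED keeps the
 * reservation intact while making the pages inaccessible, and host
 * pages still partially owned by live target pages are skipped.
 */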

int target_munmap(abi_ulong start, abi_ulong len)
{
    abi_ulong end, real_start, real_end, addr;
    int prot, ret;

#ifdef DEBUG_MMAP
    printf("munmap: start=0x" TARGET_ABI_FMT_lx " len=0x"
           TARGET_ABI_FMT_lx "\n",
           start, len);
#endif
    if (start & ~TARGET_PAGE_MASK)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    if (len == 0)
        return -EINVAL;
    mmap_lock();
    end = start + len;
    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(end);

    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0)
            real_start += qemu_host_page_size;
    }
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0)
            real_end -= qemu_host_page_size;
    }

    ret = 0;
    /* unmap what we can */
    if (real_start < real_end) {
        if (RESERVED_VA) {
            mmap_reserve(real_start, real_end - real_start);
        } else {
            ret = munmap(g2h(real_start), real_end - real_start);
        }
    }

    if (ret == 0) {
        page_set_flags(start, start + len, 0);
        tb_invalidate_phys_range(start, start + len, 0);
    }
    mmap_unlock();
    return ret;
}
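
/*
 * Edge case sketch (illustrative): with 4K target pages on a 16K-page
 * host, target_munmap(0x1000, 0x1000) cannot release any host page
 * while target pages elsewhere in [0x0000, 0x4000) are still mapped;
 * the loops above detect live neighbours via page_get_flags() and
 * shrink [real_start, real_end) accordingly, possibly to nothing. The
 * guest-visible flags for [0x1000, 0x2000) are cleared either way.
 */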

abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
                       abi_ulong new_size, unsigned long flags,
                       abi_ulong new_addr)
{
    int prot;
    void *host_addr;

    mmap_lock();

    if (flags & MREMAP_FIXED) {
        host_addr = (void *) syscall(__NR_mremap, g2h(old_addr),
                                     old_size, new_size,
                                     flags,
                                     g2h(new_addr));

        if (RESERVED_VA && host_addr != MAP_FAILED) {
            /* If new and old addresses overlap then the above mremap will
               already have failed with EINVAL.  */
            mmap_reserve(old_addr, old_size);
        }
    } else if (flags & MREMAP_MAYMOVE) {
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, new_size);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        } else {
            host_addr = (void *) syscall(__NR_mremap, g2h(old_addr),
                                         old_size, new_size,
                                         flags | MREMAP_FIXED,
                                         g2h(mmap_start));
            if (RESERVED_VA) {
                mmap_reserve(old_addr, old_size);
            }
        }
    } else {
        prot = 0;
        if (RESERVED_VA && old_size < new_size) {
            abi_ulong addr;
            for (addr = old_addr + old_size;
                 addr < old_addr + new_size;
                 addr++) {
                prot |= page_get_flags(addr);
            }
        }
        if (prot == 0) {
            host_addr = mremap(g2h(old_addr), old_size, new_size, flags);
            if (host_addr != MAP_FAILED && RESERVED_VA && old_size > new_size) {
                mmap_reserve(old_addr + new_size, old_size - new_size);
            }
        } else {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        }
        /* Check if address fits target address space */
        if ((unsigned long)host_addr + new_size > (abi_ulong)-1) {
            /* Revert mremap() changes */
            host_addr = mremap(g2h(old_addr), new_size, old_size, flags);
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        }
    }

    if (host_addr == MAP_FAILED) {
        new_addr = -1;
    } else {
        new_addr = h2g(host_addr);
        prot = page_get_flags(old_addr);
        page_set_flags(old_addr, old_addr + old_size, 0);
        page_set_flags(new_addr, new_addr + new_size, prot | PAGE_VALID);
    }
    tb_invalidate_phys_range(new_addr, new_addr + new_size, 0);
    mmap_unlock();
    return new_addr;
}
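
/*
 * Summary of the three paths above (illustrative): MREMAP_FIXED moves
 * the block to g2h(new_addr) directly; MREMAP_MAYMOVE lets
 * mmap_find_vma() choose a destination and then forces it with
 * MREMAP_FIXED; otherwise the block may only grow or shrink in place,
 * and growth is refused with ENOMEM if page_get_flags() shows any live
 * target page in the way. Under RESERVED_VA, vacated ranges go back
 * through mmap_reserve() rather than to the host.
 */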

int target_msync(abi_ulong start, abi_ulong len, int flags)
{
    abi_ulong end;

    if (start & ~TARGET_PAGE_MASK)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (end < start)
        return -EINVAL;
    if (end == start)
        return 0;

    start &= qemu_host_page_mask;
    return msync(g2h(start), end - start, flags);
}
793