xref: /openbmc/qemu/linux-user/mmap.c (revision 200dbf37)
/*
 *  mmap support for qemu
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "qemu.h"
#include "qemu-common.h"

//#define DEBUG_MMAP

static pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER;
static __thread int mmap_lock_count;

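/*
 * mmap_lock() may be taken recursively within a single thread:
 * mmap_lock_count is thread-local, so only the first acquisition in a
 * thread takes the process-wide mutex, and only the matching final
 * release drops it.
 */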
void mmap_lock(void)
{
    if (mmap_lock_count++ == 0) {
        pthread_mutex_lock(&mmap_mutex);
    }
}

void mmap_unlock(void)
{
    if (--mmap_lock_count == 0) {
        pthread_mutex_unlock(&mmap_mutex);
    }
}
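
/*
 * A minimal usage sketch (hypothetical caller): code that inspects or
 * modifies the guest page tables brackets the work with the lock, e.g.:
 *
 *     mmap_lock();
 *     flags = page_get_flags(addr);
 *     ...
 *     mmap_unlock();
 */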

bool have_mmap_lock(void)
{
    return mmap_lock_count > 0;
}

/* Grab lock to make sure things are in a consistent state after fork().  */
void mmap_fork_start(void)
{
    if (mmap_lock_count) {
        abort();
    }
    pthread_mutex_lock(&mmap_mutex);
}

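/*
 * After fork(), the child reinitializes the mutex (its inherited copy
 * was locked by mmap_fork_start() in the parent and cannot simply be
 * unlocked in the child), while the parent just releases the lock it
 * took in mmap_fork_start().
 */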
void mmap_fork_end(int child)
{
    if (child) {
        pthread_mutex_init(&mmap_mutex, NULL);
    } else {
        pthread_mutex_unlock(&mmap_mutex);
    }
}

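/*
 * Protection changes are requested in target-page units, but the host
 * kernel operates on host pages, which may be larger.  For a host page
 * that is only partially covered by the request, the new protection is
 * OR-ed with the flags of the other target pages sharing that host
 * page, so no neighbouring mapping loses access rights.
 */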
/* NOTE: all the constants are the HOST ones, but addresses are target. */
int target_mprotect(abi_ulong start, abi_ulong len, int prot)
{
    abi_ulong end, host_start, host_end, addr;
    int prot1, ret;

#ifdef DEBUG_MMAP
    printf("mprotect: start=0x" TARGET_ABI_FMT_lx
           " len=0x" TARGET_ABI_FMT_lx " prot=%c%c%c\n", start, len,
           prot & PROT_READ ? 'r' : '-',
           prot & PROT_WRITE ? 'w' : '-',
           prot & PROT_EXEC ? 'x' : '-');
#endif

    if ((start & ~TARGET_PAGE_MASK) != 0) {
        return -TARGET_EINVAL;
    }
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (!guest_range_valid(start, len)) {
        return -TARGET_ENOMEM;
    }
    prot &= PROT_READ | PROT_WRITE | PROT_EXEC;
    if (len == 0) {
        return 0;
    }

    mmap_lock();
    host_start = start & qemu_host_page_mask;
    host_end = HOST_PAGE_ALIGN(end);
    if (start > host_start) {
        /* handle host page containing start */
        prot1 = prot;
        for (addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        if (host_end == host_start + qemu_host_page_size) {
            for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(addr);
            }
            end = host_end;
        }
        ret = mprotect(g2h(host_start), qemu_host_page_size, prot1 & PAGE_BITS);
        if (ret != 0) {
            goto error;
        }
        host_start += qemu_host_page_size;
    }
    if (end < host_end) {
        prot1 = prot;
        for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        ret = mprotect(g2h(host_end - qemu_host_page_size), qemu_host_page_size,
                       prot1 & PAGE_BITS);
        if (ret != 0) {
            goto error;
        }
        host_end -= qemu_host_page_size;
    }

    /* handle the pages in the middle */
    if (host_start < host_end) {
        ret = mprotect(g2h(host_start), host_end - host_start, prot);
        if (ret != 0) {
            goto error;
        }
    }
    page_set_flags(start, start + len, prot | PAGE_VALID);
    mmap_unlock();
    return 0;
error:
    mmap_unlock();
    return ret;
}

/* Map an incomplete host page: the target range [start, end) lies inside
   the single host page that begins at real_start.  The new protection is
   merged with that of any target pages already mapped in this host page. */
static int mmap_frag(abi_ulong real_start,
                     abi_ulong start, abi_ulong end,
                     int prot, int flags, int fd, abi_ulong offset)
{
    abi_ulong real_end, addr;
    void *host_start;
    int prot1, prot_new;

    real_end = real_start + qemu_host_page_size;
    host_start = g2h(real_start);

    /* get the protection of the target pages outside the mapping */
    prot1 = 0;
    for (addr = real_start; addr < real_end; addr++) {
        if (addr < start || addr >= end) {
            prot1 |= page_get_flags(addr);
        }
    }

    if (prot1 == 0) {
        /* no page was there, so we allocate one */
        void *p = mmap(host_start, qemu_host_page_size, prot,
                       flags | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
            return -1;
        }
        prot1 = prot;
    }
    prot1 &= PAGE_BITS;

    prot_new = prot | prot1;
    if (!(flags & MAP_ANONYMOUS)) {
        /* msync() won't work here, so we return an error if write is
           possible while it is a shared mapping */
        if ((flags & MAP_TYPE) == MAP_SHARED &&
            (prot & PROT_WRITE)) {
            return -1;
        }

        /* adjust protection to be able to read */
        if (!(prot1 & PROT_WRITE)) {
            mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE);
        }

        /* read the corresponding file data */
        if (pread(fd, g2h(start), end - start, offset) == -1) {
            return -1;
        }

        /* put final protection */
        if (prot_new != (prot1 | PROT_WRITE)) {
            mprotect(host_start, qemu_host_page_size, prot_new);
        }
    } else {
        if (prot_new != prot1) {
            mprotect(host_start, qemu_host_page_size, prot_new);
        }
        if (prot_new & PROT_WRITE) {
            memset(g2h(start), 0, end - start);
        }
    }
    return 0;
}

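/*
 * Where guest allocations start when the guest supplies no address hint:
 * a high base (1 << 38) when both host and guest ABI are 64-bit, 1 GiB
 * otherwise.
 */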
#if HOST_LONG_BITS == 64 && TARGET_ABI_BITS == 64
# define TASK_UNMAPPED_BASE  (1ul << 38)
#else
# define TASK_UNMAPPED_BASE  0x40000000
#endif
abi_ulong mmap_next_start = TASK_UNMAPPED_BASE;

unsigned long last_brk;

/* Subroutine of mmap_find_vma, used when we have pre-allocated a chunk
   of guest address space.  */
static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
                                        abi_ulong align)
{
    abi_ulong addr, end_addr, incr = qemu_host_page_size;
    int prot;
    bool looped = false;

    if (size > reserved_va) {
        return (abi_ulong)-1;
    }

    /* Note that start and size have already been aligned by mmap_find_vma. */

    end_addr = start + size;
    if (start > reserved_va - size) {
        /* Start at the top of the address space.  */
        end_addr = ((reserved_va - size) & -align) + size;
        looped = true;
    }

    /* Search downward from END_ADDR, checking to see if a page is in use.  */
    addr = end_addr;
    while (1) {
        addr -= incr;
        if (addr > end_addr) {
            if (looped) {
                /* Failure.  The entire address space has been searched.  */
                return (abi_ulong)-1;
            }
            /* Re-start at the top of the address space.  */
            addr = end_addr = ((reserved_va - size) & -align) + size;
            looped = true;
        } else {
            prot = page_get_flags(addr);
            if (prot) {
                /* Page in use.  Restart below this page.  */
                addr = end_addr = ((addr - size) & -align) + size;
            } else if (addr && addr + size == end_addr) {
                /* Success!  All pages between ADDR and END_ADDR are free.  */
                if (start == mmap_next_start) {
                    mmap_next_start = addr;
                }
                return addr;
            }
        }
    }
}

/*
 * Find and reserve a free memory area of size 'size'. The search
 * starts at 'start'.
 * It must be called with mmap_lock() held.
 * Returns -1 on error.
 */
abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size, abi_ulong align)
{
    void *ptr, *prev;
    abi_ulong addr;
    int wrapped, repeat;

    align = MAX(align, qemu_host_page_size);

    /* If 'start' == 0, then a default start address is used. */
    if (start == 0) {
        start = mmap_next_start;
    } else {
        start &= qemu_host_page_mask;
    }
    start = ROUND_UP(start, align);

    size = HOST_PAGE_ALIGN(size);

    if (reserved_va) {
        return mmap_find_vma_reserved(start, size, align);
    }

    addr = start;
    wrapped = repeat = 0;
    prev = NULL;

    for (;; prev = ptr) {
        /*
         * Reserve the needed memory area to avoid a race.
         * It is later discarded using:
         *  - mmap() with MAP_FIXED flag
         *  - mremap() with MREMAP_FIXED flag
         *  - shmat() with SHM_REMAP flag
         */
        ptr = mmap(g2h(addr), size, PROT_NONE,
                   MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);

        /* ENOMEM, if host address space has no memory */
        if (ptr == MAP_FAILED) {
            return (abi_ulong)-1;
        }

        /* Count the number of sequential returns of the same address.
           This is used to modify the search algorithm below.  */
        repeat = (ptr == prev ? repeat + 1 : 0);

        if (h2g_valid(ptr + size - 1)) {
            addr = h2g(ptr);

            if ((addr & (align - 1)) == 0) {
                /* Success.  */
                if (start == mmap_next_start && addr >= TASK_UNMAPPED_BASE) {
                    mmap_next_start = addr + size;
                }
                return addr;
            }

            /* The address is not properly aligned for the target.  */
            switch (repeat) {
            case 0:
                /* Assume the result that the kernel gave us is the
                   first with enough free space, so start again at the
                   next higher target page.  */
                addr = ROUND_UP(addr, align);
                break;
            case 1:
                /* Sometimes the kernel decides to perform the allocation
                   at the top end of memory instead.  */
                addr &= -align;
                break;
            case 2:
                /* Start over at low memory.  */
                addr = 0;
                break;
            default:
                /* Fail.  This unaligned block must be the last.  */
                addr = -1;
                break;
            }
        } else {
            /* Since the result the kernel gave didn't fit, start
               again at low memory.  If any repetition, fail.  */
            addr = (repeat ? -1 : 0);
        }

        /* Unmap and try again.  */
        munmap(ptr, size);

        /* ENOMEM if we checked the whole of the target address space.  */
        if (addr == (abi_ulong)-1) {
            return (abi_ulong)-1;
        } else if (addr == 0) {
            if (wrapped) {
                return (abi_ulong)-1;
            }
            wrapped = 1;
            /* Don't actually use 0 when wrapping, instead indicate
               that we'd truly like an allocation in low memory.  */
            addr = (mmap_min_addr > TARGET_PAGE_SIZE
                     ? TARGET_PAGE_ALIGN(mmap_min_addr)
                     : TARGET_PAGE_SIZE);
        } else if (wrapped && addr >= start) {
            return (abi_ulong)-1;
        }
    }
}

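/*
 * Emulate the guest's mmap(2).  A start of 0 without MAP_FIXED asks
 * mmap_find_vma() to pick a spot; with MAP_FIXED the requested placement
 * is honoured, mapping partial host pages through mmap_frag() as needed.
 * Illustrative call (hypothetical values): target_mmap(0, 8192,
 * PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0) returns a
 * fresh guest address, or -1 with errno set on failure.
 */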
/* NOTE: all the constants are the HOST ones */
abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
                     int flags, int fd, abi_ulong offset)
{
    abi_ulong ret, end, real_start, real_end, retaddr, host_offset, host_len;

    mmap_lock();
#ifdef DEBUG_MMAP
    {
        printf("mmap: start=0x" TARGET_ABI_FMT_lx
               " len=0x" TARGET_ABI_FMT_lx " prot=%c%c%c flags=",
               start, len,
               prot & PROT_READ ? 'r' : '-',
               prot & PROT_WRITE ? 'w' : '-',
               prot & PROT_EXEC ? 'x' : '-');
        if (flags & MAP_FIXED) {
            printf("MAP_FIXED ");
        }
        if (flags & MAP_ANONYMOUS) {
            printf("MAP_ANON ");
        }
        switch (flags & MAP_TYPE) {
        case MAP_PRIVATE:
            printf("MAP_PRIVATE ");
            break;
        case MAP_SHARED:
            printf("MAP_SHARED ");
            break;
        default:
            printf("[MAP_TYPE=0x%x] ", flags & MAP_TYPE);
            break;
        }
        printf("fd=%d offset=" TARGET_ABI_FMT_lx "\n", fd, offset);
    }
#endif

    if (!len) {
        errno = EINVAL;
        goto fail;
    }

    /* Also check for overflows... */
    len = TARGET_PAGE_ALIGN(len);
    if (!len) {
        errno = ENOMEM;
        goto fail;
    }

    if (offset & ~TARGET_PAGE_MASK) {
        errno = EINVAL;
        goto fail;
    }

    real_start = start & qemu_host_page_mask;
    host_offset = offset & qemu_host_page_mask;

    /* If the user is asking for the kernel to find a location, do that
       before we truncate the length for mapping files below.  */
    if (!(flags & MAP_FIXED)) {
        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);
        start = mmap_find_vma(real_start, host_len, TARGET_PAGE_SIZE);
        if (start == (abi_ulong)-1) {
            errno = ENOMEM;
            goto fail;
        }
    }

    /* When mapping files into a memory area larger than the file, accesses
       to pages beyond the file size will cause a SIGBUS.

       For example, if mmapping a file of 100 bytes on a host with 4K pages
       emulating a target with 8K pages, the target expects to be able to
       access the first 8K. But the host will trap us on any access beyond
       4K.

       When emulating a target with a larger page size than the host's, we
       may need to truncate file maps at EOF and add extra anonymous pages
       up to the target's page boundary.  */

    if ((qemu_real_host_page_size < qemu_host_page_size) &&
        !(flags & MAP_ANONYMOUS)) {
        struct stat sb;

        if (fstat(fd, &sb) == -1) {
            goto fail;
        }

        /* Are we trying to create a map beyond EOF?  */
        if (offset + len > sb.st_size) {
            /* If so, truncate the file map at EOF, aligned with
               the host's real page size. Additional anonymous maps
               will be created beyond EOF.  */
            len = REAL_HOST_PAGE_ALIGN(sb.st_size - offset);
        }
    }
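
    /*
     * In the non-fixed case below, the address range is first claimed with
     * an anonymous mapping; for file mappings the file contents are then
     * mapped over it at the same address, so a tail truncated at EOF above
     * stays backed by the zeroed anonymous pages.
     */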
    if (!(flags & MAP_FIXED)) {
        unsigned long host_start;
        void *p;

        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);

        /* Note: we prefer to control the mapping address. It is
           especially important if qemu_host_page_size >
           qemu_real_host_page_size */
        p = mmap(g2h(start), host_len, prot,
                 flags | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
            goto fail;
        }
        /* update start so that it points to the file position at 'offset' */
        host_start = (unsigned long)p;
        if (!(flags & MAP_ANONYMOUS)) {
            p = mmap(g2h(start), len, prot,
                     flags | MAP_FIXED, fd, host_offset);
            if (p == MAP_FAILED) {
                munmap(g2h(start), host_len);
                goto fail;
            }
            host_start += offset - host_offset;
        }
        start = h2g(host_start);
    } else {
        if (start & ~TARGET_PAGE_MASK) {
            errno = EINVAL;
            goto fail;
        }
        end = start + len;
        real_end = HOST_PAGE_ALIGN(end);

        /*
         * Test if the requested memory area fits the target address space.
         * It can fail only on a 64-bit host with a 32-bit target.
         * On any other target/host combination, the host mmap() handles
         * this error correctly.
         */
        if (!guest_range_valid(start, len)) {
            errno = ENOMEM;
            goto fail;
        }

        /* worst case: we cannot map the file because the offset is not
           aligned, so we read it */
        if (!(flags & MAP_ANONYMOUS) &&
            (offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) {
            /* msync() won't work here, so we return an error if write is
               possible while it is a shared mapping */
            if ((flags & MAP_TYPE) == MAP_SHARED &&
                (prot & PROT_WRITE)) {
                errno = EINVAL;
                goto fail;
            }
            retaddr = target_mmap(start, len, prot | PROT_WRITE,
                                  MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
                                  -1, 0);
            if (retaddr == -1) {
                goto fail;
            }
            if (pread(fd, g2h(start), len, offset) == -1) {
                goto fail;
            }
            if (!(prot & PROT_WRITE)) {
                ret = target_mprotect(start, len, prot);
                assert(ret == 0);
            }
            goto the_end;
        }

        /* handle the start of the mapping */
        if (start > real_start) {
            if (real_end == real_start + qemu_host_page_size) {
                /* one single host page */
                ret = mmap_frag(real_start, start, end,
                                prot, flags, fd, offset);
                if (ret == -1) {
                    goto fail;
                }
                goto the_end1;
            }
            ret = mmap_frag(real_start, start, real_start + qemu_host_page_size,
                            prot, flags, fd, offset);
            if (ret == -1) {
                goto fail;
            }
            real_start += qemu_host_page_size;
        }
        /* handle the end of the mapping */
        if (end < real_end) {
            ret = mmap_frag(real_end - qemu_host_page_size,
                            real_end - qemu_host_page_size, end,
                            prot, flags, fd,
                            offset + real_end - qemu_host_page_size - start);
            if (ret == -1) {
                goto fail;
            }
            real_end -= qemu_host_page_size;
        }

        /* map the middle (easier) */
        if (real_start < real_end) {
            void *p;
            unsigned long offset1;
            if (flags & MAP_ANONYMOUS) {
                offset1 = 0;
            } else {
                offset1 = offset + real_start - start;
            }
            p = mmap(g2h(real_start), real_end - real_start,
                     prot, flags, fd, offset1);
            if (p == MAP_FAILED) {
                goto fail;
            }
        }
    }
 the_end1:
    page_set_flags(start, start + len, prot | PAGE_VALID);
 the_end:
#ifdef DEBUG_MMAP
    printf("ret=0x" TARGET_ABI_FMT_lx "\n", start);
    page_dump(stdout);
    printf("\n");
#endif
    tb_invalidate_phys_range(start, start + len);
    mmap_unlock();
    return start;
fail:
    mmap_unlock();
    return -1;
}

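/*
 * Give a freed range back to the reservation (used when reserved_va is
 * in effect): instead of unmapping, remap it PROT_NONE so the host
 * kernel cannot hand the guest's address space out to anything else.
 * Partial host pages at either edge are kept if other target pages in
 * them are still in use.
 */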
static void mmap_reserve(abi_ulong start, abi_ulong size)
{
    abi_ulong real_start;
    abi_ulong real_end;
    abi_ulong addr;
    abi_ulong end;
    int prot;

    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(start + size);
    end = start + size;
    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0) {
            real_start += qemu_host_page_size;
        }
    }
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0) {
            real_end -= qemu_host_page_size;
        }
    }
    if (real_start != real_end) {
        mmap(g2h(real_start), real_end - real_start, PROT_NONE,
             MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE,
             -1, 0);
    }
}

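/*
 * Emulate the guest's munmap(2).  Host pages only partially covered by
 * the request are preserved when other target pages in them remain
 * mapped; under reserved_va the freed range is re-reserved via
 * mmap_reserve() rather than returned to the host.
 */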
int target_munmap(abi_ulong start, abi_ulong len)
{
    abi_ulong end, real_start, real_end, addr;
    int prot, ret;

#ifdef DEBUG_MMAP
    printf("munmap: start=0x" TARGET_ABI_FMT_lx " len=0x"
           TARGET_ABI_FMT_lx "\n",
           start, len);
#endif
    if (start & ~TARGET_PAGE_MASK) {
        return -TARGET_EINVAL;
    }
    len = TARGET_PAGE_ALIGN(len);
    if (len == 0 || !guest_range_valid(start, len)) {
        return -TARGET_EINVAL;
    }

    mmap_lock();
    end = start + len;
    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(end);

    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0) {
            real_start += qemu_host_page_size;
        }
    }
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0) {
            real_end -= qemu_host_page_size;
        }
    }

    ret = 0;
    /* unmap what we can */
    if (real_start < real_end) {
        if (reserved_va) {
            mmap_reserve(real_start, real_end - real_start);
        } else {
            ret = munmap(g2h(real_start), real_end - real_start);
        }
    }

    if (ret == 0) {
        page_set_flags(start, start + len, 0);
        tb_invalidate_phys_range(start, start + len);
    }
    mmap_unlock();
    return ret;
}

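/*
 * Emulate the guest's mremap(2).  Three cases mirror the host semantics:
 * MREMAP_FIXED moves the mapping to new_addr, MREMAP_MAYMOVE lets
 * mmap_find_vma() choose a destination, and otherwise the mapping is
 * resized in place (under reserved_va, growth is refused if the guest
 * pages beyond the old end are in use).
 */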
abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
                       abi_ulong new_size, unsigned long flags,
                       abi_ulong new_addr)
{
    int prot;
    void *host_addr;

    if (!guest_range_valid(old_addr, old_size) ||
        ((flags & MREMAP_FIXED) &&
         !guest_range_valid(new_addr, new_size))) {
        errno = ENOMEM;
        return -1;
    }

    mmap_lock();

    if (flags & MREMAP_FIXED) {
        host_addr = mremap(g2h(old_addr), old_size, new_size,
                           flags, g2h(new_addr));

        if (reserved_va && host_addr != MAP_FAILED) {
            /* If new and old addresses overlap then the above mremap will
               already have failed with EINVAL.  */
            mmap_reserve(old_addr, old_size);
        }
    } else if (flags & MREMAP_MAYMOVE) {
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, new_size, TARGET_PAGE_SIZE);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        } else {
            host_addr = mremap(g2h(old_addr), old_size, new_size,
                               flags | MREMAP_FIXED, g2h(mmap_start));
            if (reserved_va) {
                mmap_reserve(old_addr, old_size);
            }
        }
    } else {
        int prot = 0;
        if (reserved_va && old_size < new_size) {
            abi_ulong addr;
            for (addr = old_addr + old_size;
                 addr < old_addr + new_size;
                 addr++) {
                prot |= page_get_flags(addr);
            }
        }
        if (prot == 0) {
            host_addr = mremap(g2h(old_addr), old_size, new_size, flags);
            if (host_addr != MAP_FAILED && reserved_va && old_size > new_size) {
                mmap_reserve(old_addr + old_size, new_size - old_size);
            }
        } else {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        }
        /* Check if address fits target address space */
        if ((unsigned long)host_addr + new_size > (abi_ulong)-1) {
            /* Revert mremap() changes */
            host_addr = mremap(g2h(old_addr), new_size, old_size, flags);
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        }
    }

    if (host_addr == MAP_FAILED) {
        new_addr = -1;
    } else {
        new_addr = h2g(host_addr);
        prot = page_get_flags(old_addr);
        page_set_flags(old_addr, old_addr + old_size, 0);
        page_set_flags(new_addr, new_addr + new_size, prot | PAGE_VALID);
    }
    tb_invalidate_phys_range(new_addr, new_addr + new_size);
    mmap_unlock();
    return new_addr;
}