xref: /openbmc/qemu/linux-user/mmap.c (revision 40f23e4e)
/*
 *  mmap support for qemu
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "trace.h"
#include "exec/log.h"
#include "qemu.h"

static pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER;
static __thread int mmap_lock_count;

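/*
 * The mmap lock is recursive per thread: mmap_lock_count is thread-local,
 * and only the outermost mmap_lock()/mmap_unlock() pair actually takes or
 * releases the process-wide mmap_mutex.
 */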
void mmap_lock(void)
{
    if (mmap_lock_count++ == 0) {
        pthread_mutex_lock(&mmap_mutex);
    }
}

void mmap_unlock(void)
{
    if (--mmap_lock_count == 0) {
        pthread_mutex_unlock(&mmap_mutex);
    }
}

bool have_mmap_lock(void)
{
    return mmap_lock_count > 0;
}

/* Grab lock to make sure things are in a consistent state after fork().  */
void mmap_fork_start(void)
{
    if (mmap_lock_count) {
        abort();
    }
    pthread_mutex_lock(&mmap_mutex);
}

void mmap_fork_end(int child)
{
    if (child) {
        pthread_mutex_init(&mmap_mutex, NULL);
    } else {
        pthread_mutex_unlock(&mmap_mutex);
    }
}

/*
 * Validate target prot bitmask.
 * Return the prot bitmask for the host in *HOST_PROT.
 * Return 0 if the target prot bitmask is invalid, otherwise
 * the internal qemu page_flags (which will include PAGE_VALID).
 */
static int validate_prot_to_pageflags(int *host_prot, int prot)
{
    int valid = PROT_READ | PROT_WRITE | PROT_EXEC | TARGET_PROT_SEM;
    int page_flags = (prot & PAGE_BITS) | PAGE_VALID;

    /*
     * For the host, we need not pass anything except read/write/exec.
     * While PROT_SEM is allowed by all hosts, it is also ignored, so
     * don't bother transforming the guest bit to the host bit.  Any other
     * target-specific prot bits will not be understood by the host
     * and will need to be encoded into page_flags for qemu emulation.
     *
     * Pages that are executable by the guest will never be executed
     * by the host, but the host will need to be able to read them.
     */
    *host_prot = (prot & (PROT_READ | PROT_WRITE))
               | (prot & PROT_EXEC ? PROT_READ : 0);

#ifdef TARGET_AARCH64
    {
        ARMCPU *cpu = ARM_CPU(thread_cpu);

        /*
         * The PROT_BTI bit is only accepted if the cpu supports the feature.
         * Since this is the unusual case, don't bother checking unless
         * the bit has been requested.  If set and valid, record the bit
         * within QEMU's page_flags.
         */
        if ((prot & TARGET_PROT_BTI) && cpu_isar_feature(aa64_bti, cpu)) {
            valid |= TARGET_PROT_BTI;
            page_flags |= PAGE_BTI;
        }
        /* Similarly for the PROT_MTE bit. */
        if ((prot & TARGET_PROT_MTE) && cpu_isar_feature(aa64_mte, cpu)) {
            valid |= TARGET_PROT_MTE;
            page_flags |= PAGE_MTE;
        }
    }
#endif

    return prot & ~valid ? 0 : page_flags;
}

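/*
 * When the target page size is smaller than the host page size, a guest
 * range may begin or end in the middle of a host page.  For those partial
 * host pages, the host protection applied below is the union of the
 * requested protection and the existing flags of the neighbouring target
 * pages that share the same host page (consider, for example, a 4K-page
 * guest running on a 64K-page host).
 */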
/* NOTE: all the constants are the HOST ones, but addresses are target. */
int target_mprotect(abi_ulong start, abi_ulong len, int target_prot)
{
    abi_ulong end, host_start, host_end, addr;
    int prot1, ret, page_flags, host_prot;

    trace_target_mprotect(start, len, target_prot);

    if ((start & ~TARGET_PAGE_MASK) != 0) {
        return -TARGET_EINVAL;
    }
    page_flags = validate_prot_to_pageflags(&host_prot, target_prot);
    if (!page_flags) {
        return -TARGET_EINVAL;
    }
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (!guest_range_valid_untagged(start, len)) {
        return -TARGET_ENOMEM;
    }
    if (len == 0) {
        return 0;
    }

    mmap_lock();
    host_start = start & qemu_host_page_mask;
    host_end = HOST_PAGE_ALIGN(end);
    if (start > host_start) {
        /* handle host page containing start */
        prot1 = host_prot;
        for (addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        if (host_end == host_start + qemu_host_page_size) {
            for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(addr);
            }
            end = host_end;
        }
        ret = mprotect(g2h_untagged(host_start), qemu_host_page_size,
                       prot1 & PAGE_BITS);
        if (ret != 0) {
            goto error;
        }
        host_start += qemu_host_page_size;
    }
    if (end < host_end) {
        prot1 = host_prot;
        for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        ret = mprotect(g2h_untagged(host_end - qemu_host_page_size),
                       qemu_host_page_size, prot1 & PAGE_BITS);
        if (ret != 0) {
            goto error;
        }
        host_end -= qemu_host_page_size;
    }

    /* handle the pages in the middle */
    if (host_start < host_end) {
        ret = mprotect(g2h_untagged(host_start),
                       host_end - host_start, host_prot);
        if (ret != 0) {
            goto error;
        }
    }
    page_set_flags(start, start + len, page_flags);
    mmap_unlock();
    return 0;
error:
    mmap_unlock();
    return ret;
}

/* map an incomplete host page */
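/*
 * mmap_frag() maps the sub-range [start, end) of the single host page that
 * begins at real_start.  If no other target page in that host page is mapped
 * yet, a fresh anonymous host page is created; otherwise the host page is
 * kept and, for file-backed mappings, the data is pread() into place
 * (temporarily adding PROT_WRITE if needed) so that the rest of the host
 * page is not clobbered.  Writable MAP_SHARED file fragments cannot be
 * emulated this way and fail.
 */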
static int mmap_frag(abi_ulong real_start,
                     abi_ulong start, abi_ulong end,
                     int prot, int flags, int fd, abi_ulong offset)
{
    abi_ulong real_end, addr;
    void *host_start;
    int prot1, prot_new;

    real_end = real_start + qemu_host_page_size;
    host_start = g2h_untagged(real_start);

    /* get the protection of the target pages outside the mapping */
    prot1 = 0;
    for (addr = real_start; addr < real_end; addr++) {
        if (addr < start || addr >= end) {
            prot1 |= page_get_flags(addr);
        }
    }

    if (prot1 == 0) {
        /* no page was there, so we allocate one */
        void *p = mmap(host_start, qemu_host_page_size, prot,
                       flags | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
            return -1;
        }
        prot1 = prot;
    }
    prot1 &= PAGE_BITS;

    prot_new = prot | prot1;
    if (!(flags & MAP_ANONYMOUS)) {
        /* msync() won't work here, so we return an error if write is
           possible while it is a shared mapping */
        if ((flags & MAP_TYPE) == MAP_SHARED &&
            (prot & PROT_WRITE)) {
            return -1;
        }

        /* adjust protection to be able to read */
        if (!(prot1 & PROT_WRITE)) {
            mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE);
        }

        /* read the corresponding file data */
        if (pread(fd, g2h_untagged(start), end - start, offset) == -1) {
            return -1;
        }

        /* put final protection */
        if (prot_new != (prot1 | PROT_WRITE)) {
            mprotect(host_start, qemu_host_page_size, prot_new);
        }
    } else {
        if (prot_new != prot1) {
            mprotect(host_start, qemu_host_page_size, prot_new);
        }
        if (prot_new & PROT_WRITE) {
            memset(g2h_untagged(start), 0, end - start);
        }
    }
    return 0;
}

#if HOST_LONG_BITS == 64 && TARGET_ABI_BITS == 64
#ifdef TARGET_AARCH64
# define TASK_UNMAPPED_BASE  0x5500000000
#else
# define TASK_UNMAPPED_BASE  (1ul << 38)
#endif
#else
# define TASK_UNMAPPED_BASE  0x40000000
#endif
abi_ulong mmap_next_start = TASK_UNMAPPED_BASE;

unsigned long last_brk;

/* Subroutine of mmap_find_vma, used when we have pre-allocated a chunk
   of guest address space.  */
static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
                                        abi_ulong align)
{
    abi_ulong addr, end_addr, incr = qemu_host_page_size;
    int prot;
    bool looped = false;

    if (size > reserved_va) {
        return (abi_ulong)-1;
    }

    /* Note that start and size have already been aligned by mmap_find_vma. */

    end_addr = start + size;
    if (start > reserved_va - size) {
        /* Start at the top of the address space.  */
        end_addr = ((reserved_va - size) & -align) + size;
        looped = true;
    }

    /* Search downward from END_ADDR, checking to see if a page is in use.  */
    addr = end_addr;
    while (1) {
        addr -= incr;
        if (addr > end_addr) {
            if (looped) {
                /* Failure.  The entire address space has been searched.  */
                return (abi_ulong)-1;
            }
            /* Re-start at the top of the address space.  */
            addr = end_addr = ((reserved_va - size) & -align) + size;
            looped = true;
        } else {
            prot = page_get_flags(addr);
            if (prot) {
                /* Page in use.  Restart below this page.  */
                addr = end_addr = ((addr - size) & -align) + size;
            } else if (addr && addr + size == end_addr) {
                /* Success!  All pages between ADDR and END_ADDR are free.  */
                if (start == mmap_next_start) {
                    mmap_next_start = addr;
                }
                return addr;
            }
        }
    }
}

/*
 * Find and reserve a free memory area of size 'size'.  The search
 * starts at 'start'.
 * It must be called with mmap_lock() held.
 * Returns the address of the area on success, or (abi_ulong)-1 on error.
 */
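/*
 * With reserved_va the whole guest address space was reserved up front, so
 * the search only consults QEMU's page flags; otherwise free space is
 * probed by asking the host for a PROT_NONE mapping and checking that the
 * result fits the guest.  Callers in this file use it as, e.g.:
 *
 *     start = mmap_find_vma(real_start, host_len, TARGET_PAGE_SIZE);
 *     if (start == (abi_ulong)-1) {
 *         errno = ENOMEM;
 *         goto fail;
 *     }
 */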
abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size, abi_ulong align)
{
    void *ptr, *prev;
    abi_ulong addr;
    int wrapped, repeat;

    align = MAX(align, qemu_host_page_size);

    /* If 'start' == 0, then a default start address is used. */
    if (start == 0) {
        start = mmap_next_start;
    } else {
        start &= qemu_host_page_mask;
    }
    start = ROUND_UP(start, align);

    size = HOST_PAGE_ALIGN(size);

    if (reserved_va) {
        return mmap_find_vma_reserved(start, size, align);
    }

    addr = start;
    wrapped = repeat = 0;
    prev = 0;

    for (;; prev = ptr) {
        /*
         * Reserve needed memory area to avoid a race.
         * It should be discarded using:
         *  - mmap() with MAP_FIXED flag
         *  - mremap() with MREMAP_FIXED flag
         *  - shmat() with SHM_REMAP flag
         */
        ptr = mmap(g2h_untagged(addr), size, PROT_NONE,
                   MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);

        /* ENOMEM if the host address space has no memory */
        if (ptr == MAP_FAILED) {
            return (abi_ulong)-1;
        }

        /* Count the number of sequential returns of the same address.
           This is used to modify the search algorithm below.  */
        repeat = (ptr == prev ? repeat + 1 : 0);

        if (h2g_valid(ptr + size - 1)) {
            addr = h2g(ptr);

            if ((addr & (align - 1)) == 0) {
                /* Success.  */
                if (start == mmap_next_start && addr >= TASK_UNMAPPED_BASE) {
                    mmap_next_start = addr + size;
                }
                return addr;
            }

            /* The address is not properly aligned for the target.  */
            switch (repeat) {
            case 0:
                /* Assume the result that the kernel gave us is the
                   first with enough free space, so start again at the
                   next higher target page.  */
                addr = ROUND_UP(addr, align);
                break;
            case 1:
                /* Sometimes the kernel decides to perform the allocation
                   at the top end of memory instead.  */
                addr &= -align;
                break;
            case 2:
                /* Start over at low memory.  */
                addr = 0;
                break;
            default:
                /* Fail.  This unaligned block must be the last.  */
                addr = -1;
                break;
            }
        } else {
            /* Since the result the kernel gave didn't fit, start
               again at low memory.  If any repetition, fail.  */
            addr = (repeat ? -1 : 0);
        }

        /* Unmap and try again.  */
        munmap(ptr, size);

        /* ENOMEM if we checked the whole of the target address space.  */
        if (addr == (abi_ulong)-1) {
            return (abi_ulong)-1;
        } else if (addr == 0) {
            if (wrapped) {
                return (abi_ulong)-1;
            }
            wrapped = 1;
            /* Don't actually use 0 when wrapping, instead indicate
               that we'd truly like an allocation in low memory.  */
            addr = (mmap_min_addr > TARGET_PAGE_SIZE
                     ? TARGET_PAGE_ALIGN(mmap_min_addr)
                     : TARGET_PAGE_SIZE);
        } else if (wrapped && addr >= start) {
            return (abi_ulong)-1;
        }
    }
}

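/*
 * target_mmap() below mirrors the guest mmap() call on the host.  When the
 * host and target page sizes differ, only the host-page-aligned middle of
 * the range can be mapped directly; the partial host pages at either end
 * are handled by mmap_frag(), and unaligned file offsets fall back to an
 * anonymous mapping filled with pread().
 */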
/* NOTE: all the constants are the HOST ones */
abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
                     int flags, int fd, abi_ulong offset)
{
    abi_ulong ret, end, real_start, real_end, retaddr, host_offset, host_len;
    int page_flags, host_prot;

    mmap_lock();
    trace_target_mmap(start, len, target_prot, flags, fd, offset);

    if (!len) {
        errno = EINVAL;
        goto fail;
    }

    page_flags = validate_prot_to_pageflags(&host_prot, target_prot);
    if (!page_flags) {
        errno = EINVAL;
        goto fail;
    }

    /* Also check for overflows... */
    len = TARGET_PAGE_ALIGN(len);
    if (!len) {
        errno = ENOMEM;
        goto fail;
    }

    if (offset & ~TARGET_PAGE_MASK) {
        errno = EINVAL;
        goto fail;
    }

    /*
     * If we're mapping shared memory, ensure we generate code for parallel
     * execution and flush old translations.  This will work up to the level
     * supported by the host -- anything that requires EXCP_ATOMIC will not
     * be atomic with respect to an external process.
     */
    if (flags & MAP_SHARED) {
        CPUState *cpu = thread_cpu;
        if (!(cpu->tcg_cflags & CF_PARALLEL)) {
            cpu->tcg_cflags |= CF_PARALLEL;
            tb_flush(cpu);
        }
    }

    real_start = start & qemu_host_page_mask;
    host_offset = offset & qemu_host_page_mask;

    /* If the user is asking for the kernel to find a location, do that
       before we truncate the length for mapping files below.  */
    if (!(flags & MAP_FIXED)) {
        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);
        start = mmap_find_vma(real_start, host_len, TARGET_PAGE_SIZE);
        if (start == (abi_ulong)-1) {
            errno = ENOMEM;
            goto fail;
        }
    }

    /* When mapping files into a memory area larger than the file, accesses
       to pages beyond the file size will cause a SIGBUS.

       For example, if mmapping a file of 100 bytes on a host with 4K pages
       emulating a target with 8K pages, the target expects to be able to
       access the first 8K. But the host will trap us on any access beyond
       4K.

       When emulating a target with a larger page size than the host's, we
       may need to truncate file maps at EOF and add extra anonymous pages
       up to the target's page boundary.  */

    if ((qemu_real_host_page_size < qemu_host_page_size) &&
        !(flags & MAP_ANONYMOUS)) {
        struct stat sb;

        if (fstat(fd, &sb) == -1) {
            goto fail;
        }

        /* Are we trying to create a map beyond EOF?  */
        if (offset + len > sb.st_size) {
            /* If so, truncate the file map at EOF aligned with
               the host's real page size.  Additional anonymous maps
               will be created beyond EOF.  */
            len = REAL_HOST_PAGE_ALIGN(sb.st_size - offset);
        }
    }

    if (!(flags & MAP_FIXED)) {
        unsigned long host_start;
        void *p;

        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);

        /* Note: we prefer to control the mapping address. It is
           especially important if qemu_host_page_size >
           qemu_real_host_page_size */
        p = mmap(g2h_untagged(start), host_len, host_prot,
                 flags | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
            goto fail;
        }
        /* update start so that it points to the file position at 'offset' */
        host_start = (unsigned long)p;
        if (!(flags & MAP_ANONYMOUS)) {
            p = mmap(g2h_untagged(start), len, host_prot,
                     flags | MAP_FIXED, fd, host_offset);
            if (p == MAP_FAILED) {
                munmap(g2h_untagged(start), host_len);
                goto fail;
            }
            host_start += offset - host_offset;
        }
        start = h2g(host_start);
    } else {
        if (start & ~TARGET_PAGE_MASK) {
            errno = EINVAL;
            goto fail;
        }
        end = start + len;
        real_end = HOST_PAGE_ALIGN(end);

        /*
         * Test if the requested memory area fits the target address space.
         * It can fail only on a 64-bit host with a 32-bit target.
         * On any other target/host combination, the host mmap() handles
         * this error correctly.
         */
        if (end < start || !guest_range_valid_untagged(start, len)) {
            errno = ENOMEM;
            goto fail;
        }

        /* worst case: we cannot map the file because the offset is not
           aligned, so we read it */
        if (!(flags & MAP_ANONYMOUS) &&
            (offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) {
            /* msync() won't work here, so we return an error if write is
               possible while it is a shared mapping */
            if ((flags & MAP_TYPE) == MAP_SHARED &&
                (host_prot & PROT_WRITE)) {
                errno = EINVAL;
                goto fail;
            }
            retaddr = target_mmap(start, len, target_prot | PROT_WRITE,
                                  MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
                                  -1, 0);
            if (retaddr == -1) {
                goto fail;
            }
            if (pread(fd, g2h_untagged(start), len, offset) == -1) {
                goto fail;
            }
            if (!(host_prot & PROT_WRITE)) {
                ret = target_mprotect(start, len, target_prot);
                assert(ret == 0);
            }
            goto the_end;
        }

        /* handle the start of the mapping */
        if (start > real_start) {
            if (real_end == real_start + qemu_host_page_size) {
                /* one single host page */
                ret = mmap_frag(real_start, start, end,
                                host_prot, flags, fd, offset);
                if (ret == -1) {
                    goto fail;
                }
                goto the_end1;
            }
            ret = mmap_frag(real_start, start, real_start + qemu_host_page_size,
                            host_prot, flags, fd, offset);
            if (ret == -1) {
                goto fail;
            }
            real_start += qemu_host_page_size;
        }
        /* handle the end of the mapping */
        if (end < real_end) {
            ret = mmap_frag(real_end - qemu_host_page_size,
                            real_end - qemu_host_page_size, end,
                            host_prot, flags, fd,
                            offset + real_end - qemu_host_page_size - start);
            if (ret == -1) {
                goto fail;
            }
            real_end -= qemu_host_page_size;
        }

        /* map the middle (easier) */
        if (real_start < real_end) {
            void *p;
            unsigned long offset1;
            if (flags & MAP_ANONYMOUS) {
                offset1 = 0;
            } else {
                offset1 = offset + real_start - start;
            }
            p = mmap(g2h_untagged(real_start), real_end - real_start,
                     host_prot, flags, fd, offset1);
            if (p == MAP_FAILED) {
                goto fail;
            }
        }
    }
 the_end1:
    if (flags & MAP_ANONYMOUS) {
        page_flags |= PAGE_ANON;
    }
    page_flags |= PAGE_RESET;
    page_set_flags(start, start + len, page_flags);
 the_end:
    trace_target_mmap_complete(start);
    if (qemu_loglevel_mask(CPU_LOG_PAGE)) {
        log_page_dump(__func__);
    }
    tb_invalidate_phys_range(start, start + len);
    mmap_unlock();
    return start;
fail:
    mmap_unlock();
    return -1;
}

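/*
 * In reserved_va mode the guest address space must stay reserved, so instead
 * of returning pages to the host, mmap_reserve() replaces the range with a
 * PROT_NONE anonymous mapping.  Partial host pages that still contain live
 * target pages are left untouched.
 */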
static void mmap_reserve(abi_ulong start, abi_ulong size)
{
    abi_ulong real_start;
    abi_ulong real_end;
    abi_ulong addr;
    abi_ulong end;
    int prot;

    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(start + size);
    end = start + size;
    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0) {
            real_start += qemu_host_page_size;
        }
    }
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0) {
            real_end -= qemu_host_page_size;
        }
    }
    if (real_start != real_end) {
        mmap(g2h_untagged(real_start), real_end - real_start, PROT_NONE,
             MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE,
             -1, 0);
    }
}

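/*
 * target_munmap() releases only the host pages that no longer contain any
 * mapped target page; partial host pages shared with neighbouring guest
 * mappings are kept.  With reserved_va the range is re-reserved rather than
 * unmapped.
 */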
int target_munmap(abi_ulong start, abi_ulong len)
{
    abi_ulong end, real_start, real_end, addr;
    int prot, ret;

    trace_target_munmap(start, len);

    if (start & ~TARGET_PAGE_MASK) {
        return -TARGET_EINVAL;
    }
    len = TARGET_PAGE_ALIGN(len);
    if (len == 0 || !guest_range_valid_untagged(start, len)) {
        return -TARGET_EINVAL;
    }

    mmap_lock();
    end = start + len;
    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(end);

    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0) {
            real_start += qemu_host_page_size;
        }
    }
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0) {
            real_end -= qemu_host_page_size;
        }
    }

    ret = 0;
    /* unmap what we can */
    if (real_start < real_end) {
        if (reserved_va) {
            mmap_reserve(real_start, real_end - real_start);
        } else {
            ret = munmap(g2h_untagged(real_start), real_end - real_start);
        }
    }

    if (ret == 0) {
        page_set_flags(start, start + len, 0);
        tb_invalidate_phys_range(start, start + len);
    }
    mmap_unlock();
    return ret;
}

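/*
 * target_mremap() handles the three host mremap() cases: MREMAP_FIXED moves
 * the mapping to new_addr, MREMAP_MAYMOVE lets mmap_find_vma() pick the new
 * location, and otherwise the mapping is resized in place (with reserved_va
 * this is only attempted if the target pages beyond the old end are free).
 * In reserved_va mode, any address range that is given up is returned to
 * mmap_reserve().
 */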
abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
                       abi_ulong new_size, unsigned long flags,
                       abi_ulong new_addr)
{
    int prot;
    void *host_addr;

    if (!guest_range_valid_untagged(old_addr, old_size) ||
        ((flags & MREMAP_FIXED) &&
         !guest_range_valid_untagged(new_addr, new_size)) ||
        ((flags & MREMAP_MAYMOVE) == 0 &&
         !guest_range_valid_untagged(old_addr, new_size))) {
        errno = ENOMEM;
        return -1;
    }

    mmap_lock();

    if (flags & MREMAP_FIXED) {
        host_addr = mremap(g2h_untagged(old_addr), old_size, new_size,
                           flags, g2h_untagged(new_addr));

        if (reserved_va && host_addr != MAP_FAILED) {
            /* If new and old addresses overlap then the above mremap will
               already have failed with EINVAL.  */
            mmap_reserve(old_addr, old_size);
        }
    } else if (flags & MREMAP_MAYMOVE) {
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, new_size, TARGET_PAGE_SIZE);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        } else {
            host_addr = mremap(g2h_untagged(old_addr), old_size, new_size,
                               flags | MREMAP_FIXED,
                               g2h_untagged(mmap_start));
            if (reserved_va) {
                mmap_reserve(old_addr, old_size);
            }
        }
    } else {
        int prot = 0;
        if (reserved_va && old_size < new_size) {
            abi_ulong addr;
            for (addr = old_addr + old_size;
                 addr < old_addr + new_size;
                 addr++) {
                prot |= page_get_flags(addr);
            }
        }
        if (prot == 0) {
            host_addr = mremap(g2h_untagged(old_addr),
                               old_size, new_size, flags);

            if (host_addr != MAP_FAILED) {
                /* Check if address fits target address space */
                if (!guest_range_valid_untagged(h2g(host_addr), new_size)) {
                    /* Revert mremap() changes */
                    host_addr = mremap(g2h_untagged(old_addr),
                                       new_size, old_size, flags);
                    errno = ENOMEM;
                    host_addr = MAP_FAILED;
                } else if (reserved_va && old_size > new_size) {
                    mmap_reserve(old_addr + old_size, old_size - new_size);
                }
            }
        } else {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        }
    }

    if (host_addr == MAP_FAILED) {
        new_addr = -1;
    } else {
        new_addr = h2g(host_addr);
        prot = page_get_flags(old_addr);
        page_set_flags(old_addr, old_addr + old_size, 0);
        page_set_flags(new_addr, new_addr + new_size,
                       prot | PAGE_VALID | PAGE_RESET);
    }
    tb_invalidate_phys_range(new_addr, new_addr + new_size);
    mmap_unlock();
    return new_addr;
}