/*
 *  mmap support for qemu
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include <sys/shm.h>
#include "trace.h"
#include "exec/log.h"
#include "exec/page-protection.h"
#include "qemu.h"
#include "user-internals.h"
#include "user-mmap.h"
#include "target_mman.h"
#include "qemu/interval-tree.h"

#ifdef TARGET_ARM
#include "target/arm/cpu-features.h"
#endif

static pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER;
static __thread int mmap_lock_count;

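/*
 * The mmap lock serializes updates to the guest page tables and to the
 * shm_regions tree below.  It is counted per thread, so nested callers
 * are allowed: only the 0 -> 1 and 1 -> 0 transitions touch the
 * underlying pthread mutex.
 */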
void mmap_lock(void)
{
    if (mmap_lock_count++ == 0) {
        pthread_mutex_lock(&mmap_mutex);
    }
}

void mmap_unlock(void)
{
    assert(mmap_lock_count > 0);
    if (--mmap_lock_count == 0) {
        pthread_mutex_unlock(&mmap_mutex);
    }
}

bool have_mmap_lock(void)
{
    return mmap_lock_count > 0;
}

/* Grab lock to make sure things are in a consistent state after fork().  */
void mmap_fork_start(void)
{
    if (mmap_lock_count) {
        abort();
    }
    pthread_mutex_lock(&mmap_mutex);
}

void mmap_fork_end(int child)
{
    if (child) {
        pthread_mutex_init(&mmap_mutex, NULL);
    } else {
        pthread_mutex_unlock(&mmap_mutex);
    }
}

/* Protected by mmap_lock. */
static IntervalTreeRoot shm_regions;

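/* Record a shared memory region covering guest addresses [start, last]. */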
static void shm_region_add(abi_ptr start, abi_ptr last)
{
    IntervalTreeNode *i = g_new0(IntervalTreeNode, 1);

    i->start = start;
    i->last = last;
    interval_tree_insert(i, &shm_regions);
}

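/* Return the last address of the region that begins exactly at @start, or 0. */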
static abi_ptr shm_region_find(abi_ptr start)
{
    IntervalTreeNode *i;

    for (i = interval_tree_iter_first(&shm_regions, start, start); i;
         i = interval_tree_iter_next(i, start, start)) {
        if (i->start == start) {
            return i->last;
        }
    }
    return 0;
}

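/* Remove every recorded region that lies entirely within [start, last]. */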
98044e95c8SRichard Henderson 
shm_region_rm_complete(abi_ptr start,abi_ptr last)99044e95c8SRichard Henderson static void shm_region_rm_complete(abi_ptr start, abi_ptr last)
100044e95c8SRichard Henderson {
101044e95c8SRichard Henderson     IntervalTreeNode *i, *n;
102044e95c8SRichard Henderson 
103044e95c8SRichard Henderson     for (i = interval_tree_iter_first(&shm_regions, start, last); i; i = n) {
104044e95c8SRichard Henderson         n = interval_tree_iter_next(i, start, last);
105044e95c8SRichard Henderson         if (i->start >= start && i->last <= last) {
106044e95c8SRichard Henderson             interval_tree_remove(i, &shm_regions);
107044e95c8SRichard Henderson             g_free(i);
108044e95c8SRichard Henderson         }
109044e95c8SRichard Henderson     }
110044e95c8SRichard Henderson }
111044e95c8SRichard Henderson 
/*
 * Validate the target prot bitmask.
 * Return 0 if the target prot bitmask is invalid, otherwise
 * the internal qemu page_flags (which will include PAGE_VALID).
 */
static int validate_prot_to_pageflags(int prot)
{
    int valid = PROT_READ | PROT_WRITE | PROT_EXEC | TARGET_PROT_SEM;
    int page_flags = (prot & PAGE_RWX) | PAGE_VALID;

#ifdef TARGET_AARCH64
    {
        ARMCPU *cpu = ARM_CPU(thread_cpu);

        /*
         * The PROT_BTI bit is only accepted if the cpu supports the feature.
         * Since this is the unusual case, don't bother checking unless
         * the bit has been requested.  If set and valid, record the bit
         * within QEMU's page_flags.
         */
        if ((prot & TARGET_PROT_BTI) && cpu_isar_feature(aa64_bti, cpu)) {
            valid |= TARGET_PROT_BTI;
            page_flags |= PAGE_BTI;
        }
        /* Similarly for the PROT_MTE bit. */
        if ((prot & TARGET_PROT_MTE) && cpu_isar_feature(aa64_mte, cpu)) {
            valid |= TARGET_PROT_MTE;
            page_flags |= PAGE_MTE;
        }
    }
#elif defined(TARGET_HPPA)
    valid |= PROT_GROWSDOWN | PROT_GROWSUP;
#endif

    return prot & ~valid ? 0 : page_flags;
}

/*
 * For the host, we need not pass anything except read/write/exec.
 * While PROT_SEM is allowed by all hosts, it is also ignored, so
 * don't bother transforming guest bit to host bit.  Any other
 * target-specific prot bits will not be understood by the host
 * and will need to be encoded into page_flags for qemu emulation.
 *
 * Pages that are executable by the guest will never be executed
 * by the host, but the host will need to be able to read them.
 */
static int target_to_host_prot(int prot)
{
    return (prot & (PROT_READ | PROT_WRITE)) |
           (prot & PROT_EXEC ? PROT_READ : 0);
}

/* NOTE: all the constants are the HOST ones, but addresses are target. */
int target_mprotect(abi_ulong start, abi_ulong len, int target_prot)
{
    int host_page_size = qemu_real_host_page_size();
    abi_ulong starts[3];
    abi_ulong lens[3];
    int prots[3];
    abi_ulong host_start, host_last, last;
    int prot1, ret, page_flags, nranges;

    trace_target_mprotect(start, len, target_prot);

    if ((start & ~TARGET_PAGE_MASK) != 0) {
        return -TARGET_EINVAL;
    }
    page_flags = validate_prot_to_pageflags(target_prot);
    if (!page_flags) {
        return -TARGET_EINVAL;
    }
    if (len == 0) {
        return 0;
    }
    len = TARGET_PAGE_ALIGN(len);
    if (!guest_range_valid_untagged(start, len)) {
        return -TARGET_ENOMEM;
    }

    last = start + len - 1;
    host_start = start & -host_page_size;
    host_last = ROUND_UP(last, host_page_size) - 1;
    nranges = 0;

    mmap_lock();

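    /*
     * When the host page size is larger than the target page size, a
     * single host page may hold several guest pages.  Split the request
     * into at most three host ranges: a leading partial host page, a
     * trailing partial host page, and the aligned middle.  A partial
     * host page keeps the union of the protections of every guest page
     * it contains.
     */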
    if (host_last - host_start < host_page_size) {
        /* Single host page contains all guest pages: sum the prot. */
        prot1 = target_prot;
        for (abi_ulong a = host_start; a < start; a += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(a);
        }
        for (abi_ulong a = last; a < host_last; a += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(a + 1);
        }
        starts[nranges] = host_start;
        lens[nranges] = host_page_size;
        prots[nranges] = prot1;
        nranges++;
    } else {
        if (host_start < start) {
            /* Host page contains more than one guest page: sum the prot. */
            prot1 = target_prot;
            for (abi_ulong a = host_start; a < start; a += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(a);
            }
            /* If the resulting sum differs, create a new range. */
            if (prot1 != target_prot) {
                starts[nranges] = host_start;
                lens[nranges] = host_page_size;
                prots[nranges] = prot1;
                nranges++;
                host_start += host_page_size;
            }
        }

        if (last < host_last) {
            /* Host page contains more than one guest page: sum the prot. */
            prot1 = target_prot;
            for (abi_ulong a = last; a < host_last; a += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(a + 1);
            }
            /* If the resulting sum differs, create a new range. */
            if (prot1 != target_prot) {
                host_last -= host_page_size;
                starts[nranges] = host_last + 1;
                lens[nranges] = host_page_size;
                prots[nranges] = prot1;
                nranges++;
            }
        }

        /* Create a range for the middle, if any remains. */
        if (host_start < host_last) {
            starts[nranges] = host_start;
            lens[nranges] = host_last - host_start + 1;
            prots[nranges] = target_prot;
            nranges++;
        }
    }

    for (int i = 0; i < nranges; ++i) {
        ret = mprotect(g2h_untagged(starts[i]), lens[i],
                       target_to_host_prot(prots[i]));
        if (ret != 0) {
            goto error;
        }
    }

    page_set_flags(start, last, page_flags);
    ret = 0;

 error:
    mmap_unlock();
    return ret;
}

/*
 * Perform munmap on behalf of the target, with host parameters.
 * If reserved_va, we must replace the memory reservation.
 */
static int do_munmap(void *addr, size_t len)
{
    if (reserved_va) {
        void *ptr = mmap(addr, len, PROT_NONE,
                         MAP_FIXED | MAP_ANONYMOUS
                         | MAP_PRIVATE | MAP_NORESERVE, -1, 0);
        return ptr == addr ? 0 : -1;
    }
    return munmap(addr, len);
}

/*
 * Perform a pread on behalf of target_mmap.  We can reach EOF, we can be
 * interrupted by signals, and in general there's no good error return path.
 * If @zero, zero the rest of the block at EOF.
 * Return true on success.
 */
static bool mmap_pread(int fd, void *p, size_t len, off_t offset, bool zero)
{
    while (1) {
        ssize_t r = pread(fd, p, len, offset);

        if (likely(r == len)) {
            /* Complete */
            return true;
        }
        if (r == 0) {
            /* EOF */
            if (zero) {
                memset(p, 0, len);
            }
            return true;
        }
        if (r > 0) {
            /* Short read */
            p += r;
            len -= r;
            offset += r;
        } else if (errno != EINTR) {
            /* Error */
            return false;
        }
    }
}

/*
 * Map an incomplete host page.
 *
 * Here be dragons.  This case will not work if there is an existing
 * overlapping host page, which is file mapped, and for which the mapping
 * is beyond the end of the file.  In that case, we will see SIGBUS when
 * trying to write a portion of this page.
 *
 * FIXME: Work around this with a temporary signal handler and longjmp.
 */
static bool mmap_frag(abi_ulong real_start, abi_ulong start, abi_ulong last,
                      int prot, int flags, int fd, off_t offset)
{
    int host_page_size = qemu_real_host_page_size();
    abi_ulong real_last;
    void *host_start;
    int prot_old, prot_new;
    int host_prot_old, host_prot_new;

    if (!(flags & MAP_ANONYMOUS)
        && (flags & MAP_TYPE) == MAP_SHARED
        && (prot & PROT_WRITE)) {
        /*
         * msync() won't work with the partial page, so we return an
         * error if write is possible while it is a shared mapping.
         */
        errno = EINVAL;
        return false;
    }

    real_last = real_start + host_page_size - 1;
    host_start = g2h_untagged(real_start);

    /* Get the protection of the target pages outside the mapping. */
    prot_old = 0;
    for (abi_ulong a = real_start; a < start; a += TARGET_PAGE_SIZE) {
        prot_old |= page_get_flags(a);
    }
    for (abi_ulong a = real_last; a > last; a -= TARGET_PAGE_SIZE) {
        prot_old |= page_get_flags(a);
    }

    if (prot_old == 0) {
        /*
         * Since !(prot_old & PAGE_VALID), there were no guest pages
         * outside of the fragment we need to map.  Allocate a new host
         * page to cover, discarding whatever else may have been present.
         */
        void *p = mmap(host_start, host_page_size,
                       target_to_host_prot(prot),
                       flags | MAP_ANONYMOUS, -1, 0);
        if (p != host_start) {
            if (p != MAP_FAILED) {
                do_munmap(p, host_page_size);
                errno = EEXIST;
            }
            return false;
        }
        prot_old = prot;
    }
    prot_new = prot | prot_old;

    host_prot_old = target_to_host_prot(prot_old);
    host_prot_new = target_to_host_prot(prot_new);

    /* Adjust protection to be able to write. */
    if (!(host_prot_old & PROT_WRITE)) {
        host_prot_old |= PROT_WRITE;
        mprotect(host_start, host_page_size, host_prot_old);
    }

    /* Read or zero the new guest pages. */
    if (flags & MAP_ANONYMOUS) {
        memset(g2h_untagged(start), 0, last - start + 1);
    } else if (!mmap_pread(fd, g2h_untagged(start), last - start + 1,
                           offset, true)) {
        return false;
    }

    /* Put final protection */
    if (host_prot_new != host_prot_old) {
        mprotect(host_start, host_page_size, host_prot_new);
    }
    return true;
}

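/*
 * Guest address-space layout state: task_unmapped_base and
 * elf_et_dyn_base are placement bases initialized elsewhere during
 * guest setup; mmap_next_start is where mmap_find_vma() starts
 * searching when the caller gives no hint, and is advanced on success.
 */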
abi_ulong task_unmapped_base;
abi_ulong elf_et_dyn_base;
abi_ulong mmap_next_start;

/*
 * Subroutine of mmap_find_vma, used when we have pre-allocated
 * a chunk of guest address space.
 */
static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
                                        abi_ulong align)
{
    target_ulong ret;

    ret = page_find_range_empty(start, reserved_va, size, align);
    if (ret == -1 && start > mmap_min_addr) {
        /* Restart at the beginning of the address space. */
        ret = page_find_range_empty(mmap_min_addr, start - 1, size, align);
    }

    return ret;
}

/*
 * Find and reserve a free memory area of size 'size'. The search
 * starts at 'start'.
 * It must be called with mmap_lock() held.
 * Return -1 if error.
 */
abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size, abi_ulong align)
{
    int host_page_size = qemu_real_host_page_size();
    void *ptr, *prev;
    abi_ulong addr;
    int wrapped, repeat;

    align = MAX(align, host_page_size);

    /* If 'start' == 0, then a default start address is used. */
    if (start == 0) {
        start = mmap_next_start;
    } else {
        start &= -host_page_size;
    }
    start = ROUND_UP(start, align);
    size = ROUND_UP(size, host_page_size);

    if (reserved_va) {
        return mmap_find_vma_reserved(start, size, align);
    }

    addr = start;
    wrapped = repeat = 0;
    prev = 0;

    for (;; prev = ptr) {
        /*
         * Reserve needed memory area to avoid a race.
         * It should be discarded using:
         *  - mmap() with MAP_FIXED flag
         *  - mremap() with MREMAP_FIXED flag
         *  - shmat() with SHM_REMAP flag
         */
        ptr = mmap(g2h_untagged(addr), size, PROT_NONE,
                   MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0);

        /* ENOMEM, if host address space has no memory */
        if (ptr == MAP_FAILED) {
            return (abi_ulong)-1;
        }

        /*
         * Count the number of sequential returns of the same address.
         * This is used to modify the search algorithm below.
         */
        repeat = (ptr == prev ? repeat + 1 : 0);

        if (h2g_valid(ptr + size - 1)) {
            addr = h2g(ptr);

            if ((addr & (align - 1)) == 0) {
                /* Success.  */
                if (start == mmap_next_start && addr >= task_unmapped_base) {
                    mmap_next_start = addr + size;
                }
                return addr;
            }

            /* The address is not properly aligned for the target.  */
            switch (repeat) {
            case 0:
                /*
                 * Assume the result that the kernel gave us is the
                 * first with enough free space, so start again at the
                 * next higher target page.
                 */
                addr = ROUND_UP(addr, align);
                break;
            case 1:
                /*
                 * Sometimes the kernel decides to perform the allocation
                 * at the top end of memory instead.
                 */
                addr &= -align;
                break;
            case 2:
                /* Start over at low memory.  */
                addr = 0;
                break;
            default:
                /* Fail.  This unaligned block must be the last.  */
                addr = -1;
                break;
            }
        } else {
            /*
             * Since the result the kernel gave didn't fit, start
             * again at low memory.  If any repetition, fail.
             */
            addr = (repeat ? -1 : 0);
        }

        /* Unmap and try again.  */
        munmap(ptr, size);

        /* ENOMEM if we checked the whole of the target address space.  */
        if (addr == (abi_ulong)-1) {
            return (abi_ulong)-1;
        } else if (addr == 0) {
            if (wrapped) {
                return (abi_ulong)-1;
            }
            wrapped = 1;
            /*
             * Don't actually use 0 when wrapping, instead indicate
             * that we'd truly like an allocation in low memory.
             */
            addr = (mmap_min_addr > TARGET_PAGE_SIZE
                     ? TARGET_PAGE_ALIGN(mmap_min_addr)
                     : TARGET_PAGE_SIZE);
        } else if (wrapped && addr >= start) {
            return (abi_ulong)-1;
        }
    }
}

/*
 * Record a successful mmap within the user-exec interval tree.
 */
static abi_long mmap_end(abi_ulong start, abi_ulong last,
                         abi_ulong passthrough_start,
                         abi_ulong passthrough_last,
                         int flags, int page_flags)
{
    if (flags & MAP_ANONYMOUS) {
        page_flags |= PAGE_ANON;
    }
    page_flags |= PAGE_RESET;
    if (passthrough_start > passthrough_last) {
        page_set_flags(start, last, page_flags);
    } else {
        if (start < passthrough_start) {
            page_set_flags(start, passthrough_start - 1, page_flags);
        }
        page_set_flags(passthrough_start, passthrough_last,
                       page_flags | PAGE_PASSTHROUGH);
        if (passthrough_last < last) {
            page_set_flags(passthrough_last + 1, last, page_flags);
        }
    }
    shm_region_rm_complete(start, last);
    trace_target_mmap_complete(start);
    if (qemu_loglevel_mask(CPU_LOG_PAGE)) {
        FILE *f = qemu_log_trylock();
        if (f) {
            fprintf(f, "page layout changed following mmap\n");
            page_dump(f);
            qemu_log_unlock(f);
        }
    }
    return start;
}

/*
 * Special case host page size == target page size,
 * where there are no edge conditions.
 */
static abi_long mmap_h_eq_g(abi_ulong start, abi_ulong len,
                            int host_prot, int flags, int page_flags,
                            int fd, off_t offset)
{
    void *p, *want_p = NULL;
    abi_ulong last;

    if (start || (flags & (MAP_FIXED | MAP_FIXED_NOREPLACE))) {
        want_p = g2h_untagged(start);
    }

    p = mmap(want_p, len, host_prot, flags, fd, offset);
    if (p == MAP_FAILED) {
        return -1;
    }
    /* If the host kernel does not support MAP_FIXED_NOREPLACE, emulate. */
    if ((flags & MAP_FIXED_NOREPLACE) && p != want_p) {
        do_munmap(p, len);
        errno = EEXIST;
        return -1;
    }

    start = h2g(p);
    last = start + len - 1;
    return mmap_end(start, last, start, last, flags, page_flags);
}

/*
 * Special case host page size < target page size.
 *
 * The two special cases are increased guest alignment, and mapping
 * past the end of a file.
 *
 * When mapping files into a memory area larger than the file,
 * accesses to pages beyond the file size will cause a SIGBUS.
 *
 * For example, if mmaping a file of 100 bytes on a host with 4K
 * pages emulating a target with 8K pages, the target expects to
 * be able to access the first 8K. But the host will trap us on
 * any access beyond 4K.
 *
 * When emulating a target with a larger page size than the host's,
 * we may need to truncate file maps at EOF and add extra anonymous
 * pages up to the target's page boundary.
 *
 * This workaround only works for files that do not change.
 * If the file is later extended (e.g. ftruncate), the SIGBUS
 * vanishes and the proper behaviour is that changes within the
 * anon page should be reflected in the file.
 *
 * However, this case is rather common with executable images,
 * so the workaround is important for even trivial tests, whereas
 * the mmap of a file being extended is less common.
 */
static abi_long mmap_h_lt_g(abi_ulong start, abi_ulong len, int host_prot,
                            int mmap_flags, int page_flags, int fd,
                            off_t offset, int host_page_size)
{
    void *p, *want_p = NULL;
    off_t fileend_adj = 0;
    int flags = mmap_flags;
    abi_ulong last, pass_last;

    if (start || (flags & (MAP_FIXED | MAP_FIXED_NOREPLACE))) {
        want_p = g2h_untagged(start);
    }

    if (!(flags & MAP_ANONYMOUS)) {
        struct stat sb;

        if (fstat(fd, &sb) == -1) {
            return -1;
        }
        if (offset >= sb.st_size) {
            /*
             * The entire map is beyond the end of the file.
             * Transform it to an anonymous mapping.
             */
            flags |= MAP_ANONYMOUS;
            fd = -1;
            offset = 0;
        } else if (offset + len > sb.st_size) {
            /*
             * A portion of the map is beyond the end of the file.
             * Truncate the file portion of the allocation.
             */
            fileend_adj = offset + len - sb.st_size;
        }
    }

    if (flags & (MAP_FIXED | MAP_FIXED_NOREPLACE)) {
        if (fileend_adj) {
            p = mmap(want_p, len, host_prot, flags | MAP_ANONYMOUS, -1, 0);
        } else {
            p = mmap(want_p, len, host_prot, flags, fd, offset);
        }
        if (p != want_p) {
            if (p != MAP_FAILED) {
                /* Host does not support MAP_FIXED_NOREPLACE: emulate. */
                do_munmap(p, len);
                errno = EEXIST;
            }
            return -1;
        }

        if (fileend_adj) {
            void *t = mmap(p, len - fileend_adj, host_prot,
                           (flags & ~MAP_FIXED_NOREPLACE) | MAP_FIXED,
                           fd, offset);

            if (t == MAP_FAILED) {
                int save_errno = errno;

                /*
                 * We failed a map over the top of the successful anonymous
                 * mapping above. The only failure mode is running out of VMAs,
                 * and there's nothing that we can do to detect that earlier.
                 * If we have replaced an existing mapping with MAP_FIXED,
                 * then we cannot properly recover.  It's a coin toss whether
                 * it would be better to exit or continue here.
                 */
                if (!(flags & MAP_FIXED_NOREPLACE) &&
                    !page_check_range_empty(start, start + len - 1)) {
                    qemu_log("QEMU target_mmap late failure: %s",
                             strerror(save_errno));
                }

                do_munmap(want_p, len);
                errno = save_errno;
                return -1;
            }
        }
    } else {
        size_t host_len, part_len;

        /*
         * Take care to align the host memory.  Perform a larger anonymous
         * allocation and extract the aligned portion.  Remap the file on
         * top of that.
         */
        host_len = len + TARGET_PAGE_SIZE - host_page_size;
        p = mmap(want_p, host_len, host_prot, flags | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
            return -1;
        }

        part_len = (uintptr_t)p & (TARGET_PAGE_SIZE - 1);
        if (part_len) {
            part_len = TARGET_PAGE_SIZE - part_len;
            do_munmap(p, part_len);
            p += part_len;
            host_len -= part_len;
        }
        if (len < host_len) {
            do_munmap(p + len, host_len - len);
        }

        if (!(flags & MAP_ANONYMOUS)) {
            void *t = mmap(p, len - fileend_adj, host_prot,
                           flags | MAP_FIXED, fd, offset);

            if (t == MAP_FAILED) {
                int save_errno = errno;
                do_munmap(p, len);
                errno = save_errno;
                return -1;
            }
        }

        start = h2g(p);
    }

    last = start + len - 1;
    if (fileend_adj) {
        pass_last = ROUND_UP(last - fileend_adj, host_page_size) - 1;
    } else {
        pass_last = last;
    }
    return mmap_end(start, last, start, pass_last, mmap_flags, page_flags);
}

/*
 * Special case host page size > target page size.
 *
 * The two special cases are address and file offsets that are valid
 * for the guest that cannot be directly represented by the host.
 */
static abi_long mmap_h_gt_g(abi_ulong start, abi_ulong len,
                            int target_prot, int host_prot,
                            int flags, int page_flags, int fd,
                            off_t offset, int host_page_size)
{
    void *p, *want_p = NULL;
    off_t host_offset = offset & -host_page_size;
    abi_ulong last, real_start, real_last;
    bool misaligned_offset = false;
    size_t host_len;

    if (start || (flags & (MAP_FIXED | MAP_FIXED_NOREPLACE))) {
        want_p = g2h_untagged(start);
    }

    if (!(flags & (MAP_FIXED | MAP_FIXED_NOREPLACE))) {
        /*
         * Adjust the offset to something representable on the host.
         */
        host_len = len + offset - host_offset;
        p = mmap(want_p, host_len, host_prot, flags, fd, host_offset);
        if (p == MAP_FAILED) {
            return -1;
        }

        /* Update start to the file position at offset. */
        p += offset - host_offset;

        start = h2g(p);
        last = start + len - 1;
        return mmap_end(start, last, start, last, flags, page_flags);
    }

    if (!(flags & MAP_ANONYMOUS)) {
        misaligned_offset = (start ^ offset) & (host_page_size - 1);

        /*
         * The fallback for misalignment is a private mapping + read.
         * This carries none of the semantics required of MAP_SHARED.
         */
        if (misaligned_offset && (flags & MAP_TYPE) != MAP_PRIVATE) {
            errno = EINVAL;
            return -1;
        }
    }

    last = start + len - 1;
    real_start = start & -host_page_size;
    real_last = ROUND_UP(last, host_page_size) - 1;

    /*
     * Handle the start and end of the mapping.
     */
    if (real_start < start) {
        abi_ulong real_page_last = real_start + host_page_size - 1;
        if (last <= real_page_last) {
            /* Entire allocation a subset of one host page. */
            if (!mmap_frag(real_start, start, last, target_prot,
                           flags, fd, offset)) {
                return -1;
            }
            return mmap_end(start, last, -1, 0, flags, page_flags);
        }

        if (!mmap_frag(real_start, start, real_page_last, target_prot,
                       flags, fd, offset)) {
            return -1;
        }
        real_start = real_page_last + 1;
    }

    if (last < real_last) {
        abi_ulong real_page_start = real_last - host_page_size + 1;
        if (!mmap_frag(real_page_start, real_page_start, last,
                       target_prot, flags, fd,
                       offset + real_page_start - start)) {
            return -1;
        }
        real_last = real_page_start - 1;
    }

    if (real_start > real_last) {
        return mmap_end(start, last, -1, 0, flags, page_flags);
    }

    /*
     * Handle the middle of the mapping.
     */

    host_len = real_last - real_start + 1;
    want_p += real_start - start;

    if (flags & MAP_ANONYMOUS) {
        p = mmap(want_p, host_len, host_prot, flags, -1, 0);
    } else if (!misaligned_offset) {
        p = mmap(want_p, host_len, host_prot, flags, fd,
                 offset + real_start - start);
    } else {
        p = mmap(want_p, host_len, host_prot | PROT_WRITE,
                 flags | MAP_ANONYMOUS, -1, 0);
    }
    if (p != want_p) {
        if (p != MAP_FAILED) {
            do_munmap(p, host_len);
            errno = EEXIST;
        }
        return -1;
    }

    if (misaligned_offset) {
        if (!mmap_pread(fd, p, host_len, offset + real_start - start, false)) {
            do_munmap(p, host_len);
            return -1;
        }
        if (!(host_prot & PROT_WRITE)) {
            mprotect(p, host_len, host_prot);
        }
    }

    return mmap_end(start, last, -1, 0, flags, page_flags);
}

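/*
 * Worker for target_mmap, called with the mmap lock held.  When
 * reserved_va is in effect, resolve the placement here and convert the
 * request to MAP_FIXED, then dispatch to one of the three helpers above
 * according to how the host page size compares with the target's.
 */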
target_mmap__locked(abi_ulong start,abi_ulong len,int target_prot,int flags,int page_flags,int fd,off_t offset)901d558c395SRichard Henderson static abi_long target_mmap__locked(abi_ulong start, abi_ulong len,
902e8cec51bSRichard Henderson                                     int target_prot, int flags, int page_flags,
903d558c395SRichard Henderson                                     int fd, off_t offset)
90454936004Sbellard {
905621ac47dSRichard Henderson     int host_page_size = qemu_real_host_page_size();
90668098de9SRichard Henderson     int host_prot;
90754936004Sbellard 
9082b730f79SRichard Henderson     /*
909ad87d26eSRichard Henderson      * For reserved_va, we are in full control of the allocation.
910ad87d26eSRichard Henderson      * Find a suitable hole and convert to MAP_FIXED.
9112b730f79SRichard Henderson      */
91268098de9SRichard Henderson     if (reserved_va) {
91368098de9SRichard Henderson         if (flags & MAP_FIXED_NOREPLACE) {
91468098de9SRichard Henderson             /* Validate that the chosen range is empty. */
91568098de9SRichard Henderson             if (!page_check_range_empty(start, start + len - 1)) {
91668098de9SRichard Henderson                 errno = EEXIST;
91768098de9SRichard Henderson                 return -1;
91868098de9SRichard Henderson             }
91968098de9SRichard Henderson             flags = (flags & ~MAP_FIXED_NOREPLACE) | MAP_FIXED;
92068098de9SRichard Henderson         } else if (!(flags & MAP_FIXED)) {
921eb5027acSRichard Henderson             abi_ulong real_start = start & -host_page_size;
922eb5027acSRichard Henderson             off_t host_offset = offset & -host_page_size;
92368098de9SRichard Henderson             size_t real_len = len + offset - host_offset;
92468098de9SRichard Henderson             abi_ulong align = MAX(host_page_size, TARGET_PAGE_SIZE);
92568098de9SRichard Henderson 
92668098de9SRichard Henderson             start = mmap_find_vma(real_start, real_len, align);
927a5e7ee46SRichard Henderson             if (start == (abi_ulong)-1) {
928a5e7ee46SRichard Henderson                 errno = ENOMEM;
929d558c395SRichard Henderson                 return -1;
930a5e7ee46SRichard Henderson             }
931ad87d26eSRichard Henderson             start += offset - host_offset;
932ad87d26eSRichard Henderson             flags |= MAP_FIXED;
933a5e7ee46SRichard Henderson         }
93468098de9SRichard Henderson     }
93568098de9SRichard Henderson 
93668098de9SRichard Henderson     host_prot = target_to_host_prot(target_prot);
93768098de9SRichard Henderson 
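    /*
     * Dispatch on the relationship between the host and the guest page
     * size: the three helpers handle equal sizes, a host page smaller
     * than the guest page, and a host page larger than the guest page.
     */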
93868098de9SRichard Henderson     if (host_page_size == TARGET_PAGE_SIZE) {
93968098de9SRichard Henderson         return mmap_h_eq_g(start, len, host_prot, flags,
94068098de9SRichard Henderson                            page_flags, fd, offset);
9418080b2f8SRichard Henderson     } else if (host_page_size < TARGET_PAGE_SIZE) {
9428080b2f8SRichard Henderson         return mmap_h_lt_g(start, len, host_prot, flags,
9438080b2f8SRichard Henderson                            page_flags, fd, offset, host_page_size);
944a03e2d42Sbellard     } else {
945eb5027acSRichard Henderson         return mmap_h_gt_g(start, len, target_prot, host_prot, flags,
946eb5027acSRichard Henderson                            page_flags, fd, offset, host_page_size);
94703798605SRichard Henderson     }
948d558c395SRichard Henderson }
949d558c395SRichard Henderson 
950d558c395SRichard Henderson /* NOTE: all the constants are the HOST ones */
951d558c395SRichard Henderson abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
952d558c395SRichard Henderson                      int flags, int fd, off_t offset)
953d558c395SRichard Henderson {
954d558c395SRichard Henderson     abi_long ret;
955e8cec51bSRichard Henderson     int page_flags;
956d558c395SRichard Henderson 
957d558c395SRichard Henderson     trace_target_mmap(start, len, target_prot, flags, fd, offset);
958e8cec51bSRichard Henderson 
959e8cec51bSRichard Henderson     if (!len) {
960e8cec51bSRichard Henderson         errno = EINVAL;
961e8cec51bSRichard Henderson         return -1;
962e8cec51bSRichard Henderson     }
963e8cec51bSRichard Henderson 
964e8cec51bSRichard Henderson     page_flags = validate_prot_to_pageflags(target_prot);
965e8cec51bSRichard Henderson     if (!page_flags) {
966e8cec51bSRichard Henderson         errno = EINVAL;
967e8cec51bSRichard Henderson         return -1;
968e8cec51bSRichard Henderson     }
969e8cec51bSRichard Henderson 
970e8cec51bSRichard Henderson     /* Also check for overflows... */
971e8cec51bSRichard Henderson     len = TARGET_PAGE_ALIGN(len);
972e8cec51bSRichard Henderson     if (!len || len != (size_t)len) {
973e8cec51bSRichard Henderson         errno = ENOMEM;
974e8cec51bSRichard Henderson         return -1;
975e8cec51bSRichard Henderson     }
976e8cec51bSRichard Henderson 
977e8cec51bSRichard Henderson     if (offset & ~TARGET_PAGE_MASK) {
978e8cec51bSRichard Henderson         errno = EINVAL;
979e8cec51bSRichard Henderson         return -1;
980e8cec51bSRichard Henderson     }
981e8cec51bSRichard Henderson     if (flags & (MAP_FIXED | MAP_FIXED_NOREPLACE)) {
982e8cec51bSRichard Henderson         if (start & ~TARGET_PAGE_MASK) {
983e8cec51bSRichard Henderson             errno = EINVAL;
984e8cec51bSRichard Henderson             return -1;
985e8cec51bSRichard Henderson         }
986e8cec51bSRichard Henderson         if (!guest_range_valid_untagged(start, len)) {
987e8cec51bSRichard Henderson             errno = ENOMEM;
988e8cec51bSRichard Henderson             return -1;
989e8cec51bSRichard Henderson         }
990e8cec51bSRichard Henderson     }
991e8cec51bSRichard Henderson 
992d558c395SRichard Henderson     mmap_lock();
993d558c395SRichard Henderson 
994e8cec51bSRichard Henderson     ret = target_mmap__locked(start, len, target_prot, flags,
995e8cec51bSRichard Henderson                               page_flags, fd, offset);
996d558c395SRichard Henderson 
997c8a706feSpbrook     mmap_unlock();
998e8cec51bSRichard Henderson 
999e8cec51bSRichard Henderson     /*
1000e8cec51bSRichard Henderson      * If we're mapping shared memory, ensure we generate code for parallel
1001e8cec51bSRichard Henderson      * execution and flush old translations.  This will work up to the level
1002e8cec51bSRichard Henderson      * supported by the host -- anything that requires EXCP_ATOMIC will not
1003e8cec51bSRichard Henderson      * be atomic with respect to an external process.
1004e8cec51bSRichard Henderson      */
1005e8cec51bSRichard Henderson     if (ret != -1 && (flags & MAP_TYPE) != MAP_PRIVATE) {
1006e8cec51bSRichard Henderson         CPUState *cpu = thread_cpu;
1007b254c342SPhilippe Mathieu-Daudé         if (!tcg_cflags_has(cpu, CF_PARALLEL)) {
1008b254c342SPhilippe Mathieu-Daudé             tcg_cflags_set(cpu, CF_PARALLEL);
1009e8cec51bSRichard Henderson             tb_flush(cpu);
1010e8cec51bSRichard Henderson         }
1011e8cec51bSRichard Henderson     }
1012e8cec51bSRichard Henderson 
1013d558c395SRichard Henderson     return ret;
101454936004Sbellard }
101554936004Sbellard 
1016912ff698SRichard Henderson static int mmap_reserve_or_unmap(abi_ulong start, abi_ulong len)
101768a1c816SPaul Brook {
1018621ac47dSRichard Henderson     int host_page_size = qemu_real_host_page_size();
101968a1c816SPaul Brook     abi_ulong real_start;
1020260561d8SRichard Henderson     abi_ulong real_last;
1021260561d8SRichard Henderson     abi_ulong real_len;
1022260561d8SRichard Henderson     abi_ulong last;
1023260561d8SRichard Henderson     abi_ulong a;
1024558a4411SRichard Henderson     void *host_start;
102568a1c816SPaul Brook     int prot;
102668a1c816SPaul Brook 
1027260561d8SRichard Henderson     last = start + len - 1;
1028621ac47dSRichard Henderson     real_start = start & -host_page_size;
1029b36b2b1dSRichard Henderson     real_last = ROUND_UP(last, host_page_size) - 1;
1030260561d8SRichard Henderson 
1031260561d8SRichard Henderson     /*
1032260561d8SRichard Henderson      * If guest pages remain on the first or last host pages,
1033260561d8SRichard Henderson      * adjust the deallocation to retain those guest pages.
1034260561d8SRichard Henderson      * The single page special case is required for the last page,
1035260561d8SRichard Henderson      * lest real_start overflow to zero.
1036260561d8SRichard Henderson      */
1037621ac47dSRichard Henderson     if (real_last - real_start < host_page_size) {
103868a1c816SPaul Brook         prot = 0;
1039260561d8SRichard Henderson         for (a = real_start; a < start; a += TARGET_PAGE_SIZE) {
1040260561d8SRichard Henderson             prot |= page_get_flags(a);
104168a1c816SPaul Brook         }
1042260561d8SRichard Henderson         for (a = last; a < real_last; a += TARGET_PAGE_SIZE) {
1043260561d8SRichard Henderson             prot |= page_get_flags(a + 1);
104468a1c816SPaul Brook         }
1045260561d8SRichard Henderson         if (prot != 0) {
1046912ff698SRichard Henderson             return 0;
1047260561d8SRichard Henderson         }
1048260561d8SRichard Henderson     } else {
1049260561d8SRichard Henderson         for (prot = 0, a = real_start; a < start; a += TARGET_PAGE_SIZE) {
1050260561d8SRichard Henderson             prot |= page_get_flags(a);
105168a1c816SPaul Brook         }
10522b730f79SRichard Henderson         if (prot != 0) {
1053621ac47dSRichard Henderson             real_start += host_page_size;
105468a1c816SPaul Brook         }
1055260561d8SRichard Henderson 
1056260561d8SRichard Henderson         for (prot = 0, a = last; a < real_last; a += TARGET_PAGE_SIZE) {
1057260561d8SRichard Henderson             prot |= page_get_flags(a + 1);
105868a1c816SPaul Brook         }
10592b730f79SRichard Henderson         if (prot != 0) {
1060621ac47dSRichard Henderson             real_last -= host_page_size;
1061260561d8SRichard Henderson         }
1062260561d8SRichard Henderson 
1063260561d8SRichard Henderson         if (real_last < real_start) {
1064912ff698SRichard Henderson             return 0;
106568a1c816SPaul Brook         }
10662b730f79SRichard Henderson     }
1067260561d8SRichard Henderson 
1068260561d8SRichard Henderson     real_len = real_last - real_start + 1;
1069260561d8SRichard Henderson     host_start = g2h_untagged(real_start);
1070260561d8SRichard Henderson 
10712952b642SRichard Henderson     return do_munmap(host_start, real_len);
107268a1c816SPaul Brook }
107368a1c816SPaul Brook 
1074992f48a0Sblueswir1 int target_munmap(abi_ulong start, abi_ulong len)
107554936004Sbellard {
1076912ff698SRichard Henderson     int ret;
1077912ff698SRichard Henderson 
1078b7b18d26SAlex Bennée     trace_target_munmap(start, len);
1079b7b18d26SAlex Bennée 
10802b730f79SRichard Henderson     if (start & ~TARGET_PAGE_MASK) {
1081912ff698SRichard Henderson         errno = EINVAL;
1082912ff698SRichard Henderson         return -1;
10832b730f79SRichard Henderson     }
108454936004Sbellard     len = TARGET_PAGE_ALIGN(len);
108546b12f46SRichard Henderson     if (len == 0 || !guest_range_valid_untagged(start, len)) {
1086912ff698SRichard Henderson         errno = EINVAL;
1087912ff698SRichard Henderson         return -1;
1088ebf9a363SMax Filippov     }
1089ebf9a363SMax Filippov 
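    /*
     * Drop (or, with reserved_va, re-reserve) the host range under the
     * lock, then clear the guest page flags and any SHM region
     * bookkeeping for the range.
     */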
1090c8a706feSpbrook     mmap_lock();
1091912ff698SRichard Henderson     ret = mmap_reserve_or_unmap(start, len);
1092912ff698SRichard Henderson     if (likely(ret == 0)) {
109349840a4aSRichard Henderson         page_set_flags(start, start + len - 1, 0);
1094044e95c8SRichard Henderson         shm_region_rm_complete(start, start + len - 1);
1095912ff698SRichard Henderson     }
1096c8a706feSpbrook     mmap_unlock();
1097d7b0c5d0SRichard Henderson 
1098912ff698SRichard Henderson     return ret;
109954936004Sbellard }
110054936004Sbellard 
1101992f48a0Sblueswir1 abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
1102992f48a0Sblueswir1                        abi_ulong new_size, unsigned long flags,
1103992f48a0Sblueswir1                        abi_ulong new_addr)
110454936004Sbellard {
110554936004Sbellard     int prot;
1106f19412a2Saurel32     void *host_addr;
110754936004Sbellard 
110846b12f46SRichard Henderson     if (!guest_range_valid_untagged(old_addr, old_size) ||
1109ebf9a363SMax Filippov         ((flags & MREMAP_FIXED) &&
111046b12f46SRichard Henderson          !guest_range_valid_untagged(new_addr, new_size)) ||
1111ccc5ccc1SRichard Purdie         ((flags & MREMAP_MAYMOVE) == 0 &&
111246b12f46SRichard Henderson          !guest_range_valid_untagged(old_addr, new_size))) {
1113ebf9a363SMax Filippov         errno = ENOMEM;
1114ebf9a363SMax Filippov         return -1;
1115ebf9a363SMax Filippov     }
1116ebf9a363SMax Filippov 
1117c8a706feSpbrook     mmap_lock();
1118f19412a2Saurel32 
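    /*
     * Three cases: MREMAP_FIXED passes the guest-chosen new address
     * straight through to the host; MREMAP_MAYMOVE first finds a fresh
     * guest range with mmap_find_vma(); otherwise the mapping is resized
     * in place.
     */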
111968a1c816SPaul Brook     if (flags & MREMAP_FIXED) {
11203e8f1628SRichard Henderson         host_addr = mremap(g2h_untagged(old_addr), old_size, new_size,
11213e8f1628SRichard Henderson                            flags, g2h_untagged(new_addr));
112268a1c816SPaul Brook 
1123b76f21a7SLaurent Vivier         if (reserved_va && host_addr != MAP_FAILED) {
11242b730f79SRichard Henderson             /*
11252b730f79SRichard Henderson              * If new and old addresses overlap then the above mremap will
11262b730f79SRichard Henderson              * already have failed with EINVAL.
11272b730f79SRichard Henderson              */
1128558a4411SRichard Henderson             mmap_reserve_or_unmap(old_addr, old_size);
112968a1c816SPaul Brook         }
113068a1c816SPaul Brook     } else if (flags & MREMAP_MAYMOVE) {
1131f19412a2Saurel32         abi_ulong mmap_start;
1132f19412a2Saurel32 
113330ab9ef2SRichard Henderson         mmap_start = mmap_find_vma(0, new_size, TARGET_PAGE_SIZE);
1134f19412a2Saurel32 
1135f19412a2Saurel32         if (mmap_start == -1) {
1136f19412a2Saurel32             errno = ENOMEM;
1137f19412a2Saurel32             host_addr = MAP_FAILED;
113868a1c816SPaul Brook         } else {
11393e8f1628SRichard Henderson             host_addr = mremap(g2h_untagged(old_addr), old_size, new_size,
11403e8f1628SRichard Henderson                                flags | MREMAP_FIXED,
11413e8f1628SRichard Henderson                                g2h_untagged(mmap_start));
1142b76f21a7SLaurent Vivier             if (reserved_va) {
1143558a4411SRichard Henderson                 mmap_reserve_or_unmap(old_addr, old_size);
114468a1c816SPaul Brook             }
1145c65ffe6dSamateur         }
11463af72a4dSblueswir1     } else {
1147ea800033SLaurent Vivier         int page_flags = 0;
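        /*
         * With reserved_va, growing in place is only possible if the
         * guest pages beyond the old end of the mapping are unused;
         * collect their flags and fail with ENOMEM otherwise.
         */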
1148b76f21a7SLaurent Vivier         if (reserved_va && old_size < new_size) {
114968a1c816SPaul Brook             abi_ulong addr;
115068a1c816SPaul Brook             for (addr = old_addr + old_size;
115168a1c816SPaul Brook                  addr < old_addr + new_size;
115268a1c816SPaul Brook                  addr++) {
1153ea800033SLaurent Vivier                 page_flags |= page_get_flags(addr);
115468a1c816SPaul Brook             }
115568a1c816SPaul Brook         }
1156ea800033SLaurent Vivier         if (page_flags == 0) {
11573e8f1628SRichard Henderson             host_addr = mremap(g2h_untagged(old_addr),
11583e8f1628SRichard Henderson                                old_size, new_size, flags);
115956d19084STobias Koch 
116056d19084STobias Koch             if (host_addr != MAP_FAILED) {
116156d19084STobias Koch                 /* Check if address fits target address space */
116246b12f46SRichard Henderson                 if (!guest_range_valid_untagged(h2g(host_addr), new_size)) {
116356d19084STobias Koch                     /* Revert mremap() changes */
11643e8f1628SRichard Henderson                     host_addr = mremap(g2h_untagged(old_addr),
11653e8f1628SRichard Henderson                                        new_size, old_size, flags);
116668a1c816SPaul Brook                     errno = ENOMEM;
116768a1c816SPaul Brook                     host_addr = MAP_FAILED;
116856d19084STobias Koch                 } else if (reserved_va && old_size > new_size) {
1169558a4411SRichard Henderson                     mmap_reserve_or_unmap(old_addr + old_size,
1170558a4411SRichard Henderson                                           old_size - new_size);
117168a1c816SPaul Brook                 }
117256d19084STobias Koch             }
117356d19084STobias Koch         } else {
1174f19412a2Saurel32             errno = ENOMEM;
1175f19412a2Saurel32             host_addr = MAP_FAILED;
1176f19412a2Saurel32         }
1177f19412a2Saurel32     }
1178f19412a2Saurel32 
1179f19412a2Saurel32     if (host_addr == MAP_FAILED) {
1180c8a706feSpbrook         new_addr = -1;
1181c8a706feSpbrook     } else {
1182a5b85f79Sths         new_addr = h2g(host_addr);
118354936004Sbellard         prot = page_get_flags(old_addr);
118449840a4aSRichard Henderson         page_set_flags(old_addr, old_addr + old_size - 1, 0);
1185044e95c8SRichard Henderson         shm_region_rm_complete(old_addr, old_addr + old_size - 1);
118649840a4aSRichard Henderson         page_set_flags(new_addr, new_addr + new_size - 1,
1187d9c58585SRichard Henderson                        prot | PAGE_VALID | PAGE_RESET);
1188044e95c8SRichard Henderson         shm_region_rm_complete(new_addr, new_addr + new_size - 1);
1189c8a706feSpbrook     }
1190c8a706feSpbrook     mmap_unlock();
119154936004Sbellard     return new_addr;
119254936004Sbellard }
1193892a4f6aSIlya Leoshkevich 
1194892a4f6aSIlya Leoshkevich abi_long target_madvise(abi_ulong start, abi_ulong len_in, int advice)
1195892a4f6aSIlya Leoshkevich {
1196e230ec09SRichard Henderson     abi_ulong len;
1197892a4f6aSIlya Leoshkevich     int ret = 0;
1198892a4f6aSIlya Leoshkevich 
1199892a4f6aSIlya Leoshkevich     if (start & ~TARGET_PAGE_MASK) {
1200892a4f6aSIlya Leoshkevich         return -TARGET_EINVAL;
1201892a4f6aSIlya Leoshkevich     }
1202e230ec09SRichard Henderson     if (len_in == 0) {
1203892a4f6aSIlya Leoshkevich         return 0;
1204892a4f6aSIlya Leoshkevich     }
1205e230ec09SRichard Henderson     len = TARGET_PAGE_ALIGN(len_in);
1206e230ec09SRichard Henderson     if (len == 0 || !guest_range_valid_untagged(start, len)) {
1207892a4f6aSIlya Leoshkevich         return -TARGET_EINVAL;
1208892a4f6aSIlya Leoshkevich     }
1209892a4f6aSIlya Leoshkevich 
12104530deb1SHelge Deller     /* Translate for some architectures which have different MADV_xxx values */
12114530deb1SHelge Deller     switch (advice) {
12124530deb1SHelge Deller     case TARGET_MADV_DONTNEED:      /* alpha */
12134530deb1SHelge Deller         advice = MADV_DONTNEED;
12144530deb1SHelge Deller         break;
12154530deb1SHelge Deller     case TARGET_MADV_WIPEONFORK:    /* parisc */
12164530deb1SHelge Deller         advice = MADV_WIPEONFORK;
12174530deb1SHelge Deller         break;
12184530deb1SHelge Deller     case TARGET_MADV_KEEPONFORK:    /* parisc */
12194530deb1SHelge Deller         advice = MADV_KEEPONFORK;
12204530deb1SHelge Deller         break;
12214530deb1SHelge Deller     /* we do not care about the other MADV_xxx values yet */
12224530deb1SHelge Deller     }
12234530deb1SHelge Deller 
1224892a4f6aSIlya Leoshkevich     /*
12254530deb1SHelge Deller      * Most advice values are hints, so ignoring and returning success is ok.
1226892a4f6aSIlya Leoshkevich      *
12274530deb1SHelge Deller      * However, some advice values such as MADV_DONTNEED, MADV_WIPEONFORK and
12284530deb1SHelge Deller      * MADV_KEEPONFORK are not hints and need to be emulated.
1229892a4f6aSIlya Leoshkevich      *
12304530deb1SHelge Deller      * A straight passthrough for those may not be safe because qemu sometimes
12314530deb1SHelge Deller      * turns private file-backed mappings into anonymous mappings.
1232ecb796dbSRichard Henderson      * If all guest pages have PAGE_PASSTHROUGH set, mappings have the
1233ecb796dbSRichard Henderson      * same semantics for the host as for the guest.
12344530deb1SHelge Deller      *
12354530deb1SHelge Deller      * We pass through MADV_WIPEONFORK and MADV_KEEPONFORK if possible and
12364530deb1SHelge Deller      * return failure if not.
12374530deb1SHelge Deller      *
12384530deb1SHelge Deller      * MADV_DONTNEED is passed through as well, if possible.
12394530deb1SHelge Deller      * If passthrough isn't possible, we nevertheless (wrongly!) return
12404530deb1SHelge Deller      * success, which is broken but some userspace programs fail to work
12414530deb1SHelge Deller      * otherwise. Completely implementing such emulation is quite complicated
12424530deb1SHelge Deller      * though.
1243892a4f6aSIlya Leoshkevich      */
1244892a4f6aSIlya Leoshkevich     mmap_lock();
12454530deb1SHelge Deller     switch (advice) {
12464530deb1SHelge Deller     case MADV_WIPEONFORK:
12474530deb1SHelge Deller     case MADV_KEEPONFORK:
12484530deb1SHelge Deller         ret = -EINVAL;
12494530deb1SHelge Deller         /* fall through */
12504530deb1SHelge Deller     case MADV_DONTNEED:
1251ecb796dbSRichard Henderson         if (page_check_range(start, len, PAGE_PASSTHROUGH)) {
12524530deb1SHelge Deller             ret = get_errno(madvise(g2h_untagged(start), len, advice));
12534530deb1SHelge Deller             if ((advice == MADV_DONTNEED) && (ret == 0)) {
125410310cbdSRichard Henderson                 page_reset_target_data(start, start + len - 1);
1255dbbf8975SVitaly Buka             }
1256892a4f6aSIlya Leoshkevich         }
12574530deb1SHelge Deller     }
1258892a4f6aSIlya Leoshkevich     mmap_unlock();
1259892a4f6aSIlya Leoshkevich 
1260892a4f6aSIlya Leoshkevich     return ret;
1261892a4f6aSIlya Leoshkevich }
1262225a206cSRichard Henderson 
1263225a206cSRichard Henderson #ifndef TARGET_FORCE_SHMLBA
1264225a206cSRichard Henderson /*
1265225a206cSRichard Henderson  * For most architectures, SHMLBA is the same as the page size;
1266225a206cSRichard Henderson  * some architectures have larger values, in which case they should
1267225a206cSRichard Henderson  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
1268225a206cSRichard Henderson  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
1269225a206cSRichard Henderson  * and defining its own value for SHMLBA.
1270225a206cSRichard Henderson  *
1271225a206cSRichard Henderson  * The kernel also permits SHMLBA to be set by the architecture to a
1272225a206cSRichard Henderson  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
1273225a206cSRichard Henderson  * this means that addresses are rounded to the large size if
1274225a206cSRichard Henderson  * SHM_RND is set but addresses not aligned to that size are not rejected
1275225a206cSRichard Henderson  * as long as they are at least page-aligned. Since the only architecture
1276225a206cSRichard Henderson  * which uses this is ia64, this code doesn't provide for that oddity.
1277225a206cSRichard Henderson  */
1278225a206cSRichard Henderson static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
1279225a206cSRichard Henderson {
1280225a206cSRichard Henderson     return TARGET_PAGE_SIZE;
1281225a206cSRichard Henderson }
1282225a206cSRichard Henderson #endif
1283225a206cSRichard Henderson 
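/*
 * On these hosts shmat() enforces SHMLBA alignment (SHMLBA is larger than
 * the page size, typically because of cache aliasing); on other hosts
 * page alignment is sufficient.
 */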
128478bc8ed9SRichard Henderson #if defined(__arm__) || defined(__mips__) || defined(__sparc__)
128578bc8ed9SRichard Henderson #define HOST_FORCE_SHMLBA 1
128678bc8ed9SRichard Henderson #else
128778bc8ed9SRichard Henderson #define HOST_FORCE_SHMLBA 0
128878bc8ed9SRichard Henderson #endif
128978bc8ed9SRichard Henderson 
1290225a206cSRichard Henderson abi_ulong target_shmat(CPUArchState *cpu_env, int shmid,
1291225a206cSRichard Henderson                        abi_ulong shmaddr, int shmflg)
1292225a206cSRichard Henderson {
1293225a206cSRichard Henderson     CPUState *cpu = env_cpu(cpu_env);
1294225a206cSRichard Henderson     struct shmid_ds shm_info;
129569fa2708SRichard Henderson     int ret;
129678bc8ed9SRichard Henderson     int h_pagesize;
129778bc8ed9SRichard Henderson     int t_shmlba, h_shmlba, m_shmlba;
129878bc8ed9SRichard Henderson     size_t t_len, h_len, m_len;
1299225a206cSRichard Henderson 
1300225a206cSRichard Henderson     /* shmat pointers are always untagged */
1301225a206cSRichard Henderson 
130278bc8ed9SRichard Henderson     /*
130378bc8ed9SRichard Henderson      * Because we can't use host shmat() unless the address is sufficiently
130478bc8ed9SRichard Henderson      * aligned for the host, we'll need to check both.
130578bc8ed9SRichard Henderson      * TODO: Could be fixed with softmmu.
130678bc8ed9SRichard Henderson      */
130778bc8ed9SRichard Henderson     t_shmlba = target_shmlba(cpu_env);
130878bc8ed9SRichard Henderson     h_pagesize = qemu_real_host_page_size();
130978bc8ed9SRichard Henderson     h_shmlba = (HOST_FORCE_SHMLBA ? SHMLBA : h_pagesize);
131078bc8ed9SRichard Henderson     m_shmlba = MAX(t_shmlba, h_shmlba);
131178bc8ed9SRichard Henderson 
131278bc8ed9SRichard Henderson     if (shmaddr) {
131378bc8ed9SRichard Henderson         if (shmaddr & (m_shmlba - 1)) {
131478bc8ed9SRichard Henderson             if (shmflg & SHM_RND) {
131578bc8ed9SRichard Henderson                 /*
131678bc8ed9SRichard Henderson                  * The guest is allowing the kernel to round the address.
131778bc8ed9SRichard Henderson                  * Assume that the guest is ok with us rounding to the
131878bc8ed9SRichard Henderson                  * host required alignment too.  In any case, if we don't, we'll
131978bc8ed9SRichard Henderson                  * get an error from the kernel.
132078bc8ed9SRichard Henderson                  */
132178bc8ed9SRichard Henderson                 shmaddr &= ~(m_shmlba - 1);
132278bc8ed9SRichard Henderson                 if (shmaddr == 0 && (shmflg & SHM_REMAP)) {
132378bc8ed9SRichard Henderson                     return -TARGET_EINVAL;
132478bc8ed9SRichard Henderson                 }
132578bc8ed9SRichard Henderson             } else {
132678bc8ed9SRichard Henderson                 int require = TARGET_PAGE_SIZE;
132778bc8ed9SRichard Henderson #ifdef TARGET_FORCE_SHMLBA
132878bc8ed9SRichard Henderson                 require = t_shmlba;
132978bc8ed9SRichard Henderson #endif
133078bc8ed9SRichard Henderson                 /*
133178bc8ed9SRichard Henderson                  * Include host required alignment, as otherwise we cannot
133278bc8ed9SRichard Henderson                  * use host shmat at all.
133378bc8ed9SRichard Henderson                  */
133478bc8ed9SRichard Henderson                 require = MAX(require, h_shmlba);
133578bc8ed9SRichard Henderson                 if (shmaddr & (require - 1)) {
133678bc8ed9SRichard Henderson                     return -TARGET_EINVAL;
133778bc8ed9SRichard Henderson                 }
133878bc8ed9SRichard Henderson             }
133978bc8ed9SRichard Henderson         }
134078bc8ed9SRichard Henderson     } else {
134178bc8ed9SRichard Henderson         if (shmflg & SHM_REMAP) {
134278bc8ed9SRichard Henderson             return -TARGET_EINVAL;
134378bc8ed9SRichard Henderson         }
134478bc8ed9SRichard Henderson     }
134578bc8ed9SRichard Henderson     /* All rounding has now been handled manually. */
134678bc8ed9SRichard Henderson     shmflg &= ~SHM_RND;
134778bc8ed9SRichard Henderson 
134878bc8ed9SRichard Henderson     /* Find out the length of the shared memory segment. */
1349225a206cSRichard Henderson     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
1350225a206cSRichard Henderson     if (is_error(ret)) {
1351225a206cSRichard Henderson         /* can't get length, bail out */
1352225a206cSRichard Henderson         return ret;
1353225a206cSRichard Henderson     }
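    /*
     * The segment must be mapped in units of both the guest and the host
     * page size; m_len is the larger of the two rounded lengths.
     */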
135478bc8ed9SRichard Henderson     t_len = TARGET_PAGE_ALIGN(shm_info.shm_segsz);
135578bc8ed9SRichard Henderson     h_len = ROUND_UP(shm_info.shm_segsz, h_pagesize);
135678bc8ed9SRichard Henderson     m_len = MAX(t_len, h_len);
1357225a206cSRichard Henderson 
135878bc8ed9SRichard Henderson     if (!guest_range_valid_untagged(shmaddr, m_len)) {
1359225a206cSRichard Henderson         return -TARGET_EINVAL;
1360225a206cSRichard Henderson     }
1361225a206cSRichard Henderson 
136269fa2708SRichard Henderson     WITH_MMAP_LOCK_GUARD() {
136378bc8ed9SRichard Henderson         bool mapped = false;
136478bc8ed9SRichard Henderson         void *want, *test;
1365044e95c8SRichard Henderson         abi_ulong last;
136669fa2708SRichard Henderson 
136778bc8ed9SRichard Henderson         if (!shmaddr) {
136878bc8ed9SRichard Henderson             shmaddr = mmap_find_vma(0, m_len, m_shmlba);
136978bc8ed9SRichard Henderson             if (shmaddr == -1) {
137069fa2708SRichard Henderson                 return -TARGET_ENOMEM;
137169fa2708SRichard Henderson             }
137278bc8ed9SRichard Henderson             mapped = !reserved_va;
137378bc8ed9SRichard Henderson         } else if (shmflg & SHM_REMAP) {
137478bc8ed9SRichard Henderson             /*
137578bc8ed9SRichard Henderson              * If host page size > target page size, the host shmat may map
137678bc8ed9SRichard Henderson              * more memory than the guest expects.  Reject a mapping that
137778bc8ed9SRichard Henderson              * would replace memory in the unexpected gap.
137878bc8ed9SRichard Henderson              * TODO: Could be fixed with softmmu.
137978bc8ed9SRichard Henderson              */
138078bc8ed9SRichard Henderson             if (t_len < h_len &&
138178bc8ed9SRichard Henderson                 !page_check_range_empty(shmaddr + t_len,
138278bc8ed9SRichard Henderson                                         shmaddr + h_len - 1)) {
138378bc8ed9SRichard Henderson                 return -TARGET_EINVAL;
138478bc8ed9SRichard Henderson             }
138578bc8ed9SRichard Henderson         } else {
138678bc8ed9SRichard Henderson             if (!page_check_range_empty(shmaddr, shmaddr + m_len - 1)) {
138778bc8ed9SRichard Henderson                 return -TARGET_EINVAL;
138878bc8ed9SRichard Henderson             }
138969fa2708SRichard Henderson         }
139069fa2708SRichard Henderson 
139178bc8ed9SRichard Henderson         /* All placement is now complete. */
139278bc8ed9SRichard Henderson         want = (void *)g2h_untagged(shmaddr);
139369fa2708SRichard Henderson 
139478bc8ed9SRichard Henderson         /*
139578bc8ed9SRichard Henderson          * Map anonymous pages across the entire range, then remap with
139678bc8ed9SRichard Henderson          * the shared memory.  This is required for a number of corner
139778bc8ed9SRichard Henderson          * cases for which host and guest page sizes differ.
139878bc8ed9SRichard Henderson          */
139978bc8ed9SRichard Henderson         if (h_len != t_len) {
140078bc8ed9SRichard Henderson             int mmap_p = PROT_READ | (shmflg & SHM_RDONLY ? 0 : PROT_WRITE);
140178bc8ed9SRichard Henderson             int mmap_f = MAP_PRIVATE | MAP_ANONYMOUS
1402fa527b44SIlya Leoshkevich                        | (reserved_va || mapped || (shmflg & SHM_REMAP)
140378bc8ed9SRichard Henderson                           ? MAP_FIXED : MAP_FIXED_NOREPLACE);
140478bc8ed9SRichard Henderson 
140578bc8ed9SRichard Henderson             test = mmap(want, m_len, mmap_p, mmap_f, -1, 0);
140678bc8ed9SRichard Henderson             if (unlikely(test != want)) {
140778bc8ed9SRichard Henderson                 /* shmat returns EINVAL, not EEXIST as mmap does. */
140878bc8ed9SRichard Henderson                 ret = (test == MAP_FAILED && errno != EEXIST
140978bc8ed9SRichard Henderson                        ? get_errno(-1) : -TARGET_EINVAL);
141078bc8ed9SRichard Henderson                 if (mapped) {
141178bc8ed9SRichard Henderson                     do_munmap(want, m_len);
141278bc8ed9SRichard Henderson                 }
141378bc8ed9SRichard Henderson                 return ret;
141478bc8ed9SRichard Henderson             }
141578bc8ed9SRichard Henderson             mapped = true;
141678bc8ed9SRichard Henderson         }
141778bc8ed9SRichard Henderson 
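        /*
         * Either the reserved_va reservation or the anonymous mapping
         * just created already occupies the range, so the host shmat()
         * must be told to replace it.
         */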
141878bc8ed9SRichard Henderson         if (reserved_va || mapped) {
141978bc8ed9SRichard Henderson             shmflg |= SHM_REMAP;
142078bc8ed9SRichard Henderson         }
142178bc8ed9SRichard Henderson         test = shmat(shmid, want, shmflg);
142278bc8ed9SRichard Henderson         if (test == MAP_FAILED) {
142378bc8ed9SRichard Henderson             ret = get_errno(-1);
142478bc8ed9SRichard Henderson             if (mapped) {
142578bc8ed9SRichard Henderson                 do_munmap(want, m_len);
142678bc8ed9SRichard Henderson             }
142778bc8ed9SRichard Henderson             return ret;
142878bc8ed9SRichard Henderson         }
142978bc8ed9SRichard Henderson         assert(test == want);
143078bc8ed9SRichard Henderson 
143178bc8ed9SRichard Henderson         last = shmaddr + m_len - 1;
143278bc8ed9SRichard Henderson         page_set_flags(shmaddr, last,
143369fa2708SRichard Henderson                        PAGE_VALID | PAGE_RESET | PAGE_READ |
143478bc8ed9SRichard Henderson                        (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE) |
143578bc8ed9SRichard Henderson                        (shmflg & SHM_EXEC ? PAGE_EXEC : 0));
143669fa2708SRichard Henderson 
143778bc8ed9SRichard Henderson         shm_region_rm_complete(shmaddr, last);
143878bc8ed9SRichard Henderson         shm_region_add(shmaddr, last);
143969fa2708SRichard Henderson     }
1440225a206cSRichard Henderson 
1441225a206cSRichard Henderson     /*
1442225a206cSRichard Henderson      * We're mapping shared memory, so ensure we generate code for parallel
1443225a206cSRichard Henderson      * execution and flush old translations.  This will work up to the level
1444225a206cSRichard Henderson      * supported by the host -- anything that requires EXCP_ATOMIC will not
1445225a206cSRichard Henderson      * be atomic with respect to an external process.
1446225a206cSRichard Henderson      */
1447b254c342SPhilippe Mathieu-Daudé     if (!tcg_cflags_has(cpu, CF_PARALLEL)) {
1448b254c342SPhilippe Mathieu-Daudé         tcg_cflags_set(cpu, CF_PARALLEL);
1449225a206cSRichard Henderson         tb_flush(cpu);
1450225a206cSRichard Henderson     }
1451225a206cSRichard Henderson 
145278bc8ed9SRichard Henderson     if (qemu_loglevel_mask(CPU_LOG_PAGE)) {
145378bc8ed9SRichard Henderson         FILE *f = qemu_log_trylock();
145478bc8ed9SRichard Henderson         if (f) {
145578bc8ed9SRichard Henderson             fprintf(f, "page layout changed following shmat\n");
145678bc8ed9SRichard Henderson             page_dump(f);
145778bc8ed9SRichard Henderson             qemu_log_unlock(f);
145878bc8ed9SRichard Henderson         }
145978bc8ed9SRichard Henderson     }
146078bc8ed9SRichard Henderson     return shmaddr;
1461225a206cSRichard Henderson }
1462225a206cSRichard Henderson 
1463225a206cSRichard Henderson abi_long target_shmdt(abi_ulong shmaddr)
1464225a206cSRichard Henderson {
1465225a206cSRichard Henderson     abi_long rv;
1466225a206cSRichard Henderson 
1467225a206cSRichard Henderson     /* shmdt pointers are always untagged */
1468225a206cSRichard Henderson 
146969fa2708SRichard Henderson     WITH_MMAP_LOCK_GUARD() {
1470044e95c8SRichard Henderson         abi_ulong last = shm_region_find(shmaddr);
1471044e95c8SRichard Henderson         if (last == 0) {
1472ceda5688SRichard Henderson             return -TARGET_EINVAL;
1473ceda5688SRichard Henderson         }
1474ceda5688SRichard Henderson 
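        /*
         * Detach on the host, then clear the guest page flags for the
         * extent recorded at shmat time and re-reserve (or unmap) it.
         */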
1475225a206cSRichard Henderson         rv = get_errno(shmdt(g2h_untagged(shmaddr)));
1476ceda5688SRichard Henderson         if (rv == 0) {
1477044e95c8SRichard Henderson             abi_ulong size = last - shmaddr + 1;
1478ceda5688SRichard Henderson 
1479044e95c8SRichard Henderson             page_set_flags(shmaddr, last, 0);
1480044e95c8SRichard Henderson             shm_region_rm_complete(shmaddr, last);
1481ceda5688SRichard Henderson             mmap_reserve_or_unmap(shmaddr, size);
1482ceda5688SRichard Henderson         }
148369fa2708SRichard Henderson     }
1484225a206cSRichard Henderson     return rv;
1485225a206cSRichard Henderson }
1486